From d4939d51d5a33ad88910d54a7f50e3bb310c473d Mon Sep 17 00:00:00 2001 From: Trajan0x Date: Sat, 8 Jun 2024 14:38:14 -0400 Subject: [PATCH 1/7] terraform move first pass --- contrib/release-copier-action/.goreleaser.yml | 78 - contrib/release-copier-action/Makefile | 1 - contrib/release-copier-action/README.md | 36 - .../release-copier-action/copier/copier.go | 210 - .../release-copier-action/copier/copy_test.go | 60 - contrib/release-copier-action/copier/doc.go | 3 - .../copier/export_test.go | 28 - .../copier/suite_test.go | 25 - contrib/release-copier-action/go.mod | 48 - contrib/release-copier-action/go.sum | 228 - contrib/release-copier-action/main.go | 34 - contrib/release-copier-action/util/parse.go | 13 - .../release-copier-action/util/parse_test.go | 14 - .../terraform-provider-helmproxy/.gitignore | 2 - .../.goreleaser.yml | 75 - contrib/terraform-provider-helmproxy/Makefile | 29 - .../examples/google.tf | 13 - .../examples/main.tf | 30 - .../examples/variables.tf | 47 - contrib/terraform-provider-helmproxy/go.mod | 236 - contrib/terraform-provider-helmproxy/go.sum | 2042 - contrib/terraform-provider-helmproxy/main.go | 17 - .../provider/provider.go | 89 - .../provider/provider_test.go | 14 - .../terraform-provider-helmproxy/readme.md | 22 - .../scripts/add-tfmac.sh | 7 - .../scripts/build-tf.sh | 17 - .../terraform-registry-manifest.json | 6 - contrib/terraform-provider-iap/.gitignore | 2 - .../terraform-provider-iap/.goreleaser.yml | 74 - contrib/terraform-provider-iap/Makefile | 29 - contrib/terraform-provider-iap/README.md | 21 - contrib/terraform-provider-iap/assets/img.png | Bin 22482 -> 0 bytes .../docs/data-sources/tunnel_proxy.md | 32 - contrib/terraform-provider-iap/docs/index.md | 119 - .../terraform-provider-iap/examples/google.tf | 13 - .../terraform-provider-iap/examples/main.tf | 54 - .../examples/variables.tf | 46 - contrib/terraform-provider-iap/go.mod | 118 - contrib/terraform-provider-iap/go.sum | 1568 - 
contrib/terraform-provider-iap/main.go | 17 - .../provider/iap-tunnel.go | 183 - .../provider/keep_alive.go | 96 - .../provider/provider.go | 23 - .../provider/provider_test.go | 14 - .../provider/validate.go | 18 - .../scripts/add-tfmac.sh | 7 - .../scripts/build-tf.sh | 17 - .../terraform-registry-manifest.json | 6 - .../terraform-provider-kubeproxy/.gitignore | 1 - .../.goreleaser.yml | 74 - contrib/terraform-provider-kubeproxy/Makefile | 29 - .../terraform-provider-kubeproxy/README.md | 30 - .../examples/.terraform.lock.hcl | 29 - .../examples/google.tf | 13 - .../examples/main.tf | 50 - .../examples/variables.tf | 47 - .../configschema/configschema_gen.go | 411 - .../generated/configschema/generate.go | 4 - .../generated/convert/convert_gen.go | 444 - .../generated/convert/generate.go | 4 - .../generated/logging/generate.go | 4 - .../generated/logging/logging_gen.go | 146 - .../generated/manifest/generate.go | 12 - .../generated/manifest/helpers.go | 8 - .../generated/manifest/manifest_gen.go | 4072 - contrib/terraform-provider-kubeproxy/go.mod | 193 - contrib/terraform-provider-kubeproxy/go.sum | 1935 - contrib/terraform-provider-kubeproxy/main.go | 97 - .../provider/doc.go | 2 - .../provider/export_test.go | 23 - .../provider/main_provider.go | 87 - .../provider/manifest_provider.go | 262 - .../provider/provider_test.go | 30 - .../provider/resource.go | 65 - .../provider/validate.go | 69 - .../provider/validate_test.go | 93 - .../scripts/add-tfmac.sh | 7 - .../scripts/build-tf.sh | 17 - .../terraform-registry-manifest.json | 6 - contrib/tfcore/.goreleaser.yml | 45 - contrib/tfcore/Makefile | 1 - contrib/tfcore/README.md | 6 - contrib/tfcore/generated/google/doc.go | 2 - contrib/tfcore/generated/google/generate.go | 12 - contrib/tfcore/generated/google/getters.go | 15 - contrib/tfcore/generated/google/google_gen.go | 255823 --------------- contrib/tfcore/generated/tunnel/doc.go | 2 - contrib/tfcore/generated/tunnel/generate.go | 7 - 
.../tfcore/generated/tunnel/tokensource.go | 8 - contrib/tfcore/generated/tunnel/tunnel_gen.go | 238 - .../generated/tunnel/tunnel_gen_test.go | 63 - contrib/tfcore/go.mod | 120 - contrib/tfcore/go.sum | 1376 - contrib/tfcore/utils/combine_schemas.go | 107 - contrib/tfcore/utils/combined_schemas_test.go | 120 - contrib/tfcore/utils/doc.go | 2 - contrib/tfcore/utils/tunnel.go | 104 - contrib/tfcore/utils/utils.go | 13 - contrib/tfcore/utils/utils_test.go | 33 - contrib/tfcore/utils/wrapper.go | 215 - contrib/tfcore/utils/wrapper_test.go | 54 - tools/modulecopier/README.md | 3 - tools/modulecopier/cmd/doc.go | 8 - tools/modulecopier/cmd/flags.go | 21 - tools/modulecopier/cmd/main.go | 64 - tools/modulecopier/internal/copy.go | 183 - tools/modulecopier/internal/copy_test.go | 73 - tools/modulecopier/internal/doc.go | 2 - tools/modulecopier/internal/module.go | 143 - tools/modulecopier/internal/module_test.go | 28 - tools/modulecopier/internal/suite_test.go | 23 - tools/modulecopier/main.go | 20 - 113 files changed, 273079 deletions(-) delete mode 100644 contrib/release-copier-action/.goreleaser.yml delete mode 120000 contrib/release-copier-action/Makefile delete mode 100644 contrib/release-copier-action/README.md delete mode 100644 contrib/release-copier-action/copier/copier.go delete mode 100644 contrib/release-copier-action/copier/copy_test.go delete mode 100644 contrib/release-copier-action/copier/doc.go delete mode 100644 contrib/release-copier-action/copier/export_test.go delete mode 100644 contrib/release-copier-action/copier/suite_test.go delete mode 100644 contrib/release-copier-action/go.mod delete mode 100644 contrib/release-copier-action/go.sum delete mode 100644 contrib/release-copier-action/main.go delete mode 100644 contrib/release-copier-action/util/parse.go delete mode 100644 contrib/release-copier-action/util/parse_test.go delete mode 100644 contrib/terraform-provider-helmproxy/.gitignore delete mode 100644 
contrib/terraform-provider-helmproxy/.goreleaser.yml delete mode 100644 contrib/terraform-provider-helmproxy/Makefile delete mode 100644 contrib/terraform-provider-helmproxy/examples/google.tf delete mode 100644 contrib/terraform-provider-helmproxy/examples/main.tf delete mode 100644 contrib/terraform-provider-helmproxy/examples/variables.tf delete mode 100644 contrib/terraform-provider-helmproxy/go.mod delete mode 100644 contrib/terraform-provider-helmproxy/go.sum delete mode 100644 contrib/terraform-provider-helmproxy/main.go delete mode 100644 contrib/terraform-provider-helmproxy/provider/provider.go delete mode 100644 contrib/terraform-provider-helmproxy/provider/provider_test.go delete mode 100644 contrib/terraform-provider-helmproxy/readme.md delete mode 100755 contrib/terraform-provider-helmproxy/scripts/add-tfmac.sh delete mode 100755 contrib/terraform-provider-helmproxy/scripts/build-tf.sh delete mode 100644 contrib/terraform-provider-helmproxy/terraform-registry-manifest.json delete mode 100644 contrib/terraform-provider-iap/.gitignore delete mode 100644 contrib/terraform-provider-iap/.goreleaser.yml delete mode 100644 contrib/terraform-provider-iap/Makefile delete mode 100644 contrib/terraform-provider-iap/README.md delete mode 100644 contrib/terraform-provider-iap/assets/img.png delete mode 100644 contrib/terraform-provider-iap/docs/data-sources/tunnel_proxy.md delete mode 100644 contrib/terraform-provider-iap/docs/index.md delete mode 100644 contrib/terraform-provider-iap/examples/google.tf delete mode 100644 contrib/terraform-provider-iap/examples/main.tf delete mode 100644 contrib/terraform-provider-iap/examples/variables.tf delete mode 100644 contrib/terraform-provider-iap/go.mod delete mode 100644 contrib/terraform-provider-iap/go.sum delete mode 100644 contrib/terraform-provider-iap/main.go delete mode 100644 contrib/terraform-provider-iap/provider/iap-tunnel.go delete mode 100644 contrib/terraform-provider-iap/provider/keep_alive.go delete mode 
100644 contrib/terraform-provider-iap/provider/provider.go delete mode 100644 contrib/terraform-provider-iap/provider/provider_test.go delete mode 100644 contrib/terraform-provider-iap/provider/validate.go delete mode 100755 contrib/terraform-provider-iap/scripts/add-tfmac.sh delete mode 100755 contrib/terraform-provider-iap/scripts/build-tf.sh delete mode 100644 contrib/terraform-provider-iap/terraform-registry-manifest.json delete mode 100644 contrib/terraform-provider-kubeproxy/.gitignore delete mode 100644 contrib/terraform-provider-kubeproxy/.goreleaser.yml delete mode 100644 contrib/terraform-provider-kubeproxy/Makefile delete mode 100644 contrib/terraform-provider-kubeproxy/README.md delete mode 100644 contrib/terraform-provider-kubeproxy/examples/.terraform.lock.hcl delete mode 100644 contrib/terraform-provider-kubeproxy/examples/google.tf delete mode 100644 contrib/terraform-provider-kubeproxy/examples/main.tf delete mode 100644 contrib/terraform-provider-kubeproxy/examples/variables.tf delete mode 100644 contrib/terraform-provider-kubeproxy/generated/configschema/configschema_gen.go delete mode 100644 contrib/terraform-provider-kubeproxy/generated/configschema/generate.go delete mode 100644 contrib/terraform-provider-kubeproxy/generated/convert/convert_gen.go delete mode 100644 contrib/terraform-provider-kubeproxy/generated/convert/generate.go delete mode 100644 contrib/terraform-provider-kubeproxy/generated/logging/generate.go delete mode 100644 contrib/terraform-provider-kubeproxy/generated/logging/logging_gen.go delete mode 100644 contrib/terraform-provider-kubeproxy/generated/manifest/generate.go delete mode 100644 contrib/terraform-provider-kubeproxy/generated/manifest/helpers.go delete mode 100644 contrib/terraform-provider-kubeproxy/generated/manifest/manifest_gen.go delete mode 100644 contrib/terraform-provider-kubeproxy/go.mod delete mode 100644 contrib/terraform-provider-kubeproxy/go.sum delete mode 100644 
contrib/terraform-provider-kubeproxy/main.go delete mode 100644 contrib/terraform-provider-kubeproxy/provider/doc.go delete mode 100644 contrib/terraform-provider-kubeproxy/provider/export_test.go delete mode 100644 contrib/terraform-provider-kubeproxy/provider/main_provider.go delete mode 100644 contrib/terraform-provider-kubeproxy/provider/manifest_provider.go delete mode 100644 contrib/terraform-provider-kubeproxy/provider/provider_test.go delete mode 100644 contrib/terraform-provider-kubeproxy/provider/resource.go delete mode 100644 contrib/terraform-provider-kubeproxy/provider/validate.go delete mode 100644 contrib/terraform-provider-kubeproxy/provider/validate_test.go delete mode 100755 contrib/terraform-provider-kubeproxy/scripts/add-tfmac.sh delete mode 100755 contrib/terraform-provider-kubeproxy/scripts/build-tf.sh delete mode 100644 contrib/terraform-provider-kubeproxy/terraform-registry-manifest.json delete mode 100644 contrib/tfcore/.goreleaser.yml delete mode 120000 contrib/tfcore/Makefile delete mode 100644 contrib/tfcore/README.md delete mode 100644 contrib/tfcore/generated/google/doc.go delete mode 100644 contrib/tfcore/generated/google/generate.go delete mode 100644 contrib/tfcore/generated/google/getters.go delete mode 100644 contrib/tfcore/generated/google/google_gen.go delete mode 100644 contrib/tfcore/generated/tunnel/doc.go delete mode 100644 contrib/tfcore/generated/tunnel/generate.go delete mode 100644 contrib/tfcore/generated/tunnel/tokensource.go delete mode 100644 contrib/tfcore/generated/tunnel/tunnel_gen.go delete mode 100644 contrib/tfcore/generated/tunnel/tunnel_gen_test.go delete mode 100644 contrib/tfcore/go.mod delete mode 100644 contrib/tfcore/go.sum delete mode 100644 contrib/tfcore/utils/combine_schemas.go delete mode 100644 contrib/tfcore/utils/combined_schemas_test.go delete mode 100644 contrib/tfcore/utils/doc.go delete mode 100644 contrib/tfcore/utils/tunnel.go delete mode 100644 contrib/tfcore/utils/utils.go delete mode 
100644 contrib/tfcore/utils/utils_test.go delete mode 100644 contrib/tfcore/utils/wrapper.go delete mode 100644 contrib/tfcore/utils/wrapper_test.go delete mode 100644 tools/modulecopier/README.md delete mode 100644 tools/modulecopier/cmd/doc.go delete mode 100644 tools/modulecopier/cmd/flags.go delete mode 100644 tools/modulecopier/cmd/main.go delete mode 100644 tools/modulecopier/internal/copy.go delete mode 100644 tools/modulecopier/internal/copy_test.go delete mode 100644 tools/modulecopier/internal/doc.go delete mode 100644 tools/modulecopier/internal/module.go delete mode 100644 tools/modulecopier/internal/module_test.go delete mode 100644 tools/modulecopier/internal/suite_test.go delete mode 100644 tools/modulecopier/main.go diff --git a/contrib/release-copier-action/.goreleaser.yml b/contrib/release-copier-action/.goreleaser.yml deleted file mode 100644 index 755e3085a6..0000000000 --- a/contrib/release-copier-action/.goreleaser.yml +++ /dev/null @@ -1,78 +0,0 @@ -project_name: release-copier-action - -monorepo: - tag_prefix: contrib/release-copier-action/ - dir: contrib/release-copier-action/ - -builds: - # Linux AMD64 - - id: release-copier-action - binary: release-copier-action - ldflags: -installsuffix static - env: - - CGO_ENABLED=0 - main: main.go - flags: - - -trimpath - tags: - - netgo - - osusergo - goos: - - linux - goarch: - - amd64 - -# add a source archive at release time -source: - enabled: true - -# Archives -archives: - - format: tar.gz - wrap_in_directory: true - format_overrides: - - goos: windows - format: zip - name_template: '{{.ProjectName}}-{{.Version}}_{{.Os}}_{{.Arch}}' - files: - - README.md - -checksum: - name_template: checksums.txt - -# Add a changelog -changelog: - sort: asc - -dockers: - # Docker AMD64 - - goos: linux - goarch: amd64 - image_templates: - - 'ghcr.io/synapsecns/sanguine/release-copier-action:latest' - - 'ghcr.io/synapsecns/sanguine/release-copier-action:{{ .FullCommit }}' - - 
'ghcr.io/synapsecns/sanguine/release-copier-action:{{ .Tag }}' - build_flag_templates: - - '--label=org.opencontainers.image.created={{.Date}}' - - '--label=org.opencontainers.image.name={{.ProjectName}}' - - '--label=org.opencontainers.image.revision={{.FullCommit}}' - - '--label=org.opencontainers.image.version={{.Version}}' - - '--label=org.opencontainers.image.source={{.GitURL}}' - dockerfile: ../../docker/release-copier-action.Dockerfile - ids: - - release-copier-action - -# track sizes -report_sizes: true - -# modified timestamps -metadata: - # Set the modified timestamp on the metadata files. - # - # Templates: allowed. - mod_timestamp: '{{ .CommitTimestamp }}' - -# produce software bill of lading -sboms: - - artifacts: archive - diff --git a/contrib/release-copier-action/Makefile b/contrib/release-copier-action/Makefile deleted file mode 120000 index 15e4536f4b..0000000000 --- a/contrib/release-copier-action/Makefile +++ /dev/null @@ -1 +0,0 @@ -../../make/go.Makefile \ No newline at end of file diff --git a/contrib/release-copier-action/README.md b/contrib/release-copier-action/README.md deleted file mode 100644 index e13f692ca3..0000000000 --- a/contrib/release-copier-action/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# Release Copier - -[![Go Reference](https://pkg.go.dev/badge/github.com/synapsecns/sanguine/contrib/release-copier-action.svg)](https://pkg.go.dev/github.com/synapsecns/sanguine/contrib/release-copier-action) -[![Go Report Card](https://goreportcard.com/badge/github.com/synapsecns/sanguine/contrib/release-copier-action)](https://goreportcard.com/report/github.com/synapsecns/sanguine/contrib/release-copier-action) - -This is a tool to help with the release process. It copies the release from a tag to a new repository. This is used for terraform releases since terraform requires a separate repository for each provider in a specific format. It should be able to be used for any other release you want to copy. 
- -## Usage - - -```yaml - - name: Bump version and push tag - id: tag_version - if: steps.branch-name.outputs.is_default == 'true' - uses: mathieudutour/github-tag-action@v6.0 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - tag_prefix: my-package-prefix/v - release_branches: master - fetch_all_tags: true - - name: Copy Releases - uses: docker://ghcr.io/synapsecns/sanguine/release-copier-action:latest - with: - # this token must have access to both the original repository and the new repository so GITHUB_TOKEN will not work - github_token: ${{ secrets.PUBLISH_TOKEN }} - # destination repo - destination_repo: 'my-destination-repo/destination-package' - # you can take this from anywhere - tag_name: ${{ steps.tag_version.outputs.new_tag }} - # we strip away anything package relative here - strip_prefix: 'my-package-prefix/' -``` - -## A note on actions - -This action is currently not published to the marketplace, partially because the [requirements](https://docs.github.com/en/actions/creating-actions/publishing-actions-in-github-marketplace#about-publishing-actions) require that each repository contain a single action and the action.yml must be in the root directory. We can get around this with a subdirectory copier in a future version. diff --git a/contrib/release-copier-action/copier/copier.go b/contrib/release-copier-action/copier/copier.go deleted file mode 100644 index bcdd3315a0..0000000000 --- a/contrib/release-copier-action/copier/copier.go +++ /dev/null @@ -1,210 +0,0 @@ -package copier - -import ( - "context" - "errors" - "fmt" - "github.com/google/go-github/v41/github" - "golang.org/x/oauth2" - "io" - "net/http" - "os" - "strings" - "sync" -) - -// ReleaseCopier contains the release copier client. -type ReleaseCopier struct { - client *github.Client - sourceOwner, sourceRepo, targetOwner, targetRepo string - // mux ensures only one copy can be made at a time - mux sync.Mutex -} - -// NewReleaseCopier creates a new release copier client. 
-func NewReleaseCopier(ctx context.Context, token string) *ReleaseCopier { - ts := oauth2.StaticTokenSource( - &oauth2.Token{AccessToken: token}) - - tc := oauth2.NewClient(ctx, ts) - - return &ReleaseCopier{ - client: github.NewClient(tc), - } -} - -// CopyRelease copies a release from a sourceRepo to a targetRepo and strips a prefix. -// nolint: cyclop -func (r *ReleaseCopier) CopyRelease(ctx context.Context, sourceOwner, sourceRepo, targetOwner, targetRepo, tagName, stripPrefix string) error { - // make sure only one copy process runs at a time - r.mux.Lock() - defer r.mux.Unlock() - - r.sourceRepo = sourceRepo - r.sourceOwner = sourceOwner - r.targetOwner = targetOwner - r.targetRepo = targetRepo - - // Get the release - ogRelease, _, err := r.client.Repositories.GetReleaseByTag(ctx, sourceOwner, sourceRepo, tagName) - if err != nil { - return fmt.Errorf("could not get origin release: %w", err) - } - - // Get the tag for the release - ogTag, err := r.GetTagForRelease(ctx, ogRelease) - if err != nil { - return fmt.Errorf("could not get tag for release: %w", err) - } - - if ogRelease.TagName == nil { - return errors.New("could not get origin release tag name, tag is required for copying a release") - } - - strippedTag := strings.TrimPrefix(tagName, stripPrefix) - // releaes name is optional, so we only set it if it exists - // we also strip the prefix here - name := "" - if ogRelease.Name != nil { - name = strings.TrimPrefix(*ogRelease.Name, stripPrefix) - } - - commits, _, err := r.client.Repositories.ListCommits(ctx, targetOwner, targetRepo, &github.CommitsListOptions{}) - if err != nil { - return fmt.Errorf("could not get commits for repo %s/%s: %w", targetOwner, targetRepo, err) - } - if len(commits) == 0 { - return errors.New("at least one commit is required for a release") - } - - // Create the tag - tag, _, err := r.client.Git.CreateTag(ctx, targetOwner, targetRepo, &github.Tag{ - Tag: &strippedTag, - Message: ogTag.Commit.Message, - Object: 
&github.GitObject{ - Type: github.String("commit"), - SHA: commits[0].SHA, - URL: commits[0].URL, - }, - }) - if err != nil { - return fmt.Errorf("could not create tag %s: %w", strippedTag, err) - } - - // Create the release - newRelease := &github.RepositoryRelease{ - TagName: tag.Tag, - Name: &name, - Body: ogRelease.Body, - } - - newRelease, _, err = r.client.Repositories.CreateRelease(ctx, targetOwner, targetRepo, newRelease) - if err != nil { - return fmt.Errorf("could not create release: %w", err) - } - - for _, asset := range ogRelease.Assets { - err = r.copyReleaseAsset(ctx, asset, newRelease) - if err != nil { - return fmt.Errorf("could not copy release asset: %w", err) - } - } - return nil -} - -// GetTagForRelease gets the tag for a given release. It does this by iterating through all tags to find -// a matching tag name. We need to do this because the github api does not return the tag by name -// only sha. -func (r *ReleaseCopier) GetTagForRelease(ctx context.Context, release *github.RepositoryRelease) (*github.RepositoryTag, error) { - page := 1 - for { - tags, res, err := r.client.Repositories.ListTags(ctx, r.sourceOwner, r.sourceRepo, &github.ListOptions{ - PerPage: 100, - Page: page, - }) - if err != nil { - return nil, fmt.Errorf("could not get tags: %w", err) - } - - for _, tag := range tags { - if tag.GetName() == release.GetTagName() { - // some fields aren't populated in this response, so we need to get the full tag commit metadata - commit, _, err := r.client.Git.GetCommit(ctx, r.sourceOwner, r.sourceRepo, tag.GetCommit().GetSHA()) - if err != nil { - return nil, fmt.Errorf("could not get commit: %w", err) - } - - tag.Commit = commit - - return tag, nil - } - } - - if page == res.LastPage { - return nil, fmt.Errorf("could not find tag for release %s/%s", r.sourceOwner, r.sourceRepo) - } - page = res.NextPage - } -} - -// copyReleaseAsset copies a release asset from the source repo to the target repo. 
-func (r *ReleaseCopier) copyReleaseAsset(ctx context.Context, asset *github.ReleaseAsset, targetRelease *github.RepositoryRelease) error { - // Download the original release asset - reader, _, err := r.client.Repositories.DownloadReleaseAsset(ctx, r.sourceOwner, r.sourceRepo, *asset.ID, http.DefaultClient) - if err != nil { - return fmt.Errorf("could not download asset %s: %w", *asset.Name, err) - } - - folderName, err := os.MkdirTemp("", "release-copier") - if err != nil { - return fmt.Errorf("could not create temp folder: %w", err) - } - - // create a temp file, we'll use this to store the contents of the original - fileName := fmt.Sprintf("%s/%s", folderName, *asset.Name) - - //nolint: gosec - tmpFile, err := os.OpenFile(fileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755) - if err != nil { - return fmt.Errorf("could not create temp file %s: %w", fileName, err) - } - - defer func() { - _ = tmpFile.Close() - _ = os.Remove(tmpFile.Name()) - }() - - toWrite, err := io.ReadAll(reader) - if err != nil { - return fmt.Errorf("could not read asset %s: %w", *asset.Name, err) - } - - _, err = tmpFile.Write(toWrite) - if err != nil { - return fmt.Errorf("could not write asset %s: %w", *asset.Name, err) - } - - // release the file handle - _ = tmpFile.Close() - - // open as readonly - //nolint:gosec - tmpFile, err = os.Open(fileName) - if err != nil { - return fmt.Errorf("could not open temp file %s: %w", fileName, err) - } - - // Upload the resulting release asset - _, res, err := r.client.Repositories.UploadReleaseAsset(ctx, r.targetOwner, r.targetRepo, targetRelease.GetID(), &github.UploadOptions{ - Name: asset.GetName(), - Label: asset.GetLabel(), - }, tmpFile) - - if err != nil { - return fmt.Errorf("could not upload: %w", err) - } - - _ = res - - return nil -} diff --git a/contrib/release-copier-action/copier/copy_test.go b/contrib/release-copier-action/copier/copy_test.go deleted file mode 100644 index e020119126..0000000000 --- 
a/contrib/release-copier-action/copier/copy_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package copier_test - -import ( - "github.com/brianvoe/gofakeit/v6" - "github.com/google/go-github/v41/github" - "github.com/migueleliasweb/go-github-mock/src/mock" - . "github.com/stretchr/testify/assert" - "github.com/synapsecns/sanguine/contrib/release-copier-action/copier" -) - -func makeRepoTags(count int) []github.RepositoryTag { - var tags []github.RepositoryTag - for i := 0; i < count; i++ { - tags = append(tags, github.RepositoryTag{ - Name: github.String(gofakeit.Name()), - }) - } - return tags -} - -func (c *CopierSuite) TestGetTagsForRelease() { - const targetTag = "v1.0.0" - const targetCommit = "i-am-a-commit" - const targetMessage = "i-am-a-message" - - mockedHTTPClient := mock.NewMockedHTTPClient( - mock.WithRequestMatchPages( - mock.GetReposTagsByOwnerByRepo, - makeRepoTags(50), - makeRepoTags(50), - []github.RepositoryTag{ - { - Name: github.String(targetTag), - Commit: &github.Commit{ - SHA: github.String(targetCommit), - }, - }, - }, - ), - mock.WithRequestMatch( - mock.GetReposGitCommitsByOwnerByRepoByCommitSha, - github.Commit{ - SHA: github.String(targetCommit), - Message: github.String(targetMessage), - }, - )) - - cp := copier.NewReleaseCopier(c.GetTestContext(), "") - cp.SetSourceOwner("testowner") - cp.SetSourceRepo("testrepo") - - cp.SetClient(github.NewClient(mockedHTTPClient)) - - tag, err := cp.GetTagForRelease(c.GetTestContext(), &github.RepositoryRelease{ - TagName: github.String("v1.0.0"), - }) - - Nil(c.T(), err) - Equal(c.T(), targetMessage, tag.GetCommit().GetMessage()) -} diff --git a/contrib/release-copier-action/copier/doc.go b/contrib/release-copier-action/copier/doc.go deleted file mode 100644 index cf65066cc5..0000000000 --- a/contrib/release-copier-action/copier/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package copier provides a client for copying Github releases from one repository to another. 
-// It uses the Github API to get a release by its tag name, create a new tag and release in the target repository, and upload the assets of the original release in the target release. -package copier diff --git a/contrib/release-copier-action/copier/export_test.go b/contrib/release-copier-action/copier/export_test.go deleted file mode 100644 index 653213cfa0..0000000000 --- a/contrib/release-copier-action/copier/export_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package copier - -import "github.com/google/go-github/v41/github" - -// SetClient allows the client ot be overridden so we can take advantage of mocking. -func (r *ReleaseCopier) SetClient(client *github.Client) { - r.client = client -} - -// SetSourceOwner allows the source owner to be overridden so we can take advantage of mocking. -func (r *ReleaseCopier) SetSourceOwner(sourceOwner string) { - r.sourceOwner = sourceOwner -} - -// SetSourceRepo allows the source repo to be overridden so we can take advantage of mocking. -func (r *ReleaseCopier) SetSourceRepo(sourceRepo string) { - r.sourceRepo = sourceRepo -} - -// SetTargetOwner allows the target owner to be overridden so we can take advantage of mocking. -func (r *ReleaseCopier) SetTargetOwner(targetOwner string) { - r.targetOwner = targetOwner -} - -// SetTargetRepo allows the target repo to be overridden so we can take advantage of mocking. -func (r *ReleaseCopier) SetTargetRepo(targetRepo string) { - r.targetRepo = targetRepo -} diff --git a/contrib/release-copier-action/copier/suite_test.go b/contrib/release-copier-action/copier/suite_test.go deleted file mode 100644 index 8bbfa75638..0000000000 --- a/contrib/release-copier-action/copier/suite_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package copier_test - -import ( - "github.com/stretchr/testify/suite" - "github.com/synapsecns/sanguine/core/testsuite" - "testing" -) - -// CopierSuite defines the basic test suite. 
-type CopierSuite struct { - *testsuite.TestSuite -} - -// NewTestSuite creates a new test suite and performs some basic checks afterward. -// Every test suite in the synapse library should inherit from this suite and override where necessary. -func NewTestSuite(tb testing.TB) *CopierSuite { - tb.Helper() - return &CopierSuite{ - testsuite.NewTestSuite(tb), - } -} - -func TestCopierSuite(t *testing.T) { - suite.Run(t, NewTestSuite(t)) -} diff --git a/contrib/release-copier-action/go.mod b/contrib/release-copier-action/go.mod deleted file mode 100644 index 0d0426f3d2..0000000000 --- a/contrib/release-copier-action/go.mod +++ /dev/null @@ -1,48 +0,0 @@ -module github.com/synapsecns/sanguine/contrib/release-copier-action - -go 1.21 - -require ( - github.com/brianvoe/gofakeit/v6 v6.27.0 - github.com/google/go-github/v41 v41.0.0 - github.com/migueleliasweb/go-github-mock v0.0.16 - github.com/sethvargo/go-githubactions v1.1.0 - github.com/stretchr/testify v1.8.4 - github.com/synapsecns/sanguine/core v0.0.0-00010101000000-000000000000 - golang.org/x/oauth2 v0.16.0 -) - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/gogo/protobuf v1.3.3 // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/go-github/v50 v50.0.0 // indirect - github.com/google/go-querystring v1.1.0 // indirect - github.com/gorilla/mux v1.8.1 // indirect - github.com/ipfs/go-log v1.0.5 // indirect - github.com/ipfs/go-log/v2 v2.5.1 // indirect - github.com/kr/pretty v0.3.1 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/rogpeppe/go-internal v1.11.0 // indirect - 
github.com/sethvargo/go-envconfig v0.8.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/sys v0.17.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/protobuf v1.32.0 // indirect - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apimachinery v0.25.5 // indirect - k8s.io/klog/v2 v2.80.1 // indirect - k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect -) - -replace ( - github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 - github.com/synapsecns/sanguine/core => ../../core -) diff --git a/contrib/release-copier-action/go.sum b/contrib/release-copier-action/go.sum deleted file mode 100644 index f0ef03c687..0000000000 --- a/contrib/release-copier-action/go.sum +++ /dev/null @@ -1,228 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/brianvoe/gofakeit/v6 v6.27.0 h1:rI6rhEtXnMfdRHc1pE1tdXN/LRnDlRzFZXL2ArDV3Wk= -github.com/brianvoe/gofakeit/v6 v6.27.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.5.0/go.mod 
h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-github/v41 v41.0.0 h1:HseJrM2JFf2vfiZJ8anY2hqBjdfY1Vlj/K27ueww4gg= -github.com/google/go-github/v41 v41.0.0/go.mod h1:XgmCA5H323A9rtgExdTcnDkcqp6S30AVACCBDOonIxg= -github.com/google/go-github/v50 v50.0.0 h1:gdO1AeuSZZK4iYWwVbjni7zg8PIQhp7QfmPunr016Jk= -github.com/google/go-github/v50 v50.0.0/go.mod h1:Ev4Tre8QoKiolvbpOSG3FIi4Mlon3S2Nt9W5JYqKiwA= -github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= -github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= -github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= 
-github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= -github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= -github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= -github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/migueleliasweb/go-github-mock v0.0.16 h1:iEx6iqYASRJVoEO5eMOYpQZFTc00cZ6ysynOArUKM3A= -github.com/migueleliasweb/go-github-mock v0.0.16/go.mod h1:CjrgPd8s5sf5g3XSESAQqxufae+PZbgM/F317C3uD7g= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= 
-github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= -github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/sethvargo/go-envconfig v0.8.0 h1:AcmdAewSFAc7pQ1Ghz+vhZkilUtxX559QlDuLLiSkdI= -github.com/sethvargo/go-envconfig v0.8.0/go.mod h1:Iz1Gy1Sf3T64TQlJSvee81qDhf7YIlt8GMUX6yyNFs0= -github.com/sethvargo/go-githubactions v1.1.0 h1:mg03w+b+/s5SMS298/2G6tHv8P0w0VhUFaqL1THIqzY= -github.com/sethvargo/go-githubactions v1.1.0/go.mod h1:qIboSF7yq2Qnaw2WXDsqCReM0Lo1gU4QXUWmhBC3pxE= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto 
v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/apimachinery v0.25.5 h1:SQomYHvv+aO43qdu3QKRf9YuI0oI8w3RrOQ1qPbAUGY= -k8s.io/apimachinery v0.25.5/go.mod h1:1S2i1QHkmxc8+EZCIxe/fX5hpldVXk4gvnJInMEb8D4= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= diff --git a/contrib/release-copier-action/main.go b/contrib/release-copier-action/main.go deleted file mode 100644 index 85bec356bd..0000000000 --- a/contrib/release-copier-action/main.go +++ /dev/null @@ -1,34 +0,0 @@ -// Package main provides an entrypoint for a Github Action to copy a release from one 
repository to another. -// It uses the Github API to get a release by its tag name, create a new tag and release in the target repository, and upload the assets of the original release in the target release. -package main - -import ( - "context" - "github.com/sethvargo/go-githubactions" - "github.com/synapsecns/sanguine/contrib/release-copier-action/copier" - "github.com/synapsecns/sanguine/contrib/release-copier-action/util" - "os" -) - -func main() { - // here we parse a number of variables we use in the action: - // - // GITHUB_TOKEN: the github access token - // GITHUB_REPOSITORY: the repository we are running the action on in the format owner/repo - repoOwner, repoPath := util.ParseGithubRepository(os.Getenv("GITHUB_REPOSITORY")) - token := githubactions.GetInput("github_token") - - // we also parse the source and target repositories - destOwner, destRepo := util.ParseGithubRepository(githubactions.GetInput("destination_repo")) - // and the tag to copy - tagName := githubactions.GetInput("tag_name") - // the prefix to strip - stripPrefix := githubactions.GetInput("strip_prefix") - - client := copier.NewReleaseCopier(context.Background(), token) - - err := client.CopyRelease(context.Background(), repoOwner, repoPath, destOwner, destRepo, tagName, stripPrefix) - if err != nil { - panic(err) - } -} diff --git a/contrib/release-copier-action/util/parse.go b/contrib/release-copier-action/util/parse.go deleted file mode 100644 index d3b2ded62d..0000000000 --- a/contrib/release-copier-action/util/parse.go +++ /dev/null @@ -1,13 +0,0 @@ -// Package util contains utility functions for parsing action inputs -package util - -import "strings" - -// ParseGithubRepository parses ghte igthub repository from the GITHUB_REPOSITORY environment variable -// this comes in the format owner/repo. This function returns the owner and repo as separate strings. 
-func ParseGithubRepository(githubRepo string) (repoOwner, repoName string) { - //nolint: gocritic - repoOwner = githubRepo[:strings.Index(githubRepo, "/")] - repoName = githubRepo[strings.Index(githubRepo, "/")+1:] - return -} diff --git a/contrib/release-copier-action/util/parse_test.go b/contrib/release-copier-action/util/parse_test.go deleted file mode 100644 index 088ae1895f..0000000000 --- a/contrib/release-copier-action/util/parse_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package util_test - -import ( - . "github.com/stretchr/testify/assert" - "github.com/synapsecns/sanguine/contrib/release-copier-action/util" - "testing" -) - -func TestParseGithubRepository(t *testing.T) { - testCase := "octocat/Hello-World" - repoOwner, repoName := util.ParseGithubRepository(testCase) - Equal(t, repoOwner, "octocat") - Equal(t, repoName, "Hello-World") -} diff --git a/contrib/terraform-provider-helmproxy/.gitignore b/contrib/terraform-provider-helmproxy/.gitignore deleted file mode 100644 index 0526cc68e3..0000000000 --- a/contrib/terraform-provider-helmproxy/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -terraform-provider-helmproxy -.terraform.lock.hcl diff --git a/contrib/terraform-provider-helmproxy/.goreleaser.yml b/contrib/terraform-provider-helmproxy/.goreleaser.yml deleted file mode 100644 index d70e6288c9..0000000000 --- a/contrib/terraform-provider-helmproxy/.goreleaser.yml +++ /dev/null @@ -1,75 +0,0 @@ -project_name: terraform-provider-helmproxy - -monorepo: - tag_prefix: contrib/terraform-provider-helmproxy/ - dir: contrib/terraform-provider-helmproxy/ - -builds: - - env: - # goreleaser does not work with CGO, it could also complicate - # usage by users in CI/CD systems like Terraform Cloud where - # they are unable to install libraries. 
- - CGO_ENABLED=0 - mod_timestamp: '{{ .CommitTimestamp }}' - flags: - - -trimpath - ldflags: - - '-s -w -X main.version={{.Version}} -X main.commit={{.Commit}}' - goos: -# TODO: reenable -# - freebsd -# - windows - - linux - - darwin - goarch: - - amd64 -# - '386' -# - arm - - arm64 - ignore: - - goos: darwin - goarch: '386' - binary: '{{ .ProjectName }}_v{{ .Version }}' -archives: - - format: zip - name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}' -checksum: - extra_files: - - glob: 'terraform-registry-manifest.json' - name_template: '{{ .ProjectName }}_{{ .Version }}_manifest.json' - name_template: '{{ .ProjectName }}_{{ .Version }}_SHA256SUMS' - algorithm: sha256 -signs: - - artifacts: checksum - args: - # if you are using this in a GitHub action or some other automated pipeline, you - # need to pass the batch flag to indicate its not interactive. - - '--batch' - - '--local-user' - - '{{ .Env.GPG_FINGERPRINT }}' # set this environment variable for your signing key - - '--output' - - '${signature}' - - '--detach-sign' - - '${artifact}' -release: - extra_files: - - glob: 'terraform-registry-manifest.json' - name_template: '{{ .ProjectName }}_{{ .Version }}_manifest.json' - # If you want to manually examine the release before its live, uncomment this line: - # draft: true - - -# track sizes -report_sizes: true - -# modified timestamps -metadata: - # Set the modified timestamp on the metadata files. - # - # Templates: allowed. - mod_timestamp: '{{ .CommitTimestamp }}' - -# produce software bill of lading -sboms: - - artifacts: archive - diff --git a/contrib/terraform-provider-helmproxy/Makefile b/contrib/terraform-provider-helmproxy/Makefile deleted file mode 100644 index 0340e715e3..0000000000 --- a/contrib/terraform-provider-helmproxy/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -include ../../make/go.Makefile - -install-plugin-local: # will install the terraform provider as a local plugin for testing. 
- ./scripts/build-tf.sh - -run-example: install-plugin-local cleanup-examples # runs an example - echo "running terraform init, if this fails, you might have to specify amd64 as the arch before using terraform, please see: https://github.com/tfutils/tfenv/issues/337" - echo "on osx arm64, you can run run-example-m1 as a workaround." - cd examples && terraform init - -cleanup-examples: - rm -rf examples/.terraform rm -rf examples/.terraform.lock.hcl - -run-example-m1: install-plugin-local cleanup-examples # runs an example on osx arm64 - ./scripts/add-tfmac.sh - source ~/.zshrc - echo "please run: cd examples && tfmac init" - - -tfenv-install: - @#Brew - MacOS - @if [ "$(shell which tflint)" = "" ] && [ "$(shell which brew)" != "" ]; then brew install rflint; fi; - # default - @if [ "$(shell which tflint)" = "" ]; then curl -s https://raw.githubusercontent.com/terraform-linters/tflint/master/install_linux.sh | bash; fi; - - -lint-tf: tfenv-install ## Run golangci-lint and go fmt ./... 
- cd examples && tflint --init - cd examples && tflint diff --git a/contrib/terraform-provider-helmproxy/examples/google.tf b/contrib/terraform-provider-helmproxy/examples/google.tf deleted file mode 100644 index bc5727f544..0000000000 --- a/contrib/terraform-provider-helmproxy/examples/google.tf +++ /dev/null @@ -1,13 +0,0 @@ -# google provider used for kube access -provider "google" { -} - -# token -data "google_service_account_access_token" "kube_sa" { - target_service_account = var.service_account - lifetime = "1000s" - scopes = [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/userinfo.email" - ] -} diff --git a/contrib/terraform-provider-helmproxy/examples/main.tf b/contrib/terraform-provider-helmproxy/examples/main.tf deleted file mode 100644 index 5555e660ef..0000000000 --- a/contrib/terraform-provider-helmproxy/examples/main.tf +++ /dev/null @@ -1,30 +0,0 @@ -terraform { - required_providers { - helmproxy = { - version = "~> 1.0.0" - source = "example-helm.com/provider/helmproxy" - } - } -} - - -provider "helmproxy" { - instance = var.instance - zone = var.zone - interface = var.interface - project = var.project - remote_port = var.remote_port - - kubernetes { - // TODO: this needs to be changed to work cross cluster - host = "" - token = data.google_service_account_access_token.kube_sa.access_token - config_path = var.config_path - config_context = var.config_context - } -} - -resource "helmproxy_release" "omnirpc_example" { - name = "omnirpc-example" - chart = "../../../charts/omnirpc/" -} diff --git a/contrib/terraform-provider-helmproxy/examples/variables.tf b/contrib/terraform-provider-helmproxy/examples/variables.tf deleted file mode 100644 index f807da14b8..0000000000 --- a/contrib/terraform-provider-helmproxy/examples/variables.tf +++ /dev/null @@ -1,47 +0,0 @@ -variable "service_account" { - type = string - description = "The service account to impersonate" -} - -variable "config_path" { - type = string - 
description = "The path to the kube config file" - default = "~/.kube/config" -} - -variable "config_context" { - type = string - description = "The context to use in the kube config file" -} - -variable "zone" { - type = string - description = "The zone of the bastion proxy" -} - - -variable "instance" { - type = string - description = "The instance to use for the bastion proxy" - default = "rpc-bastion" -} - -variable "interface" { - type = string - description = "The interface to use for the bastion proxy" - default = "nic0" -} - -variable "project" { - type = string - description = "The project of the bastion proxy" -} - -variable "remote_port" { - type = string - description = "The remote_port of the bastion proxy" - # tiny proxy default - default = 8888 -} - - diff --git a/contrib/terraform-provider-helmproxy/go.mod b/contrib/terraform-provider-helmproxy/go.mod deleted file mode 100644 index 1bf6a97fbc..0000000000 --- a/contrib/terraform-provider-helmproxy/go.mod +++ /dev/null @@ -1,236 +0,0 @@ -module github.com/synapsecns/sanguine/contrib/terraform-provider-helmproxy - -go 1.21 - -replace ( - github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 - github.com/synapsecns/sanguine/contrib/tfcore => ../../contrib/tfcore - golang.org/x/oauth2 => golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 - k8s.io/kubectl => k8s.io/kubectl v0.24.2 -) - -require ( - github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 - github.com/hashicorp/terraform-provider-helm v1.3.3-0.20230117165241-19fa52fdcd9a - github.com/synapsecns/sanguine/contrib/tfcore v0.0.0-00010101000000-000000000000 -) - -require ( - bitbucket.org/creachadair/stringset v0.0.8 // indirect - cloud.google.com/go v0.111.0 // indirect - cloud.google.com/go/bigtable v1.10.1 // indirect - cloud.google.com/go/compute v1.23.3 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.5 
// indirect - cloud.google.com/go/longrunning v0.5.4 // indirect - dario.cat/mergo v1.0.0 // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.28 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.21 // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/BurntSushi/toml v1.3.2 // indirect - github.com/GoogleCloudPlatform/declarative-resource-client-library v0.0.0-20211027225138-ef28ca390518 // indirect - github.com/MakeNowJust/heredoc v1.0.0 // indirect - github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.1.1 // indirect - github.com/Masterminds/sprig/v3 v3.2.2 // indirect - github.com/Masterminds/squirrel v1.5.3 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect - github.com/agext/levenshtein v1.2.3 // indirect - github.com/apparentlymart/go-cidr v1.1.0 // indirect - github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff v2.2.1+incompatible // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect - github.com/containerd/cgroups v1.1.0 // indirect - github.com/containerd/containerd v1.6.14 // indirect - github.com/cyphar/filepath-securejoin v0.2.4 // indirect - 
github.com/davecgh/go-spew v1.1.1 // indirect - github.com/docker/cli v20.10.17+incompatible // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect - github.com/docker/docker v20.10.23+incompatible // indirect - github.com/docker/docker-credential-helpers v0.7.0 // indirect - github.com/docker/go-connections v0.4.0 // indirect - github.com/docker/go-metrics v0.0.1 // indirect - github.com/docker/go-units v0.5.0 // indirect - github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a // indirect - github.com/emicklei/go-restful/v3 v3.10.1 // indirect - github.com/emirpasic/gods v1.18.1 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect - github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect - github.com/fatih/color v1.13.0 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/frankban/quicktest v1.14.6 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 // indirect - github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 // indirect - github.com/gartnera/gcloud v0.0.15 // indirect - github.com/go-errors/errors v1.4.2 // indirect - github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.5.0 // indirect - github.com/go-gorp/gorp/v3 v3.0.2 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/swag v0.22.3 // indirect - github.com/go-sql-driver/mysql v1.7.0 // indirect - github.com/gobwas/glob v0.2.3 // indirect - 
github.com/gogo/protobuf v1.3.3 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang/glog v1.1.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/gomodule/redigo v2.0.0+incompatible // indirect - github.com/google/btree v1.1.2 // indirect - github.com/google/gnostic v0.6.9 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect - github.com/google/s2a-go v0.1.7 // indirect - github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/uuid v1.5.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect - github.com/gorilla/mux v1.8.1 // indirect - github.com/gorilla/websocket v1.5.1 // indirect - github.com/gosuri/uitable v0.0.4 // indirect - github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-checkpoint v0.5.0 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect - github.com/hashicorp/go-hclog v1.4.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.4.8 // indirect - github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/hc-install v0.4.0 // indirect - github.com/hashicorp/hcl/v2 v2.15.0 // indirect - github.com/hashicorp/logutils v1.0.0 // 
indirect - github.com/hashicorp/terraform-exec v0.17.3 // indirect - github.com/hashicorp/terraform-json v0.14.0 // indirect - github.com/hashicorp/terraform-plugin-go v0.14.2 // indirect - github.com/hashicorp/terraform-plugin-log v0.7.0 // indirect - github.com/hashicorp/terraform-provider-google/v4 v4.2.0 // indirect - github.com/hashicorp/terraform-registry-address v0.1.0 // indirect - github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect - github.com/huandu/xstrings v1.3.2 // indirect - github.com/imdario/mergo v0.3.13 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jmoiron/sqlx v1.3.5 // indirect - github.com/josharian/intern v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f // indirect - github.com/klauspost/compress v1.17.6 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect - github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect - github.com/lib/pq v1.10.6 // indirect - github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.13 // indirect - github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/go-testing-interface v1.14.1 // indirect - 
github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/hashstructure v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/locker v1.0.1 // indirect - github.com/moby/spdystream v0.2.0 // indirect - github.com/moby/sys/mountinfo v0.6.2 // indirect - github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect - github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect - github.com/morikuni/aec v1.0.0 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/oklog/run v1.1.0 // indirect - github.com/onsi/ginkgo v1.16.5 // indirect - github.com/onsi/ginkgo/v2 v2.15.0 // indirect - github.com/onsi/gomega v1.30.0 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect - github.com/peterbourgon/diskv v2.0.1+incompatible // indirect - github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 // indirect - github.com/pjbgf/sha1cd v0.3.0 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.18.0 // indirect - github.com/prometheus/client_model v0.6.0 // indirect - github.com/prometheus/common v0.47.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect - github.com/rivo/uniseg v0.2.0 // indirect - github.com/rubenv/sql-migrate v1.1.1 // indirect - github.com/russross/blackfriday 
v1.6.0 // indirect - github.com/shopspring/decimal v1.3.1 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect - github.com/skeema/knownhosts v1.2.1 // indirect - github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/cobra v1.6.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect - github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect - github.com/vmihailenco/tagparser v0.1.2 // indirect - github.com/xanzy/ssh-agent v0.3.3 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xeipuuv/gojsonschema v1.2.0 // indirect - github.com/xlab/treeprint v1.2.0 // indirect - github.com/zclconf/go-cty v1.12.1 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.23.1 // indirect - go.opentelemetry.io/otel/metric v1.23.1 // indirect - go.opentelemetry.io/otel/trace v1.23.1 // indirect - go.starlark.net v0.0.0-20221205180719-3fd0dac74452 // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/term v0.17.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.18.0 // indirect - google.golang.org/api v0.149.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 // indirect - google.golang.org/grpc v1.60.1 
// indirect - google.golang.org/protobuf v1.32.0 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - helm.sh/helm/v3 v3.9.4 // indirect - k8s.io/api v0.25.5 // indirect - k8s.io/apiextensions-apiserver v0.25.5 // indirect - k8s.io/apimachinery v0.25.5 // indirect - k8s.io/apiserver v0.25.5 // indirect - k8s.io/cli-runtime v0.25.5 // indirect - k8s.io/client-go v0.25.5 // indirect - k8s.io/component-base v0.25.5 // indirect - k8s.io/klog/v2 v2.80.1 // indirect - k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 // indirect - k8s.io/kubectl v0.25.5 // indirect - k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect - oras.land/oras-go v1.2.0 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.12.1 // indirect - sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect -) diff --git a/contrib/terraform-provider-helmproxy/go.sum b/contrib/terraform-provider-helmproxy/go.sum deleted file mode 100644 index 215cd0f8c7..0000000000 --- a/contrib/terraform-provider-helmproxy/go.sum +++ /dev/null @@ -1,2042 +0,0 @@ -4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= -bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= -bitbucket.org/creachadair/stringset v0.0.8 h1:gQqe4vs8XWgMyijfyKE6K8o4TcyGGrRXe0JvHgx5H+M= -bitbucket.org/creachadair/stringset v0.0.8/go.mod h1:AgthVMyMxC/6FK1KBJ2ALdqkZObGN8hOetgpwXyMn34= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= 
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= -cloud.google.com/go v0.61.0/go.mod h1:XukKJg4Y7QsUu0Hxg3qQKUWR4VuWivmyMK2+rUyxAqw= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.82.0/go.mod h1:vlKccHJGuFBFufnAnuB08dfEH9Y3H7dzDzRECFdC2TA= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod 
h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM= -cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigtable v1.10.1 h1:QKcRHeAsraxIlrdCZ3LLobXKBvITqcOEnSbHG2rzL9g= -cloud.google.com/go/bigtable v1.10.1/go.mod h1:cyHeKlx6dcZCO0oSQucYdauseD8kIENGuDOJPKMCVg8= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod 
h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/longrunning v0.5.4 h1:w8xEcbZodnA2BbW6sVirkkoC+1gP8wS57EUUgGS0GVg= -cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= -cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.22.1/go.mod 
h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= -github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= -github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod 
h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= -github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= -github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= -github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GoogleCloudPlatform/declarative-resource-client-library v0.0.0-20211027225138-ef28ca390518 h1:tFdFasG+VDpnn+BfVbZrfGcoH6pw6s7ODYlZlhTO3UM= -github.com/GoogleCloudPlatform/declarative-resource-client-library v0.0.0-20211027225138-ef28ca390518/go.mod h1:oEeBHikdF/NrnUy0ornVaY1OT+jGvTqm+LQS0+ZDKzU= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= 
-github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= -github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= -github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= -github.com/Masterminds/squirrel v1.5.3 h1:YPpoceAcxuzIljlr5iWpNKaql7hLeG1KLSrhvdHpkZc= -github.com/Masterminds/squirrel v1.5.3/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/Microsoft/hcsshim v0.9.6 
h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY= -github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= -github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= 
-github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= -github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/andybalholm/crlf v0.0.0-20171020200849-670099aa064f/go.mod h1:k8feO4+kXDxro6ErPXBRTJ/ro2mf0SsFG8s7doP9kJE= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= -github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= -github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= -github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= 
-github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= -github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= -github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= -github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= -github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= -github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= -github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= -github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= 
-github.com/ashanbrown/forbidigo v1.1.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= -github.com/ashanbrown/makezero v0.0.0-20210308000810-4155955488a0/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= -github.com/augustoroman/hexdump v0.0.0-20190827031536-6506f4163e93 h1:z6k1vb5L2wqLK4SIk3fpUiXnhNWSZ6Oyy8AaLqr0B+A= -github.com/augustoroman/hexdump v0.0.0-20190827031536-6506f4163e93/go.mod h1:ps2Vk8wMZarkeIPtUqW/FUvwVVdeRDbewMYz+EmuEgk= -github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= -github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bketelsen/crypt v0.0.4/go.mod 
h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= -github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= -github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= -github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= -github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 
v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= -github.com/charithe/durationcheck v0.0.6/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= -github.com/chavacava/garif v0.0.0-20210405163807-87a70f3d418b/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= -github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go 
v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= -github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/containerd/containerd v1.6.14 h1:W+d0AJKVG3ioTZZyQwcw1Y3vvo6ZDYzAcjDcY4tkgGI= -github.com/containerd/containerd v1.6.14/go.mod h1:U2NnBPIhzJDm59xF7xB2MMHnKtggpZ+phKg8o2TKj2c= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= 
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creachadair/staticfile v0.1.2/go.mod h1:a3qySzCIXEprDGxk6tSxSI+dBBdLzqeBOMhZ+o2d3pM= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/daixiang0/gci v0.2.8/go.mod 
h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= -github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= -github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= -github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269 h1:hbCT8ZPPMqefiAWD2ZKjn7ypokIGViTvBBg/ExLSdCk= -github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269/go.mod h1:28YO/VJk9/64+sTGNuYaBjWxrXTPrj0C0XmgTIOjxX4= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M= -github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.23+incompatible h1:1ZQUUYAdh+oylOT85aA2ZcfRp22jmLhoaEcVEfK8dyA= -github.com/docker/docker v20.10.23+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= -github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= -github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod 
h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= -github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= -github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane 
v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/esimonov/ifshort v1.0.2/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= -github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= -github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= -github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= -github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= -github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0/go.mod 
h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= -github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= -github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= 
-github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 h1:R+19WKQClnfMXS60cP5BmMe1wjZ4u0evY2p2Ar0ZTXo= -github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= -github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 h1:EipXK6U05IQ2wtuFRn4k3h0+2lXypzItoXGVyf4r9Io= -github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w= -github.com/gartnera/gcloud v0.0.15 h1:/PkEnxPczVRS78MkMDz6wfdRR8YDDjzr0VF6ri6cGVs= -github.com/gartnera/gcloud v0.0.15/go.mod h1:i9wWa1ndPbE8AhduqRMX9nAv9X9HqN9xgqydfEdFLGo= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-critic/go-critic v0.5.6/go.mod h1:cVjj0DfqewQVIlIAGexPCaGaZDAqGE29PYDDADIVNEo= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= -github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= -github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.3.1/go.mod 
h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= -github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= -github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= -github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= -github.com/go-git/go-git/v5 v5.1.0/go.mod h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM= -github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= -github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= -github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gorp/gorp/v3 v3.0.2 h1:ULqJXIekoqMx29FI5ekXXFoH1dT2Vc8UhnRzBg+Emz4= -github.com/go-gorp/gorp/v3 v3.0.2/go.mod h1:BJ3q1ejpV8cVALtcXvXaXyTOlMmJhWDxTmncaR6rwBY= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= 
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= -github.com/go-openapi/swag 
v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= -github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= -github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= 
-github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= -github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= -github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= -github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= -github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= -github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY= -github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc= -github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE= -github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= 
-github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= 
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod 
h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.40.1/go.mod h1:OyFTr1muxaWeGTcHQcL3B7C4rETnDphTKYenZDgH2/g= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= -github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= -github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= 
-github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= -github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod 
h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= -github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= 
-github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/gookit/color v1.3.8/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= -github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= -github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= -github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= -github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= -github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.1 
h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= -github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= -github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= -github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= -github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= -github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= -github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= -github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod 
h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= -github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= -github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= 
-github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= -github.com/hashicorp/go-getter v1.5.0/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= -github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= -github.com/hashicorp/go-plugin v1.4.0/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= -github.com/hashicorp/go-plugin v1.4.8 h1:CHGwpxYDOttQOY7HOWgETU9dyVjOXzniXDqJcYJE1zM= -github.com/hashicorp/go-plugin v1.4.8/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= 
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= -github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hc-install v0.4.0 h1:cZkRFr1WVa0Ty6x5fTvL1TuO1flul231rWkGH92oYYk= -github.com/hashicorp/hc-install v0.4.0/go.mod h1:5d155H8EC5ewegao9A4PUTMNPZaq+TbOzkJJZ4vrXeI= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl/v2 v2.3.0/go.mod 
h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= -github.com/hashicorp/hcl/v2 v2.6.0/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= -github.com/hashicorp/hcl/v2 v2.15.0 h1:CPDXO6+uORPjKflkWCCwoWc9uRp+zSIPcCQ+BrxV7m8= -github.com/hashicorp/hcl/v2 v2.15.0/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= -github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/terraform-exec v0.12.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= -github.com/hashicorp/terraform-exec v0.13.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= -github.com/hashicorp/terraform-exec v0.17.3 h1:MX14Kvnka/oWGmIkyuyvL6POx25ZmKrjlaclkx3eErU= -github.com/hashicorp/terraform-exec v0.17.3/go.mod h1:+NELG0EqQekJzhvikkeQsOAZpsw0cv/03rbeQJqscAI= -github.com/hashicorp/terraform-json v0.8.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE= -github.com/hashicorp/terraform-json v0.12.0/go.mod h1:pmbq9o4EuL43db5+0ogX10Yofv1nozM+wskr/bGFJpI= -github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= -github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= -github.com/hashicorp/terraform-plugin-go v0.1.0/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= -github.com/hashicorp/terraform-plugin-go v0.2.1/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= -github.com/hashicorp/terraform-plugin-go v0.14.2 
h1:rhsVEOGCnY04msNymSvbUsXfRLKh9znXZmHlf5e8mhE= -github.com/hashicorp/terraform-plugin-go v0.14.2/go.mod h1:Q12UjumPNGiFsZffxOsA40Tlz1WVXt2Evh865Zj0+UA= -github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs= -github.com/hashicorp/terraform-plugin-log v0.7.0/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0/go.mod h1:JBItawj+j8Ssla5Ib6BC/W9VQkOucBfnX7VRtyx1vw8= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.5.0/go.mod h1:z+cMZ0iswzZOahBJ3XmNWgWkVnAd2bl8g+FhyyuPDH4= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 h1:zHcMbxY0+rFO9gY99elV/XC/UnQVg7FhRCbj1i5b7vM= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1/go.mod h1:+tNlb0wkfdsDJ7JEiERLz4HzM19HyiuIoGzTsM7rPpw= -github.com/hashicorp/terraform-provider-google/v4 v4.2.0 h1:w0r/YEy7ZM5mTMAarRUpS7eyYrXTN5mazwHtLnEGAk8= -github.com/hashicorp/terraform-provider-google/v4 v4.2.0/go.mod h1:eUbSXbhfBMNiOuofFo688iPhk42O782vze8drAN2sPA= -github.com/hashicorp/terraform-provider-helm v1.3.3-0.20230117165241-19fa52fdcd9a h1:044aeLPQougT2CWVPGK7PmHnrmCfEjl4S0Gtt65G31A= -github.com/hashicorp/terraform-provider-helm v1.3.3-0.20230117165241-19fa52fdcd9a/go.mod h1:U3NK7vWlemvAEagGY2pZsyfG0wThfm8D3TY4xesp9eg= -github.com/hashicorp/terraform-registry-address v0.1.0 h1:W6JkV9wbum+m516rCl5/NjKxCyTVaaUBbzYcMzBDO3U= -github.com/hashicorp/terraform-registry-address v0.1.0/go.mod h1:EnyO2jYO6j29DTHbJcm00E5nQTFeTtyZH3H5ycydQ5A= -github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= -github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= 
-github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= -github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= -github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= -github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/inconshreveable/mousetrap v1.0.0/go.mod 
h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jgautheron/goconst v1.4.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= -github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= -github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= -github.com/jhump/protoreflect v1.14.1 h1:N88q7JkxTHWFEqReuTsYH1dPIwXxA0ITNQp7avLY10s= -github.com/jhump/protoreflect v1.14.1/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= -github.com/jingyugao/rowserrcheck v0.0.0-20210315055705-d907ca737bb1/go.mod h1:TOQpc2SLx6huPfoFGK3UOnEG+u02D3C1GeosjupAKCA= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= 
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= -github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/ratelimit v1.0.1/go.mod 
h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= -github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= -github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= -github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= -github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f h1:dKccXx7xA56UNqOcFIbuqFjAWPVtP688j5QMgmo6OHU= -github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f/go.mod h1:4rEELDSfUAlBSyUjPG0JnaNGjf13JySHFeRdD/3dLP0= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress 
v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= -github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kortschak/utter v1.0.1/go.mod h1:vSmSjbyrlKjjsL71193LmzBOKgwePk9DH6uFaWHIInc= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= -github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30= -github.com/kylelemons/godebug 
v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= -github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= -github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= -github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= -github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= -github.com/ldez/gomoddirectives v0.2.1/go.mod h1:sGicqkRgBOg//JfpXwkB9Hj0X5RyJ7mlACM5B9f6Me4= -github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= -github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= -github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= -github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod 
h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= -github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= -github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= -github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY= -github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= -github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod 
h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= -github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= -github.com/mattn/go-runewidth v0.0.2/go.mod 
h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= -github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= -github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.0.6/go.mod h1:Lj5gIVxjBlH8REa3icEOkdfchwYc291nShzZ4QYWyMo= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.35/go.mod 
h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.4/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/go-wordwrap 
v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= -github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= -github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= -github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= 
-github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= -github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= -github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= 
-github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= -github.com/mozilla/tls-observatory v0.0.0-20210209181001-cf43108d6880/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= -github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= -github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod 
h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= -github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= -github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce/go.mod h1:uFMI8w+ref4v2r9jz+c9i1IfIttS/OkmLfrk1jne5hs= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= -github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo 
v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.3/go.mod 
h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= -github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= -github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= -github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v0.0.0-20210418123303-74da32850375/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= 
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1 h1:oL4IBbcqwhhNWh31bjOX8C/OCy0zs9906d/VUru+bqg= -github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= -github.com/prometheus/client_model v0.6.0/go.mod 
h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k= -github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/prometheus/tsdb v0.7.1/go.mod 
h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= -github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.4/go.mod h1:57FZgMnoo6jqxkYKmVj5Fc8vOt0rVzoE/UNAmFFIPqA= -github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.2/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20210203162857-b223e0831f88/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= -github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= -github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= -github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= -github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod 
h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rubenv/sql-migrate v1.1.1 h1:haR5Hn8hbW9/SpAICrXoZqXnywS7Q5WijwkQENPeNWY= -github.com/rubenv/sql-migrate v1.1.1/go.mod h1:/7TZymwxN8VWumcIxw1jjHEcR1djpdkMHQPT4FWdnbQ= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= -github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.2.0/go.mod h1:rNqbC4TOIdUDcVMSIpNNAzTbzXAZa6W5lnUepvuMMgQ= -github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sanposhiho/wastedassign v1.0.0/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod 
h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= -github.com/securego/gosec/v2 v2.7.0/go.mod h1:xNbGArrGUspJLuz3LS5XCY1EBW/0vABAl/LWfSklmiM= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.21.4/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= 
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= -github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= -github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= 
-github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tdakkota/asciicheck 
v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= -github.com/tetafro/godot v1.4.6/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= -github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek= -github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.1.0/go.mod h1:crK5eI4RGSUrb9duDTQ5GqcukbKZvi85vX6nbhsBAeI= -github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/tommy-muehle/go-mnd/v2 v2.3.2/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/uudashr/gocognit 
v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= -github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= -github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= -github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= -github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= -github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= -github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= -github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= 
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= -github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= -github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= -github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= -github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= -github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.5.1/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ= -github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= -github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= 
-github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= -github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel v1.23.1 
h1:Za4UzOqJYS+MUczKI320AtqZHZb7EqxO00jAHE0jmQY= -go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/metric v1.23.1 h1:PQJmqJ9u2QaJLBOELl1cxIdPcpbwzbkjfEyelTl2rlo= -go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/otel/trace v1.23.1 h1:4LrmmEd8AU2rFvU1zegmvqW7+kWarxtNOPyeL6HmYY8= -go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= -go.starlark.net v0.0.0-20221205180719-3fd0dac74452 h1:JZtNuL6LPB+scU5yaQ6hqRlJFRiddZm2FwRt2AQqtHA= -go.starlark.net v0.0.0-20221205180719-3fd0dac74452/go.mod h1:kIVgS18CjmEC3PqMd5kaJSGEifyV/CeB9x506ZJ1Vbk= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod 
h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto 
v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto 
v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint 
v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
-golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net 
v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= 
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 
h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210601080250-7ecdf8ef093b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time 
v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 
-golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools 
v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools 
v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools 
v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2-0.20210512205948-8287d5da45e4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= -golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.18.0 
h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod 
h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= -google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto 
v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto 
v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210601144548-a796c710e9b6/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= 
-google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod 
h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917/go.mod h1:pZqR+glSb11aJ+JQcczCvgf47+duRuzNSKqE8YAQnV0= -google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 h1:OPXtXn7fNMaXwO3JvOmF1QyTc00jsSFFz1vXXBOdCDo= -google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:B5xPO//w8qmBDjGReYLpR6UJPnkldGkCSMoH/2vxJeg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 h1:gphdwh0npgs8elJ4T6J+DQJHPVF7RsuJHCfwztUb4J4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc 
v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= 
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod 
h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= -gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -helm.sh/helm/v3 v3.9.4 h1:TCI1QhJUeLVOdccfdw+vnSEO3Td6gNqibptB04QtExY= -helm.sh/helm/v3 v3.9.4/go.mod h1:3eaWAIqzvlRSD06gR9MMwmp2KBKwlu9av1/1BZpjeWY= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.4/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= -k8s.io/api v0.25.5 h1:mqyHf7aoaYMpdvO87mqpol+Qnsmo+y09S0PMIXwiZKo= -k8s.io/api v0.25.5/go.mod h1:RzplZX0Z8rV/WhSTfEvnyd91bBhBQTRWo85qBQwRmb8= -k8s.io/apiextensions-apiserver v0.25.5 h1:iHkMyFGzRgXO8AQlCYPVTVsKLqXvruswirIW8hRji+g= -k8s.io/apiextensions-apiserver v0.25.5/go.mod h1:TWAHgFssGm050Oe6MhN+Jaeav+ISEl9M/qWsPzq2s3k= -k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apimachinery v0.25.5 h1:SQomYHvv+aO43qdu3QKRf9YuI0oI8w3RrOQ1qPbAUGY= -k8s.io/apimachinery v0.25.5/go.mod h1:1S2i1QHkmxc8+EZCIxe/fX5hpldVXk4gvnJInMEb8D4= -k8s.io/apiserver v0.25.5 
h1:oC6pd5Z/q8WKksJApbnnK0mODqIz/jKWw3Jk4QqIVXM= -k8s.io/apiserver v0.25.5/go.mod h1:iCRtEN+C0EsNBcbhmDJp41M7cspJM54VWoKyl62gGOU= -k8s.io/cli-runtime v0.24.2/go.mod h1:1LIhKL2RblkhfG4v5lZEt7FtgFG5mVb8wqv5lE9m5qY= -k8s.io/cli-runtime v0.25.5 h1:5Q37ITYtPtSw2JQcN6EBsdOQBnGvvo/D1g93Da4ceYI= -k8s.io/cli-runtime v0.25.5/go.mod h1:o7lT2rFyfbLrQOzTFsV828OyxKsTE/FmVc3ag1nx0IU= -k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= -k8s.io/client-go v0.25.5 h1:7QWVK0Ph4bLn0UwotPTc2FTgm8shreQXyvXnnHDd8rE= -k8s.io/client-go v0.25.5/go.mod h1:bOeoaUUdpyz3WDFGo+Xm3nOQFh2KuYXRDwrvbAPtFQA= -k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= -k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM= -k8s.io/component-base v0.25.5 h1:tVni0kgpceq71MDMBSixp8Y621YGvTS/1zq3RABgX9A= -k8s.io/component-base v0.25.5/go.mod h1:9J+e9uIUwUOG2x5q5+aaOR0b8QI5OIqwqPAbeODkYpc= -k8s.io/component-helpers v0.24.2/go.mod h1:TRQPBQKfmqkmV6c0HAmUs8cXVNYYYLsXy4zu8eODi9g= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= -k8s.io/kube-openapi 
v0.0.0-20221207184640-f3cff1453715 h1:tBEbstoM+K0FiBV5KGAKQ0kuvf54v/hwpldiJt69w1s= -k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/kubectl v0.24.2 h1:+RfQVhth8akUmIc2Ge8krMl/pt66V7210ka3RE/p0J4= -k8s.io/kubectl v0.24.2/go.mod h1:+HIFJc0bA6Tzu5O/YcuUt45APAxnNL8LeMuXwoiGsPg= -k8s.io/metrics v0.24.2/go.mod h1:5NWURxZ6Lz5gj8TFU83+vdWIVASx7W8lwPpHYCqopMo= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= -oras.land/oras-go v1.2.0 h1:yoKosVIbsPoFMqAIFHTnrmOuafHal+J/r+I5bdbVWu4= -oras.land/oras-go v1.2.0/go.mod h1:pFNs7oHp2dYsYMSS82HaX5l4mpnGO7hbpPN6EWH2ltc= -rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod 
h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI= -sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM= -sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s= -sigs.k8s.io/kustomize/cmd/config v0.10.6/go.mod h1:/S4A4nUANUa4bZJ/Edt7ZQTyKOY9WCER0uBS1SW2Rco= -sigs.k8s.io/kustomize/kustomize/v4 v4.5.4/go.mod h1:Zo/Xc5FKD6sHl0lilbrieeGeZHVYCA4BzxeAaLI05Bg= -sigs.k8s.io/kustomize/kyaml v0.13.6/go.mod h1:yHP031rn1QX1lr/Xd934Ri/xdVNG8BE2ECa78Ht/kEg= -sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk= -sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/contrib/terraform-provider-helmproxy/main.go b/contrib/terraform-provider-helmproxy/main.go deleted file mode 100644 index 06a65f0090..0000000000 --- a/contrib/terraform-provider-helmproxy/main.go +++ /dev/null @@ -1,17 +0,0 @@ -// Package main starts the terraform provider -package main - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" - "github.com/synapsecns/sanguine/contrib/terraform-provider-helmproxy/provider" -) - -// Generate the Terraform provider 
documentation using `tfplugindocs`: -// this is temporarily disabled until tfexec compatibility issue is fixed (this was removed in 0.16.0) -// we can do this manually for now -// go:generate go run github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs - -func main() { - plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: provider.Provider}) -} diff --git a/contrib/terraform-provider-helmproxy/provider/provider.go b/contrib/terraform-provider-helmproxy/provider/provider.go deleted file mode 100644 index 7d4bef3aba..0000000000 --- a/contrib/terraform-provider-helmproxy/provider/provider.go +++ /dev/null @@ -1,89 +0,0 @@ -// Package provider gets the provider for the iap tunnel. -package provider - -import ( - "context" - provider_diag "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-helm/helm" - "github.com/synapsecns/sanguine/contrib/tfcore/generated/google" - "github.com/synapsecns/sanguine/contrib/tfcore/utils" - "log" - "os" -) - -type configuredProvider struct { - // googleIface is the google interface - googleIface *google.Config - // helmIface is the helm interface - helmIface *helm.Meta -} - -func (c configuredProvider) GoogleProvider() interface{} { - return c.googleIface -} - -func (c configuredProvider) UnderlyingProvider() interface{} { - return c.helmIface -} - -var _ utils.WrappedProvider = &configuredProvider{} - -// Provider gets the provider for the iap tunnel. 
-func Provider() *schema.Provider { - combinedSchema := utils.CombineSchemas(google.Provider(), helm.Provider(), "helm", "helmproxy") - underlyingGoogleProvider := google.Provider() - underlyingHelmProvider := helm.Provider() - return &schema.Provider{ - Schema: combinedSchema.Schema, - ProviderMetaSchema: combinedSchema.MetaSchema, - ResourcesMap: combinedSchema.ResourceMap, - DataSourcesMap: combinedSchema.DataSourceMap, - ConfigureContextFunc: func(ctx context.Context, data *schema.ResourceData) (_ interface{}, dg provider_diag.Diagnostics) { - cp := &configuredProvider{} - var gdg, hdg provider_diag.Diagnostics - var giface, hiface interface{} - var ok bool - - giface, gdg = underlyingGoogleProvider.ConfigureContextFunc(ctx, data) - if gdg.HasError() { - return nil, gdg - } - dg = append(dg, gdg...) - cp.googleIface, ok = giface.(*google.Config) - if !ok { - return nil, append(gdg, provider_diag.Diagnostic{ - Severity: provider_diag.Error, - Summary: "failed to cast google interface", - }) - } - - hiface, hdg = underlyingHelmProvider.ConfigureContextFunc(ctx, data) - if hdg.HasError() { - return nil, hdg - } - cp.helmIface, ok = hiface.(*helm.Meta) - if !ok { - return nil, append(gdg, provider_diag.Diagnostic{ - Severity: provider_diag.Error, - Summary: "failed to cast helm interface", - }) - } - - proxyURL, err := utils.StartTunnel(ctx, data, cp.googleIface) - if err != nil { - return nil, append(gdg, provider_diag.FromErr(err)[0]) - } - - // set the proxy url - log.Printf("[INFO] setting proxy url to %s", proxyURL) - err = os.Setenv("KUBE_PROXY_URL", proxyURL) - if err != nil { - return nil, append(gdg, provider_diag.FromErr(err)[0]) - } - - dg = append(dg, hdg...) 
- return cp, dg - }, - } -} diff --git a/contrib/terraform-provider-helmproxy/provider/provider_test.go b/contrib/terraform-provider-helmproxy/provider/provider_test.go deleted file mode 100644 index 73f59fc98f..0000000000 --- a/contrib/terraform-provider-helmproxy/provider/provider_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package provider_test - -import ( - "github.com/synapsecns/sanguine/contrib/terraform-provider-helmproxy/provider" - "testing" -) - -// Make sure the provider loads. -func TestProviderLoad(t *testing.T) { - prov := provider.Provider() - if prov == nil { - t.Fatal("Provider should not be nil") - } -} diff --git a/contrib/terraform-provider-helmproxy/readme.md b/contrib/terraform-provider-helmproxy/readme.md deleted file mode 100644 index 5a5e69daaa..0000000000 --- a/contrib/terraform-provider-helmproxy/readme.md +++ /dev/null @@ -1,22 +0,0 @@ -# Terraform IAP Proxy Provider - -[![Go Reference](https://pkg.go.dev/badge/github.com/synapsecns/sanguine/contrib/terraform-provider-helmproxy.svg)](https://pkg.go.dev/github.com/synapsecns/sanguine/contrib/terraform-provider-helmproxy) -[![Go Report Card](https://goreportcard.com/badge/github.com/synapsecns/sanguine/contrib/terraform-provider-helmproxy)](https://goreportcard.com/report/github.com/synapsecns/sanguine/contrib/terraform-provider-helmproxy) - -This provider is a wrapper for the Helm provider that allows for the use of an IAP (Identity-Aware Proxy) when interacting with GCP resources. This is necessary because Terraform resources are short-lived, so spinning up the IAP proxy separately and having it provide access to the resources is not an option. - -## Why use an IAP proxy? -IAP (Identity-Aware Proxy) is a feature of GCP that allows you to authenticate and authorize access to resources in a more fine-grained manner than just using a service account. By using IAP, you can ensure that only authorized users and applications can access your resources. 
- -## How does the provider work? -The provider wraps the Helm provider and combines the schemas of the two providers. It also adds some new fields to the schema, such as the project, zone, service_account, instance, and remote_port fields, which are necessary for configuring the IAP proxy. - -When the provider is used to create or update resources, it first starts the IAP proxy on the specified instance, using the specified service account and project. It then passes the requests for resources through the proxy, allowing for authenticated and authorized access. - -When the resources are destroyed, the provider stops the IAP proxy on the specified instance. - -## How to use the provider - -To use the provider, you will need to specify the project, zone, service_account, instance, and remote_port fields in your Terraform configuration. You will also need to provide credentials for the service account that will be used to start the IAP proxy. Please see the example folder for an example. - - diff --git a/contrib/terraform-provider-helmproxy/scripts/add-tfmac.sh b/contrib/terraform-provider-helmproxy/scripts/add-tfmac.sh deleted file mode 100755 index 4067060a39..0000000000 --- a/contrib/terraform-provider-helmproxy/scripts/add-tfmac.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/zsh - -# if not already present in zshrc -if [ "$(grep -c -w "alias tfmac='TFENV_ARCH=arm64 TFENV_TERRAFORM_VERSION=latest:^1.3 terraform'" ~/.zshrc)" -le 0 ]; then - echo "adding tfmac command to zshrc. 
You might have to source ~/.zshrc or open a new tab" - echo "alias tfmac='TFENV_ARCH=arm64 TFENV_TERRAFORM_VERSION=latest:^1.3 terraform'" >> ~/.zshrc -fi diff --git a/contrib/terraform-provider-helmproxy/scripts/build-tf.sh b/contrib/terraform-provider-helmproxy/scripts/build-tf.sh deleted file mode 100755 index 9b26dfc09a..0000000000 --- a/contrib/terraform-provider-helmproxy/scripts/build-tf.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# shellcheck disable=SC2086 - -# This gets the arch prefix we use when building a terraform plugin -TF_PREFIX=$( go version | awk '{print $NF}' | sed 's/\//_/') - -# define the plugin directory -PLUGIN_DIR=$(realpath -m ~/.terraform.d/plugins/example-helm.com/provider/helmproxy/1.0.0/$TF_PREFIX) - -# fixes async problems on arm64 https://github.com/hashicorp/terraform-provider-aws/issues/20274#issuecomment-996795241 -# we don't need this for production builds, just darwinarm64. -GODEBUG=asyncpreemptoff=1 go build . - -# make the plugin directory if it doesn't exist -rm -rf $PLUGIN_DIR -mkdir -p $PLUGIN_DIR -cp terraform-provider-helmproxy $PLUGIN_DIR diff --git a/contrib/terraform-provider-helmproxy/terraform-registry-manifest.json b/contrib/terraform-provider-helmproxy/terraform-registry-manifest.json deleted file mode 100644 index 295001a07f..0000000000 --- a/contrib/terraform-provider-helmproxy/terraform-registry-manifest.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "version": 1, - "metadata": { - "protocol_versions": ["6.0"] - } -} diff --git a/contrib/terraform-provider-iap/.gitignore b/contrib/terraform-provider-iap/.gitignore deleted file mode 100644 index 3f879e420c..0000000000 --- a/contrib/terraform-provider-iap/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -terraform-provider-iap -examples/.terraform.lock.hcl diff --git a/contrib/terraform-provider-iap/.goreleaser.yml b/contrib/terraform-provider-iap/.goreleaser.yml deleted file mode 100644 index fd8cc97b7d..0000000000 --- 
a/contrib/terraform-provider-iap/.goreleaser.yml +++ /dev/null @@ -1,74 +0,0 @@ -project_name: terraform-provider-iap - -monorepo: - tag_prefix: contrib/terraform-provider-iap/ - dir: contrib/terraform-provider-iap/ - -builds: - - env: - # goreleaser does not work with CGO, it could also complicate - # usage by users in CI/CD systems like Terraform Cloud where - # they are unable to install libraries. - - CGO_ENABLED=0 - mod_timestamp: '{{ .CommitTimestamp }}' - flags: - - -trimpath - ldflags: - - '-s -w -X main.version={{.Version}} -X main.commit={{.Commit}}' - goos: - - freebsd - - windows - - linux - - darwin - goarch: - - amd64 - - '386' - - arm - - arm64 - ignore: - - goos: darwin - goarch: '386' - binary: '{{ .ProjectName }}_v{{ .Version }}' -archives: - - format: zip - name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}' -checksum: - extra_files: - - glob: 'terraform-registry-manifest.json' - name_template: '{{ .ProjectName }}_{{ .Version }}_manifest.json' - name_template: '{{ .ProjectName }}_{{ .Version }}_SHA256SUMS' - algorithm: sha256 -signs: - - artifacts: checksum - args: - # if you are using this in a GitHub action or some other automated pipeline, you - # need to pass the batch flag to indicate its not interactive. - - '--batch' - - '--local-user' - - '{{ .Env.GPG_FINGERPRINT }}' # set this environment variable for your signing key - - '--output' - - '${signature}' - - '--detach-sign' - - '${artifact}' -release: - extra_files: - - glob: 'terraform-registry-manifest.json' - name_template: '{{ .ProjectName }}_{{ .Version }}_manifest.json' - # If you want to manually examine the release before its live, uncomment this line: - # draft: true - - -# track sizes -report_sizes: true - -# modified timestamps -metadata: - # Set the modified timestamp on the metadata files. - # - # Templates: allowed. 
- mod_timestamp: '{{ .CommitTimestamp }}' - -# produce software bill of lading -sboms: - - artifacts: archive - diff --git a/contrib/terraform-provider-iap/Makefile b/contrib/terraform-provider-iap/Makefile deleted file mode 100644 index 0340e715e3..0000000000 --- a/contrib/terraform-provider-iap/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -include ../../make/go.Makefile - -install-plugin-local: # will install the terraform provider as a local plugin for testing. - ./scripts/build-tf.sh - -run-example: install-plugin-local cleanup-examples # runs an example - echo "running terraform init, if this fails, you might have to specify amd64 as the arch before using terraform, please see: https://github.com/tfutils/tfenv/issues/337" - echo "on osx arm64, you can run run-example-m1 as a workaround." - cd examples && terraform init - -cleanup-examples: - rm -rf examples/.terraform rm -rf examples/.terraform.lock.hcl - -run-example-m1: install-plugin-local cleanup-examples # runs an example on osx arm64 - ./scripts/add-tfmac.sh - source ~/.zshrc - echo "please run: cd examples && tfmac init" - - -tfenv-install: - @#Brew - MacOS - @if [ "$(shell which tflint)" = "" ] && [ "$(shell which brew)" != "" ]; then brew install rflint; fi; - # default - @if [ "$(shell which tflint)" = "" ]; then curl -s https://raw.githubusercontent.com/terraform-linters/tflint/master/install_linux.sh | bash; fi; - - -lint-tf: tfenv-install ## Run golangci-lint and go fmt ./... 
- cd examples && tflint --init - cd examples && tflint diff --git a/contrib/terraform-provider-iap/README.md b/contrib/terraform-provider-iap/README.md deleted file mode 100644 index d7b8b898a8..0000000000 --- a/contrib/terraform-provider-iap/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# IAP Tunnel Provider - -[![Go Reference](https://pkg.go.dev/badge/github.com/synapsecns/sanguine/contrib/terraform-provider-iap.svg)](https://pkg.go.dev/github.com/synapsecns/sanguine/contrib/terraform-provider-iap) -[![Go Report Card](https://goreportcard.com/badge/github.com/synapsecns/sanguine/contrib/terraform-provider-iap)](https://goreportcard.com/report/github.com/synapsecns/sanguine/contrib/terraform-provider-iap) - -The goal of the iap provider is to allow the use of an identity-aware proxy to connect to a GCP through a bastion host using terraform. This looks like this: - -![Architecture Diagram](./assets/img.png) - -This provider is written in pure go and is based on the google terraform provider. - -## Future Work - - - *SSH Tunnels*: Right now: this works for creating a proxy through an ip tunnel. Eventually, we want to allow the use of an ssh tunnel using os-loging to connect through the bastion host with more robust logging. The challenge here is `gcloud compute beta ssh --tunel-through-iap` (which this provider has been reverse engineered from) uses a stdio proxy, so we need to implement our ssh provider using that mechanism. This will be implemented in a future version - - *Public Provider*: This is going to require mirroring to another repo to work with [the requirements](https://developer.hashicorp.com/terraform/cloud-docs/registry/publish-providers) around publishing - - *Better Documentation*: This is a first pass at documentation, but it needs to be improved, including references to examples folder - -## Disadvantages: - -Terraform resources are limited to the lifecycle of the proxy. 
This means that if you want to use this to connect to a GCP instance, you need to create a proxy resource, then create a compute instance resource, then create a connection resource. This is because the proxy resource is ephemeral and will be destroyed when the connection resource is destroyed. This is a limitation of the IAP tunneling mechanism, not the provider. The helm provider is exported witha configurable proxy for this reason - diff --git a/contrib/terraform-provider-iap/assets/img.png b/contrib/terraform-provider-iap/assets/img.png deleted file mode 100644 index 5f6b5d1bb1b5e93b5fa38725b38b74a3c521f15f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 22482 zcmd?Rc{tSn|Nm>LBwJBruZU#JzEcrpeV2qRQ+8RiZ-W*|wvg~Ff)`j`#Lnt zOj*L%nHh{Rzn6NaKHqbl>-wGFIoJ36&UOB?yq4$sSZ>ez;}!Lfp$^kAu47bGR7^Ln zYu%!vqQO#8QAZy=0{rFKVPPgJD$}$ZT32odSTAMyzp@<3dP8n6tZu0Ow%EP6To%g_ z6X|qGKD)Usy!lyWJ0aXHap`_{c>56)4{f#3#oWwlr&nxe=nub&c>46R`O%q7W6_US zOHbY9#nqA<%1ulBK_{SEx^%sD^kP^`xSTAA3S`?wkV%v)?7YO1i^ZTHSE?*(Pc#RU4LL&!4A) z!J6T;Q~@n~Cy$?yBJDh*qN7@Uu6E(fIUB-%%Zp@@=#8$KUg{Aq9m*zJ?VWBLr0zIW zsjA(pQ5v1UYVT0*36Ia{OLdze2IljOu1{4(8cj8 zSl_AG7K4LwOuchZaN5Z3ADpVw2IlzrYOayHDw7jzS;duZp8Kd# zuW|AlhcKL1srSy5zAeNJ{fbX5C>?b1PKEVQS$p4b{`sQrA6s)%Mpse6n~Ne=4ZcYOwryYD4cNZ* zz=Y1$&C58Zkz?ssM_fj~bdP&qMak|q_^wD;)%zvWC!v$9NX%Nxk!H{TjOXZ(jz&^B z|LzBgMx0|4WZ7C=)IZXwOf`7~6(MS%o81?%Oz_rbUc<`tGkZe~vx2 zT0DeS(D>ZqmQlNMiAZsseAcn{JfiIPrnzccZUxTo*zM~qU2&D8-ut?#w+1fK>v~&Q zBfkeNDRp@}-95gwmmaola^aIG$tPYl6kB41O#O#j#?RFbq*;ObU|u-fn$vqpa?dU* zs;7lq?;bM#B3XJ3*13*;B~rCe{V}2af_l(Iw;^5iunJ*OuIs+v@}vg=b)uvimLy~( zRat+(YqVpknCmSBu~)ySk-5VWf0?vKnC{{{K(I)hvOIt6D+eC?FN>;RriP3Wu8>% zx!!?T30u@GUFy4mNC~1GX5dI$+ELxy@&V<9hQc6wG}Wv5t+U%W>DPUuB?qNyu+H?xyZY%Xng+DW4;y+vJ&^tCJ_#RML)d z5BgkB38zl5ea6IpB3|7pdU9=@KIf6!*AG&Z5jG8c>Y1x@v-p&!Sj8-nQA%%V^evLh zpuLc7TU@lOk;Cd%$)0qcaCVI|>P`@0@w_Brw`y}?q*UT=bkCkTcqe^EF^Y(ko2j?* z;8*qPE>l4MD11~7>dNq7Pm7nKU*${*V-F_s$Ao3oTWOl-ba8t$-d`?4$<;q;zp=Xq 
zlk<5O7#D@Qt=25Rh8rRs24VI-<3SkgSj)$E)jr z;dqddY?Iu&?<@#h{jP(jXnA0wt`uKzz4{i#k%LZbTM+r0g?xX(QU;`^K^C4E35l zM%l-{v%!SSKnFkYawALko}EjRZ(tl6l=612eP(t|E#n@r#W&WW zm!z5`nIvDRhQ8_5r|aUD{j^Aa#Pxy8Dya2j7IfrgO+2ScXrq$DRN>CJDU&kkG;BSx zEx&>eX}c-?D2n83;9W(9#E$+mT$(-?hOc@~C?L~BGILA7f{1ZH_a0aB&KMyyHkj>A zFj1JvlWbOtTW3>qpI<+f(B9!gubUS|U16#_kdv_Ie|J*ys&aHW$)PvRh2Y4F-NAo! z;UMp*!*;#dHetp>nq>$ZNE->rV+&!Xr53BB^osX1NyG zQ2bU)(wQFnm3m)=+;?Xodfc*GK7qQPF}>EfP^LR-owxWuTi3j)uJ5F=bPUDUR4k3M z(fVfeGNNSa9~|#d4t7~F#(SfL14FBP(x9;R^bzE^1NkdnP#adF`=00gKlyU0)JTw+ z>@zkZPC9-EQshis5oXX&S=cyAnD>^dUZSUV?^8!n^NDfm67eUnW=H>c$5uTH5WVCz z^BpA-rR1GhU&E1Br$rUavBO*2f#1%b=p2@7toO6f)I;8L(VtKEjMdKa9pSWOs=#wA zWK2CN^_ja7pgaXQfwrwO*Y|f6%?dBw!=5oW=4c6;bjwmgx;5Wo0Mn*p)m;?arCpkR z=XJsFpV3-s)S^e(sC}6}SUcPE*^9!u;NH$tIdA4|NDX-hRUjAIsT(Q7% zC8(@TUx#Ve_J#}d<(u9bpnRovSO4*S?#HnI%MX%ar{7i2HzSh*G?>g_!`bAm`rT^+ zw`%9TPLd%N{wxk_zMQ%ZeoF>pxQ8WhTc_jL9pn?N-52bhZF{WD!--)kBr2%0X{X)Q z1s=JhwE0EgN;qzY9S>m(im4D*07aj|riFkHDY8F4F!ToaU3BF)b5GwZtVN-QHXzu% zZaBsZ2Ia{m2QU>&m6{^^?+xSBQAn8KZAe+JFx^5W%5U+-qN#6jq;<1aO2DC%3m-_zWfiz}sM>U5Vi@Ig#PDv78C$wT#m*QTr_f21rZ9eSXJ>#pI;adof? zbMA*+Lnv@nC};49InrC!-cJe)ZD!>KXGaN(m8cMxT~?ujB^VGHeaKXOz1>cls&REb>al~;K5#2$`-L^r(b?) 
zyC;1TQ|U1&QAGF=+SHqYXlkjkZq`hBFqf>iX=~;0&L90eHR@nYd+^_T;papbtiPt{ zls_Pq4exIB;mCN6PG|(fIbxp){%QB}WbiQPSf#CwjHtV0#$~PU+rN$F|90uo|I>Nc zTw9_{!L7S~%nVxOz^@upvvYqENcRaumhD>j05KbbcKQs9=^Iv_sgl=6DT$fatg>@#=IJ)TGI zZd{JCfQ&=WYaj#=0d{oC{m(ib3mFm?IrtRc{?h8^0tQAYVM-SZJw`w zs~rRNeJZ!wV#6%ZA=TGsdqDSUB!TEcl^-1UB$6?K9Qm2(5_2fv+hril4mSpJ^MMGQ zg8S>d+GlMV=ti)PRX?1QVn!SAW!QPP=EzEVmsG`rT?r6Wd2j8TK)e1MqY#fOpMWW* zG;@yiX7yNHg;m3iTK7r&$!7)*DAJH)>Prt)O_qMAw<+}jPc@LPNp zYf+nq0s_D55NlZXQfrP_R6C`CO=wgt-eip=d*|<&o4cygq_MNbmUf$?pYZU3O}LAo zKMp-9#zarxFJoAhcl(-Ze-TJik_@v{^nkFDt)Ht`xdG-rs33s(xgYUSh7!msZ4 zDMZz?|K50wi~sb-k=`ugBmEpD(GLbGgPV)vT`dT3NTGvFlgk6!TR}IaYsw}<4>>Uc z(1J>_6#x)cPIOy&t@E^&DWy(kET1N zF&#ReAE}dwpsy)nSS?5S>#nGSr#qsOa+mVkW8|v85!Pa*EU4ALRi}m&_{WA+T1s+Xzt#)rak`t{*%QC=+H#yY?|ZZXT}EqmDd^W^*2JvI1sAg`>WTZ82j*IKUSuy zN|(s9j>LX+a-O2G+~*Y2&2;=96!cOgtftYyD{{599vBHA^8IM(Kk4f=-EjhUJt2-) z<;zE;wWyKm+EZB9rNDK&z`5MWw_p-s9T`SOt^-dPiwt-IU?i#{fIa0gBpu%RGLn=2 zl26g;Wh0O+xlWpEY-HXDuXRE8S}I*jiEh-ebqL8pk8--J(R%}dp+g2QRyn_%i2As4 zIV3|uGLVc%+V^A6KCE>`-G5x=2(eysD}LQ$mxrUusX5CYHD4Iu4dGI{Gg@~PRP7KO zg`=foZmDfu2tB(stAG%BvJjfBdmL+|hJUrWgi;92ymj;D*;;f^sVaGMNQBmsU-Gi! zU>bFw#BX*cg<7D0MX2!+lDJX{8RvC%nX8IurcVfXcSSYgA=mntib z#_VEd$1%j!&n#W3QnuZo(??f>4sg$L?*Tg*478$@O2g;wbUoidxO+u>Y%!BK$?5s$ zNS2HybfsNW@O%knb~@;^#2Dwek0>+JrM27(_NKMOEUmYH&L7|j(Cp*d?(Ez3l?`cuqHxv7?9}+1Vz-iHw~DT5+S}HpgB)@FG^EQwG7MZ=ugu9(>Yq2 zVw!&BP_}ctzXC9{zl=&FNS?e{`_=J&ERXmI=jsks4(yw5cx{>=`cQzcRR>P`6^KzI zN&ZuqeAOEAu!q4uX~v!e@N~DYf(PdQIV5`7B1HyUzej?fori>anIIoP8;qP)n~H?H z!x8CrM^q>E10tc_;Y9~l!=@r5n%cd+*4@Q0T(nxD2tlKp!eUcJo=fKR#Op2Lm`1}* zZ?|^uwMV`#jPZme>e^z`;UBO3_OMdOo{v;U?3f3@fJ1oJx5kF6u}FVNVkBBswAJy! 
z+o>4u6Air;h+|(A>a$+CNaUoZiG7NGEh_;R6*UIcUYdl5>~rv+1`KG59yOmOT?lzr z+#Jfh)g~~He2f{(*F7R8mO?A+Vz>Lk%RKD;m$Jj4#{65amr7m{R&t`B`seu*MUvJ_ zp3ycI7HR_~!500r$hgQ5@Xsvcuwefuq)lCsu$`ftw;yDAGKUHclFX|eB5G!&51gU* zq~)w_%$)~hz{tski5j<(MugrXar zz-I5xm}?{xP+psJ12M$I)bNK>+T+!ZMiWxDZO5_M2j>k?cNa!at0Hh8_Q>lz*>j&j z*&*n?lT3NJYPM~R)s1#@{kdL?W0gmMN2uRxqT-acyFLMU9M=E(2>a#NnCS{SGffVF8bQe`szQsE!TvvdWyL2ZT?ALpwa0el-$o$<@xRE zc!R&kt}T;G?4nhJU%{kKc+mk&pq4_ql~Y%^Dsv^dERGr4Cmu!At~eK$xo=fHk*{{_ zlh@PnDURBAi-Ge6DbrhPQ_jQ_*y2Byv)k9gJ6zNl**;yPe&ex98vTjTFv?KX()C$k zYoS|KR&IGvz-r^n6ox_hU&j_)x%uzbyLXC?8hWz)fFQQn&4OvO?&n3THiI zn>CU5r8@||e{SeMNM(Dg?^1oHd7%hXa2ziVxy5M+zs>fKh*ZbX?i z>@FHiwA$vUOog4MOd?4B>-#^(giS!bf4m~8lNMz*#wfFx-Hfdo4=~PRu*isi;~D)6 z__fu(usv_+Y`RPL57HhHAt`K>{UQ6t-g_{~Z$pitZUs*2CQ@;=0imJyzwLYz?sTDv zZF@#C%o%-OjI)z49uEs9x^I6)!iAEAjpX?;zDTQe${vOyvvTW>xw!~}G`TX}uCe!U zcPk{{u`hcaSmSLP5|WZeV+`}}fn>lkg(~tju-z}5S-j5Np&c$tdxY_2BwtX+1biw^ z#csMIVGXPk$CGgX^RreB!?xCFj+rRo?3ak$F2t+-P_4>u>FXB!X1;c3j{-)FvJtjt z6u3Q;Qczgf=1N)iM~DJ*oGcl(-R1R}NueWn1?E_Ti@{I&W5glwos!+{913)IPDew! zdY#&~JuVpsL0IqgQMNT^-C}9g$(zR8z5Z~4FuZBldCCriauMi)T+RxDLZnCl7f~Fy zzp;cl#Rc!Kwc9UGKxYZB#Vw}yNGmB60%k8`cM?O+I?2J2vBabnIr{iNrZG1mWGYtS zOFMrU&n-|Q6%B@b0a$A9oPm6ujPsIT3Jo%*y>m#JfI0Zwf>2jra<-oz zELd-q3>{D%{gN!&BPm#9m?d4wduSWDVZ@rD%qoer=U60sye!TiuthbW^5L?<*=!I+Q0o&l@ajwoVf@7EvW{*&-xrYiP zd1M``>(#~$COYPT@3=!}*xB;9DyWq+%3KP3$y+O7lwhdO{H;#{TH6o`9+8fpp{{3! 
zm6hpd2hMVOQ>bFSvqVf~n>4nk*}N@M8g?TotNh)JAKDw{i+Q*OelvP(6M{R(J}qE3XN%S1+jLI7}<+ zNx7mDhs{Ns7BFwz1MHS%;oey-SjU55l(XsMV-A^c`EC0=+1mm&w5OFy!uW@ zH)GLMNoj@j8qXV3gMreLxvE&g5~{t{DD~16cNqSTcY_G^9`*@~Hk{9Xq{Jlm@@OBg zCG@;WI4%Yzu|eOTB!e%JsvlLE{69=u@i`?Fb;aB*W-Cg$>E(ffvrAt;CP6}GL<#rL z`iSVEYTSC(f=o_Xse3O^D4AEWYx!)d3$Hn!lSb(;z&HRY_i#EGy(eVoa8bfBMJi z9LnyG9+T^jMbn+S7w&q;x-i+PDc$;Pz2B;Cf=3w1V%vx^uaPF2J9i_qIV!Jy_?pqJ z5zrZ*@oqSn)a?g;IQ#?ugnzdwB=a;DaQ`x(qU@$(#DzJNo3DKOa^>y=Xa(vHKcr}* zyzdVJv><0#Tf7SAMy|`&yKO$?QckFrnwvT=kwo9$SKimG{h7SGy+N2&R90cR2{49z zmr!~9;7AS##ppf74*lg>0CUQ4MCPD3{*ab`Tb^`^(9;Y%;XJv(M| zwxoI^%IPH?X%oU^urFlTcNRZ;*Y6{HeewDgvFj|uS?aYb=NC*6IIeqv+K{dg54G9^W>ahQ*{EM)+I^(znbK zfkg4=(EnUQtiO1DWMS@2)+m;itHw{1zby#YcME#O6j)y?I`JRP=Q1FRoiSB!zH{$W zy*t}?!rsaBQd;|c<;A|I{mF=P2U2}2ZsgHc_A`nL54h9{D+v|TcirLoZ9sl?F<{eZ z(a-Y?d-~21y%edl=nlkA%Y)L#4MqDz< zj|KnZdA~F5mWa4YrL~1|JyskT#qvySM zGCozf?SX`DW}zjyrDh@K?PtD|mZ_T<$qx^XdbYd=_WW6q8(|X3I{6PDncaK#I_5TC z^rFXyQ#yjYgg4kvZz-X_&-UZH8#i{mp+1+mIG#t?T@`C+oCS;B5iS1tW-{<* zAfEd5<`-n|sMZpJn~%&qm8~hfsqVaWY8UGI_lOLae?O@*Ov1s+HSzsrF+ok_QiVFl zIL3~xLe$-s<(R|=3Yq3O_W6Ba-)i1Er@_G?oPTz+#ll(+{Lkj=6#}G4wXMHg=9Qj)g}7oP+=V_ zfVb*iB2C^O%gTgkdypgPUsGRMzlsqTt>vc< zIT%O!!*$l&<8kyYG()=u!f)2K?82%?YCjtNY^(*OlubRl(=-U#!Ow z?76S>`geG*YH(Cg$CWqrpFdjta3tr&=S0T+MJI1`iUh5t2zFxnrFq&6zs7~_dxQlF2 zLCVXM6BAjL!qm4<9X+G;jp~O%!270;jzw~>muz`Xr`sc9_Lm4AxA1|f^18l4Yew)$ zM`hIfKe@`rBassa(xY{b>2kO3*>4Fr1XcV)-uoO*!jYp!I)`}>xaf8sB{={8IPIvfko?Ql>va9JQ`nRJW97! 
zdHwns_E+@6*VFyWJ`h!;?^vAw{S+_vTNqghEdKZE)?a>g&It$`rBPjy3I5KfB; zqku^g_fG6UZ`*x)D%)ati}J-g&DENyKHhNu>;AyND)^Mey{zJ$E-N70+lGa#;)ag@ z%rVk5!r(lLo=xZpJ6%0J`}OHg!fa#YuSPPi0wMIZJe2`7EEwf~qYDT>W&r2Tncs<8~r-%+O&e2pADDz*>F=xX`g}fUQ|kPCRAjD3RB~jp}M>J zc|F!6L=#Sgt)$I7H8H`583Zg=w%he)Dvhe8$HCFKAC>0t5a9NZoByVatW8h@UIC@C>~`5P?;WWv%kH4*6QoMKn^LqjWxlsyHi_t1KqvKLmc0qTC}=2Q_`Dq zj(Bs0%S@CbKkWGPH^KuL04T2mlq;QcwW>;*Z4!u;uPrfRzvGHXySws3>61PlAq8abk`$-h+1m9!!Cv8qT=XhYt#Yb!aq zitJRbZ*3|7TD~bjWU6?T)m}1FQX@Paoh6~YT*_=Yu`0Ip!Fg!9Q#gmwZRqqU(&)St z5RHL=>E6=c&bt=FxxuI{j#pGCo-Y|hUNCiF*BMyi`sRYHNCp^7OJpzoQBPNs*@xk!%{YtI zhu$DlJg?-ESq*tKoJ+~`qL9XEAMH@7-Iy3lubUOl{#UgnVHfgKOjFa=Jn%}6G^!>~ zG5yurFZ70V0)F&n6#xJp2<4)v(Qt5Z;P)uuZt0dNHsLR(h#iv+<+2GN!>tM>?`)AP z5PLxz$XnMy)pRU;V(sz#DJh^(r70>Ta;@B=HXX#84JM3l5$vetRHpqolJl5T|dCv+O+h6jaPfPlc&o+ zM6=^bIy!eU;(ZDAq{e8WipVfacl69Up+a~@Z^@|RQIhY&$%0`X{}FC`XMx70#Z5pA zQn7$RAYe*{HsQD+oJr}OR-$tZumbxh3{PQ$35)cH{R{Q}k#`-tXWf1ZR`q%>d1GZ>n-QcTBaK)M>xwJkwxw0AG>zLb*cJWM z)WI{KGB)aiHeJ`pS3oc;Q3NMQ=(D;=N~yAG$EJZ)pv`TtCFTCE`Ft(*@jk2D-kQov zUVlPux>s_^8DH?~#9np6yzn)jBf9_%uT^NLr?I5a6hk_jV`_h5k*{TMkFDNF66!QI zQagpob`JNS>u+vE;}&a!C}d(gD7=5>B?0s_qm)b$vQeMU zc+BFNmDaBzx)AMc+;>^SYx+68GdPCIN+w_hModR>VddoHW)V9!p_9JIG`7e>qZ|p1 z-ES;Y#2bSAG>rfQOW?3nxiGlIFZBWRmF$MnLe6h&slt)%DWZD+RIP1A0U07DMeVX6 z3R7wPX1LV5hBJ=>lV`p~KZeQ_!-Ar8Cd&ZZpG8}e4tdN<+YQ+9%IHi6wXMs)ffz{O@E z(LtfHd0ncZ>q$AO3Kqt!w2gp4cWAQ!75R!_k%&ECfCOZX6Te4G{)7qT5E#484*7bkRO;O+;qYlggL1X$|@J+sNQ0Zc*=h{Yv98Pc%In z!=x&L@&0N$d&tBD(gc<`JY}0-jrDtqxKyc6NEz;EVv)Q~BKCDo#HQHw>|K;0fjx4J z*Gk{K_3F?W@IPYkEW4}%lD5xedR#wR(^*m$-U$L)zPn}A<&dGNw%BNmXK?faZUZ>( z`iB_(#l>sLK%v#>)3CW8xohSLi@H-q#o#~Txaqj;evf?NyUaX_FR668X~D*qs4J?6 z{O&n^hlG(`HiX;t2keD9(Xj>tdW?lZ{t8)geZrrpL(j3YK0j<94Li}7jPIl-yy-W? 
z`--J*Gw-|Iwd?C2fu7*RY9%xW%yy)PigeZF4+qo6-RKj zz3Frw%f`=t8P4O>6~bCy?@yi{%DRuaOHa1+@w;?G{|tK!5RRW=FO6FEw}Zn@kxON6 z^*t8W4lvGyp51|kFJ9S?_YJNb+KS`FOCB*W!D!KadP=*iFDQ7N;menN*5lD5+WRpJ zx7vvbAZDKX?rWlC&7PDIeC=}s^u*p=|4tYFO4skF)KyH4%7G0=eq;ztU(C(X^EDR~ zyjv~zgLr*@t>=VquxIytKKFUQhaS6AB~?~yQ=2v9ROsAxqPQ>UGrWWF_`~<-Pu~+& z23uee`@v=_?!o1O!A-rJT-@J+4kyP(<|Ce8_WL)Sq2ZZG+VYn2;7b$~x?9@L93tcq zAK3M7C7okJ{h?Eqs}&`o(PS7T1Xr_c8*wAqeIw1Pcd_@qvQ}$IpK6PjNbF?O)-|`n zevMy3ZC4AodGuxP#bqj2o%Pbca`)l&tCq?q)!HI%>?cq4Ayy;4Ux|!{h?50UC-&B- zyGI{Bu{g+Ba;f^hb0E*Q_R1xT|;muHgO+OXc z`Gy+SZM!&LGwh1+^eWvhcmvAeTc2N7TI;I~XqKHHfJMjf$Y0lGhREbtgyo)Aa(y$d z5xQvMRTTiv&(_=vx_s4crl+W^;~;{c5HFV08%sTkD7KBtROBFwQrYnzWDkl>rAaM^ zN&x*M^&|om-ysEPE=*r7%gTJ?MuwbLc4+B;RJAv>?I6-+;hS4p-KWQHpin(O^20CV zLXOIYmA5MfJ!$V|)q=DhbNRHr)%>x_e)8PBaV>0p(Qq8P%_c(&X^-Qzvg0!ilR9n2 z@gt>o@g=Q_Y3e_S0EY&pI~tLEq&DuQsXm>2EB;!kzUIC9jn4k{m#&sU#`)?1(7;|r zLP~?KC4k;S#qJlLu>Mdt5$fc0_OIE1aEAy;J!)&ALi%FJJA@dX{0HH+ZlrpjrNM!x zxW`IK`u%P3A{b#_gTLHg;!p-t_=j)O?_jGqRaEx_(3g+Povq~+9Y}H}Q&`Xyk z^-`x(9nw4tlC>7EG)IgFSWWuK3LypQm+ge2SA$8}ufHs$lHd1VJxKJX>f6N{ncGE-dhK`K0mDf`dqhjXcr z0V^Zxdk%C|*m~z@GzyT4MAmJn-IUS#;Wxb4`a3~AFQBtEz3g-+ zCjJR8t+&&fGV=&iB&sRZxfGP$Rn}qef#ejA%AvVhgH+6Sg%~_xGi<^w>BbC=kl zE0hgAo>b-K0Q06kSfkTFZuu&6ojgtN$z;hiL`@D(|7Wt%FLylqMmYZj>`NIflH@Fl zbFBu@rLZD9)4VV&*f(18)D3n-F->E@%7cx%Ag_X;yueC=Yp_LoOBIfW1g}y80utU0TOq4 zc6Zv5{da!>-u>X<-EViHb*fTKQXC^LXpqpIDeMkW1WhfoKYFR_057_Gu-}VAC1!iH z7Mu1#6|N}*vG~POQ-f)Lb>9YN3V-!C(7HkL{^s@6te=k&GVb0>Ss`SQ1oy`{l>j_W z_vhm}qo~>^WY0H>tFhMs$uO%<%1c2dAp!hg7Sp}`Zj*p+pWmSzqK*r=h6!Nt?9f=i zoZ7f2D}S7!^{GOybMMSi?z^FR`{PaTc>s6YBxf*NMvCx^4QVjANNmZDsbm$)f+C3IR$IRtQBjq_b zEs|jSe*QNO7BbdgK&M`~(k*E#H^W*aC2Z^9xo#h>cH!Q}@K)$TagiM%6Dq`7)R%hg zsmkl$2%M$j`Qr%J)|>S2C=oJANk9tKAQXyt2DnMr`YR!lfIjeKlvbU^zgSEl3By4B%=Z zF6dwcul=QJcTClQnPxwK%3yQlUD(WxrbAHpcyECw{t6nfd8^d`YMryDi-8|nBOy@Ljmu5QM;7vI`u|+0J_&F zT<^{f@%Z~ZM1eJGJZVMzFDdq%_BpBGby(|Pry&2)+@DpE|IyK3^^`sW38`w^Xq%r) z<5wl--$VARw6Z@|%Z4x=9Q{Yj{(q}ceIeWE*O?;8gH_&UXBPT~_Ba)E4$g9XY~flq 
z>gkdg6q!V{r7k|y?a8nx{N5AmR1{q*R! z_T}JOacXPD13`hpte)U({`NZT{9iiYxwep~mJnGGy!LOY>tw2ALTdbM7x6WfB+qXN z!W-_5^Q;>KKp*}V&pV7+h-=Krq7%4bkp8*?WOMkiL<&kzJI z+aAc951NxvPL;E2t`8q<&<5&KAFBUzTL2Z;x)|A|^uWcXATRF|dhyh6QN^F389%!E zF;b9qGsNZnCLS=lR<11#WrQm;1+zD1fWRfK%@S zJZ?WX>fBFvf>UyVb872=Z-=4F^|Z57>y7UkTL;8rG0@ zlSR>I?qy)3ofPJej^$w4`Ep|a#@TmC#o=Ww-bBCBVtNAtzQI(3Mn7_+!8X2g(1ks-asjHNSNqyNoWXm4=RetiWn&9?LyqFZtILfWhUV|@T?xT_X3-F{ZHtl8yCY4% zny(X|&bh&&vD=THIs9j!EMr=(N~OSS+l{gkp#0q_8hdMj8;z_)=&@DJqH7hn6HIfn zJ7cMLt78O>nfcXSX@Y^70F*-FAktT)-IrG)Yh<9NBqjh6O5O*fvM79j6-R{&6a@%% z?}g*Ml5=*ah1b`+BvB2G2Wq&JB|oIQ+tN8E=f7ZUF#n0;eQGXknydS zE+q>sxXL@rXA;q!LsMNd_%x|NTR;10MO0dbG>hj<(%MeMUs2>$LnH(-fO`bVxO!I zZG8dHOR5CeLb&D;ib8aTt#%VHyv9A>-E}C3yo~p|-8pELJm{w#IISLqmV19k;?CC` zIk_YugT#gvILcCDwUq80Cs$(sifp0sh>Uaz@jEl^<~q=^T=ROmNj4!a}Tp8xLr@>HQN&5 zO1Z0swP*&jNtcP6(P_k3g*6PU+0I&A7S~z${bFE9OGF2mR&F|4PbS{e$}msZd_m=o zkPThkvyG)%jKsBT*Vb$Y4{s6SK=K;~P;EjjAy=IFz#<4xqjx5}u`hQ4*dwDS^CCXS z=}x%mNXv8_^?+FotklEYD%>;;8yYs5EH?zv2cYyWwm3Z?Gc5P>IPhLnZ?9^Tg}|R7 zJrsZ&jPsZr&HP`NhbpjQ%iF<`F9g=g<%OGm0>nWOdAg)x#LGWtFd*E>D3v-)u9GpK zK5doRJB`}t=jR#rHL5;9ZCD$(N@raxQj z*WKd*g55$%z8y`-?$+8`d!O$gkIkUg6rZUmvR?O^LL z)G*OyrB%fO(z#bg8f014u`e%9iN5t;LC^YCk?rm69kgLp($yfUkOQ^jq}d!b9H_(H zuiv?;d9e=WhIF3-zLAg~uM^8BFog+i*D6OB0VLajw0c6oZZoyA%KdX&hpoa0Z!n=# zhhH+vHHY%;DyMu%T_d@-!8gbM?MZ$~piU>+e(wE!>E)%`o{eqbyA9rp>&d0|t1lJo z7wgu1ADP>sH+5=p?K_$6F}$J->=G|R;_T{=c@{W1?-X2!ja}`l6-|hZEL}26VpVAZ z1X-f{@;)B*5Jl&Exb;NfLj|3UN%mFb40*Q~hrkm5(Vc9kj2bshewW1xOr6r+nht;1 zIc|A3DQa|BQ}`A^jl`T+QzyHGU4RxR* zX}rd($5AK`15^u{H~JL&ERFV*$z<>BZT8zfNCgGD3rtu0a_gJ%Ur@KHahrr2A7SLN zb7$d|SKZTVPOY4>n^`dG*}L*2vV~tW$D(T7$0CKg0{3BsbE4_iD{ZZ<)}q$?I*PW7mExG255pK{(Igs`ltME$r=EL3r4wr}WZ*$K5zx&u+;t1Nsv^x%N<_ zi@hC1)Pq#LEP?Z#n5|6c#&njy@YA@oOY>@h9_uHo!+<&{>2ire-|hV<#J;PB*I`>X zo=>f33dnhMfdYQ}iqGXKw8Pht9mUn`+AaQ?yq4v0FI>p_PW`|jhEn|$wV zb2v-rr8+<55SOUsufMW*{sM5ildYenJThU(j*Vs)PXruc(L|2NpYWz;7Ql3aIpr4o zlz^$(@+#-98i0V=2_4zm|AGkXDJIV>D0AF^Vm7IHH-=VqO|Z*fSpOAJGA1Q6Te7Vh 
z`J%uD=|1a6Wk`?tD|ypO4tM152c$x+BQT z_BdSmb0e6T9YY(UZMAhIKA#`A#HSXVE?P2YOIXpkJnA-flHN1cHY>M=-&jE1Y^5Am z?+e};oDsDf3{L8)$qFWBut@?n+ITcH7y>QVyQgkmFSe%`bogZKdgWBRj=GL?^$F~k zVZ0Qy+$a%i=y3HZ21EJasxtVYfZs1%RGC5j;rEt+dpP=ccz!rSSniQGyF=}uMZa_y6C0x*Ex>ky|pph~RE6?t!Tsb1Z}XmHrgFB~W4 zTf01IKf9=u+iqE7V?wrNKh-*A?)V=6;=)eg5f)C?oJvr&AI$u59tv+ah^=~}%m9g3 zbKgaIZLKHYmx4%#k zhl4=V|B0CF-(j($3vd$?ijOIY)$+9!zpEP#9D4tv&cEJt6-lk`rYXicWUw}CE$3Ah zbCgK^FKhn!-LK{d3EJI|h>k4Ue5uLXp!AzAVaJ@P|3$dJKKUi#!-)lR*vpf^ntgLM zx7{z)XfW9Qm)Ws8yp=n24vBw$#3#?6r6f5))W)mE(vC=<;8$(S)V5`7``|pg%kHgL zFJ2rAPU_Cl00C#S_dMLC3knLPfr`*zZ}e|)PqqJ{GSA#nsxWum1vW{3J`>nSU)DJG z4cIoJ7U4!Xfqfo&P!{BQ|F2R*r#@>962Tl=vP3^M+$Rx1Ab*>b-oDztkGbWiVJ4aV0fKM3B54NItgr%AQ z03o?w5gdj3EmVT~6z_37Zdk6@8-;-Q5g0rYB-z@EcYLzINCL>g3{cjv4=Nmau*6CK z9M{SEk=?}A#p=GcT9EPU#LQ{a1eF&{yo$~nMsuP|wfqV0M*()}2{J(kP%XQZ4k{>{$ zBfwzd05yYVPL^(#%wOt1-!3gI##8hGXkcUcyeh4IwwD>+Us%}8Su^)<7;t}2-Dbbo ziwxJ7s-4>V{a$w3Np+tY3gGoI+xGu^y@H* zpRIZ|qhv;%-@3O2svGuSe7nRazWixXqm}VI3E--S@)@)1*kY#Vyy)!f%qUyT z_zyT{IdigwFtD?`=uXAs-Xh@c!hi;O+o~%%{ukqwz7%T$qqH71)t`Q9YiIUX=f|g7 z!Wozo3B>Mewa1uS3Hvtb4+v8 z;c(_CeO`;aJ3D?PXOz@`-+g~7yIh4sT=iSi&3x7toWbcgzg}wKxBIbR=Pobc`ZMEu zz-5OU`UHL+mj9;!+_TQZx?_=dXw~<3vj1l=xPO74-`K=GTl&E%@V?Xf|7)czE7yg| z)oSlKcDv>4q3%up5~kK{j5XQC3|YN$>3rSo7o9yb7i@mE@?NI#&E1(=R@riUcKR#A zCg`9gayLVG+}w$EVt-ziukvkqZB-CkR#bg`veDDc-kUy0Y>nj3b1z?HY@TBFn>Pvx|#yLZvUxi9#;v{HV~)3OW{e^g~UrSSCWr@-FS#0P#~E-w5!{dM!S zX4LvCoN=9quJ3*k*S)JgtN!$xf4Qi5`Im=l>-NQV=GesK)=QlLZh!?(cS5VoUC`~K zz{M#|9M^YjYrg&M>B*UEAG2+TPUu4>SYm*?=%?FVdQ&MBb@0OWF3%>V!Z diff --git a/contrib/terraform-provider-iap/docs/data-sources/tunnel_proxy.md b/contrib/terraform-provider-iap/docs/data-sources/tunnel_proxy.md deleted file mode 100644 index d51732b5dd..0000000000 --- a/contrib/terraform-provider-iap/docs/data-sources/tunnel_proxy.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -# generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "iap_tunnel_proxy Data Source - terraform-provider-iap" 
-subcategory: "" -description: |- - ---- - -# iap_tunnel_proxy (Data Source) - - - - - - -## Schema - -### Required - -- `hostname` (String) -- `instance` (String) -- `interface` (String) -- `project` (String) -- `remote_port` (Number) -- `zone` (String) - -### Read-Only - -- `id` (String) The ID of this resource. -- `proxy_url` (String) - - diff --git a/contrib/terraform-provider-iap/docs/index.md b/contrib/terraform-provider-iap/docs/index.md deleted file mode 100644 index a189779380..0000000000 --- a/contrib/terraform-provider-iap/docs/index.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -# generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "iap Provider" -subcategory: "" -description: |- - ---- - -# iap Provider - - - - - - -## Schema - -### Optional - -- `access_approval_custom_endpoint` (String) -- `access_context_manager_custom_endpoint` (String) -- `access_token` (String) -- `active_directory_custom_endpoint` (String) -- `apigee_custom_endpoint` (String) -- `app_engine_custom_endpoint` (String) -- `assured_workloads_custom_endpoint` (String) -- `batching` (Block List, Max: 1) (see [below for nested schema](#nestedblock--batching)) -- `big_query_custom_endpoint` (String) -- `bigquery_data_transfer_custom_endpoint` (String) -- `bigquery_reservation_custom_endpoint` (String) -- `bigtable_custom_endpoint` (String) -- `billing_custom_endpoint` (String) -- `billing_project` (String) -- `binary_authorization_custom_endpoint` (String) -- `cloud_asset_custom_endpoint` (String) -- `cloud_billing_custom_endpoint` (String) -- `cloud_build_custom_endpoint` (String) -- `cloud_functions_custom_endpoint` (String) -- `cloud_identity_custom_endpoint` (String) -- `cloud_iot_custom_endpoint` (String) -- `cloud_resource_manager_custom_endpoint` (String) -- `cloud_run_custom_endpoint` (String) -- `cloud_scheduler_custom_endpoint` (String) -- `cloud_tasks_custom_endpoint` (String) -- `composer_custom_endpoint` (String) -- `compute_custom_endpoint` 
(String) -- `container_analysis_custom_endpoint` (String) -- `container_custom_endpoint` (String) -- `credentials` (String) -- `data_catalog_custom_endpoint` (String) -- `data_loss_prevention_custom_endpoint` (String) -- `dataflow_custom_endpoint` (String) -- `dataproc_custom_endpoint` (String) -- `datastore_custom_endpoint` (String) -- `deployment_manager_custom_endpoint` (String) -- `dialogflow_custom_endpoint` (String) -- `dialogflow_cx_custom_endpoint` (String) -- `dns_custom_endpoint` (String) -- `essential_contacts_custom_endpoint` (String) -- `eventarc_custom_endpoint` (String) -- `filestore_custom_endpoint` (String) -- `firestore_custom_endpoint` (String) -- `game_services_custom_endpoint` (String) -- `gke_hub_custom_endpoint` (String) -- `gkehub_feature_custom_endpoint` (String) -- `healthcare_custom_endpoint` (String) -- `iam_credentials_custom_endpoint` (String) -- `iam_custom_endpoint` (String) -- `iap_custom_endpoint` (String) -- `identity_platform_custom_endpoint` (String) -- `impersonate_service_account` (String) -- `impersonate_service_account_delegates` (List of String) -- `kms_custom_endpoint` (String) -- `logging_custom_endpoint` (String) -- `memcache_custom_endpoint` (String) -- `ml_engine_custom_endpoint` (String) -- `monitoring_custom_endpoint` (String) -- `network_management_custom_endpoint` (String) -- `network_services_custom_endpoint` (String) -- `notebooks_custom_endpoint` (String) -- `org_policy_custom_endpoint` (String) -- `os_config_custom_endpoint` (String) -- `os_login_custom_endpoint` (String) -- `privateca_custom_endpoint` (String) -- `project` (String) -- `pubsub_custom_endpoint` (String) -- `pubsub_lite_custom_endpoint` (String) -- `redis_custom_endpoint` (String) -- `region` (String) -- `request_reason` (String) -- `request_timeout` (String) -- `resource_manager_custom_endpoint` (String) -- `resource_manager_v2_custom_endpoint` (String) -- `scopes` (List of String) -- `secret_manager_custom_endpoint` (String) -- 
`security_center_custom_endpoint` (String) -- `service_management_custom_endpoint` (String) -- `service_networking_custom_endpoint` (String) -- `service_usage_custom_endpoint` (String) -- `source_repo_custom_endpoint` (String) -- `spanner_custom_endpoint` (String) -- `sql_custom_endpoint` (String) -- `storage_custom_endpoint` (String) -- `storage_transfer_custom_endpoint` (String) -- `tags_custom_endpoint` (String) -- `tpu_custom_endpoint` (String) -- `user_project_override` (Boolean) -- `vertex_ai_custom_endpoint` (String) -- `vpc_access_custom_endpoint` (String) -- `workflows_custom_endpoint` (String) -- `zone` (String) - - -### Nested Schema for `batching` - -Optional: - -- `enable_batching` (Boolean) -- `send_after` (String) diff --git a/contrib/terraform-provider-iap/examples/google.tf b/contrib/terraform-provider-iap/examples/google.tf deleted file mode 100644 index bc5727f544..0000000000 --- a/contrib/terraform-provider-iap/examples/google.tf +++ /dev/null @@ -1,13 +0,0 @@ -# google provider used for kube access -provider "google" { -} - -# token -data "google_service_account_access_token" "kube_sa" { - target_service_account = var.service_account - lifetime = "1000s" - scopes = [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/userinfo.email" - ] -} diff --git a/contrib/terraform-provider-iap/examples/main.tf b/contrib/terraform-provider-iap/examples/main.tf deleted file mode 100644 index 0f072f81e5..0000000000 --- a/contrib/terraform-provider-iap/examples/main.tf +++ /dev/null @@ -1,54 +0,0 @@ -terraform { - required_providers { - iap = { - version = "~> 1.0.0" - source = "example-iap.com/provider/iap" - } - } -} - -provider "iap" { -} - - -provider "kubernetes" { - // TODO: this needs to be changed to work cross cluster - host = "" - token = data.google_service_account_access_token.kube_sa.access_token - proxy_url=resource.iap_tunnel_proxy.tunnel_proxy.proxy_url - config_path=var.config_path - 
config_context=var.config_context -} - - -resource "iap_tunnel_proxy" "tunnel_proxy" { - zone = var.zone - instance = var.instance - interface = var.interface - project = var.project - remote_port = var.remote_port -} - -output "tunnel_proxy" { - value = resource.iap_tunnel_proxy.tunnel_proxy.proxy_url -} - - -# this will likely error witha permission/not found error. This means the proxy is working -data "kubernetes_storage_class" "example" { - metadata { - name = "terraform-example" - } - - depends_on = [resource.iap_tunnel_proxy.tunnel_proxy] -} - -data "iap_tunnel_keep_alive" "keep_alive" { - # keep alive for at least 100 seconds - timeout = 100 - proxy_url = resource.iap_tunnel_proxy.tunnel_proxy - - for_each = { - timestamp = "${timestamp()}" - } -} diff --git a/contrib/terraform-provider-iap/examples/variables.tf b/contrib/terraform-provider-iap/examples/variables.tf deleted file mode 100644 index abccbb3172..0000000000 --- a/contrib/terraform-provider-iap/examples/variables.tf +++ /dev/null @@ -1,46 +0,0 @@ -variable "service_account" { - type = string - description = "The service account to impersonate" -} - -variable "config_path" { - type = string - description = "The path to the kube config file" - default = "~/.kube/config" -} - -variable "config_context" { - type = string - description = "The context to use in the kube config file" -} - -variable "zone" { - type = string - description = "The zone of the bastion proxy" -} - - -variable "instance" { - type = string - description = "The instance to use for the bastion proxy" -} - -variable "interface" { - type = string - description = "The interface to use for the bastion proxy" - default = "nic0" -} - -variable "project" { - type = string - description = "The project of the bastion proxy" -} - -variable "remote_port" { - type = string - description = "The remote_port of the bastion proxy" - # tiny proxy default - default = 8888 -} - - diff --git a/contrib/terraform-provider-iap/go.mod 
b/contrib/terraform-provider-iap/go.mod deleted file mode 100644 index 17ede04eae..0000000000 --- a/contrib/terraform-provider-iap/go.mod +++ /dev/null @@ -1,118 +0,0 @@ -module github.com/synapsecns/sanguine/contrib/terraform-provider-iap - -go 1.21 - -require ( - github.com/google/uuid v1.5.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 - github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 - github.com/synapsecns/sanguine/contrib/tfcore v0.0.0-00010101000000-000000000000 -) - -require ( - bitbucket.org/creachadair/stringset v0.0.8 // indirect - cloud.google.com/go v0.111.0 // indirect - cloud.google.com/go/bigtable v1.10.1 // indirect - cloud.google.com/go/compute v1.23.3 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.5 // indirect - cloud.google.com/go/longrunning v0.5.4 // indirect - dario.cat/mergo v1.0.0 // indirect - github.com/GoogleCloudPlatform/declarative-resource-client-library v0.0.0-20211027225138-ef28ca390518 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect - github.com/agext/levenshtein v1.2.3 // indirect - github.com/apparentlymart/go-cidr v1.1.0 // indirect - github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect - github.com/cenkalti/backoff v2.2.1+incompatible // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/docker/cli v20.10.17+incompatible // indirect - github.com/docker/docker-credential-helpers v0.7.0 // indirect - github.com/emirpasic/gods v1.18.1 // indirect - github.com/fatih/color v1.13.0 // indirect - github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 // indirect - github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 // indirect - github.com/gartnera/gcloud 
v0.0.15 // indirect - github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.5.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/glog v1.1.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/s2a-go v0.1.7 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-checkpoint v0.5.0 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect - github.com/hashicorp/go-hclog v1.4.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.4.8 // indirect - github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/hc-install v0.4.0 // indirect - github.com/hashicorp/hcl/v2 v2.15.0 // indirect - github.com/hashicorp/logutils v1.0.0 // indirect - github.com/hashicorp/terraform-exec v0.17.3 // indirect - github.com/hashicorp/terraform-json v0.14.0 // indirect - github.com/hashicorp/terraform-plugin-go v0.14.2 // indirect - github.com/hashicorp/terraform-plugin-log v0.7.0 // indirect - github.com/hashicorp/terraform-provider-google/v4 v4.2.0 // indirect - github.com/hashicorp/terraform-registry-address v0.1.0 // indirect - github.com/hashicorp/terraform-svchost 
v0.0.0-20200729002733-f050f53b9734 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/go-testing-interface v1.14.1 // indirect - github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/hashstructure v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect - github.com/oklog/run v1.1.0 // indirect - github.com/pjbgf/sha1cd v0.3.0 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect - github.com/skeema/knownhosts v1.2.1 // indirect - github.com/spf13/cobra v1.6.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect - github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect - github.com/vmihailenco/tagparser v0.1.2 // indirect - github.com/xanzy/ssh-agent v0.3.3 // indirect - github.com/zclconf/go-cty v1.12.1 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.23.1 // indirect - go.opentelemetry.io/otel/metric v1.23.1 // indirect - go.opentelemetry.io/otel/trace v1.23.1 // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/net 
v0.21.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.18.0 // indirect - google.golang.org/api v0.149.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 // indirect - google.golang.org/grpc v1.60.1 // indirect - google.golang.org/protobuf v1.32.0 // indirect -) - -replace ( - github.com/synapsecns/sanguine/contrib/tfcore => ../tfcore - golang.org/x/oauth2 => golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 -) diff --git a/contrib/terraform-provider-iap/go.sum b/contrib/terraform-provider-iap/go.sum deleted file mode 100644 index 8f5b4e79d4..0000000000 --- a/contrib/terraform-provider-iap/go.sum +++ /dev/null @@ -1,1568 +0,0 @@ -4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= -bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= -bitbucket.org/creachadair/stringset v0.0.8 h1:gQqe4vs8XWgMyijfyKE6K8o4TcyGGrRXe0JvHgx5H+M= -bitbucket.org/creachadair/stringset v0.0.8/go.mod h1:AgthVMyMxC/6FK1KBJ2ALdqkZObGN8hOetgpwXyMn34= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod 
h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= -cloud.google.com/go v0.61.0/go.mod h1:XukKJg4Y7QsUu0Hxg3qQKUWR4VuWivmyMK2+rUyxAqw= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.82.0/go.mod h1:vlKccHJGuFBFufnAnuB08dfEH9Y3H7dzDzRECFdC2TA= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= 
-cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM= -cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigtable v1.10.1 h1:QKcRHeAsraxIlrdCZ3LLobXKBvITqcOEnSbHG2rzL9g= -cloud.google.com/go/bigtable v1.10.1/go.mod h1:cyHeKlx6dcZCO0oSQucYdauseD8kIENGuDOJPKMCVg8= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= 
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/longrunning v0.5.4 h1:w8xEcbZodnA2BbW6sVirkkoC+1gP8wS57EUUgGS0GVg= -cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= -cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= 
-dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GoogleCloudPlatform/declarative-resource-client-library v0.0.0-20211027225138-ef28ca390518 h1:tFdFasG+VDpnn+BfVbZrfGcoH6pw6s7ODYlZlhTO3UM= -github.com/GoogleCloudPlatform/declarative-resource-client-library v0.0.0-20211027225138-ef28ca390518/go.mod h1:oEeBHikdF/NrnUy0ornVaY1OT+jGvTqm+LQS0+ZDKzU= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/OneOfOne/xxhash v1.2.2/go.mod 
h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= -github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alexkohler/prealloc v1.0.0/go.mod 
h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= -github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/andybalholm/crlf v0.0.0-20171020200849-670099aa064f/go.mod h1:k8feO4+kXDxro6ErPXBRTJ/ro2mf0SsFG8s7doP9kJE= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= -github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= -github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= -github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= -github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= -github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= -github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= -github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= -github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= -github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= -github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= 
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/ashanbrown/forbidigo v1.1.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= -github.com/ashanbrown/makezero v0.0.0-20210308000810-4155955488a0/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= -github.com/augustoroman/hexdump v0.0.0-20190827031536-6506f4163e93 h1:z6k1vb5L2wqLK4SIk3fpUiXnhNWSZ6Oyy8AaLqr0B+A= -github.com/augustoroman/hexdump v0.0.0-20190827031536-6506f4163e93/go.mod h1:ps2Vk8wMZarkeIPtUqW/FUvwVVdeRDbewMYz+EmuEgk= -github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= -github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= -github.com/bgentry/speakeasy v0.1.0/go.mod 
h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= -github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= -github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charithe/durationcheck v0.0.6/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= -github.com/chavacava/garif v0.0.0-20210405163807-87a70f3d418b/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= -github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= 
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= 
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creachadair/staticfile v0.1.2/go.mod h1:a3qySzCIXEprDGxk6tSxSI+dBBdLzqeBOMhZ+o2d3pM= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= -github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denis-tingajkin/go-header 
v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M= -github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= -github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= -github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod 
h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/esimonov/ifshort v1.0.2/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= -github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify 
v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= -github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= -github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 h1:R+19WKQClnfMXS60cP5BmMe1wjZ4u0evY2p2Ar0ZTXo= -github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= -github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 h1:EipXK6U05IQ2wtuFRn4k3h0+2lXypzItoXGVyf4r9Io= -github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w= -github.com/gartnera/gcloud v0.0.15 h1:/PkEnxPczVRS78MkMDz6wfdRR8YDDjzr0VF6ri6cGVs= -github.com/gartnera/gcloud v0.0.15/go.mod h1:i9wWa1ndPbE8AhduqRMX9nAv9X9HqN9xgqydfEdFLGo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-critic/go-critic v0.5.6/go.mod h1:cVjj0DfqewQVIlIAGexPCaGaZDAqGE29PYDDADIVNEo= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= -github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= 
-github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= -github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= -github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= -github.com/go-git/go-git/v5 v5.1.0/go.mod h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM= -github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= -github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= -github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.4/go.mod 
h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= -github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= -github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= -github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gofrs/flock v0.8.0/go.mod 
h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= 
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.40.1/go.mod h1:OyFTr1muxaWeGTcHQcL3B7C4rETnDphTKYenZDgH2/g= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= -github.com/google/btree 
v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof 
v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= -github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= -github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= 
-github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/gookit/color v1.3.8/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= -github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= -github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= 
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= -github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= -github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= -github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= -github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= -github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= 
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= -github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= -github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= -github.com/hashicorp/go-getter v1.4.0/go.mod 
h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= -github.com/hashicorp/go-getter v1.5.0/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= -github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= -github.com/hashicorp/go-plugin v1.4.0/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= -github.com/hashicorp/go-plugin v1.4.8 h1:CHGwpxYDOttQOY7HOWgETU9dyVjOXzniXDqJcYJE1zM= -github.com/hashicorp/go-plugin v1.4.8/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod 
h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= -github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hc-install v0.4.0 h1:cZkRFr1WVa0Ty6x5fTvL1TuO1flul231rWkGH92oYYk= -github.com/hashicorp/hc-install v0.4.0/go.mod h1:5d155H8EC5ewegao9A4PUTMNPZaq+TbOzkJJZ4vrXeI= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= -github.com/hashicorp/hcl/v2 
v2.6.0/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= -github.com/hashicorp/hcl/v2 v2.15.0 h1:CPDXO6+uORPjKflkWCCwoWc9uRp+zSIPcCQ+BrxV7m8= -github.com/hashicorp/hcl/v2 v2.15.0/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= -github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/terraform-exec v0.12.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= -github.com/hashicorp/terraform-exec v0.13.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= -github.com/hashicorp/terraform-exec v0.17.3 h1:MX14Kvnka/oWGmIkyuyvL6POx25ZmKrjlaclkx3eErU= -github.com/hashicorp/terraform-exec v0.17.3/go.mod h1:+NELG0EqQekJzhvikkeQsOAZpsw0cv/03rbeQJqscAI= -github.com/hashicorp/terraform-json v0.8.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE= -github.com/hashicorp/terraform-json v0.12.0/go.mod h1:pmbq9o4EuL43db5+0ogX10Yofv1nozM+wskr/bGFJpI= -github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= -github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= -github.com/hashicorp/terraform-plugin-go v0.1.0/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= -github.com/hashicorp/terraform-plugin-go v0.2.1/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= -github.com/hashicorp/terraform-plugin-go v0.14.2 h1:rhsVEOGCnY04msNymSvbUsXfRLKh9znXZmHlf5e8mhE= -github.com/hashicorp/terraform-plugin-go v0.14.2/go.mod 
h1:Q12UjumPNGiFsZffxOsA40Tlz1WVXt2Evh865Zj0+UA= -github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs= -github.com/hashicorp/terraform-plugin-log v0.7.0/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0/go.mod h1:JBItawj+j8Ssla5Ib6BC/W9VQkOucBfnX7VRtyx1vw8= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.5.0/go.mod h1:z+cMZ0iswzZOahBJ3XmNWgWkVnAd2bl8g+FhyyuPDH4= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 h1:zHcMbxY0+rFO9gY99elV/XC/UnQVg7FhRCbj1i5b7vM= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1/go.mod h1:+tNlb0wkfdsDJ7JEiERLz4HzM19HyiuIoGzTsM7rPpw= -github.com/hashicorp/terraform-provider-google/v4 v4.2.0 h1:w0r/YEy7ZM5mTMAarRUpS7eyYrXTN5mazwHtLnEGAk8= -github.com/hashicorp/terraform-provider-google/v4 v4.2.0/go.mod h1:eUbSXbhfBMNiOuofFo688iPhk42O782vze8drAN2sPA= -github.com/hashicorp/terraform-registry-address v0.1.0 h1:W6JkV9wbum+m516rCl5/NjKxCyTVaaUBbzYcMzBDO3U= -github.com/hashicorp/terraform-registry-address v0.1.0/go.mod h1:EnyO2jYO6j29DTHbJcm00E5nQTFeTtyZH3H5ycydQ5A= -github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= -github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 
-github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= -github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jgautheron/goconst v1.4.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= -github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= -github.com/jhump/protoreflect v1.6.1/go.mod 
h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= -github.com/jhump/protoreflect v1.14.1 h1:N88q7JkxTHWFEqReuTsYH1dPIwXxA0ITNQp7avLY10s= -github.com/jhump/protoreflect v1.14.1/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= -github.com/jingyugao/rowserrcheck v0.0.0-20210315055705-d907ca737bb1/go.mod h1:TOQpc2SLx6huPfoFGK3UOnEG+u02D3C1GeosjupAKCA= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/ratelimit v1.0.1/go.mod 
h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= -github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= -github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f h1:dKccXx7xA56UNqOcFIbuqFjAWPVtP688j5QMgmo6OHU= -github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f/go.mod h1:4rEELDSfUAlBSyUjPG0JnaNGjf13JySHFeRdD/3dLP0= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= -github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= -github.com/ldez/gomoddirectives v0.2.1/go.mod h1:sGicqkRgBOg//JfpXwkB9Hj0X5RyJ7mlACM5B9f6Me4= -github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= -github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod 
h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= -github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= -github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= 
-github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.0.6/go.mod h1:Lj5gIVxjBlH8REa3icEOkdfchwYc291nShzZ4QYWyMo= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/pkcs11 v1.0.2/go.mod 
h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.4/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= 
-github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= -github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= -github.com/mohae/deepcopy 
v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= -github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= -github.com/mozilla/tls-observatory v0.0.0-20210209181001-cf43108d6880/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= -github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= -github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= -github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= -github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= -github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce/go.mod h1:uFMI8w+ref4v2r9jz+c9i1IfIttS/OkmLfrk1jne5hs= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/run v1.1.0 
h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod 
h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= -github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= -github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= -github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v0.0.0-20210418123303-74da32850375/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_model 
v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= -github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.4/go.mod 
h1:57FZgMnoo6jqxkYKmVj5Fc8vOt0rVzoE/UNAmFFIPqA= -github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.2/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20210203162857-b223e0831f88/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= -github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.2.0/go.mod h1:rNqbC4TOIdUDcVMSIpNNAzTbzXAZa6W5lnUepvuMMgQ= -github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= -github.com/ryanuber/columnize 
v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sanposhiho/wastedassign v1.0.0/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= -github.com/securego/gosec/v2 v2.7.0/go.mod h1:xNbGArrGUspJLuz3LS5XCY1EBW/0vABAl/LWfSklmiM= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.21.4/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= 
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= -github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= -github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= 
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify 
v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= -github.com/tetafro/godot v1.4.6/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= -github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek= -github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.1.0/go.mod h1:crK5eI4RGSUrb9duDTQ5GqcukbKZvi85vX6nbhsBAeI= -github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/tommy-muehle/go-mnd/v2 v2.3.2/go.mod 
h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= -github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= -github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= -github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= -github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= 
-github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= -github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= -github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= -github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= -github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty 
v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.5.1/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ= -github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= -github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= -go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.23.1 
h1:Za4UzOqJYS+MUczKI320AtqZHZb7EqxO00jAHE0jmQY= -go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA= -go.opentelemetry.io/otel/metric v1.23.1 h1:PQJmqJ9u2QaJLBOELl1cxIdPcpbwzbkjfEyelTl2rlo= -go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/trace v1.23.1 h1:4LrmmEd8AU2rFvU1zegmvqW7+kWarxtNOPyeL6HmYY8= -go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 
-golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod 
h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod 
h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net 
v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210601080250-7ecdf8ef093b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 
h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time 
v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools 
v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools 
v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools 
v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools 
v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2-0.20210512205948-8287d5da45e4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod 
h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod 
h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= -google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto 
v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
-google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210601144548-a796c710e9b6/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto 
v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= 
-google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917/go.mod h1:pZqR+glSb11aJ+JQcczCvgf47+duRuzNSKqE8YAQnV0= 
-google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 h1:OPXtXn7fNMaXwO3JvOmF1QyTc00jsSFFz1vXXBOdCDo= -google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:B5xPO//w8qmBDjGReYLpR6UJPnkldGkCSMoH/2vxJeg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 h1:gphdwh0npgs8elJ4T6J+DQJHPVF7RsuJHCfwztUb4J4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= 
-google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 
v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= -gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.4/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= -rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod 
h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/contrib/terraform-provider-iap/main.go b/contrib/terraform-provider-iap/main.go deleted file mode 100644 index 3c8fa87d13..0000000000 --- a/contrib/terraform-provider-iap/main.go +++ /dev/null @@ -1,17 +0,0 @@ -// Terraform provider iap is a provider that allows you to create and manage long lived iap tunnels in terraform. -package main - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" - "github.com/synapsecns/sanguine/contrib/terraform-provider-iap/provider" -) - -// Generate the Terraform provider documentation using `tfplugindocs`: -// this is temporarily disabled until tfexec compatibility issue is fixed (this was removed in 0.16.0) -// we can do this manually for now -// go:generate go run github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs - -func main() { - plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: provider.Provider}) -} diff --git a/contrib/terraform-provider-iap/provider/iap-tunnel.go b/contrib/terraform-provider-iap/provider/iap-tunnel.go deleted file mode 100644 index 5e8cc897c0..0000000000 --- a/contrib/terraform-provider-iap/provider/iap-tunnel.go +++ /dev/null @@ -1,183 +0,0 @@ -package provider - -import ( - "context" - "fmt" - "github.com/google/uuid" - provider_diag "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/phayes/freeport" - "github.com/synapsecns/sanguine/contrib/tfcore/generated/google" - "github.com/synapsecns/sanguine/contrib/tfcore/generated/tunnel" - "log" - "net/http" - "net/url" - "time" -) - -// dataSourceProxyURL generates a proxy over an iap bastion host. 
-func dataSourceProxyURL() *schema.Resource { - return &schema.Resource{ - Read: dataSourceProxy, - CreateContext: func(ctx context.Context, data *schema.ResourceData, i interface{}) provider_diag.Diagnostics { - err := dataSourceProxy(data, i) - if err != nil { - return provider_diag.FromErr(err) - } - return provider_diag.Diagnostics{} - }, - Delete: dataSourceProxyDelete, - - Schema: map[string]*schema.Schema{ - // project of the bastion host - "project": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - // zone of the bastion host - "zone": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - // zone of the bastion host - "instance": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - // network interface to use - "interface": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - // port of the host to connect to - "remote_port": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validatePort, - ForceNew: true, - }, - // output proxy url - "proxy_url": { - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceProxyDelete(d *schema.ResourceData, meta interface{}) error { - // Delete the proxy URL. - // This could involve making a call to an API to delete the proxy, or just - // cleaning up any resources created on your end. - // ... 
- - // Remove all fields in the dataSourceProxyURL resource - d.SetId("") - for k := range dataSourceProxyURL().Schema { - _ = d.Set(k, nil) - } - return nil -} - -// nolint: cyclop -func dataSourceProxy(d *schema.ResourceData, meta interface{}) error { - config, ok := meta.(*google.Config) - if !ok { - return fmt.Errorf("could not cast config of type %T to %T", meta, config) - } - - project, ok := d.Get("project").(string) - if !ok { - return fmt.Errorf("could not cast project of type %T to %T", d.Get("project"), project) - } - zone, ok := d.Get("zone").(string) - if !ok { - return fmt.Errorf("could not cast zone of type %T to %T", d.Get("zone"), zone) - } - instance, ok := d.Get("instance").(string) - if !ok { - return fmt.Errorf("could not cast instance of type %T to %T", d.Get("instance"), instance) - } - iface, ok := d.Get("interface").(string) - if !ok { - return fmt.Errorf("could not cast interface of type %T to %T", d.Get("interface"), iface) - } - remotePort, ok := d.Get("remote_port").(int) - if !ok { - return fmt.Errorf("could not cast remote_port of type %T to %T", d.Get("remote_port"), remotePort) - } - - localPort, err := freeport.GetFreePort() - if err != nil { - return fmt.Errorf("could not get a free port: %w", err) - } - - tm := tunnel.TunnelManager{ - Project: project, - RemotePort: remotePort, - LocalPort: localPort, - Zone: zone, - Instance: instance, - Interface: iface, - } - - tm.SetTokenSource(config.GetTokenSource()) - - errChan := make(chan error) - - log.Printf("[INFO] creating tunnel") - go func() { - startTime := time.Now() - err := tm.StartProxy(context.Background()) - if err != nil { - fmt.Println(err) - log.Printf("[DEBUG] Proxy Error %v", err) - errChan <- err - } - - log.Printf("[DEBUG] Proxy closed after %s", time.Since(startTime)) - }() - - select { - // wait 5 seconds for an error, otherwise just log since this will run in the background for the course of the apply - case <-time.NewTimer(time.Second * 5).C: - break - case err := 
<-errChan: - log.Printf("[ERROR] Received error while booting provider: %v", err) - return err - } - - log.Printf("[DEBUG] Finished creating proxy on port %d", localPort) - - id := uuid.New().String() - log.Printf("[DEBUG] setting proxy id to %s", id) - d.SetId(id) - - proxyURL := fmt.Sprintf("http://localhost:%d", localPort) - log.Printf("[DEBUG] setting proxy url to %s", proxyURL) - err = d.Set("proxy_url", proxyURL) - if err != nil { - return fmt.Errorf("could not set proxy_url: %w", err) - } - - // test the tunnel - parsedURL, err := url.Parse(proxyURL) - if err != nil { - log.Printf("[ERROR] could not parse proxy url %s: %v", proxyURL, err) - return fmt.Errorf("could not parse url: %w", err) - } - testClient := &http.Client{Transport: &http.Transport{Proxy: http.ProxyURL(parsedURL)}} - //nolint: noctx - resp, err := testClient.Get("https://www.google.com/") - if err != nil { - log.Printf("[ERROR] could not connect through proxy %s: %v", proxyURL, err) - } - - _ = resp.Body.Close() - - return nil -} diff --git a/contrib/terraform-provider-iap/provider/keep_alive.go b/contrib/terraform-provider-iap/provider/keep_alive.go deleted file mode 100644 index bb5ce5fac8..0000000000 --- a/contrib/terraform-provider-iap/provider/keep_alive.go +++ /dev/null @@ -1,96 +0,0 @@ -package provider - -import ( - "fmt" - "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/synapsecns/sanguine/contrib/tfcore/generated/google" - "log" - "net/http" - "net/url" - "time" -) - -// keepAlive is a resource that keeps a tunnel alive -// by delaying the read of the datasource until the timeout is finished. 
-func keepAlive() *schema.Resource { - return &schema.Resource{ - Read: dataSourceKeepAlive, - Schema: map[string]*schema.Schema{ - // timeout in seconds - "timeout": { - Type: schema.TypeInt, - Required: true, - }, - // port of the host to connect to - "proxy_url": { - Type: schema.TypeString, - Required: true, - }, - // wether or not the keep alive has timed out - "timed_out": { - Type: schema.TypeBool, - Computed: true, - }, - }, - } -} - -// nolint: cyclop -func dataSourceKeepAlive(d *schema.ResourceData, meta interface{}) error { - config, ok := meta.(*google.Config) - if !ok { - return fmt.Errorf("could not cast config of type %T to %T", meta, config) - } - - timeout, ok := d.Get("timeout").(int) - if !ok { - return fmt.Errorf("could not cast timeout of type %T to %T", d.Get("timeout"), timeout) - } - - proxyURL, ok := d.Get("proxy_url").(string) - if !ok { - return fmt.Errorf("could not cast remote_port of type %T to %T", d.Get("proxy_url"), proxyURL) - } - - // test the tunnel - parsedURL, err := url.Parse(proxyURL) - if err != nil { - log.Printf("[ERROR] could not parse proxy url %s: %v", proxyURL, err) - return fmt.Errorf("could not parse proxy url %s: %w", proxyURL, err) - } - - id := uuid.New().String() - log.Printf("[DEBUG] setting proxy id to %s", id) - d.SetId(id) - - log.Printf("[INFO] waiting for %d seconds", timeout) - - timer := time.After(time.Duration(timeout) * time.Second) - - for { - select { - case <-timer: - log.Printf("[INFO] finished waiting %d seconds", timeout) - err := d.Set("timed_out", true) - if err != nil { - return fmt.Errorf("could not set timed_out to true: %w", err) - } - return nil - case <-time.After(time.Second * 5): - log.Printf("[INFO] testing proxy %s", proxyURL) - testClient := &http.Client{Transport: &http.Transport{Proxy: http.ProxyURL(parsedURL)}} - //nolint: noctx - resp, err := testClient.Get("https://www.google.com/") - if err != nil { - log.Printf("[ERROR] could not connect through proxy %s: %v", proxyURL, err) - 
} - _ = resp.Body.Close() - log.Printf("[INFO] successfully connected through proxy %s", proxyURL) - continue - case <-config.GetContext().Done(): - log.Printf("[ERROR] contet canceled before timeout (%d seconds)", timeout) - return fmt.Errorf("context was canceled") - } - } -} diff --git a/contrib/terraform-provider-iap/provider/provider.go b/contrib/terraform-provider-iap/provider/provider.go deleted file mode 100644 index b324835ab9..0000000000 --- a/contrib/terraform-provider-iap/provider/provider.go +++ /dev/null @@ -1,23 +0,0 @@ -// Package provider gets the provider for the iap tunnel. -package provider - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/synapsecns/sanguine/contrib/tfcore/generated/google" -) - -// Provider gets the provider for the iap tunnel. -func Provider() *schema.Provider { - underlyingProvider := google.Provider() - return &schema.Provider{ - Schema: underlyingProvider.Schema, - ProviderMetaSchema: underlyingProvider.ProviderMetaSchema, - ConfigureContextFunc: underlyingProvider.ConfigureContextFunc, - ResourcesMap: map[string]*schema.Resource{ - "iap_tunnel_proxy": dataSourceProxyURL(), - }, - DataSourcesMap: map[string]*schema.Resource{ - "iap_tunnel_keep_alive": keepAlive(), - }, - } -} diff --git a/contrib/terraform-provider-iap/provider/provider_test.go b/contrib/terraform-provider-iap/provider/provider_test.go deleted file mode 100644 index b6b412b99d..0000000000 --- a/contrib/terraform-provider-iap/provider/provider_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package provider_test - -import ( - "github.com/synapsecns/sanguine/contrib/terraform-provider-iap/provider" - "testing" -) - -// Make sure the provider loads. 
-func TestProviderLoad(t *testing.T) { - prov := provider.Provider() - if prov == nil { - t.Fatal("Provider should not be nil") - } -} diff --git a/contrib/terraform-provider-iap/provider/validate.go b/contrib/terraform-provider-iap/provider/validate.go deleted file mode 100644 index a13e77f22d..0000000000 --- a/contrib/terraform-provider-iap/provider/validate.go +++ /dev/null @@ -1,18 +0,0 @@ -package provider - -import ( - "fmt" -) - -// TODO: test. -func validatePort(v interface{}, k string) (ws []string, errors []error) { - value, ok := v.(int) - if !ok { - errors = append(errors, fmt.Errorf("expected type of %s to be int", k)) - return - } - if value < 1 || value > 65535 { - errors = append(errors, fmt.Errorf("%q must be between 1 and 65535, got: %d", k, value)) - } - return -} diff --git a/contrib/terraform-provider-iap/scripts/add-tfmac.sh b/contrib/terraform-provider-iap/scripts/add-tfmac.sh deleted file mode 100755 index 4067060a39..0000000000 --- a/contrib/terraform-provider-iap/scripts/add-tfmac.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/zsh - -# if not already present in zshrc -if [ "$(grep -c -w "alias tfmac='TFENV_ARCH=arm64 TFENV_TERRAFORM_VERSION=latest:^1.3 terraform'" ~/.zshrc)" -le 0 ]; then - echo "adding tfmac command to zshrc. 
You might have to source ~/.zshrc or open a new tab" - echo "alias tfmac='TFENV_ARCH=arm64 TFENV_TERRAFORM_VERSION=latest:^1.3 terraform'" >> ~/.zshrc -fi diff --git a/contrib/terraform-provider-iap/scripts/build-tf.sh b/contrib/terraform-provider-iap/scripts/build-tf.sh deleted file mode 100755 index 7cc26c7061..0000000000 --- a/contrib/terraform-provider-iap/scripts/build-tf.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# shellcheck disable=SC2086 - -# This gets the arch prefix we use when building a terraform plugin -TF_PREFIX=$( go version | awk '{print $NF}' | sed 's/\//_/') - -# define the plugin directory -PLUGIN_DIR=$(realpath -m ~/.terraform.d/plugins/example-iap.com/provider/iap/1.0.0/$TF_PREFIX) - -# fixes async problems on arm64 https://github.com/hashicorp/terraform-provider-aws/issues/20274#issuecomment-996795241 -# we don't need this for production builds, just darwinarm64. -GODEBUG=asyncpreemptoff=1 go build . - -# make the plugin directory if it doesn't exist -rm -rf $PLUGIN_DIR -mkdir -p $PLUGIN_DIR -cp terraform-provider-iap $PLUGIN_DIR diff --git a/contrib/terraform-provider-iap/terraform-registry-manifest.json b/contrib/terraform-provider-iap/terraform-registry-manifest.json deleted file mode 100644 index 295001a07f..0000000000 --- a/contrib/terraform-provider-iap/terraform-registry-manifest.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "version": 1, - "metadata": { - "protocol_versions": ["6.0"] - } -} diff --git a/contrib/terraform-provider-kubeproxy/.gitignore b/contrib/terraform-provider-kubeproxy/.gitignore deleted file mode 100644 index 15253d13be..0000000000 --- a/contrib/terraform-provider-kubeproxy/.gitignore +++ /dev/null @@ -1 +0,0 @@ -terraform-provider-kubeproxy diff --git a/contrib/terraform-provider-kubeproxy/.goreleaser.yml b/contrib/terraform-provider-kubeproxy/.goreleaser.yml deleted file mode 100644 index 4e60b5a29f..0000000000 --- a/contrib/terraform-provider-kubeproxy/.goreleaser.yml +++ /dev/null @@ -1,74 +0,0 @@ 
-project_name: terraform-provider-kubeproxy - -monorepo: - tag_prefix: contrib/terraform-provider-kubeproxy/ - dir: contrib/terraform-provider-kubeproxy/ - -builds: - - env: - # goreleaser does not work with CGO, it could also complicate - # usage by users in CI/CD systems like Terraform Cloud where - # they are unable to install libraries. - - CGO_ENABLED=0 - mod_timestamp: '{{ .CommitTimestamp }}' - flags: - - -trimpath - ldflags: - - '-s -w -X main.version={{.Version}} -X main.commit={{.Commit}}' - goos: - - freebsd - - windows - - linux - - darwin - goarch: - - amd64 - - '386' - - arm - - arm64 - ignore: - - goos: darwin - goarch: '386' - binary: '{{ .ProjectName }}_v{{ .Version }}' -archives: - - format: zip - name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}' -checksum: - extra_files: - - glob: 'terraform-registry-manifest.json' - name_template: '{{ .ProjectName }}_{{ .Version }}_manifest.json' - name_template: '{{ .ProjectName }}_{{ .Version }}_SHA256SUMS' - algorithm: sha256 -signs: - - artifacts: checksum - args: - # if you are using this in a GitHub action or some other automated pipeline, you - # need to pass the batch flag to indicate its not interactive. - - '--batch' - - '--local-user' - - '{{ .Env.GPG_FINGERPRINT }}' # set this environment variable for your signing key - - '--output' - - '${signature}' - - '--detach-sign' - - '${artifact}' -release: - extra_files: - - glob: 'terraform-registry-manifest.json' - name_template: '{{ .ProjectName }}_{{ .Version }}_manifest.json' - # If you want to manually examine the release before its live, uncomment this line: - # draft: true - - -# track sizes -report_sizes: true - -# modified timestamps -metadata: - # Set the modified timestamp on the metadata files. - # - # Templates: allowed. 
- mod_timestamp: '{{ .CommitTimestamp }}' - -# produce software bill of lading -sboms: - - artifacts: archive - diff --git a/contrib/terraform-provider-kubeproxy/Makefile b/contrib/terraform-provider-kubeproxy/Makefile deleted file mode 100644 index 0340e715e3..0000000000 --- a/contrib/terraform-provider-kubeproxy/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -include ../../make/go.Makefile - -install-plugin-local: # will install the terraform provider as a local plugin for testing. - ./scripts/build-tf.sh - -run-example: install-plugin-local cleanup-examples # runs an example - echo "running terraform init, if this fails, you might have to specify amd64 as the arch before using terraform, please see: https://github.com/tfutils/tfenv/issues/337" - echo "on osx arm64, you can run run-example-m1 as a workaround." - cd examples && terraform init - -cleanup-examples: - rm -rf examples/.terraform rm -rf examples/.terraform.lock.hcl - -run-example-m1: install-plugin-local cleanup-examples # runs an example on osx arm64 - ./scripts/add-tfmac.sh - source ~/.zshrc - echo "please run: cd examples && tfmac init" - - -tfenv-install: - @#Brew - MacOS - @if [ "$(shell which tflint)" = "" ] && [ "$(shell which brew)" != "" ]; then brew install rflint; fi; - # default - @if [ "$(shell which tflint)" = "" ]; then curl -s https://raw.githubusercontent.com/terraform-linters/tflint/master/install_linux.sh | bash; fi; - - -lint-tf: tfenv-install ## Run golangci-lint and go fmt ./... 
- cd examples && tflint --init - cd examples && tflint diff --git a/contrib/terraform-provider-kubeproxy/README.md b/contrib/terraform-provider-kubeproxy/README.md deleted file mode 100644 index 6488169243..0000000000 --- a/contrib/terraform-provider-kubeproxy/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Terraform Kubernetes IAP Proxy Provider - -[![Go Reference](https://pkg.go.dev/badge/github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy.svg)](https://pkg.go.dev/github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy) -[![Go Report Card](https://goreportcard.com/badge/github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy)](https://goreportcard.com/report/github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy) - -This provider is a wrapper for the Kubernetes provider that allows for the use of an IAP (Identity-Aware Proxy) when interacting with GCP Kubernetes clusters. This is necessary because Terraform resources are short-lived, so spinning up the IAP proxy separately and having it provide access to the resources is not an option. - -## Why use an IAP proxy? -IAP (Identity-Aware Proxy) is a feature of GCP that allows you to authenticate and authorize access to resources in a more fine-grained manner than just using a service account. By using IAP, you can ensure that only authorized users and applications can access your resources. - -## How does the provider work? -The provider wraps the Kubernetes provider and adds some new fields to the schema, such as the project, zone, service_account, instance, and remote_port fields, which are necessary for configuring the IAP proxy. - -When the provider is used to create or update resources, it first starts the IAP proxy on the specified instance, using the specified service account and project. It then sets the KUBECONFIG environment variable to use the proxy to access the Kubernetes cluster. 
- -When the resources are destroyed, the provider stops the IAP proxy on the specified instance. - -## How to use the provider -To use the provider, you will need to specify the project, zone, service_account, instance, and remote_port fields in your Terraform configuration. You will also need to provide credentials for the service account that will be used to start the IAP proxy. - -After configuring the provider, you can use it in your Terraform resources just like you would use the Kubernetes provider. The provider will automatically handle starting and stopping the IAP proxy as needed. - -## Conclusion -The Terraform Kubernetes IAP Proxy Provider allows you to use an IAP proxy when interacting with GCP Kubernetes clusters in Terraform, allowing for more fine-grained authentication and authorization of access to your resources. It is easy to use and seamlessly integrates with the Kubernetes provider, making it a great choice for securing your GCP Kubernetes clusters in Terraform. - -It's good to note that this is a conceptual explanation and the implementation may differ and depend on the specific details of the kubernetes provider and how it interacts with the gcp iap. - -## Incompatibilities - -This provider does not support kubernetes_manifest resources. The kubectl provider should be used for this diff --git a/contrib/terraform-provider-kubeproxy/examples/.terraform.lock.hcl b/contrib/terraform-provider-kubeproxy/examples/.terraform.lock.hcl deleted file mode 100644 index 0934bb6af3..0000000000 --- a/contrib/terraform-provider-kubeproxy/examples/.terraform.lock.hcl +++ /dev/null @@ -1,29 +0,0 @@ -# This file is maintained automatically by "terraform init". -# Manual edits may be lost in future updates. 
- -provider "example-kube.com/provider/kubeproxy" { - version = "1.0.0" - constraints = "~> 1.0.0" - hashes = [ - "h1:Wcf94AqnYUSfFMJS7RWUaDgfmbHbh29bgLcZgFfvtL8=", - ] -} - -provider "registry.terraform.io/hashicorp/google" { - version = "4.50.0" - hashes = [ - "h1:IEgdWy6HHxu7Dnfzm0PeJVTTMJXSdmhYp+snARJWfkk=", - "zh:051b7d64b9808296606475f16cb6848577ba559c704c04af7d24789a7ba96e36", - "zh:2c6af345add3fa4c92521ce49f52c72c1724afad2f47908bcd878136b03436b9", - "zh:2daad7d49bcbf2fe036790249c8504e67b03dded3ea9f00fc0822ae1892d8292", - "zh:3b5d0996f35d251ec5723b300337af17bf229838f6f6b77eb09516676236956f", - "zh:462eeb217fcc0d15453369be1bec8ad9fa59957e7398ef8e82fcf931058805f7", - "zh:5141ef282f69854622221d3bf7580d73d389676fb7362f73c8132f1b82a4f561", - "zh:9a83ab13cd57f50a9ac93fed4b02a3fdb525e69a971b0519583ffefbcdf60ba7", - "zh:b1cd0e10dcef0b7e7f8f4c51296bbe08767671b145d55e38f011f6c52c78f144", - "zh:bc135972d274d8af2c3434ef09ca4028f4ead6cfddb20bfb13809034b0b70730", - "zh:db3b4ce4e864b8246edc9e7c5f3581e2c2f6269740307c254b0e5c37fd6665ee", - "zh:f384bf5749bdabf9419ada9d3c73a5937ee989753bb40f7060dc586973faa3d3", - "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", - ] -} diff --git a/contrib/terraform-provider-kubeproxy/examples/google.tf b/contrib/terraform-provider-kubeproxy/examples/google.tf deleted file mode 100644 index bc5727f544..0000000000 --- a/contrib/terraform-provider-kubeproxy/examples/google.tf +++ /dev/null @@ -1,13 +0,0 @@ -# google provider used for kube access -provider "google" { -} - -# token -data "google_service_account_access_token" "kube_sa" { - target_service_account = var.service_account - lifetime = "1000s" - scopes = [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/userinfo.email" - ] -} diff --git a/contrib/terraform-provider-kubeproxy/examples/main.tf b/contrib/terraform-provider-kubeproxy/examples/main.tf deleted file mode 100644 index ab66b69b5e..0000000000 --- 
a/contrib/terraform-provider-kubeproxy/examples/main.tf +++ /dev/null @@ -1,50 +0,0 @@ -terraform { - required_providers { - kubeproxy = { - version = "~> 1.0.0" - source = "example-kube.com/provider/kubeproxy" - } - } -} - - -provider "kubeproxy" { - instance = var.instance - zone = var.zone - interface = var.interface - project = var.project - remote_port = var.remote_port - - // TODO: this needs to be changed to work cross cluster - host = "" - token = data.google_service_account_access_token.kube_sa.access_token - config_path = var.config_path - config_context = var.config_context -} - -resource "kubeproxy_secret" "example" { - metadata { - name = "basic-auth" - } - - data = { - username = "admin" - password = "P4ssw0rd" - } - - type = "kubernetes.io/basic-auth" -} - -data "kubeproxy_resource" "example" { - api_version = "v1" - kind = "ConfigMap" - - metadata { - name = "example" - namespace = "default" - } -} - -output "test" { - value = data.kubeproxy_resource.example.object.data.TEST -} diff --git a/contrib/terraform-provider-kubeproxy/examples/variables.tf b/contrib/terraform-provider-kubeproxy/examples/variables.tf deleted file mode 100644 index f807da14b8..0000000000 --- a/contrib/terraform-provider-kubeproxy/examples/variables.tf +++ /dev/null @@ -1,47 +0,0 @@ -variable "service_account" { - type = string - description = "The service account to impersonate" -} - -variable "config_path" { - type = string - description = "The path to the kube config file" - default = "~/.kube/config" -} - -variable "config_context" { - type = string - description = "The context to use in the kube config file" -} - -variable "zone" { - type = string - description = "The zone of the bastion proxy" -} - - -variable "instance" { - type = string - description = "The instance to use for the bastion proxy" - default = "rpc-bastion" -} - -variable "interface" { - type = string - description = "The interface to use for the bastion proxy" - default = "nic0" -} - -variable "project" 
{ - type = string - description = "The project of the bastion proxy" -} - -variable "remote_port" { - type = string - description = "The remote_port of the bastion proxy" - # tiny proxy default - default = 8888 -} - - diff --git a/contrib/terraform-provider-kubeproxy/generated/configschema/configschema_gen.go b/contrib/terraform-provider-kubeproxy/generated/configschema/configschema_gen.go deleted file mode 100644 index b0278e63ba..0000000000 --- a/contrib/terraform-provider-kubeproxy/generated/configschema/configschema_gen.go +++ /dev/null @@ -1,411 +0,0 @@ -// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. -// nolint -package configschema - -import ( - coerce_value_fmt "fmt" - nestingmode_string_strconv "strconv" - - coerce_value_cty "github.com/hashicorp/go-cty/cty" - empty_value_cty "github.com/hashicorp/go-cty/cty" - implied_type_cty "github.com/hashicorp/go-cty/cty" - schema_cty "github.com/hashicorp/go-cty/cty" - coerce_value_convert "github.com/hashicorp/go-cty/cty/convert" -) - -func (b *Block) CoerceValue(in coerce_value_cty.Value) (coerce_value_cty.Value, error) { - var path coerce_value_cty.Path - return b.coerceValue(in, path) -} - -func (b *Block) coerceValue(in coerce_value_cty.Value, path coerce_value_cty.Path) (coerce_value_cty.Value, error) { - switch { - case in.IsNull(): - return coerce_value_cty.NullVal(b.ImpliedType()), nil - case !in.IsKnown(): - return coerce_value_cty.UnknownVal(b.ImpliedType()), nil - } - - ty := in.Type() - if !ty.IsObjectType() { - return coerce_value_cty.UnknownVal(b.ImpliedType()), path.NewErrorf("an object is required") - } - - for name := range ty.AttributeTypes() { - if _, defined := b.Attributes[name]; defined { - continue - } - if _, defined := b.BlockTypes[name]; defined { - continue - } - return coerce_value_cty.UnknownVal(b.ImpliedType()), path.NewErrorf("unexpected attribute %q", name) - } - - attrs := make(map[string]coerce_value_cty.Value) - - for name, 
attrS := range b.Attributes { - var val coerce_value_cty.Value - switch { - case ty.HasAttribute(name): - val = in.GetAttr(name) - case attrS.Computed || attrS.Optional: - val = coerce_value_cty.NullVal(attrS.Type) - default: - return coerce_value_cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", name) - } - - val, err := attrS.coerceValue(val, append(path, coerce_value_cty.GetAttrStep{Name: name})) - if err != nil { - return coerce_value_cty.UnknownVal(b.ImpliedType()), err - } - - attrs[name] = val - } - for typeName, blockS := range b.BlockTypes { - switch blockS.Nesting { - - case NestingSingle, NestingGroup: - switch { - case ty.HasAttribute(typeName): - var err error - val := in.GetAttr(typeName) - attrs[typeName], err = blockS.coerceValue(val, append(path, coerce_value_cty.GetAttrStep{Name: typeName})) - if err != nil { - return coerce_value_cty.UnknownVal(b.ImpliedType()), err - } - default: - attrs[typeName] = blockS.EmptyValue() - } - - case NestingList: - switch { - case ty.HasAttribute(typeName): - coll := in.GetAttr(typeName) - - switch { - case coll.IsNull(): - attrs[typeName] = coerce_value_cty.NullVal(coerce_value_cty.List(blockS.ImpliedType())) - continue - case !coll.IsKnown(): - attrs[typeName] = coerce_value_cty.UnknownVal(coerce_value_cty.List(blockS.ImpliedType())) - continue - } - - if !coll.CanIterateElements() { - return coerce_value_cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a list") - } - l := coll.LengthInt() - - if l == 0 { - attrs[typeName] = coerce_value_cty.ListValEmpty(blockS.ImpliedType()) - continue - } - elems := make([]coerce_value_cty.Value, 0, l) - { - path = append(path, coerce_value_cty.GetAttrStep{Name: typeName}) - for it := coll.ElementIterator(); it.Next(); { - var err error - idx, val := it.Element() - val, err = blockS.coerceValue(val, append(path, coerce_value_cty.IndexStep{Key: idx})) - if err != nil { - return coerce_value_cty.UnknownVal(b.ImpliedType()), err - } - elems = 
append(elems, val) - } - } - attrs[typeName] = coerce_value_cty.ListVal(elems) - default: - attrs[typeName] = coerce_value_cty.ListValEmpty(blockS.ImpliedType()) - } - - case NestingSet: - switch { - case ty.HasAttribute(typeName): - coll := in.GetAttr(typeName) - - switch { - case coll.IsNull(): - attrs[typeName] = coerce_value_cty.NullVal(coerce_value_cty.Set(blockS.ImpliedType())) - continue - case !coll.IsKnown(): - attrs[typeName] = coerce_value_cty.UnknownVal(coerce_value_cty.Set(blockS.ImpliedType())) - continue - } - - if !coll.CanIterateElements() { - return coerce_value_cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a set") - } - l := coll.LengthInt() - - if l == 0 { - attrs[typeName] = coerce_value_cty.SetValEmpty(blockS.ImpliedType()) - continue - } - elems := make([]coerce_value_cty.Value, 0, l) - { - path = append(path, coerce_value_cty.GetAttrStep{Name: typeName}) - for it := coll.ElementIterator(); it.Next(); { - var err error - idx, val := it.Element() - val, err = blockS.coerceValue(val, append(path, coerce_value_cty.IndexStep{Key: idx})) - if err != nil { - return coerce_value_cty.UnknownVal(b.ImpliedType()), err - } - elems = append(elems, val) - } - } - attrs[typeName] = coerce_value_cty.SetVal(elems) - default: - attrs[typeName] = coerce_value_cty.SetValEmpty(blockS.ImpliedType()) - } - - case NestingMap: - switch { - case ty.HasAttribute(typeName): - coll := in.GetAttr(typeName) - - switch { - case coll.IsNull(): - attrs[typeName] = coerce_value_cty.NullVal(coerce_value_cty.Map(blockS.ImpliedType())) - continue - case !coll.IsKnown(): - attrs[typeName] = coerce_value_cty.UnknownVal(coerce_value_cty.Map(blockS.ImpliedType())) - continue - } - - if !coll.CanIterateElements() { - return coerce_value_cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") - } - l := coll.LengthInt() - if l == 0 { - attrs[typeName] = coerce_value_cty.MapValEmpty(blockS.ImpliedType()) - continue - } - elems := 
make(map[string]coerce_value_cty.Value) - { - path = append(path, coerce_value_cty.GetAttrStep{Name: typeName}) - for it := coll.ElementIterator(); it.Next(); { - var err error - key, val := it.Element() - if key.Type() != coerce_value_cty.String || key.IsNull() || !key.IsKnown() { - return coerce_value_cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") - } - val, err = blockS.coerceValue(val, append(path, coerce_value_cty.IndexStep{Key: key})) - if err != nil { - return coerce_value_cty.UnknownVal(b.ImpliedType()), err - } - elems[key.AsString()] = val - } - } - - useObject := false - switch { - case coll.Type().IsObjectType(): - useObject = true - default: - - ety := coll.Type().ElementType() - for _, v := range elems { - if !v.Type().Equals(ety) { - useObject = true - break - } - } - } - - if useObject { - attrs[typeName] = coerce_value_cty.ObjectVal(elems) - } else { - attrs[typeName] = coerce_value_cty.MapVal(elems) - } - default: - attrs[typeName] = coerce_value_cty.MapValEmpty(blockS.ImpliedType()) - } - - default: - - panic(coerce_value_fmt.Errorf("unsupported nesting mode %#v", blockS.Nesting)) - } - } - - return coerce_value_cty.ObjectVal(attrs), nil -} - -func (a *Attribute) coerceValue(in coerce_value_cty.Value, path coerce_value_cty.Path) (coerce_value_cty.Value, error) { - val, err := coerce_value_convert.Convert(in, a.Type) - if err != nil { - return coerce_value_cty.UnknownVal(a.Type), path.NewError(err) - } - return val, nil -} - -func (b *Block) EmptyValue() empty_value_cty.Value { - vals := make(map[string]empty_value_cty.Value) - for name, attrS := range b.Attributes { - vals[name] = attrS.EmptyValue() - } - for name, blockS := range b.BlockTypes { - vals[name] = blockS.EmptyValue() - } - return empty_value_cty.ObjectVal(vals) -} - -func (a *Attribute) EmptyValue() empty_value_cty.Value { - return empty_value_cty.NullVal(a.Type) -} - -func (b *NestedBlock) EmptyValue() empty_value_cty.Value { - switch b.Nesting { - case 
NestingSingle: - return empty_value_cty.NullVal(b.Block.ImpliedType()) - case NestingGroup: - return b.Block.EmptyValue() - case NestingList: - if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() { - return empty_value_cty.EmptyTupleVal - } else { - return empty_value_cty.ListValEmpty(ty) - } - case NestingMap: - if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() { - return empty_value_cty.EmptyObjectVal - } else { - return empty_value_cty.MapValEmpty(ty) - } - case NestingSet: - return empty_value_cty.SetValEmpty(b.Block.ImpliedType()) - default: - - return empty_value_cty.NullVal(empty_value_cty.DynamicPseudoType) - } -} - -func (b *Block) ImpliedType() implied_type_cty.Type { - if b == nil { - return implied_type_cty.EmptyObject - } - - atys := make(map[string]implied_type_cty.Type) - - for name, attrS := range b.Attributes { - atys[name] = attrS.Type - } - - for name, blockS := range b.BlockTypes { - if _, exists := atys[name]; exists { - panic("invalid schema, blocks and attributes cannot have the same name") - } - - childType := blockS.Block.ImpliedType() - - switch blockS.Nesting { - case NestingSingle, NestingGroup: - atys[name] = childType - case NestingList: - - if childType.HasDynamicTypes() { - atys[name] = implied_type_cty.DynamicPseudoType - } else { - atys[name] = implied_type_cty.List(childType) - } - case NestingSet: - if childType.HasDynamicTypes() { - panic("can't use cty.DynamicPseudoType inside a block type with NestingSet") - } - atys[name] = implied_type_cty.Set(childType) - case NestingMap: - - if childType.HasDynamicTypes() { - atys[name] = implied_type_cty.DynamicPseudoType - } else { - atys[name] = implied_type_cty.Map(childType) - } - default: - panic("invalid nesting type") - } - } - - return implied_type_cty.Object(atys) -} - -func _() { - - var x [1]struct{} - _ = x[nestingModeInvalid-0] - _ = x[NestingSingle-1] - _ = x[NestingGroup-2] - _ = x[NestingList-3] - _ = x[NestingSet-4] - _ = x[NestingMap-5] -} - -const _NestingMode_name 
= "nestingModeInvalidNestingSingleNestingGroupNestingListNestingSetNestingMap" - -var _NestingMode_index = [...]uint8{0, 18, 31, 43, 54, 64, 74} - -func (i NestingMode) String() string { - if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) { - return "NestingMode(" + nestingmode_string_strconv.FormatInt(int64(i), 10) + ")" - } - return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]] -} - -type StringKind int - -const ( - StringPlain StringKind = iota - - StringMarkdown -) - -type Block struct { - Attributes map[string]*Attribute - - BlockTypes map[string]*NestedBlock - - Description string - DescriptionKind StringKind - - Deprecated bool -} - -type Attribute struct { - Type schema_cty.Type - - Description string - DescriptionKind StringKind - - Required bool - - Optional bool - - Computed bool - - Sensitive bool - - Deprecated bool -} - -type NestedBlock struct { - Block - - Nesting NestingMode - - MinItems, MaxItems int -} - -type NestingMode int - -const ( - nestingModeInvalid NestingMode = iota - - NestingSingle - - NestingGroup - - NestingList - - NestingSet - - NestingMap -) diff --git a/contrib/terraform-provider-kubeproxy/generated/configschema/generate.go b/contrib/terraform-provider-kubeproxy/generated/configschema/generate.go deleted file mode 100644 index 1a1f0cd724..0000000000 --- a/contrib/terraform-provider-kubeproxy/generated/configschema/generate.go +++ /dev/null @@ -1,4 +0,0 @@ -package configschema - -// Note: we can't actually exclude this module from codeanalysis since we import it -//go:generate go run github.com/synapsecns/sanguine/tools/bundle -prefix "" -pkg configschema -o configschema_gen.go github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema diff --git a/contrib/terraform-provider-kubeproxy/generated/convert/convert_gen.go b/contrib/terraform-provider-kubeproxy/generated/convert/convert_gen.go deleted file mode 100644 index bcb3ca4326..0000000000 --- 
a/contrib/terraform-provider-kubeproxy/generated/convert/convert_gen.go +++ /dev/null @@ -1,444 +0,0 @@ -// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. -// nolint -package convert - -import ( - diagnostics_context "context" - schema_context "context" - schema_fmt "fmt" - schema_reflect "reflect" - schema_sort "sort" - - diagnostics_cty "github.com/hashicorp/go-cty/cty" - schema_cty "github.com/hashicorp/go-cty/cty" - diagnostics_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - schema_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - diagnostics_tftypes "github.com/hashicorp/terraform-plugin-go/tftypes" - schema_tftypes "github.com/hashicorp/terraform-plugin-go/tftypes" - diagnostics_diag "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - schema_configschema "github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy/generated/configschema" - diagnostics_logging "github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy/generated/logging" - schema_logging "github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy/generated/logging" -) - -func AppendProtoDiag(ctx diagnostics_context.Context, diags []*diagnostics_tfprotov5.Diagnostic, d interface{}) []*diagnostics_tfprotov5.Diagnostic { - switch d := d.(type) { - case diagnostics_cty.PathError: - ap := PathToAttributePath(d.Path) - diagnostic := &diagnostics_tfprotov5.Diagnostic{ - Severity: diagnostics_tfprotov5.DiagnosticSeverityError, - Summary: d.Error(), - Attribute: ap, - } - - if diagnostic.Summary == "" { - diagnostics_logging.HelperSchemaWarn(ctx, "detected empty error string for diagnostic in AppendProtoDiag for cty.PathError type") - diagnostic.Summary = "Empty Error String" - diagnostic.Detail = "This is always a bug in the provider and should be reported to the provider developers." 
- } - - diags = append(diags, diagnostic) - case diagnostics_diag.Diagnostics: - diags = append(diags, DiagsToProto(d)...) - case error: - if d == nil { - diagnostics_logging.HelperSchemaDebug(ctx, "skipping diagnostic for nil error in AppendProtoDiag") - return diags - } - - diagnostic := &diagnostics_tfprotov5.Diagnostic{ - Severity: diagnostics_tfprotov5.DiagnosticSeverityError, - Summary: d.Error(), - } - - if diagnostic.Summary == "" { - diagnostics_logging.HelperSchemaWarn(ctx, "detected empty error string for diagnostic in AppendProtoDiag for error type") - diagnostic.Summary = "Error Missing Message" - diagnostic.Detail = "This is always a bug in the provider and should be reported to the provider developers." - } - - diags = append(diags, diagnostic) - case string: - if d == "" { - diagnostics_logging.HelperSchemaDebug(ctx, "skipping diagnostic for empty string in AppendProtoDiag") - return diags - } - - diags = append(diags, &diagnostics_tfprotov5.Diagnostic{ - Severity: diagnostics_tfprotov5.DiagnosticSeverityWarning, - Summary: d, - }) - case *diagnostics_tfprotov5.Diagnostic: - diags = append(diags, d) - case []*diagnostics_tfprotov5.Diagnostic: - diags = append(diags, d...) 
- } - return diags -} - -func ProtoToDiags(ds []*diagnostics_tfprotov5.Diagnostic) diagnostics_diag.Diagnostics { - var diags diagnostics_diag.Diagnostics - for _, d := range ds { - var severity diagnostics_diag.Severity - - switch d.Severity { - case diagnostics_tfprotov5.DiagnosticSeverityError: - severity = diagnostics_diag.Error - case diagnostics_tfprotov5.DiagnosticSeverityWarning: - severity = diagnostics_diag.Warning - } - - diags = append(diags, diagnostics_diag.Diagnostic{ - Severity: severity, - Summary: d.Summary, - Detail: d.Detail, - AttributePath: AttributePathToPath(d.Attribute), - }) - } - - return diags -} - -func DiagsToProto(diags diagnostics_diag.Diagnostics) []*diagnostics_tfprotov5.Diagnostic { - var ds []*diagnostics_tfprotov5.Diagnostic - for _, d := range diags { - protoDiag := &diagnostics_tfprotov5.Diagnostic{ - Severity: diagnostics_tfprotov5.DiagnosticSeverityError, - Summary: d.Summary, - Detail: d.Detail, - Attribute: PathToAttributePath(d.AttributePath), - } - if d.Severity == diagnostics_diag.Warning { - protoDiag.Severity = diagnostics_tfprotov5.DiagnosticSeverityWarning - } - if d.Summary == "" { - protoDiag.Summary = "Empty Summary: This is always a bug in the provider and should be reported to the provider developers." 
- } - ds = append(ds, protoDiag) - } - return ds -} - -func AttributePathToPath(ap *diagnostics_tftypes.AttributePath) diagnostics_cty.Path { - var p diagnostics_cty.Path - if ap == nil { - return p - } - for _, step := range ap.Steps() { - switch step := step.(type) { - case diagnostics_tftypes.AttributeName: - p = p.GetAttr(string(step)) - case diagnostics_tftypes.ElementKeyString: - p = p.Index(diagnostics_cty.StringVal(string(step))) - case diagnostics_tftypes.ElementKeyInt: - p = p.Index(diagnostics_cty.NumberIntVal(int64(step))) - } - } - return p -} - -func PathToAttributePath(p diagnostics_cty.Path) *diagnostics_tftypes.AttributePath { - if p == nil || len(p) < 1 { - return nil - } - ap := diagnostics_tftypes.NewAttributePath() - for _, step := range p { - switch selector := step.(type) { - case diagnostics_cty.GetAttrStep: - ap = ap.WithAttributeName(selector.Name) - - case diagnostics_cty.IndexStep: - key := selector.Key - switch key.Type() { - case diagnostics_cty.String: - ap = ap.WithElementKeyString(key.AsString()) - case diagnostics_cty.Number: - v, _ := key.AsBigFloat().Int64() - ap = ap.WithElementKeyInt(int(v)) - default: - - return ap - } - } - } - return ap -} - -func tftypeFromCtyType(in schema_cty.Type) (schema_tftypes.Type, error) { - switch { - case in.Equals(schema_cty.String): - return schema_tftypes.String, nil - case in.Equals(schema_cty.Number): - return schema_tftypes.Number, nil - case in.Equals(schema_cty.Bool): - return schema_tftypes.Bool, nil - case in.Equals(schema_cty.DynamicPseudoType): - return schema_tftypes.DynamicPseudoType, nil - case in.IsSetType(): - elemType, err := tftypeFromCtyType(in.ElementType()) - if err != nil { - return nil, err - } - return schema_tftypes.Set{ - ElementType: elemType, - }, nil - case in.IsListType(): - elemType, err := tftypeFromCtyType(in.ElementType()) - if err != nil { - return nil, err - } - return schema_tftypes.List{ - ElementType: elemType, - }, nil - case in.IsTupleType(): - elemTypes 
:= make([]schema_tftypes.Type, 0, in.Length()) - for _, typ := range in.TupleElementTypes() { - elemType, err := tftypeFromCtyType(typ) - if err != nil { - return nil, err - } - elemTypes = append(elemTypes, elemType) - } - return schema_tftypes.Tuple{ - ElementTypes: elemTypes, - }, nil - case in.IsMapType(): - elemType, err := tftypeFromCtyType(in.ElementType()) - if err != nil { - return nil, err - } - return schema_tftypes.Map{ - ElementType: elemType, - }, nil - case in.IsObjectType(): - attrTypes := make(map[string]schema_tftypes.Type) - for key, typ := range in.AttributeTypes() { - attrType, err := tftypeFromCtyType(typ) - if err != nil { - return nil, err - } - attrTypes[key] = attrType - } - return schema_tftypes.Object{ - AttributeTypes: attrTypes, - }, nil - } - return nil, schema_fmt.Errorf("unknown cty type %s", in.GoString()) -} - -func ctyTypeFromTFType(in schema_tftypes.Type) (schema_cty.Type, error) { - switch { - case in.Is(schema_tftypes.String): - return schema_cty.String, nil - case in.Is(schema_tftypes.Bool): - return schema_cty.Bool, nil - case in.Is(schema_tftypes.Number): - return schema_cty.Number, nil - case in.Is(schema_tftypes.DynamicPseudoType): - return schema_cty.DynamicPseudoType, nil - case in.Is(schema_tftypes.List{}): - elemType, err := ctyTypeFromTFType(in.(schema_tftypes.List).ElementType) - if err != nil { - return schema_cty.Type{}, err - } - return schema_cty.List(elemType), nil - case in.Is(schema_tftypes.Set{}): - elemType, err := ctyTypeFromTFType(in.(schema_tftypes.Set).ElementType) - if err != nil { - return schema_cty.Type{}, err - } - return schema_cty.Set(elemType), nil - case in.Is(schema_tftypes.Map{}): - elemType, err := ctyTypeFromTFType(in.(schema_tftypes.Map).ElementType) - if err != nil { - return schema_cty.Type{}, err - } - return schema_cty.Map(elemType), nil - case in.Is(schema_tftypes.Tuple{}): - elemTypes := make([]schema_cty.Type, 0, len(in.(schema_tftypes.Tuple).ElementTypes)) - for _, typ := range 
in.(schema_tftypes.Tuple).ElementTypes { - elemType, err := ctyTypeFromTFType(typ) - if err != nil { - return schema_cty.Type{}, err - } - elemTypes = append(elemTypes, elemType) - } - return schema_cty.Tuple(elemTypes), nil - case in.Is(schema_tftypes.Object{}): - attrTypes := make(map[string]schema_cty.Type, len(in.(schema_tftypes.Object).AttributeTypes)) - for k, v := range in.(schema_tftypes.Object).AttributeTypes { - attrType, err := ctyTypeFromTFType(v) - if err != nil { - return schema_cty.Type{}, err - } - attrTypes[k] = attrType - } - return schema_cty.Object(attrTypes), nil - } - return schema_cty.Type{}, schema_fmt.Errorf("unknown tftypes.Type %s", in) -} - -func ConfigSchemaToProto(ctx schema_context.Context, b *schema_configschema.Block) *schema_tfprotov5.SchemaBlock { - block := &schema_tfprotov5.SchemaBlock{ - Description: b.Description, - DescriptionKind: protoStringKind(ctx, b.DescriptionKind), - Deprecated: b.Deprecated, - } - - for _, name := range sortedKeys(b.Attributes) { - a := b.Attributes[name] - - attr := &schema_tfprotov5.SchemaAttribute{ - Name: name, - Description: a.Description, - DescriptionKind: protoStringKind(ctx, a.DescriptionKind), - Optional: a.Optional, - Computed: a.Computed, - Required: a.Required, - Sensitive: a.Sensitive, - Deprecated: a.Deprecated, - } - - var err error - attr.Type, err = tftypeFromCtyType(a.Type) - if err != nil { - panic(err) - } - - block.Attributes = append(block.Attributes, attr) - } - - for _, name := range sortedKeys(b.BlockTypes) { - b := b.BlockTypes[name] - block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(ctx, name, b)) - } - - return block -} - -func protoStringKind(ctx schema_context.Context, k schema_configschema.StringKind) schema_tfprotov5.StringKind { - switch k { - default: - schema_logging.HelperSchemaTrace(ctx, schema_fmt.Sprintf("Unexpected configschema.StringKind: %d", k)) - return schema_tfprotov5.StringKindPlain - case schema_configschema.StringPlain: - return 
schema_tfprotov5.StringKindPlain - case schema_configschema.StringMarkdown: - return schema_tfprotov5.StringKindMarkdown - } -} - -func protoSchemaNestedBlock(ctx schema_context.Context, name string, b *schema_configschema.NestedBlock) *schema_tfprotov5.SchemaNestedBlock { - var nesting schema_tfprotov5.SchemaNestedBlockNestingMode - switch b.Nesting { - case schema_configschema.NestingSingle: - nesting = schema_tfprotov5.SchemaNestedBlockNestingModeSingle - case schema_configschema.NestingGroup: - nesting = schema_tfprotov5.SchemaNestedBlockNestingModeGroup - case schema_configschema.NestingList: - nesting = schema_tfprotov5.SchemaNestedBlockNestingModeList - case schema_configschema.NestingSet: - nesting = schema_tfprotov5.SchemaNestedBlockNestingModeSet - case schema_configschema.NestingMap: - nesting = schema_tfprotov5.SchemaNestedBlockNestingModeMap - default: - nesting = schema_tfprotov5.SchemaNestedBlockNestingModeInvalid - } - return &schema_tfprotov5.SchemaNestedBlock{ - TypeName: name, - Block: ConfigSchemaToProto(ctx, &b.Block), - Nesting: nesting, - MinItems: int64(b.MinItems), - MaxItems: int64(b.MaxItems), - } -} - -func ProtoToConfigSchema(ctx schema_context.Context, b *schema_tfprotov5.SchemaBlock) *schema_configschema.Block { - block := &schema_configschema.Block{ - Attributes: make(map[string]*schema_configschema.Attribute), - BlockTypes: make(map[string]*schema_configschema.NestedBlock), - - Description: b.Description, - DescriptionKind: schemaStringKind(ctx, b.DescriptionKind), - Deprecated: b.Deprecated, - } - - for _, a := range b.Attributes { - attr := &schema_configschema.Attribute{ - Description: a.Description, - DescriptionKind: schemaStringKind(ctx, a.DescriptionKind), - Required: a.Required, - Optional: a.Optional, - Computed: a.Computed, - Sensitive: a.Sensitive, - Deprecated: a.Deprecated, - } - - var err error - attr.Type, err = ctyTypeFromTFType(a.Type) - if err != nil { - panic(err) - } - - block.Attributes[a.Name] = attr - } - - 
for _, b := range b.BlockTypes { - block.BlockTypes[b.TypeName] = schemaNestedBlock(ctx, b) - } - - return block -} - -func schemaStringKind(ctx schema_context.Context, k schema_tfprotov5.StringKind) schema_configschema.StringKind { - switch k { - default: - schema_logging.HelperSchemaTrace(ctx, schema_fmt.Sprintf("Unexpected tfprotov5.StringKind: %d", k)) - return schema_configschema.StringPlain - case schema_tfprotov5.StringKindPlain: - return schema_configschema.StringPlain - case schema_tfprotov5.StringKindMarkdown: - return schema_configschema.StringMarkdown - } -} - -func schemaNestedBlock(ctx schema_context.Context, b *schema_tfprotov5.SchemaNestedBlock) *schema_configschema.NestedBlock { - var nesting schema_configschema.NestingMode - switch b.Nesting { - case schema_tfprotov5.SchemaNestedBlockNestingModeSingle: - nesting = schema_configschema.NestingSingle - case schema_tfprotov5.SchemaNestedBlockNestingModeGroup: - nesting = schema_configschema.NestingGroup - case schema_tfprotov5.SchemaNestedBlockNestingModeList: - nesting = schema_configschema.NestingList - case schema_tfprotov5.SchemaNestedBlockNestingModeMap: - nesting = schema_configschema.NestingMap - case schema_tfprotov5.SchemaNestedBlockNestingModeSet: - nesting = schema_configschema.NestingSet - default: - - } - - nb := &schema_configschema.NestedBlock{ - Nesting: nesting, - MinItems: int(b.MinItems), - MaxItems: int(b.MaxItems), - } - - nested := ProtoToConfigSchema(ctx, b.Block) - nb.Block = *nested - return nb -} - -func sortedKeys(m interface{}) []string { - v := schema_reflect.ValueOf(m) - keys := make([]string, v.Len()) - - mapKeys := v.MapKeys() - for i, k := range mapKeys { - keys[i] = k.Interface().(string) - } - - schema_sort.Strings(keys) - return keys -} diff --git a/contrib/terraform-provider-kubeproxy/generated/convert/generate.go b/contrib/terraform-provider-kubeproxy/generated/convert/generate.go deleted file mode 100644 index b508eccf1e..0000000000 --- 
a/contrib/terraform-provider-kubeproxy/generated/convert/generate.go +++ /dev/null @@ -1,4 +0,0 @@ -package convert - -// Note: we can't actually exclude this module from codeanalysis since we import it -//go:generate go run github.com/synapsecns/sanguine/tools/bundle -prefix "" -pkg convert -o convert_gen.go -import github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging=github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy/generated/logging -import github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema=github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy/generated/configschema github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert diff --git a/contrib/terraform-provider-kubeproxy/generated/logging/generate.go b/contrib/terraform-provider-kubeproxy/generated/logging/generate.go deleted file mode 100644 index 0f33ffc01c..0000000000 --- a/contrib/terraform-provider-kubeproxy/generated/logging/generate.go +++ /dev/null @@ -1,4 +0,0 @@ -package logging - -// Note: we can't actually exclude this module from codeanalysis since we import it -//go:generate go run github.com/synapsecns/sanguine/tools/bundle -prefix "" -pkg logging -o logging_gen.go github.com/hashicorp/terraform-plugin-sdk/v2/internal/logging diff --git a/contrib/terraform-provider-kubeproxy/generated/logging/logging_gen.go b/contrib/terraform-provider-kubeproxy/generated/logging/logging_gen.go deleted file mode 100644 index 05c5b504d3..0000000000 --- a/contrib/terraform-provider-kubeproxy/generated/logging/logging_gen.go +++ /dev/null @@ -1,146 +0,0 @@ -// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. 
-// nolint -package logging - -import ( - context_context "context" - helper_resource_context "context" - helper_schema_context "context" - - context_tfsdklog "github.com/hashicorp/terraform-plugin-log/tfsdklog" - helper_resource_tfsdklog "github.com/hashicorp/terraform-plugin-log/tfsdklog" - helper_schema_tfsdklog "github.com/hashicorp/terraform-plugin-log/tfsdklog" - context_logginghelperlogging "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" - context_testingtesting "github.com/mitchellh/go-testing-interface" -) - -func InitContext(ctx context_context.Context) context_context.Context { - ctx = context_tfsdklog.NewSubsystem(ctx, SubsystemHelperSchema, - - context_tfsdklog.WithAdditionalLocationOffset(1), - context_tfsdklog.WithLevelFromEnv(EnvTfLogSdkHelperSchema), - - context_tfsdklog.WithRootFields(), - ) - - return ctx -} - -func InitTestContext(ctx context_context.Context, t context_testingtesting.T) context_context.Context { - context_logginghelperlogging.SetOutput(t) - - ctx = context_tfsdklog.RegisterTestSink(ctx, t) - ctx = context_tfsdklog.NewRootSDKLogger(ctx, context_tfsdklog.WithLevelFromEnv(EnvTfLogSdk)) - ctx = context_tfsdklog.NewSubsystem(ctx, SubsystemHelperResource, - - context_tfsdklog.WithAdditionalLocationOffset(1), - context_tfsdklog.WithLevelFromEnv(EnvTfLogSdkHelperResource), - ) - ctx = TestNameContext(ctx, t.Name()) - - return ctx -} - -func TestNameContext(ctx context_context.Context, testName string) context_context.Context { - ctx = context_tfsdklog.SubsystemSetField(ctx, SubsystemHelperResource, KeyTestName, testName) - - return ctx -} - -func TestStepNumberContext(ctx context_context.Context, stepNumber int) context_context.Context { - ctx = context_tfsdklog.SubsystemSetField(ctx, SubsystemHelperResource, KeyTestStepNumber, stepNumber) - - return ctx -} - -func TestTerraformPathContext(ctx context_context.Context, terraformPath string) context_context.Context { - ctx = 
context_tfsdklog.SubsystemSetField(ctx, SubsystemHelperResource, KeyTestTerraformPath, terraformPath) - - return ctx -} - -func TestWorkingDirectoryContext(ctx context_context.Context, workingDirectory string) context_context.Context { - ctx = context_tfsdklog.SubsystemSetField(ctx, SubsystemHelperResource, KeyTestWorkingDirectory, workingDirectory) - - return ctx -} - -const ( - EnvTfLogSdk = "TF_LOG_SDK" - - EnvTfLogSdkHelperResource = "TF_LOG_SDK_HELPER_RESOURCE" - - EnvTfLogSdkHelperSchema = "TF_LOG_SDK_HELPER_SCHEMA" -) - -const ( - SubsystemHelperResource = "helper_resource" -) - -func HelperResourceTrace(ctx helper_resource_context.Context, msg string, additionalFields ...map[string]interface{}) { - helper_resource_tfsdklog.SubsystemTrace(ctx, SubsystemHelperResource, msg, additionalFields...) -} - -func HelperResourceDebug(ctx helper_resource_context.Context, msg string, additionalFields ...map[string]interface{}) { - helper_resource_tfsdklog.SubsystemDebug(ctx, SubsystemHelperResource, msg, additionalFields...) -} - -func HelperResourceWarn(ctx helper_resource_context.Context, msg string, additionalFields ...map[string]interface{}) { - helper_resource_tfsdklog.SubsystemWarn(ctx, SubsystemHelperResource, msg, additionalFields...) -} - -func HelperResourceError(ctx helper_resource_context.Context, msg string, additionalFields ...map[string]interface{}) { - helper_resource_tfsdklog.SubsystemError(ctx, SubsystemHelperResource, msg, additionalFields...) -} - -const ( - SubsystemHelperSchema = "helper_schema" -) - -func HelperSchemaDebug(ctx helper_schema_context.Context, msg string, additionalFields ...map[string]interface{}) { - helper_schema_tfsdklog.SubsystemDebug(ctx, SubsystemHelperSchema, msg, additionalFields...) -} - -func HelperSchemaError(ctx helper_schema_context.Context, msg string, additionalFields ...map[string]interface{}) { - helper_schema_tfsdklog.SubsystemError(ctx, SubsystemHelperSchema, msg, additionalFields...) 
-} - -func HelperSchemaTrace(ctx helper_schema_context.Context, msg string, additionalFields ...map[string]interface{}) { - helper_schema_tfsdklog.SubsystemTrace(ctx, SubsystemHelperSchema, msg, additionalFields...) -} - -func HelperSchemaWarn(ctx helper_schema_context.Context, msg string, additionalFields ...map[string]interface{}) { - helper_schema_tfsdklog.SubsystemWarn(ctx, SubsystemHelperSchema, msg, additionalFields...) -} - -const ( - KeyAttributePath = "tf_attribute_path" - - KeyDataSourceType = "tf_data_source_type" - - KeyError = "error" - - KeyProviderAddress = "tf_provider_addr" - - KeyResourceType = "tf_resource_type" - - KeyTestName = "test_name" - - KeyTestStepNumber = "test_step_number" - - KeyTestTerraformConfiguration = "test_terraform_configuration" - - KeyTestTerraformLogLevel = "test_terraform_log_level" - - KeyTestTerraformLogCoreLevel = "test_terraform_log_core_level" - - KeyTestTerraformLogProviderLevel = "test_terraform_log_provider_level" - - KeyTestTerraformLogPath = "test_terraform_log_path" - - KeyTestTerraformPath = "test_terraform_path" - - KeyTestTerraformPlan = "test_terraform_plan" - - KeyTestWorkingDirectory = "test_working_directory" -) diff --git a/contrib/terraform-provider-kubeproxy/generated/manifest/generate.go b/contrib/terraform-provider-kubeproxy/generated/manifest/generate.go deleted file mode 100644 index 0bcb5b9fb4..0000000000 --- a/contrib/terraform-provider-kubeproxy/generated/manifest/generate.go +++ /dev/null @@ -1,12 +0,0 @@ -package manifest - -import _ "golang.org/x/tools/benchmark/parse" - -// required by go:generate -import _ "golang.org/x/mod/semver" - -// required for copying the module -import _ "github.com/hashicorp/terraform-provider-kubernetes/manifest/provider" - -// Note: we can't actually exclude this module from codeanalysis since we import it -//go:generate go run github.com/synapsecns/sanguine/tools/bundle -prefix "" -pkg manifest -o manifest_gen.go 
github.com/hashicorp/terraform-provider-kubernetes/manifest/provider diff --git a/contrib/terraform-provider-kubeproxy/generated/manifest/helpers.go b/contrib/terraform-provider-kubeproxy/generated/manifest/helpers.go deleted file mode 100644 index fed2ed0275..0000000000 --- a/contrib/terraform-provider-kubeproxy/generated/manifest/helpers.go +++ /dev/null @@ -1,8 +0,0 @@ -package manifest - -import "github.com/hashicorp/go-hclog" - -// SetLogger sets the logger on the raw provider -func (s *RawProviderServer) SetLogger(logger hclog.Logger) { - s.logger = logger -} diff --git a/contrib/terraform-provider-kubeproxy/generated/manifest/manifest_gen.go b/contrib/terraform-provider-kubeproxy/generated/manifest/manifest_gen.go deleted file mode 100644 index 058d63dfb1..0000000000 --- a/contrib/terraform-provider-kubeproxy/generated/manifest/manifest_gen.go +++ /dev/null @@ -1,4072 +0,0 @@ -// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. -// nolint -package manifest - -import ( - apply_context "context" - clients_context "context" - configure_context "context" - datasource_context "context" - getproviderschema_context "context" - import_context "context" - plan_context "context" - plugin_context "context" - read_context "context" - resource_context "context" - server_context "context" - upgrade_state_context "context" - validate_context "context" - waiter_context "context" - plugin_json "encoding/json" - resource_json "encoding/json" - configure_pem "encoding/pem" - apply_errors "errors" - configure_errors "errors" - resource_errors "errors" - apply_fmt "fmt" - clients_fmt "fmt" - configure_fmt "fmt" - datasource_fmt "fmt" - diagnostics_fmt "fmt" - import_fmt "fmt" - plan_fmt "fmt" - plugin_fmt "fmt" - provider_fmt "fmt" - read_fmt "fmt" - resource_fmt "fmt" - upgrade_state_fmt "fmt" - validate_fmt "fmt" - waiter_fmt "fmt" - waiter_big "math/big" - clients_http "net/http" - configure_url "net/url" - configure_os "os" - plugin_os "os" - 
configure_filepath "path/filepath" - waiter_regexp "regexp" - configure_strconv "strconv" - configure_strings "strings" - validate_strings "strings" - plugin_testing "testing" - apply_time "time" - clients_time "time" - plugin_time "time" - validate_time "time" - waiter_time "time" - - plugin_hclog "github.com/hashicorp/go-hclog" - server_hclog "github.com/hashicorp/go-hclog" - waiter_hclog "github.com/hashicorp/go-hclog" - plugin_plugin "github.com/hashicorp/go-plugin" - waiter_hclhcl "github.com/hashicorp/hcl/v2" - waiter_hclsyntax "github.com/hashicorp/hcl/v2/hclsyntax" - plugin_tfexec "github.com/hashicorp/terraform-exec/tfexec" - apply_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - clients_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - configure_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - datasource_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - diagnostics_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - getproviderschema_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - import_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - plan_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - plugin_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - provider_config_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - provider_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - read_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - server_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - upgrade_state_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - validate_tfprotov5 "github.com/hashicorp/terraform-plugin-go/tfprotov5" - plugin_tf5servertf5server 
"github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server" - apply_tftypes "github.com/hashicorp/terraform-plugin-go/tftypes" - configure_tftypes "github.com/hashicorp/terraform-plugin-go/tftypes" - datasource_tftypes "github.com/hashicorp/terraform-plugin-go/tftypes" - import_tftypes "github.com/hashicorp/terraform-plugin-go/tftypes" - plan_tftypes "github.com/hashicorp/terraform-plugin-go/tftypes" - provider_config_tftypes "github.com/hashicorp/terraform-plugin-go/tftypes" - provider_tftypes "github.com/hashicorp/terraform-plugin-go/tftypes" - read_tftypes "github.com/hashicorp/terraform-plugin-go/tftypes" - resource_tftypes "github.com/hashicorp/terraform-plugin-go/tftypes" - upgrade_state_tftypes "github.com/hashicorp/terraform-plugin-go/tftypes" - validate_tftypes "github.com/hashicorp/terraform-plugin-go/tftypes" - waiter_tftypes "github.com/hashicorp/terraform-plugin-go/tftypes" - clients_logging "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" - plan_manifest "github.com/hashicorp/terraform-provider-kubernetes/manifest" - apply_morph "github.com/hashicorp/terraform-provider-kubernetes/manifest/morph" - datasource_morph "github.com/hashicorp/terraform-provider-kubernetes/manifest/morph" - import_morph "github.com/hashicorp/terraform-provider-kubernetes/manifest/morph" - plan_morph "github.com/hashicorp/terraform-provider-kubernetes/manifest/morph" - read_morph "github.com/hashicorp/terraform-provider-kubernetes/manifest/morph" - upgrade_state_morph "github.com/hashicorp/terraform-provider-kubernetes/manifest/morph" - clients_openapi "github.com/hashicorp/terraform-provider-kubernetes/manifest/openapi" - resource_openapi "github.com/hashicorp/terraform-provider-kubernetes/manifest/openapi" - server_openapi 
"github.com/hashicorp/terraform-provider-kubernetes/manifest/openapi" - apply_payload "github.com/hashicorp/terraform-provider-kubernetes/manifest/payload" - datasource_payload "github.com/hashicorp/terraform-provider-kubernetes/manifest/payload" - import_payload "github.com/hashicorp/terraform-provider-kubernetes/manifest/payload" - plan_payload "github.com/hashicorp/terraform-provider-kubernetes/manifest/payload" - read_payload "github.com/hashicorp/terraform-provider-kubernetes/manifest/payload" - waiter_payload "github.com/hashicorp/terraform-provider-kubernetes/manifest/payload" - import_util "github.com/hashicorp/terraform-provider-kubernetes/util" - configure_homedir "github.com/mitchellh/go-homedir" - waiter_cty "github.com/zclconf/go-cty/cty" - configure_semver "golang.org/x/mod/semver" - server_codes "google.golang.org/grpc/codes" - server_status "google.golang.org/grpc/status" - server_install "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install" - apply_errorsapierrors "k8s.io/apimachinery/pkg/api/errors" - clients_errorsapierrors "k8s.io/apimachinery/pkg/api/errors" - datasource_errorsapierrors "k8s.io/apimachinery/pkg/api/errors" - read_errorsapierrors "k8s.io/apimachinery/pkg/api/errors" - waiter_errors "k8s.io/apimachinery/pkg/api/errors" - clients_meta "k8s.io/apimachinery/pkg/api/meta" - datasource_meta "k8s.io/apimachinery/pkg/api/meta" - resource_meta "k8s.io/apimachinery/pkg/api/meta" - server_meta "k8s.io/apimachinery/pkg/api/meta" - apply_v1metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - datasource_v1metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - diagnostics_v1metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - import_v1metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - plan_v1metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - read_v1metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - resource_v1v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - waiter_v1v1 
"k8s.io/apimachinery/pkg/apis/meta/v1" - apply_unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - datasource_unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - import_unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - plan_unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - read_unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - resource_unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - configure_runtime "k8s.io/apimachinery/pkg/runtime" - configure_schemaapimachineryschema "k8s.io/apimachinery/pkg/runtime/schema" - datasource_schema "k8s.io/apimachinery/pkg/runtime/schema" - resource_schema "k8s.io/apimachinery/pkg/runtime/schema" - configure_serializer "k8s.io/apimachinery/pkg/runtime/serializer" - apply_types "k8s.io/apimachinery/pkg/types" - plan_types "k8s.io/apimachinery/pkg/types" - clients_discovery "k8s.io/client-go/discovery" - server_discovery "k8s.io/client-go/discovery" - clients_memory "k8s.io/client-go/discovery/cached/memory" - apply_dynamic "k8s.io/client-go/dynamic" - clients_dynamic "k8s.io/client-go/dynamic" - plan_dynamic "k8s.io/client-go/dynamic" - server_dynamic "k8s.io/client-go/dynamic" - waiter_dynamic "k8s.io/client-go/dynamic" - configure_scheme "k8s.io/client-go/kubernetes/scheme" - server_scheme "k8s.io/client-go/kubernetes/scheme" - clients_rest "k8s.io/client-go/rest" - configure_rest "k8s.io/client-go/rest" - server_rest "k8s.io/client-go/rest" - clients_restmapper "k8s.io/client-go/restmapper" - configure_clientcmd "k8s.io/client-go/tools/clientcmd" - configure_apiclientcmdapi "k8s.io/client-go/tools/clientcmd/api" - waiter_polymorphichelpers "k8s.io/kubectl/pkg/polymorphichelpers" -) - -var defaultCreateTimeout = "10m" - -var defaultUpdateTimeout = "10m" - -var defaultDeleteTimeout = "10m" - -func (s *RawProviderServer) ApplyResourceChange(ctx apply_context.Context, req *apply_tfprotov5.ApplyResourceChangeRequest) 
(*apply_tfprotov5.ApplyResourceChangeResponse, error) {
	// ApplyResourceChange (signature begins above) implements the tfprotov5
	// ApplyResourceChange RPC: it server-side-applies (create/update) or
	// deletes the Kubernetes object described by the planned state, then
	// returns the resulting new state. User-facing failures are reported as
	// in-band Diagnostics with a nil Go error, per the plugin protocol
	// convention; only internal/unexpected failures return a non-nil error.
	resp := &apply_tfprotov5.ApplyResourceChangeResponse{}

	// Refuse to proceed if the provider is not in a state where it may talk
	// to the cluster (canExecute reports the blocking condition as diagnostics).
	execDiag := s.canExecute()
	if len(execDiag) > 0 {
		resp.Diagnostics = append(resp.Diagnostics, execDiag...)
		return resp, nil
	}

	// Resolve the tftypes type of this resource so the DynamicValues
	// (planned state, prior state, config) can be unmarshalled.
	rt, err := GetResourceType(req.TypeName)
	if err != nil {
		resp.Diagnostics = append(resp.Diagnostics, &apply_tfprotov5.Diagnostic{
			Severity: apply_tfprotov5.DiagnosticSeverityError,
			Summary:  "Failed to determine planned resource type",
			Detail:   err.Error(),
		})
		return resp, nil
	}

	applyPlannedState, err := req.PlannedState.Unmarshal(rt)
	if err != nil {
		resp.Diagnostics = append(resp.Diagnostics, &apply_tfprotov5.Diagnostic{
			Severity: apply_tfprotov5.DiagnosticSeverityError,
			Summary:  "Failed to unmarshal planned resource state",
			Detail:   err.Error(),
		})
		return resp, nil
	}
	s.logger.Trace("[ApplyResourceChange][PlannedState] %#v", applyPlannedState)

	applyPriorState, err := req.PriorState.Unmarshal(rt)
	if err != nil {
		resp.Diagnostics = append(resp.Diagnostics, &apply_tfprotov5.Diagnostic{
			Severity: apply_tfprotov5.DiagnosticSeverityError,
			Summary:  "Failed to unmarshal prior resource state",
			Detail:   err.Error(),
		})
		return resp, nil
	}
	s.logger.Trace("[ApplyResourceChange]", "[PriorState]", dump(applyPriorState))

	config, err := req.Config.Unmarshal(rt)
	if err != nil {
		resp.Diagnostics = append(resp.Diagnostics, &apply_tfprotov5.Diagnostic{
			Severity: apply_tfprotov5.DiagnosticSeverityError,
			Summary:  "Failed to unmarshal manifest configuration",
			Detail:   err.Error(),
		})
		return resp, nil
	}
	confVals := make(map[string]apply_tftypes.Value)
	err = config.As(&confVals)
	if err != nil {
		resp.Diagnostics = append(resp.Diagnostics, &apply_tfprotov5.Diagnostic{
			Severity: apply_tfprotov5.DiagnosticSeverityError,
			Summary:  "Failed to extract attributes from resource configuration",
			Detail:   err.Error(),
		})
		return resp, nil
	}

	var plannedStateVal map[string]apply_tftypes.Value = make(map[string]apply_tftypes.Value)
	err = applyPlannedState.As(&plannedStateVal)
	if err != nil {
		resp.Diagnostics = append(resp.Diagnostics, &apply_tfprotov5.Diagnostic{
			Severity: apply_tfprotov5.DiagnosticSeverityError,
			Summary:  "Failed to extract planned resource state values",
			Detail:   err.Error(),
		})
		return resp, nil
	}

	// Build the set of attribute paths the user designated as "computed_fields".
	// Unknown values at these paths are backfilled from the manifest below.
	computedFields := make(map[string]*apply_tftypes.AttributePath)
	var atp *apply_tftypes.AttributePath
	cfVal, ok := plannedStateVal["computed_fields"]
	if ok && !cfVal.IsNull() && cfVal.IsKnown() {
		var cf []apply_tftypes.Value
		cfVal.As(&cf)
		for _, v := range cf {
			var vs string
			err := v.As(&vs)
			if err != nil {
				// Skip malformed entries instead of failing the whole apply.
				s.logger.Error("[computed_fields] cannot extract element from list")
				continue
			}
			atp, err := FieldPathToTftypesPath(vs)
			if err != nil {
				s.logger.Error("[Configure]", "[computed_fields] cannot parse field path element", err)
				resp.Diagnostics = append(resp.Diagnostics, &apply_tfprotov5.Diagnostic{
					Severity: apply_tfprotov5.DiagnosticSeverityError,
					Summary:  "[computed_fields] cannot parse filed path element: " + vs,
					Detail:   err.Error(),
				})
				continue
			}
			computedFields[atp.String()] = atp
		}
	} else {
		// Defaults when no computed_fields were configured:
		// "metadata.annotations" and "metadata.labels".
		atp = apply_tftypes.NewAttributePath().WithAttributeName("metadata").WithAttributeName("annotations")
		computedFields[atp.String()] = atp

		atp = apply_tftypes.NewAttributePath().WithAttributeName("metadata").WithAttributeName("labels")
		computedFields[atp.String()] = atp
	}

	c, err := s.getDynamicClient()
	if err != nil {
		resp.Diagnostics = append(resp.Diagnostics,
			&apply_tfprotov5.Diagnostic{
				Severity: apply_tfprotov5.DiagnosticSeverityError,
				Summary:  "Failed to retrieve Kubernetes dynamic client during apply",
				Detail:   err.Error(),
			})
		return resp, nil
	}
	m, err := s.getRestMapper()
	if err != nil {
		resp.Diagnostics = append(resp.Diagnostics,
			&apply_tfprotov5.Diagnostic{
				Severity: apply_tfprotov5.DiagnosticSeverityError,
				Summary:  "Failed to retrieve Kubernetes RESTMapper client during apply",
				Detail:   err.Error(),
			})
		return resp, nil
	}
	var rs apply_dynamic.ResourceInterface

	switch {
	case applyPriorState.IsNull() || (!applyPlannedState.IsNull() && !applyPriorState.IsNull()):
		// Create (prior state null) or update (both states present).
		obj, ok := plannedStateVal["object"]
		if !ok {
			resp.Diagnostics = append(resp.Diagnostics, &apply_tfprotov5.Diagnostic{
				Severity: apply_tfprotov5.DiagnosticSeverityError,
				Summary:  "Failed to find object value in planned resource state",
			})
			return resp, nil
		}

		gvk, err := GVKFromTftypesObject(&obj, m)
		if err != nil {
			resp.Diagnostics = append(resp.Diagnostics, &apply_tfprotov5.Diagnostic{
				Severity: apply_tfprotov5.DiagnosticSeverityError,
				Summary:  "Failed to determine the type of the resource",
				Detail:   apply_fmt.Sprintf(`This can happen when the "apiVersion" or "kind" fields are not present in the manifest, or when the corresponding "kind" or "apiVersion" could not be found on the Kubernetes cluster.\nError: %s`, err),
			})
			return resp, nil
		}

		tsch, th, err := s.TFTypeFromOpenAPI(ctx, gvk, false)
		if err != nil {
			return resp, apply_fmt.Errorf("failed to determine resource type ID: %s", err)
		}

		// Backfill unknown values at computed_fields paths from the configured
		// manifest so the payload sent to the API server carries no unknowns.
		obj, err = apply_tftypes.Transform(obj, func(ap *apply_tftypes.AttributePath, v apply_tftypes.Value) (apply_tftypes.Value, error) {
			_, isComputed := computedFields[ap.String()]
			if !isComputed {
				return v, nil
			}
			if v.IsKnown() {
				return v, nil
			}
			ppMan, restPath, err := apply_tftypes.WalkAttributePath(plannedStateVal["manifest"], ap)
			if err != nil {
				if len(restPath.Steps()) > 0 {
					// Path not present in the manifest: leave the value as-is.
					return v, nil
				}
				return v, ap.NewError(err)
			}
			nv, d := apply_morph.ValueToType(ppMan.(apply_tftypes.Value), v.Type(), apply_tftypes.NewAttributePath())
			if len(d) > 0 {
				resp.Diagnostics = append(resp.Diagnostics, &apply_tfprotov5.Diagnostic{
					Severity: apply_tfprotov5.DiagnosticSeverityError,
					Summary:  "Manifest configuration is incompatible with resource schema",
					Detail:   "Detailed descriptions of errors will follow below.",
				})
				resp.Diagnostics = append(resp.Diagnostics, d...)
				return v, nil
			}
			return nv, nil
		})
		if err != nil {
			resp.Diagnostics = append(resp.Diagnostics, &apply_tfprotov5.Diagnostic{
				Severity: apply_tfprotov5.DiagnosticSeverityError,
				Summary:  "Failed to backfill computed values in proposed value",
				Detail:   err.Error(),
			})
			return resp, nil
		}

		// Any unknowns that remain are turned into nulls before serialization.
		nullObj := apply_morph.UnknownToNull(obj)
		s.logger.Trace("[ApplyResourceChange][Apply]", "[UnknownToNull]", dump(nullObj))

		// Drop object/map attributes that are entirely empty AND absent from the
		// user's manifest, so the API server does not receive spurious empty blocks.
		minObj, err := apply_tftypes.Transform(nullObj, func(ap *apply_tftypes.AttributePath, v apply_tftypes.Value) (apply_tftypes.Value, error) {
			if v.IsNull() {
				return apply_tftypes.NewValue(v.Type(), nil), nil
			}
			switch {
			case v.Type().Is(apply_tftypes.Object{}) || v.Type().Is(apply_tftypes.Map{}):
				atts := make(map[string]apply_tftypes.Value)
				err := v.As(&atts)
				if err != nil {
					return v, err
				}
				var isEmpty bool = true
				for _, atv := range atts {
					if !atv.IsNull() {
						isEmpty = false
						break
					}
				}
				_, restPath, err := apply_tftypes.WalkAttributePath(confVals["manifest"], ap)
				if (err == nil && len(restPath.Steps()) == 0) || !isEmpty {
					// Present in the manifest, or not empty: keep it.
					return apply_tftypes.NewValue(v.Type(), atts), nil
				}
				return apply_tftypes.NewValue(v.Type(), nil), nil
			case v.Type().Is(apply_tftypes.List{}) || v.Type().Is(apply_tftypes.Set{}) || v.Type().Is(apply_tftypes.Tuple{}):
				atts := make([]apply_tftypes.Value, 0)
				err := v.As(&atts)
				if err != nil {
					return v, err
				}
				return apply_tftypes.NewValue(v.Type(), atts), nil
			default:
				return v, nil
			}
		})
		if err != nil {
			resp.Diagnostics = append(resp.Diagnostics,
				&apply_tfprotov5.Diagnostic{
					Severity: apply_tfprotov5.DiagnosticSeverityError,
					Detail:   err.Error(),
					Summary:  "Failed to sanitize empty block ahead of payload preparation",
				})
			return resp, nil
		}

		// Convert the tftypes value into an unstructured Kubernetes payload.
		pu, err := apply_payload.FromTFValue(minObj, th, apply_tftypes.NewAttributePath())
		if err != nil {
			return resp, err
		}
		s.logger.Trace("[ApplyResourceChange][Apply]", "[payload.FromTFValue]", dump(pu))

		rqObj := mapRemoveNulls(pu.(map[string]interface{}))

		uo := apply_unstructured.Unstructured{}
		uo.SetUnstructuredContent(rqObj)
		rnamespace := uo.GetNamespace()
		rname := uo.GetName()
		rnn := apply_types.NamespacedName{Namespace: rnamespace, Name: rname}.String()

		gvr, err := GVRFromUnstructured(&uo, m)
		if err != nil {
			return resp, apply_fmt.Errorf("failed to determine resource GVR: %s", err)
		}

		ns, err := IsResourceNamespaced(gvk, m)
		if err != nil {
			resp.Diagnostics = append(resp.Diagnostics,
				&apply_tfprotov5.Diagnostic{
					Severity: apply_tfprotov5.DiagnosticSeverityError,
					Detail:   err.Error(),
					Summary:  apply_fmt.Sprintf("Failed to discover scope of resource '%s'", rnn),
				})
			return resp, nil
		}

		if ns {
			rs = c.Resource(gvr).Namespace(rnamespace)
		} else {
			rs = c.Resource(gvr)
		}

		// On create, refuse to adopt an object that already exists in the cluster.
		if applyPriorState.IsNull() {
			_, err := rs.Get(ctx, rname, apply_v1metav1.GetOptions{})
			if err == nil {
				resp.Diagnostics = append(resp.Diagnostics,
					&apply_tfprotov5.Diagnostic{
						Severity: apply_tfprotov5.DiagnosticSeverityError,
						Summary:  "Cannot create resource that already exists",
						Detail:   apply_fmt.Sprintf("resource %q already exists", rnn),
					})
				return resp, nil
			} else if !apply_errorsapierrors.IsNotFound(err) {
				resp.Diagnostics = append(resp.Diagnostics,
					&apply_tfprotov5.Diagnostic{
						Severity: apply_tfprotov5.DiagnosticSeverityError,
						Summary:  apply_fmt.Sprintf("Failed to determine if resource %q exists", rnn),
						Detail:   err.Error(),
					})
				return resp, nil
			}
		}

		jsonManifest, err := uo.MarshalJSON()
		if err != nil {
			resp.Diagnostics = append(resp.Diagnostics,
				&apply_tfprotov5.Diagnostic{
					Severity: apply_tfprotov5.DiagnosticSeverityError,
					Detail:   err.Error(),
					Summary:  apply_fmt.Sprintf("Failed to marshall resource '%s' to JSON", rnn),
				})
			return resp, nil
		}

		fieldManagerName, forceConflicts, err := s.getFieldManagerConfig(plannedStateVal)
		if err != nil {
			resp.Diagnostics = append(resp.Diagnostics, &apply_tfprotov5.Diagnostic{
				Severity: apply_tfprotov5.DiagnosticSeverityError,
				Summary:  "Could not extract field_manager config",
				Detail:   err.Error(),
			})
			return resp, nil
		}

		// Pick the create or update timeout depending on whether prior state
		// exists; parse failures silently fall back to a zero duration.
		timeouts := s.getTimeouts(plannedStateVal)
		var timeout apply_time.Duration
		if applyPriorState.IsNull() {
			timeout, _ = apply_time.ParseDuration(timeouts["create"])
		} else {
			timeout, _ = apply_time.ParseDuration(timeouts["update"])
		}
		deadline := apply_time.Now().Add(timeout)
		ctxDeadline, cancel := apply_context.WithDeadline(ctx, deadline)
		defer cancel()

		// Server-side apply: PATCH with the Apply patch type and the configured
		// field manager; Force controls conflict override.
		s.logger.Trace("[ApplyResourceChange][API Payload]: %s", jsonManifest)
		result, err := rs.Patch(ctxDeadline, rname, apply_types.ApplyPatchType, jsonManifest,
			apply_v1metav1.PatchOptions{
				FieldManager: fieldManagerName,
				Force:        &forceConflicts,
			},
		)
		if err != nil {
			s.logger.Error("[ApplyResourceChange][Apply]", "API error", dump(err), "API response", dump(result))
			if apply_errorsapierrors.IsConflict(err) {
				resp.Diagnostics = append(resp.Diagnostics,
					&apply_tfprotov5.Diagnostic{
						Severity: apply_tfprotov5.DiagnosticSeverityError,
						Summary:  apply_fmt.Sprintf(`There was a field manager conflict when trying to apply the manifest for %q`, rnn),
						Detail: apply_fmt.Sprintf(
							"The API returned the following conflict: %q\n\n"+
								"You can override this conflict by setting \"force_conflicts\" to true in the \"field_manager\" block.",
							err.Error(),
						),
					},
				)
			} else if status := apply_errorsapierrors.APIStatus(nil); apply_errors.As(err, &status) {
				// Structured API errors are converted field-by-field.
				resp.Diagnostics = append(resp.Diagnostics, APIStatusErrorToDiagnostics(status.Status())...)
			} else {
				resp.Diagnostics = append(resp.Diagnostics,
					&apply_tfprotov5.Diagnostic{
						Severity: apply_tfprotov5.DiagnosticSeverityError,
						Detail:   err.Error(),
						Summary:  apply_fmt.Sprintf(`PATCH for resource "%s" failed to apply`, rnn),
					})
			}
			return resp, nil
		}

		// Convert the API server's response back into a tftypes value,
		// dropping server-populated bookkeeping fields first.
		newResObject, err := apply_payload.ToTFValue(RemoveServerSideFields(result.Object), tsch, th, apply_tftypes.NewAttributePath())
		if err != nil {
			resp.Diagnostics = append(resp.Diagnostics,
				&apply_tfprotov5.Diagnostic{
					Severity: apply_tfprotov5.DiagnosticSeverityError,
					Summary:  "Conversion from Unstructured to tftypes.Value failed",
					Detail:   err.Error(),
				})
			return resp, nil
		}
		s.logger.Trace("[ApplyResourceChange][Apply]", "[payload.ToTFValue]", dump(newResObject))

		wt, _, err := s.TFTypeFromOpenAPI(ctx, gvk, true)
		if err != nil {
			return resp, apply_fmt.Errorf("failed to determine resource type ID: %s", err)
		}

		// Wait on the applied object if a `wait` block (or the deprecated
		// `wait_for` attribute) was configured; `wait_for` wins when both are set.
		var waitConfig apply_tftypes.Value
		if w, ok := plannedStateVal["wait"]; ok && !w.IsNull() {
			s.logger.Trace("[ApplyResourceChange][Wait] Using waiter config from `wait` block")
			var waitBlocks []apply_tftypes.Value
			w.As(&waitBlocks)
			if len(waitBlocks) > 0 {
				waitConfig = waitBlocks[0]
			}
		}
		if wf, ok := plannedStateVal["wait_for"]; ok && !wf.IsNull() {
			s.logger.Trace("[ApplyResourceChange][Wait] Using waiter config from deprecated `wait_for` attribute")
			waitConfig = wf
		}
		if !waitConfig.IsNull() {
			err = s.waitForCompletion(ctxDeadline, waitConfig, rs, rname, wt, th)
			if err != nil {
				if err == apply_context.DeadlineExceeded {
					resp.Diagnostics = append(resp.Diagnostics,
						&apply_tfprotov5.Diagnostic{
							Severity: apply_tfprotov5.DiagnosticSeverityError,
							Summary:  "Operation timed out",
							Detail:   "Terraform timed out waiting on the operation to complete",
						})
				} else {
					resp.Diagnostics = append(resp.Diagnostics,
						&apply_tfprotov5.Diagnostic{
							Severity: apply_tfprotov5.DiagnosticSeverityError,
							Summary:  "Error waiting for operation to complete",
							Detail:   err.Error(),
						})
				}
				return resp, nil
			}
		}

		// Expand the response to the full schema shape (unknowns for computed
		// attributes), then null those unknowns to produce the final state.
		compObj, err := apply_morph.DeepUnknown(tsch, newResObject, apply_tftypes.NewAttributePath())
		if err != nil {
			return resp, err
		}
		plannedStateVal["object"] = apply_morph.UnknownToNull(compObj)

		newStateVal := apply_tftypes.NewValue(applyPlannedState.Type(), plannedStateVal)
		s.logger.Trace("[ApplyResourceChange][Apply]", "new state value", dump(newStateVal))

		newResState, err := apply_tfprotov5.NewDynamicValue(newStateVal.Type(), newStateVal)
		if err != nil {
			return resp, err
		}
		resp.NewState = &newResState
	case applyPlannedState.IsNull():
		// Delete: the planned state is null, so remove the object described by
		// the prior state and wait for it to disappear.
		priorStateVal := make(map[string]apply_tftypes.Value)
		err = applyPriorState.As(&priorStateVal)
		if err != nil {
			resp.Diagnostics = append(resp.Diagnostics, &apply_tfprotov5.Diagnostic{
				Severity: apply_tfprotov5.DiagnosticSeverityError,
				Summary:  "Failed to extract prior resource state values",
				Detail:   err.Error(),
			})
			return resp, nil
		}
		pco, ok := priorStateVal["object"]
		if !ok {
			resp.Diagnostics = append(resp.Diagnostics, &apply_tfprotov5.Diagnostic{
				Severity: apply_tfprotov5.DiagnosticSeverityError,
				Summary:  "Failed to find object value in prior resource state",
			})
			return resp, nil
		}

		pu, err := apply_payload.FromTFValue(pco, nil, apply_tftypes.NewAttributePath())
		if err != nil {
			return resp, err
		}

		uo := apply_unstructured.Unstructured{Object: pu.(map[string]interface{})}
		gvr, err := GVRFromUnstructured(&uo, m)
		if err != nil {
			return resp, err
		}

		gvk, err := GVKFromTftypesObject(&pco, m)
		if err != nil {
			return resp, apply_fmt.Errorf("failed to determine resource GVK: %s", err)
		}

		ns, err := IsResourceNamespaced(gvk, m)
		if err != nil {
			return resp, err
		}
		rnamespace := uo.GetNamespace()
		rname := uo.GetName()
		if ns {
			rs = c.Resource(gvr).Namespace(rnamespace)
		} else {
			rs = c.Resource(gvr)
		}

		timeouts := s.getTimeouts(priorStateVal)
		timeout, _ := apply_time.ParseDuration(timeouts["delete"])
		deadline := apply_time.Now().Add(timeout)
		ctxDeadline, cancel := apply_context.WithDeadline(ctx, deadline)
		defer cancel()

		err = rs.Delete(ctxDeadline, rname, apply_v1metav1.DeleteOptions{})
		if err != nil {
			rn := apply_types.NamespacedName{Namespace: rnamespace, Name: rname}.String()
			resp.Diagnostics = append(resp.Diagnostics,
				&apply_tfprotov5.Diagnostic{
					Severity: apply_tfprotov5.DiagnosticSeverityError,
					Summary:  apply_fmt.Sprintf("Error deleting resource %s: %s", rn, err),
					Detail:   err.Error(),
				})
			return resp, nil
		}

		// Poll until the object is actually gone or the delete deadline passes;
		// finalizers can keep an object around after the DELETE call succeeds.
		for {
			if apply_time.Now().After(deadline) {
				resp.Diagnostics = append(resp.Diagnostics,
					&apply_tfprotov5.Diagnostic{
						Severity: apply_tfprotov5.DiagnosticSeverityError,
						Summary:  apply_fmt.Sprintf("Timed out when waiting for resource %q to be deleted", rname),
						Detail:   "Deletion timed out. This can happen when there is a finalizer on a resource. You may need to delete this resource manually with kubectl.",
					})
				return resp, nil
			}
			_, err := rs.Get(ctxDeadline, rname, apply_v1metav1.GetOptions{})
			if err != nil {
				if apply_errorsapierrors.IsNotFound(err) {
					s.logger.Trace("[ApplyResourceChange][Delete]", "Resource is deleted")
					break
				}
				resp.Diagnostics = append(resp.Diagnostics,
					&apply_tfprotov5.Diagnostic{
						Severity: apply_tfprotov5.DiagnosticSeverityError,
						Summary:  "Error waiting for deletion.",
						Detail:   apply_fmt.Sprintf("Error when waiting for resource %q to be deleted: %v", rname, err),
					})
				return resp, nil
			}
			apply_time.Sleep(1 * apply_time.Second)
		}

		resp.NewState = req.PlannedState
	}

	return resp, nil
}

// getTimeouts returns the create/update/delete timeout strings for the
// resource, starting from the package defaults and overriding from the first
// `timeouts` block in the given state values when present and known.
func (s *RawProviderServer) getTimeouts(v map[string]apply_tftypes.Value) map[string]string {
	timeouts := map[string]string{
		"create": defaultCreateTimeout,
		"update": defaultUpdateTimeout,
		"delete": defaultDeleteTimeout,
	}
	if !v["timeouts"].IsNull() && v["timeouts"].IsKnown() {
		var timeoutsBlock
[]apply_tftypes.Value
		// (continuation) Decode the timeouts block and override each default
		// only when the block provides a non-empty string for that key.
		v["timeouts"].As(&timeoutsBlock)
		if len(timeoutsBlock) > 0 {
			var t map[string]apply_tftypes.Value
			timeoutsBlock[0].As(&t)
			var s string
			for _, k := range []string{"create", "update", "delete"} {
				if vv, ok := t[k]; ok && !vv.IsNull() {
					vv.As(&s)
					if s != "" {
						timeouts[k] = s
					}
				}
			}
		}
	}
	return timeouts
}

// OAPIFoundry is a string key associated with the OpenAPI foundry.
// NOTE(review): only the constant's value is visible here — confirm its
// intended usage at the call sites.
const (
	OAPIFoundry string = "OPENAPIFOUNDRY"
)

// getDynamicClient lazily builds, caches, and returns a dynamic Kubernetes
// client created from the provider's client config. It fails if no client
// config has been set on the provider.
func (ps *RawProviderServer) getDynamicClient() (clients_dynamic.Interface, error) {
	if ps.dynamicClient != nil {
		return ps.dynamicClient, nil
	}
	if ps.clientConfig == nil {
		return nil, clients_fmt.Errorf("cannot create dynamic client: no client config")
	}
	dynClient, err := clients_dynamic.NewForConfig(ps.clientConfig)
	if err != nil {
		return nil, err
	}
	ps.dynamicClient = dynClient
	return dynClient, nil
}

// getDiscoveryClient lazily builds, caches, and returns a discovery client
// created from the provider's client config.
func (ps *RawProviderServer) getDiscoveryClient() (clients_discovery.DiscoveryInterface, error) {
	if ps.discoveryClient != nil {
		return ps.discoveryClient, nil
	}
	if ps.clientConfig == nil {
		return nil, clients_fmt.Errorf("cannot create discovery client: no client config")
	}
	discoClient, err := clients_discovery.NewDiscoveryClientForConfig(ps.clientConfig)
	if err != nil {
		return nil, err
	}
	ps.discoveryClient = discoClient
	return discoClient, nil
}

// getRestMapper lazily builds, caches, and returns a deferred-discovery
// RESTMapper backed by a memory-cached discovery client.
func (ps *RawProviderServer) getRestMapper() (clients_meta.RESTMapper, error) {
	if ps.restMapper != nil {
		return ps.restMapper, nil
	}
	dc, err := ps.getDiscoveryClient()
	if err != nil {
		return nil, err
	}
	// Cache discovery responses in memory to avoid repeated round-trips.
	cache := clients_memory.NewMemCacheClient(dc)
	ps.restMapper = clients_restmapper.NewDeferredDiscoveryRESTMapper(cache)
	return ps.restMapper, nil
}

// getRestClient lazily builds, caches, and returns an unversioned REST client
// created from the provider's client config.
func (ps *RawProviderServer) getRestClient() (clients_rest.Interface, error) {
	if ps.restClient != nil {
		return ps.restClient, nil
	}
	if ps.clientConfig == nil {
		return nil, clients_fmt.Errorf("cannot create REST client: no client config")
	}
	restClient, err :=
clients_rest.UnversionedRESTClientFor(ps.clientConfig)
	if err != nil {
		return nil, err
	}
	ps.restClient = restClient
	return restClient, nil
}

// getOAPIv2Foundry fetches the cluster's OpenAPI v2 spec over the REST client
// and caches a foundry constructed from it on the provider server.
func (ps *RawProviderServer) getOAPIv2Foundry() (clients_openapi.Foundry, error) {
	if ps.OAPIFoundry != nil {
		return ps.OAPIFoundry, nil
	}

	rc, err := ps.getRestClient()
	if err != nil {
		return nil, clients_fmt.Errorf("failed get OpenAPI spec: %s", err)
	}

	// GET /openapi/v2 with a 30 second request timeout.
	rq := rc.Verb("GET").Timeout(30*clients_time.Second).AbsPath("openapi", "v2")
	rs, err := rq.DoRaw(clients_context.TODO())
	if err != nil {
		return nil, clients_fmt.Errorf("failed get OpenAPI spec: %s", err)
	}

	oapif, err := clients_openapi.NewFoundryFromSpecV2(rs)
	if err != nil {
		return nil, clients_fmt.Errorf("failed construct OpenAPI foundry: %s", err)
	}

	ps.OAPIFoundry = oapif

	return oapif, nil
}

// loggingTransport wraps a RoundTripper with a logging transport while
// keeping the original transport available for requests that bypass logging.
func loggingTransport(rt clients_http.RoundTripper) clients_http.RoundTripper {
	return &loggingRountTripper{
		ot: rt,
		lt: clients_logging.NewTransport("Kubernetes API", rt),
	}
}

// loggingRountTripper routes requests either to the original transport (ot)
// or to the logging transport (lt).
type loggingRountTripper struct {
	ot clients_http.RoundTripper // original, non-logging transport
	lt clients_http.RoundTripper // logging transport
}

// RoundTrip sends the OpenAPI v2 spec download through the non-logging
// transport (the spec payload is large) and logs every other request.
func (t *loggingRountTripper) RoundTrip(req *clients_http.Request) (*clients_http.Response, error) {
	if req.URL.Path == "/openapi/v2" {
		return t.ot.RoundTrip(req)
	}
	return t.lt.RoundTrip(req)
}

// checkValidCredentials probes the API server with a GET /apis request and
// converts any failure into user-facing diagnostics, distinguishing
// authorization failures from other client-configuration problems.
func (ps *RawProviderServer) checkValidCredentials(ctx clients_context.Context) (diags []*clients_tfprotov5.Diagnostic) {
	rc, err := ps.getRestClient()
	if err != nil {
		diags = append(diags, &clients_tfprotov5.Diagnostic{
			Severity: clients_tfprotov5.DiagnosticSeverityError,
			Summary:  "Failed to construct REST client",
			Detail:   err.Error(),
		})
		return
	}
	vpath := []string{"/apis"}
	rs := rc.Get().AbsPath(vpath...).Do(ctx)
	if rs.Error() != nil {
		switch {
		case clients_errorsapierrors.IsUnauthorized(rs.Error()):
			diags = append(diags, &clients_tfprotov5.Diagnostic{
				Severity: clients_tfprotov5.DiagnosticSeverityError,
				Summary:  "Invalid credentials",
				Detail:   clients_fmt.Sprintf("The credentials configured in the provider block are not accepted by the API server. Error: %s\n\nSet TF_LOG=debug and look for '[InvalidClientConfiguration]' in the log to see actual configuration.", rs.Error().Error()),
			})
		default:
			diags = append(diags, &clients_tfprotov5.Diagnostic{
				Severity: clients_tfprotov5.DiagnosticSeverityError,
				Summary:  "Invalid configuration for API client",
				Detail:   rs.Error().Error(),
			})
		}
		// Dump the effective client config at debug level to help diagnosis.
		ps.logger.Debug("[InvalidClientConfiguration]", "Config", dump(ps.clientConfig))
	}
	return
}

// minTFVersion is the minimum Terraform CLI version supported by this provider.
const minTFVersion string = "v0.14.8"

// ConfigureProvider (beginning of the function; its body continues beyond this
// fragment) decodes the provider block configuration, determines whether the
// manifest resource is enabled, and begins assembling the Kubernetes client
// configuration from attributes and environment variables.
func (s *RawProviderServer) ConfigureProvider(ctx configure_context.Context, req *configure_tfprotov5.ConfigureProviderRequest) (*configure_tfprotov5.ConfigureProviderResponse, error) {
	response := &configure_tfprotov5.ConfigureProviderResponse{}
	diags := []*configure_tfprotov5.Diagnostic{}
	var providerConfig map[string]configure_tftypes.Value
	var err error

	// Record the host Terraform version (prefixed "v" for semver comparison).
	s.hostTFVersion = "v" + req.TerraformVersion

	cfgType := GetObjectTypeFromSchema(GetProviderConfigSchema())
	cfgVal, err := req.Config.Unmarshal(cfgType)
	if err != nil {
		response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{
			Severity: configure_tfprotov5.DiagnosticSeverityError,
			Summary:  "Failed to decode ConfigureProvider request parameter",
			Detail:   err.Error(),
		})
		return response, nil
	}
	err = cfgVal.As(&providerConfig)
	if err != nil {
		response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{
			Severity: configure_tfprotov5.DiagnosticSeverityError,
			Summary:  "Provider configuration: failed to extract 'config_path' value",
			Detail:   err.Error(),
		})
		return response, nil
	}

	// The manifest resource can be toggled off via the `experiments` block
	// (and, further below, via the TF_X_KUBERNETES_MANIFEST_RESOURCE env var).
	providerEnabled := true
	if !providerConfig["experiments"].IsNull() && providerConfig["experiments"].IsKnown() {
		var experimentsBlock []configure_tftypes.Value
- err = providerConfig["experiments"].As(&experimentsBlock) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to extract 'experiments' value", - Detail: err.Error(), - }) - return response, nil - } - if len(experimentsBlock) > 0 { - var experimentsObj map[string]configure_tftypes.Value - err := experimentsBlock[0].As(&experimentsObj) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to extract 'experiments' value", - Detail: err.Error(), - }) - return response, nil - } - if !experimentsObj["manifest_resource"].IsNull() && experimentsObj["manifest_resource"].IsKnown() { - err = experimentsObj["manifest_resource"].As(&providerEnabled) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to extract 'manifest_resource' value", - Detail: err.Error(), - }) - return response, nil - } - } - } - } - if v := configure_os.Getenv("TF_X_KUBERNETES_MANIFEST_RESOURCE"); v != "" { - providerEnabled, err = configure_strconv.ParseBool(v) - if err != nil { - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to parse boolean from `TF_X_KUBERNETES_MANIFEST_RESOURCE` env var", - Detail: err.Error(), - }) - return response, nil - } - } - } - s.providerEnabled = providerEnabled - - if !providerEnabled { - - return response, nil - } - - overrides := &configure_clientcmd.ConfigOverrides{} - loader := &configure_clientcmd.ClientConfigLoadingRules{} - - var configPath string - if 
!providerConfig["config_path"].IsNull() && providerConfig["config_path"].IsKnown() { - err = providerConfig["config_path"].As(&configPath) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to extract 'config_path' value", - Detail: err.Error(), - }) - return response, nil - } - } - - if configPathEnv, ok := configure_os.LookupEnv("KUBE_CONFIG_PATH"); ok && configPathEnv != "" { - configPath = configPathEnv - } - if len(configPath) > 0 { - configPathAbs, err := configure_homedir.Expand(configPath) - if err == nil { - _, err = configure_os.Stat(configPathAbs) - } - if err != nil { - diags = append(diags, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityInvalid, - Summary: "Invalid attribute in provider configuration", - Detail: configure_fmt.Sprintf("'config_path' refers to an invalid path: %q: %v", configPathAbs, err), - }) - } - loader.ExplicitPath = configPathAbs - } - - var precedence []string - if !providerConfig["config_paths"].IsNull() && providerConfig["config_paths"].IsKnown() { - var configPaths []configure_tftypes.Value - err = providerConfig["config_paths"].As(&configPaths) - if err != nil { - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to extract 'config_paths' value", - Detail: err.Error(), - }) - return response, nil - } - for _, p := range configPaths { - var pp string - p.As(&pp) - precedence = append(precedence, pp) - } - } - - if configPathsEnv, ok := configure_os.LookupEnv("KUBE_CONFIG_PATHS"); ok && configPathsEnv != "" { - precedence = configure_filepath.SplitList(configPathsEnv) - } - if len(precedence) > 0 { - for i, p := range precedence { - absPath, err := configure_homedir.Expand(p) - if err == nil { - _, err = 
configure_os.Stat(absPath) - } - if err != nil { - diags = append(diags, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityInvalid, - Summary: "Invalid attribute in provider configuration", - Detail: configure_fmt.Sprintf("'config_paths' refers to an invalid path: %q: %v", absPath, err), - }) - } - precedence[i] = absPath - } - loader.Precedence = precedence - } - - var clientCertificate string - if !providerConfig["client_certificate"].IsNull() && providerConfig["client_certificate"].IsKnown() { - err = providerConfig["client_certificate"].As(&clientCertificate) - if err != nil { - diags = append(diags, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityInvalid, - Summary: "Invalid attribute in provider configuration", - Detail: "'client_certificate' type cannot be asserted: " + err.Error(), - }) - return response, nil - } - } - if clientCrtEnv, ok := configure_os.LookupEnv("KUBE_CLIENT_CERT_DATA"); ok && clientCrtEnv != "" { - clientCertificate = clientCrtEnv - } - if len(clientCertificate) > 0 { - ccPEM, _ := configure_pem.Decode([]byte(clientCertificate)) - if ccPEM == nil || ccPEM.Type != "CERTIFICATE" { - diags = append(diags, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityInvalid, - Summary: "Invalid attribute in provider configuration", - Detail: "'client_certificate' is not a valid PEM encoded certificate", - }) - } - overrides.AuthInfo.ClientCertificateData = []byte(clientCertificate) - } - - var clusterCaCertificate string - if !providerConfig["cluster_ca_certificate"].IsNull() && providerConfig["cluster_ca_certificate"].IsKnown() { - err = providerConfig["cluster_ca_certificate"].As(&clusterCaCertificate) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to extract 'cluster_ca_certificate' value", - 
Detail: err.Error(), - }) - return response, nil - } - } - if clusterCAEnv, ok := configure_os.LookupEnv("KUBE_CLUSTER_CA_CERT_DATA"); ok && clusterCAEnv != "" { - clusterCaCertificate = clusterCAEnv - } - if len(clusterCaCertificate) > 0 { - ca, _ := configure_pem.Decode([]byte(clusterCaCertificate)) - if ca == nil || ca.Type != "CERTIFICATE" { - diags = append(diags, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityInvalid, - Summary: "Invalid attribute in provider configuration", - Detail: "'cluster_ca_certificate' is not a valid PEM encoded certificate", - }) - } - overrides.ClusterInfo.CertificateAuthorityData = []byte(clusterCaCertificate) - } - - var insecure bool - if !providerConfig["insecure"].IsNull() && providerConfig["insecure"].IsKnown() { - err = providerConfig["insecure"].As(&insecure) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to assert type of 'insecure' value", - Detail: err.Error(), - }) - return response, nil - } - } - if insecureEnv, ok := configure_os.LookupEnv("KUBE_INSECURE"); ok && insecureEnv != "" { - iv, err := configure_strconv.ParseBool(insecureEnv) - if err != nil { - diags = append(diags, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityInvalid, - Summary: "Invalid provider configuration", - Detail: "Environment variable KUBE_INSECURE contains invalid value: " + err.Error(), - }) - } else { - insecure = iv - } - } - overrides.ClusterInfo.InsecureSkipTLSVerify = insecure - - hasCA := len(overrides.ClusterInfo.CertificateAuthorityData) != 0 - hasCert := len(overrides.AuthInfo.ClientCertificateData) != 0 - defaultTLS := hasCA || hasCert || overrides.ClusterInfo.InsecureSkipTLSVerify - - var host string - if !providerConfig["host"].IsNull() && providerConfig["host"].IsKnown() { - err = 
providerConfig["host"].As(&host) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to extract 'host' value", - Detail: err.Error(), - }) - return response, nil - } - } - - if hostEnv, ok := configure_os.LookupEnv("KUBE_HOST"); ok && hostEnv != "" { - host = hostEnv - } - if len(host) > 0 { - _, err = configure_url.ParseRequestURI(host) - if err != nil { - diags = append(diags, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityInvalid, - Summary: "Invalid attribute in provider configuration", - Detail: "'host' is not a valid URL", - }) - } - hostURL, _, err := configure_rest.DefaultServerURL(host, "", configure_schemaapimachineryschema.GroupVersion{}, defaultTLS) - if err != nil { - diags = append(diags, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityInvalid, - Summary: "Invalid attribute in provider configuration", - Detail: "Invalid value for 'host': " + err.Error(), - }) - return response, nil - } - - overrides.ClusterInfo.Server = hostURL.String() - } - - var clientKey string - if !providerConfig["client_key"].IsNull() && providerConfig["client_key"].IsKnown() { - err = providerConfig["client_key"].As(&clientKey) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: ", - Detail: "Failed to extract 'client_key' value" + err.Error(), - }) - return response, nil - } - } - - if clientKeyEnv, ok := configure_os.LookupEnv("KUBE_CLIENT_KEY_DATA"); ok && clientKeyEnv != "" { - clientKey = clientKeyEnv - } - if len(clientKey) > 0 { - ck, _ := configure_pem.Decode([]byte(clientKey)) - if ck == nil || !configure_strings.Contains(ck.Type, "PRIVATE KEY") { - diags = append(diags, 
&configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityInvalid, - Summary: "Invalid attribute in provider configuration", - Detail: "'client_key' is not a valid PEM encoded private key", - }) - } - overrides.AuthInfo.ClientKeyData = []byte(clientKey) - } - - if len(diags) > 0 { - response.Diagnostics = diags - return response, nil - } - - var cfgContext string - if !providerConfig["config_context"].IsNull() && providerConfig["config_context"].IsKnown() { - err = providerConfig["config_context"].As(&cfgContext) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to assert type of 'config_context' value", - Detail: err.Error(), - }) - return response, nil - } - overrides.CurrentContext = cfgContext - } - if cfgContext, ok := configure_os.LookupEnv("KUBE_CTX"); ok && cfgContext != "" { - overrides.CurrentContext = cfgContext - } - - overrides.Context = configure_apiclientcmdapi.Context{} - - var cfgCtxCluster string - if !providerConfig["config_context_cluster"].IsNull() && providerConfig["config_context_cluster"].IsKnown() { - err = providerConfig["config_context_cluster"].As(&cfgCtxCluster) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to assert type of 'config_context_cluster' value", - Detail: err.Error(), - }) - return response, nil - } - overrides.Context.Cluster = cfgCtxCluster - } - if cfgCtxCluster, ok := configure_os.LookupEnv("KUBE_CTX_CLUSTER"); ok && cfgCtxCluster != "" { - overrides.Context.Cluster = cfgCtxCluster - } - - var cfgContextAuthInfo *string - if !providerConfig["config_context_user"].IsNull() && providerConfig["config_context_user"].IsKnown() { - err = 
providerConfig["config_context_user"].As(&cfgContextAuthInfo) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to assert type of 'config_context_user' value", - Detail: err.Error(), - }) - return response, nil - } - if cfgContextAuthInfo != nil { - overrides.Context.AuthInfo = *cfgContextAuthInfo - } - } - if cfgContextAuthInfoEnv, ok := configure_os.LookupEnv("KUBE_CTX_AUTH_INFO"); ok && cfgContextAuthInfoEnv != "" { - overrides.Context.AuthInfo = cfgContextAuthInfoEnv - } - - var username string - if !providerConfig["username"].IsNull() && providerConfig["username"].IsKnown() { - err = providerConfig["username"].As(&username) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to assert type of 'username' value", - Detail: err.Error(), - }) - return response, nil - } - overrides.AuthInfo.Username = username - } - if username, ok := configure_os.LookupEnv("KUBE_USERNAME"); ok && username != "" { - overrides.AuthInfo.Username = username - } - - var password string - if !providerConfig["password"].IsNull() && providerConfig["password"].IsKnown() { - err = providerConfig["password"].As(&password) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to assert type of 'password' value", - Detail: err.Error(), - }) - return response, nil - } - overrides.AuthInfo.Password = password - } - if password, ok := configure_os.LookupEnv("KUBE_PASSWORD"); ok && password != "" { - overrides.AuthInfo.Password = password - } - - var token string - if !providerConfig["token"].IsNull() && providerConfig["token"].IsKnown() { - 
err = providerConfig["token"].As(&token) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to assert type of 'token' value", - Detail: err.Error(), - }) - return response, nil - } - overrides.AuthInfo.Token = token - } - if token, ok := configure_os.LookupEnv("KUBE_TOKEN"); ok && token != "" { - overrides.AuthInfo.Token = token - } - - var proxyURL string - if !providerConfig["proxy_url"].IsNull() && providerConfig["proxy_url"].IsKnown() { - err = providerConfig["proxy_url"].As(&proxyURL) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to assert type of 'proxy_url' value", - Detail: err.Error(), - }) - return response, nil - } - overrides.ClusterDefaults.ProxyURL = proxyURL - } - if proxyUrl, ok := configure_os.LookupEnv("KUBE_PROXY_URL"); ok && proxyUrl != "" { - overrides.ClusterDefaults.ProxyURL = proxyURL - } - - if !providerConfig["exec"].IsNull() && providerConfig["exec"].IsKnown() { - var execBlock []configure_tftypes.Value - err = providerConfig["exec"].As(&execBlock) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to assert type of 'exec' value", - Detail: err.Error(), - }) - return response, nil - } - execCfg := configure_apiclientcmdapi.ExecConfig{} - execCfg.InteractiveMode = configure_apiclientcmdapi.IfAvailableExecInteractiveMode - if len(execBlock) > 0 { - var execObj map[string]configure_tftypes.Value - err := execBlock[0].As(&execObj) - if err != nil { - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: 
configure_tfprotov5.DiagnosticSeverityError, - Summary: `Provider configuration: failed to assert type of "exec" block`, - Detail: err.Error(), - }) - return response, nil - } - if !execObj["api_version"].IsNull() && execObj["api_version"].IsKnown() { - var apiv string - err = execObj["api_version"].As(&apiv) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to assert type of 'api_version' value", - Detail: err.Error(), - }) - return response, nil - } - execCfg.APIVersion = apiv - } - if !execObj["command"].IsNull() && execObj["command"].IsKnown() { - var cmd string - err = execObj["command"].As(&cmd) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to assert type of 'command' value", - Detail: err.Error(), - }) - return response, nil - } - execCfg.Command = cmd - } - if !execObj["args"].IsNull() && execObj["args"].IsFullyKnown() { - var xcmdArgs []configure_tftypes.Value - err = execObj["args"].As(&xcmdArgs) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to assert type of 'args' value", - Detail: err.Error(), - }) - return response, nil - } - execCfg.Args = make([]string, 0, len(xcmdArgs)) - for _, arg := range xcmdArgs { - var v string - err := arg.As(&v) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to assert type of element in 'args' value", - Detail: err.Error(), - }) - return response, nil - } - execCfg.Args = append(execCfg.Args, v) - 
} - } - if !execObj["env"].IsNull() && execObj["env"].IsFullyKnown() { - var xcmdEnvs map[string]configure_tftypes.Value - err = execObj["env"].As(&xcmdEnvs) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to assert type of element in 'env' value", - Detail: err.Error(), - }) - return response, nil - } - execCfg.Env = make([]configure_apiclientcmdapi.ExecEnvVar, 0, len(xcmdEnvs)) - for k, v := range xcmdEnvs { - var vs string - err = v.As(&vs) - if err != nil { - - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: failed to assert type of element in 'env' value", - Detail: err.Error(), - }) - return response, nil - } - execCfg.Env = append(execCfg.Env, configure_apiclientcmdapi.ExecEnvVar{ - Name: k, - Value: vs, - }) - } - } - overrides.AuthInfo.Exec = &execCfg - } - } - - cc := configure_clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loader, overrides) - clientConfig, err := cc.ClientConfig() - if err != nil { - s.logger.Error("[Configure]", "Failed to load config:", dump(cc)) - if configure_errors.Is(err, configure_clientcmd.ErrEmptyConfig) { - - return response, nil - } - response.Diagnostics = append(response.Diagnostics, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Provider configuration: cannot load Kubernetes client config", - Detail: err.Error(), - }) - return response, nil - } - - if s.logger.IsTrace() { - clientConfig.WrapTransport = loggingTransport - } - - codec := configure_runtime.NoopEncoder{Decoder: configure_scheme.Codecs.UniversalDecoder()} - clientConfig.NegotiatedSerializer = configure_serializer.NegotiatedSerializerWrapper(configure_runtime.SerializerInfo{Serializer: codec}) - - 
s.logger.Trace("[Configure]", "[ClientConfig]", dump(*clientConfig)) - s.clientConfig = clientConfig - - return response, nil -} - -func (s *RawProviderServer) canExecute() (resp []*configure_tfprotov5.Diagnostic) { - if !s.providerEnabled { - resp = append(resp, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Experimental feature not enabled.", - Detail: "The `kubernetes_manifest` resource is an experimental feature and must be explicitly enabled in the provider configuration block.", - }) - } - if configure_semver.IsValid(s.hostTFVersion) && configure_semver.Compare(s.hostTFVersion, minTFVersion) < 0 { - resp = append(resp, &configure_tfprotov5.Diagnostic{ - Severity: configure_tfprotov5.DiagnosticSeverityError, - Summary: "Incompatible terraform version", - Detail: configure_fmt.Sprintf("The `kubernetes_manifest` resource requires Terraform %s or above", minTFVersion), - }) - } - return -} - -func (s *RawProviderServer) ReadDataSource(ctx datasource_context.Context, req *datasource_tfprotov5.ReadDataSourceRequest) (*datasource_tfprotov5.ReadDataSourceResponse, error) { - s.logger.Trace("[ReadDataSource][Request]\n%s\n", dump(*req)) - - resp := &datasource_tfprotov5.ReadDataSourceResponse{} - - execDiag := s.canExecute() - if len(execDiag) > 0 { - resp.Diagnostics = append(resp.Diagnostics, execDiag...) 
- return resp, nil - } - - rt, err := GetDataSourceType(req.TypeName) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &datasource_tfprotov5.Diagnostic{ - Severity: datasource_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to determine data source type", - Detail: err.Error(), - }) - return resp, nil - } - - config, err := req.Config.Unmarshal(rt) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &datasource_tfprotov5.Diagnostic{ - Severity: datasource_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to unmarshal data source configuration", - Detail: err.Error(), - }) - return resp, nil - } - - var dsConfig map[string]datasource_tftypes.Value - err = config.As(&dsConfig) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &datasource_tfprotov5.Diagnostic{ - Severity: datasource_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to extract attributes from data source configuration", - Detail: err.Error(), - }) - return resp, nil - } - - rm, err := s.getRestMapper() - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &datasource_tfprotov5.Diagnostic{ - Severity: datasource_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to get RESTMapper client", - Detail: err.Error(), - }) - return resp, nil - } - - client, err := s.getDynamicClient() - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &datasource_tfprotov5.Diagnostic{ - Severity: datasource_tfprotov5.DiagnosticSeverityError, - Summary: "failed to get Dynamic client", - Detail: err.Error(), - }) - return resp, nil - } - - var apiVersion, kind string - dsConfig["api_version"].As(&apiVersion) - dsConfig["kind"].As(&kind) - - gvr, err := getGVR(apiVersion, kind, rm) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &datasource_tfprotov5.Diagnostic{ - Severity: datasource_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to determine resource GroupVersion", - Detail: err.Error(), - }) - return resp, nil - } - - gvk := 
gvr.GroupVersion().WithKind(kind) - ns, err := IsResourceNamespaced(gvk, rm) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &datasource_tfprotov5.Diagnostic{ - Severity: datasource_tfprotov5.DiagnosticSeverityError, - Summary: "Failed determine if resource is namespaced", - Detail: err.Error(), - }) - return resp, nil - } - rcl := client.Resource(gvr) - - objectType, th, err := s.TFTypeFromOpenAPI(ctx, gvk, false) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &datasource_tfprotov5.Diagnostic{ - Severity: datasource_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to save resource state", - Detail: err.Error(), - }) - return resp, nil - } - - var metadataBlock []datasource_tftypes.Value - dsConfig["metadata"].As(&metadataBlock) - - var metadata map[string]datasource_tftypes.Value - metadataBlock[0].As(&metadata) - - var name string - metadata["name"].As(&name) - - var res *datasource_unstructured.Unstructured - if ns { - var namespace string - metadata["namespace"].As(&namespace) - if namespace == "" { - namespace = "default" - } - res, err = rcl.Namespace(namespace).Get(ctx, name, datasource_v1metav1.GetOptions{}) - } else { - res, err = rcl.Get(ctx, name, datasource_v1metav1.GetOptions{}) - } - if err != nil { - if datasource_errorsapierrors.IsNotFound(err) { - return resp, nil - } - d := datasource_tfprotov5.Diagnostic{ - Severity: datasource_tfprotov5.DiagnosticSeverityError, - Summary: datasource_fmt.Sprintf("Failed to get data source"), - Detail: err.Error(), - } - resp.Diagnostics = append(resp.Diagnostics, &d) - return resp, nil - } - - fo := RemoveServerSideFields(res.Object) - nobj, err := datasource_payload.ToTFValue(fo, objectType, th, datasource_tftypes.NewAttributePath()) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &datasource_tfprotov5.Diagnostic{ - Severity: datasource_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to convert API response to Terraform value type", - Detail: 
err.Error(), - }) - return resp, nil - } - - nobj, err = datasource_morph.DeepUnknown(objectType, nobj, datasource_tftypes.NewAttributePath()) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &datasource_tfprotov5.Diagnostic{ - Severity: datasource_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to save resource state", - Detail: err.Error(), - }) - return resp, nil - } - rawState := make(map[string]datasource_tftypes.Value) - err = config.As(&rawState) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &datasource_tfprotov5.Diagnostic{ - Severity: datasource_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to save resource state", - Detail: err.Error(), - }) - return resp, nil - } - rawState["object"] = datasource_morph.UnknownToNull(nobj) - - v := datasource_tftypes.NewValue(rt, rawState) - state, err := datasource_tfprotov5.NewDynamicValue(v.Type(), v) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &datasource_tfprotov5.Diagnostic{ - Severity: datasource_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to save resource state", - Detail: err.Error(), - }) - return resp, nil - } - resp.State = &state - return resp, nil -} - -func getGVR(apiVersion, kind string, m datasource_meta.RESTMapper) (datasource_schema.GroupVersionResource, error) { - gv, err := datasource_schema.ParseGroupVersion(apiVersion) - if err != nil { - return datasource_schema.GroupVersionResource{}, err - } - mapping, err := m.RESTMapping(gv.WithKind(kind).GroupKind(), gv.Version) - if err != nil { - return datasource_schema.GroupVersionResource{}, err - } - return mapping.Resource, err -} - -func APIStatusErrorToDiagnostics(s diagnostics_v1metav1.Status) []*diagnostics_tfprotov5.Diagnostic { - var diags []*diagnostics_tfprotov5.Diagnostic - diags = append(diags, &diagnostics_tfprotov5.Diagnostic{ - Severity: diagnostics_tfprotov5.DiagnosticSeverityError, - Summary: "API response status: " + s.Status, - Detail: s.Message, - }) - if 
s.Details == nil { - return diags - } - gk := diagnostics_v1metav1.GroupKind{Group: s.Details.Group, Kind: s.Details.Kind} - diags = append(diags, &diagnostics_tfprotov5.Diagnostic{ - Severity: diagnostics_tfprotov5.DiagnosticSeverityError, - Summary: diagnostics_fmt.Sprintf("Kubernetes API Error: %s %s [%s]", string(s.Reason), gk.String(), s.Details.Name), - }) - for _, c := range s.Details.Causes { - diags = append(diags, &diagnostics_tfprotov5.Diagnostic{ - Severity: diagnostics_tfprotov5.DiagnosticSeverityError, - Detail: c.Message, - Summary: c.Field, - }) - } - return diags -} - -func (s *RawProviderServer) GetProviderSchema(ctx getproviderschema_context.Context, req *getproviderschema_tfprotov5.GetProviderSchemaRequest) (*getproviderschema_tfprotov5.GetProviderSchemaResponse, error) { - cfgSchema := GetProviderConfigSchema() - resSchema := GetProviderResourceSchema() - dsSchema := GetProviderDataSourceSchema() - - return &getproviderschema_tfprotov5.GetProviderSchemaResponse{ - Provider: cfgSchema, - ResourceSchemas: resSchema, - DataSourceSchemas: dsSchema, - }, nil -} - -func (s *RawProviderServer) ImportResourceState(ctx import_context.Context, req *import_tfprotov5.ImportResourceStateRequest) (*import_tfprotov5.ImportResourceStateResponse, error) { - - resp := &import_tfprotov5.ImportResourceStateResponse{} - - execDiag := s.canExecute() - if len(execDiag) > 0 { - resp.Diagnostics = append(resp.Diagnostics, execDiag...) 
- return resp, nil - } - - gvk, name, namespace, err := import_util.ParseResourceID(req.ID) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &import_tfprotov5.Diagnostic{ - Severity: import_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to parse import ID", - Detail: err.Error(), - }) - } - s.logger.Trace("[ImportResourceState]", "[ID]", gvk, name, namespace) - rt, err := GetResourceType(req.TypeName) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &import_tfprotov5.Diagnostic{ - Severity: import_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to determine resource type", - Detail: err.Error(), - }) - return resp, nil - } - rm, err := s.getRestMapper() - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &import_tfprotov5.Diagnostic{ - Severity: import_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to get RESTMapper client", - Detail: err.Error(), - }) - return resp, nil - } - client, err := s.getDynamicClient() - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &import_tfprotov5.Diagnostic{ - Severity: import_tfprotov5.DiagnosticSeverityError, - Summary: "failed to get Dynamic client", - Detail: err.Error(), - }) - return resp, nil - } - ns, err := IsResourceNamespaced(gvk, rm) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &import_tfprotov5.Diagnostic{ - Severity: import_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to get namespacing requirement from RESTMapper", - Detail: err.Error(), - }) - return resp, nil - } - - io := import_unstructured.Unstructured{} - io.SetKind(gvk.Kind) - io.SetAPIVersion(gvk.GroupVersion().String()) - io.SetName(name) - io.SetNamespace(namespace) - - gvr, err := GVRFromUnstructured(&io, rm) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &import_tfprotov5.Diagnostic{ - Severity: import_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to get GVR from GVK via RESTMapper", - Detail: err.Error(), - }) - return resp, nil 
- } - rcl := client.Resource(gvr) - - var ro *import_unstructured.Unstructured - if ns { - ro, err = rcl.Namespace(namespace).Get(ctx, name, import_v1metav1.GetOptions{}) - } else { - ro, err = rcl.Get(ctx, name, import_v1metav1.GetOptions{}) - } - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &import_tfprotov5.Diagnostic{ - Severity: import_tfprotov5.DiagnosticSeverityError, - Summary: import_fmt.Sprintf("Failed to get resource %+v from API", io), - Detail: err.Error(), - }) - return resp, nil - } - s.logger.Trace("[ImportResourceState]", "[API Resource]", ro) - - objectType, th, err := s.TFTypeFromOpenAPI(ctx, gvk, false) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &import_tfprotov5.Diagnostic{ - Severity: import_tfprotov5.DiagnosticSeverityError, - Summary: import_fmt.Sprintf("Failed to determine resource type from GVK: %s", gvk), - Detail: err.Error(), - }) - return resp, nil - } - - fo := RemoveServerSideFields(ro.UnstructuredContent()) - nobj, err := import_payload.ToTFValue(fo, objectType, th, import_tftypes.NewAttributePath()) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &import_tfprotov5.Diagnostic{ - Severity: import_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to convert unstructured to tftypes.Value", - Detail: err.Error(), - }) - return resp, nil - } - nobj, err = import_morph.DeepUnknown(objectType, nobj, import_tftypes.NewAttributePath()) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &import_tfprotov5.Diagnostic{ - Severity: import_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to backfill unknown values during import", - Detail: err.Error(), - }) - return resp, nil - } - s.logger.Trace("[ImportResourceState]", "[tftypes.Value]", nobj) - - newState := make(map[string]import_tftypes.Value) - wftype := rt.(import_tftypes.Object).AttributeTypes["wait_for"] - wtype := rt.(import_tftypes.Object).AttributeTypes["wait"] - timeoutsType := 
rt.(import_tftypes.Object).AttributeTypes["timeouts"] - fmType := rt.(import_tftypes.Object).AttributeTypes["field_manager"] - cmpType := rt.(import_tftypes.Object).AttributeTypes["computed_fields"] - - newState["manifest"] = import_tftypes.NewValue(import_tftypes.Object{AttributeTypes: map[string]import_tftypes.Type{}}, nil) - newState["object"] = import_morph.UnknownToNull(nobj) - newState["wait_for"] = import_tftypes.NewValue(wftype, nil) - newState["wait"] = import_tftypes.NewValue(wtype, nil) - newState["timeouts"] = import_tftypes.NewValue(timeoutsType, nil) - newState["field_manager"] = import_tftypes.NewValue(fmType, nil) - newState["computed_fields"] = import_tftypes.NewValue(cmpType, nil) - - nsVal := import_tftypes.NewValue(rt, newState) - - impState, err := import_tfprotov5.NewDynamicValue(nsVal.Type(), nsVal) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &import_tfprotov5.Diagnostic{ - Severity: import_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to construct dynamic value for imported state", - Detail: err.Error(), - }) - return resp, nil - } - impf := import_tftypes.NewValue(privateStateSchema, - map[string]import_tftypes.Value{"IsImported": import_tftypes.NewValue(import_tftypes.Bool, true)}, - ) - fb, err := impf.MarshalMsgPack(privateStateSchema) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &import_tfprotov5.Diagnostic{ - Severity: import_tfprotov5.DiagnosticSeverityWarning, - Summary: "Failed to earmark imported resource", - Detail: err.Error(), - }) - } - nr := &import_tfprotov5.ImportedResource{ - TypeName: req.TypeName, - State: &impState, - Private: fb, - } - resp.ImportedResources = append(resp.ImportedResources, nr) - resp.Diagnostics = append(resp.Diagnostics, &import_tfprotov5.Diagnostic{ - Severity: import_tfprotov5.DiagnosticSeverityWarning, - Summary: "Apply needed after 'import'", - Detail: "Please run apply after a successful import to realign the resource state to the configuration in 
Terraform.", - }) - return resp, nil -} - -func (s *RawProviderServer) dryRun(ctx plan_context.Context, obj plan_tftypes.Value, fieldManager string, forceConflicts bool, isNamespaced bool) error { - c, err := s.getDynamicClient() - if err != nil { - return plan_fmt.Errorf("failed to retrieve Kubernetes dynamic client during apply: %v", err) - } - m, err := s.getRestMapper() - if err != nil { - return plan_fmt.Errorf("failed to retrieve Kubernetes RESTMapper client during apply: %v", err) - } - - minObj := plan_morph.UnknownToNull(obj) - pu, err := plan_payload.FromTFValue(minObj, nil, plan_tftypes.NewAttributePath()) - if err != nil { - return err - } - - rqObj := mapRemoveNulls(pu.(map[string]interface{})) - uo := plan_unstructured.Unstructured{} - uo.SetUnstructuredContent(rqObj) - rnamespace := uo.GetNamespace() - rname := uo.GetName() - rnn := plan_types.NamespacedName{Namespace: rnamespace, Name: rname}.String() - - gvr, err := GVRFromUnstructured(&uo, m) - if err != nil { - return plan_fmt.Errorf("failed to determine resource GVR: %s", err) - } - - var rs plan_dynamic.ResourceInterface - if isNamespaced { - rs = c.Resource(gvr).Namespace(rnamespace) - } else { - rs = c.Resource(gvr) - } - - jsonManifest, err := uo.MarshalJSON() - if err != nil { - return plan_fmt.Errorf("failed to marshall resource %q to JSON: %v", rnn, err) - } - _, err = rs.Patch(ctx, rname, plan_types.ApplyPatchType, jsonManifest, - plan_v1metav1.PatchOptions{ - FieldManager: fieldManager, - Force: &forceConflicts, - DryRun: []string{"All"}, - }, - ) - - return err -} - -const defaultFieldManagerName = "Terraform" - -func (s *RawProviderServer) getFieldManagerConfig(v map[string]plan_tftypes.Value) (string, bool, error) { - fieldManagerName := defaultFieldManagerName - forceConflicts := false - if !v["field_manager"].IsNull() && v["field_manager"].IsKnown() { - var fieldManagerBlock []plan_tftypes.Value - err := v["field_manager"].As(&fieldManagerBlock) - if err != nil { - return "", 
false, err - } - if len(fieldManagerBlock) > 0 { - var fieldManagerObj map[string]plan_tftypes.Value - err := fieldManagerBlock[0].As(&fieldManagerObj) - if err != nil { - return "", false, err - } - if !fieldManagerObj["name"].IsNull() && fieldManagerObj["name"].IsKnown() { - err = fieldManagerObj["name"].As(&fieldManagerName) - if err != nil { - return "", false, err - } - } - if !fieldManagerObj["force_conflicts"].IsNull() && fieldManagerObj["force_conflicts"].IsKnown() { - err = fieldManagerObj["force_conflicts"].As(&forceConflicts) - if err != nil { - return "", false, err - } - } - } - } - return fieldManagerName, forceConflicts, nil -} - -func isImportedFlagFromPrivate(p []byte) (f bool, d []*plan_tfprotov5.Diagnostic) { - if p == nil || len(p) == 0 { - return - } - ps, err := getPrivateStateValue(p) - if err != nil { - d = append(d, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Unexpected format for private state", - Detail: err.Error(), - }) - } - err = ps["IsImported"].As(&f) - if err != nil { - d = append(d, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Unexpected format for import flag in private state", - Detail: err.Error(), - }) - } - return -} - -func (s *RawProviderServer) PlanResourceChange(ctx plan_context.Context, req *plan_tfprotov5.PlanResourceChangeRequest) (*plan_tfprotov5.PlanResourceChangeResponse, error) { - resp := &plan_tfprotov5.PlanResourceChangeResponse{} - - isImported, d := isImportedFlagFromPrivate(req.PriorPrivate) - resp.Diagnostics = append(resp.Diagnostics, d...) 
- if !isImported { - resp.RequiresReplace = append(resp.RequiresReplace, - plan_tftypes.NewAttributePath().WithAttributeName("manifest").WithAttributeName("apiVersion"), - plan_tftypes.NewAttributePath().WithAttributeName("manifest").WithAttributeName("kind"), - plan_tftypes.NewAttributePath().WithAttributeName("manifest").WithAttributeName("metadata").WithAttributeName("name"), - ) - } else { - resp.PlannedPrivate = req.PriorPrivate - } - - execDiag := s.canExecute() - if len(execDiag) > 0 { - resp.Diagnostics = append(resp.Diagnostics, execDiag...) - return resp, nil - } - - resp.Diagnostics = append(resp.Diagnostics, s.checkValidCredentials(ctx)...) - if len(resp.Diagnostics) > 0 { - return resp, nil - } - - rt, err := GetResourceType(req.TypeName) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to determine planned resource type", - Detail: err.Error(), - }) - return resp, nil - } - - proposedState, err := req.ProposedNewState.Unmarshal(rt) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to unmarshal planned resource state", - Detail: err.Error(), - }) - return resp, nil - } - s.logger.Trace("[PlanResourceChange]", "[ProposedState]", dump(proposedState)) - - proposedVal := make(map[string]plan_tftypes.Value) - err = proposedState.As(&proposedVal) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to extract planned resource state from tftypes.Value", - Detail: err.Error(), - }) - return resp, nil - } - - computedFields := make(map[string]*plan_tftypes.AttributePath) - var atp *plan_tftypes.AttributePath - cfVal, ok := proposedVal["computed_fields"] - if ok && !cfVal.IsNull() && cfVal.IsKnown() { - var cf []plan_tftypes.Value 
- cfVal.As(&cf) - for _, v := range cf { - var vs string - err := v.As(&vs) - if err != nil { - s.logger.Error("[computed_fields] cannot extract element from list") - continue - } - atp, err := FieldPathToTftypesPath(vs) - if err != nil { - s.logger.Error("[Configure]", "[computed_fields] cannot parse filed path element", err) - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "[computed_fields] cannot parse field path element: " + vs, - Detail: err.Error(), - }) - continue - } - computedFields[atp.String()] = atp - } - } else { - - atp = plan_tftypes.NewAttributePath().WithAttributeName("metadata").WithAttributeName("annotations") - computedFields[atp.String()] = atp - - atp = plan_tftypes.NewAttributePath().WithAttributeName("metadata").WithAttributeName("labels") - computedFields[atp.String()] = atp - } - - priorState, err := req.PriorState.Unmarshal(rt) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to unmarshal prior resource state", - Detail: err.Error(), - }) - return resp, nil - } - s.logger.Trace("[PlanResourceChange]", "[PriorState]", dump(priorState)) - - priorVal := make(map[string]plan_tftypes.Value) - err = priorState.As(&priorVal) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to extract prior resource state from tftypes.Value", - Detail: err.Error(), - }) - return resp, nil - } - - if proposedState.IsNull() { - - if _, ok := priorVal["object"]; ok { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Invalid prior state while planning for destroy", - Detail: plan_fmt.Sprintf("'object' attribute missing from state: %s", err), - }) - return resp, nil - } 
- resp.PlannedState = req.ProposedNewState - return resp, nil - } - - ppMan, ok := proposedVal["manifest"] - if !ok { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Invalid proposed state during planning", - Detail: "Missing 'manifest' attribute", - Attribute: plan_tftypes.NewAttributePath().WithAttributeName("manifest"), - }) - return resp, nil - } - - rm, err := s.getRestMapper() - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to create K8s RESTMapper client", - Detail: err.Error(), - }) - return resp, nil - } - gvk, err := GVKFromTftypesObject(&ppMan, rm) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to determine GroupVersionResource for manifest", - Detail: err.Error(), - }) - return resp, nil - } - - vdiags := s.validateResourceOnline(&ppMan) - if len(vdiags) > 0 { - resp.Diagnostics = append(resp.Diagnostics, vdiags...) 
- return resp, nil - } - - ns, err := IsResourceNamespaced(gvk, rm) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to discover scope of resource", - Detail: err.Error(), - }) - return resp, nil - } - if ns && !isImported { - resp.RequiresReplace = append(resp.RequiresReplace, - plan_tftypes.NewAttributePath().WithAttributeName("manifest").WithAttributeName("metadata").WithAttributeName("namespace"), - ) - } - - objectType, hints, err := s.TFTypeFromOpenAPI(ctx, gvk, false) - if err != nil { - return resp, plan_fmt.Errorf("failed to determine resource type ID: %s", err) - } - - if !objectType.Is(plan_tftypes.Object{}) { - - objectType = ppMan.Type() - - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityWarning, - Summary: "This custom resource does not have an associated OpenAPI schema.", - Detail: "We could not find an OpenAPI schema for this custom resource. 
Updates to this resource will cause a forced replacement.", - }) - - fieldManagerName, forceConflicts, err := s.getFieldManagerConfig(proposedVal) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Could not extract field_manager config", - Detail: err.Error(), - }) - return resp, nil - } - - err = s.dryRun(ctx, ppMan, fieldManagerName, forceConflicts, ns) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Dry-run failed for non-structured resource", - Detail: plan_fmt.Sprintf("A dry-run apply was performed for this resource but was unsuccessful: %v", err), - }) - return resp, nil - } - - resp.RequiresReplace = []*plan_tftypes.AttributePath{ - plan_tftypes.NewAttributePath().WithAttributeName("manifest"), - plan_tftypes.NewAttributePath().WithAttributeName("object"), - } - } - - so := objectType.(plan_tftypes.Object) - s.logger.Debug("[PlanUpdateResource]", "OAPI type", dump(so)) - - morphedManifest, d := plan_morph.ValueToType(ppMan, objectType, plan_tftypes.NewAttributePath().WithAttributeName("object")) - if len(d) > 0 { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Manifest configuration incompatible with resource schema", - Detail: "Detailed descriptions of errors will follow below.", - }) - resp.Diagnostics = append(resp.Diagnostics, d...) 
- return resp, nil - } - s.logger.Debug("[PlanResourceChange]", "morphed manifest", dump(morphedManifest)) - - completePropMan, err := plan_morph.DeepUnknown(objectType, morphedManifest, plan_tftypes.NewAttributePath().WithAttributeName("object")) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to backfill manifest from OpenAPI type", - Detail: plan_fmt.Sprintf("This usually happens when the provider cannot fully process the schema retrieved from cluster. Please report this to the provider maintainers.\nError: %s", err.Error()), - Attribute: plan_tftypes.NewAttributePath().WithAttributeName("object"), - }) - return resp, nil - } - s.logger.Debug("[PlanResourceChange]", "backfilled manifest", dump(completePropMan)) - - if proposedVal["object"].IsNull() { - - s.logger.Debug("[PlanResourceChange]", "creating object", dump(completePropMan)) - newObj, err := plan_tftypes.Transform(completePropMan, func(ap *plan_tftypes.AttributePath, v plan_tftypes.Value) (plan_tftypes.Value, error) { - _, ok := computedFields[ap.String()] - if ok { - return plan_tftypes.NewValue(v.Type(), plan_tftypes.UnknownValue), nil - } - return v, nil - }) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to set computed attributes in new resource state", - Detail: err.Error(), - Attribute: plan_tftypes.NewAttributePath().WithAttributeName("object"), - }) - return resp, nil - } - proposedVal["object"] = newObj - } else { - - priorObj, ok := priorVal["object"] - if !ok { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Invalid prior state during planning", - Detail: "Missing 'object' attribute", - Attribute: plan_tftypes.NewAttributePath().WithAttributeName("object"), - }) - return resp, 
nil - } - priorMan, ok := priorVal["manifest"] - if !ok { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Invalid prior state during planning", - Detail: "Missing 'manifest' attribute", - Attribute: plan_tftypes.NewAttributePath().WithAttributeName("manifest"), - }) - return resp, nil - } - updatedObj, err := plan_tftypes.Transform(completePropMan, func(ap *plan_tftypes.AttributePath, v plan_tftypes.Value) (plan_tftypes.Value, error) { - _, isComputed := computedFields[ap.String()] - if v.IsKnown() { - hasChanged := false - wasCfg, restPath, err := plan_tftypes.WalkAttributePath(priorMan, ap) - if err != nil && len(restPath.Steps()) != 0 { - hasChanged = true - } - nowCfg, restPath, err := plan_tftypes.WalkAttributePath(ppMan, ap) - hasChanged = err == nil && len(restPath.Steps()) == 0 && wasCfg.(plan_tftypes.Value).IsKnown() && !wasCfg.(plan_tftypes.Value).Equal(nowCfg.(plan_tftypes.Value)) - if hasChanged { - h, ok := hints[plan_morph.ValueToTypePath(ap).String()] - if ok && h == plan_manifest.PreserveUnknownFieldsLabel { - apm := append(plan_tftypes.NewAttributePath().WithAttributeName("manifest").Steps(), ap.Steps()...) 
- resp.RequiresReplace = append(resp.RequiresReplace, plan_tftypes.NewAttributePathWithSteps(apm)) - } - } - if isComputed { - if hasChanged { - return plan_tftypes.NewValue(v.Type(), plan_tftypes.UnknownValue), nil - } - nowVal, restPath, err := plan_tftypes.WalkAttributePath(proposedVal["object"], ap) - if err == nil && len(restPath.Steps()) == 0 { - return nowVal.(plan_tftypes.Value), nil - } - } - return v, nil - } - - wasVal, restPath, err := plan_tftypes.WalkAttributePath(priorMan, ap) - if err == nil && len(restPath.Steps()) == 0 && wasVal.(plan_tftypes.Value).IsKnown() { - - return v, nil - } - - priorAtrVal, restPath, err := plan_tftypes.WalkAttributePath(priorObj, ap) - if err != nil { - if len(restPath.Steps()) > 0 { - - return v, nil - } - - return v, ap.NewError(err) - } - if len(restPath.Steps()) > 0 { - s.logger.Warn("[PlanResourceChange]", "Unexpected missing attribute from state at", ap.String(), " + ", restPath.String()) - } - return priorAtrVal.(plan_tftypes.Value), nil - }) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to update proposed state from prior state", - Detail: err.Error(), - Attribute: plan_tftypes.NewAttributePath().WithAttributeName("object"), - }) - return resp, nil - } - - proposedVal["object"] = updatedObj - } - - propStateVal := plan_tftypes.NewValue(proposedState.Type(), proposedVal) - s.logger.Trace("[PlanResourceChange]", "new planned state", dump(propStateVal)) - - plannedState, err := plan_tfprotov5.NewDynamicValue(propStateVal.Type(), propStateVal) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &plan_tfprotov5.Diagnostic{ - Severity: plan_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to assemble proposed state during plan", - Detail: err.Error(), - }) - return resp, nil - } - - resp.PlannedState = &plannedState - return resp, nil -} - -func getAttributeValue(v plan_tftypes.Value, 
path string) (plan_tftypes.Value, error) { - p, err := FieldPathToTftypesPath(path) - if err != nil { - return plan_tftypes.Value{}, err - } - vv, _, err := plan_tftypes.WalkAttributePath(v, p) - if err != nil { - return plan_tftypes.Value{}, err - } - return vv.(plan_tftypes.Value), nil -} - -var providerName = "registry.terraform.io/hashicorp/kubernetes" - -func Serve(ctx plugin_context.Context, logger plugin_hclog.Logger) error { - return plugin_tf5servertf5server.Serve(providerName, func() plugin_tfprotov5.ProviderServer { return &(RawProviderServer{logger: logger}) }) -} - -func Provider() func() plugin_tfprotov5.ProviderServer { - var logLevel string - var ok bool = false - for _, ev := range []string{"TF_LOG_PROVIDER_KUBERNETES", "TF_LOG_PROVIDER", "TF_LOG"} { - logLevel, ok = plugin_os.LookupEnv(ev) - if ok { - break - } - } - if !ok { - logLevel = "off" - } - - return func() plugin_tfprotov5.ProviderServer { - return &(RawProviderServer{logger: plugin_hclog.New(&plugin_hclog.LoggerOptions{ - Level: plugin_hclog.LevelFromString(logLevel), - Output: plugin_os.Stderr, - })}) - } -} - -func ServeTest(ctx plugin_context.Context, logger plugin_hclog.Logger, t *plugin_testing.T) (plugin_tfexec.ReattachInfo, error) { - reattachConfigCh := make(chan *plugin_plugin.ReattachConfig) - - go plugin_tf5servertf5server.Serve(providerName, - func() plugin_tfprotov5.ProviderServer { return &(RawProviderServer{logger: logger}) }, - plugin_tf5servertf5server.WithDebug(ctx, reattachConfigCh, nil), - plugin_tf5servertf5server.WithLoggingSink(t), - plugin_tf5servertf5server.WithGoPluginLogger(logger), - ) - - reattachConfig, err := waitForReattachConfig(reattachConfigCh) - if err != nil { - return nil, plugin_fmt.Errorf("Error getting reattach config: %s", err) - } - - return map[string]plugin_tfexec.ReattachConfig{ - providerName: convertReattachConfig(reattachConfig), - }, nil -} - -func convertReattachConfig(reattachConfig *plugin_plugin.ReattachConfig) 
plugin_tfexec.ReattachConfig { - return plugin_tfexec.ReattachConfig{ - Protocol: string(reattachConfig.Protocol), - Pid: reattachConfig.Pid, - Test: true, - Addr: plugin_tfexec.ReattachConfigAddr{ - Network: reattachConfig.Addr.Network(), - String: reattachConfig.Addr.String(), - }, - } -} - -func printReattachConfig(config *plugin_plugin.ReattachConfig) { - reattachStr, err := plugin_json.Marshal(map[string]plugin_tfexec.ReattachConfig{ - providerName: convertReattachConfig(config), - }) - if err != nil { - plugin_fmt.Printf("Error building reattach string: %s", err) - return - } - plugin_fmt.Printf("# Provider server started\nexport TF_REATTACH_PROVIDERS='%s'\n", string(reattachStr)) -} - -func waitForReattachConfig(ch chan *plugin_plugin.ReattachConfig) (*plugin_plugin.ReattachConfig, error) { - select { - case config := <-ch: - return config, nil - case <-plugin_time.After(2 * plugin_time.Second): - return nil, plugin_fmt.Errorf("timeout while waiting for reattach configuration") - } -} - -func GetObjectTypeFromSchema(schema *provider_tfprotov5.Schema) provider_tftypes.Type { - bm := map[string]provider_tftypes.Type{} - - for _, att := range schema.Block.Attributes { - bm[att.Name] = att.Type - } - - for _, b := range schema.Block.BlockTypes { - a := map[string]provider_tftypes.Type{} - for _, att := range b.Block.Attributes { - a[att.Name] = att.Type - } - bm[b.TypeName] = provider_tftypes.List{ - ElementType: provider_tftypes.Object{AttributeTypes: a}, - } - - for _, bb := range b.Block.BlockTypes { - aa := map[string]provider_tftypes.Type{} - for _, att := range bb.Block.Attributes { - aa[att.Name] = att.Type - } - a[bb.TypeName] = provider_tftypes.List{ - ElementType: provider_tftypes.Object{AttributeTypes: aa}, - } - } - } - - return provider_tftypes.Object{AttributeTypes: bm} -} - -func GetResourceType(name string) (provider_tftypes.Type, error) { - sch := GetProviderResourceSchema() - rsch, ok := sch[name] - if !ok { - return 
provider_tftypes.DynamicPseudoType, provider_fmt.Errorf("unknown resource %s - cannot find schema", name) - } - return GetObjectTypeFromSchema(rsch), nil -} - -func GetDataSourceType(name string) (provider_tftypes.Type, error) { - sch := GetProviderDataSourceSchema() - rsch, ok := sch[name] - if !ok { - return provider_tftypes.DynamicPseudoType, provider_fmt.Errorf("unknown data source %q: cannot find schema", name) - } - return GetObjectTypeFromSchema(rsch), nil -} - -func GetProviderResourceSchema() map[string]*provider_tfprotov5.Schema { - return map[string]*provider_tfprotov5.Schema{ - "kubernetes_manifest": { - Version: 1, - Block: &provider_tfprotov5.SchemaBlock{ - BlockTypes: []*provider_tfprotov5.SchemaNestedBlock{ - { - TypeName: "timeouts", - Nesting: provider_tfprotov5.SchemaNestedBlockNestingModeList, - MinItems: 0, - MaxItems: 1, - Block: &provider_tfprotov5.SchemaBlock{ - Attributes: []*provider_tfprotov5.SchemaAttribute{ - { - Name: "create", - Type: provider_tftypes.String, - Description: "Timeout for the create operation.", - Optional: true, - }, - { - Name: "update", - Type: provider_tftypes.String, - Description: "Timeout for the update operation.", - Optional: true, - }, - { - Name: "delete", - Type: provider_tftypes.String, - Description: "Timeout for the delete operation.", - Optional: true, - }, - }, - }, - }, - { - TypeName: "field_manager", - Nesting: provider_tfprotov5.SchemaNestedBlockNestingModeList, - MinItems: 0, - MaxItems: 1, - Block: &provider_tfprotov5.SchemaBlock{ - Description: "Configure field manager options.", - Attributes: []*provider_tfprotov5.SchemaAttribute{ - { - Name: "name", - Type: provider_tftypes.String, - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - Description: "The name to use for the field manager when creating and updating the resource.", - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "force_conflicts", - Type: provider_tftypes.Bool, - Required: false, - Optional: 
true, - Computed: false, - Sensitive: false, - Description: "Force changes against conflicts.", - DescriptionKind: 0, - Deprecated: false, - }, - }, - }, - }, - { - TypeName: "wait", - Nesting: provider_tfprotov5.SchemaNestedBlockNestingModeList, - MinItems: 0, - MaxItems: 1, - Block: &provider_tfprotov5.SchemaBlock{ - Description: "Configure waiter options.", - BlockTypes: []*provider_tfprotov5.SchemaNestedBlock{ - { - TypeName: "condition", - Nesting: provider_tfprotov5.SchemaNestedBlockNestingModeList, - MinItems: 0, - Block: &provider_tfprotov5.SchemaBlock{ - Attributes: []*provider_tfprotov5.SchemaAttribute{ - { - Name: "status", - Type: provider_tftypes.String, - Optional: true, - Description: "The condition status.", - }, { - Name: "type", - Type: provider_tftypes.String, - Optional: true, - Description: "The type of condition.", - }, - }, - }, - }, - }, - Attributes: []*provider_tfprotov5.SchemaAttribute{ - { - Name: "rollout", - Type: provider_tftypes.Bool, - Optional: true, - Description: "Wait for rollout to complete on resources that support `kubectl rollout status`.", - }, - { - Name: "fields", - Type: provider_tftypes.Map{ElementType: provider_tftypes.String}, - Optional: true, - Description: "A map of paths to fields to wait for a specific field value.", - }, - }, - }, - }, - }, - Attributes: []*provider_tfprotov5.SchemaAttribute{ - { - Name: "manifest", - Type: provider_tftypes.DynamicPseudoType, - Required: true, - Description: "A Kubernetes manifest describing the desired state of the resource in HCL format.", - }, - { - Name: "object", - Type: provider_tftypes.DynamicPseudoType, - Optional: true, - Computed: true, - Description: "The resulting resource state, as returned by the API server after applying the desired state from `manifest`.", - }, - { - Name: "wait_for", - Type: provider_tftypes.Object{ - AttributeTypes: map[string]provider_tftypes.Type{ - "fields": provider_tftypes.Map{ - ElementType: provider_tftypes.String, - }, - }, - }, - 
Optional: true, - Deprecated: true, - Description: "A map of attribute paths and desired patterns to be matched. After each apply the provider will wait for all attributes listed here to reach a value that matches the desired pattern.", - }, - { - Name: "computed_fields", - Type: provider_tftypes.List{ElementType: provider_tftypes.String}, - Description: "List of manifest fields whose values can be altered by the API server during 'apply'. Defaults to: [\"metadata.annotations\", \"metadata.labels\"]", - Optional: true, - }, - }, - }, - }, - } -} - -func GetProviderDataSourceSchema() map[string]*provider_tfprotov5.Schema { - return map[string]*provider_tfprotov5.Schema{ - "kubernetes_resource": { - Version: 1, - Block: &provider_tfprotov5.SchemaBlock{ - Attributes: []*provider_tfprotov5.SchemaAttribute{ - { - Name: "api_version", - Type: provider_tftypes.String, - Required: true, - Description: "The resource apiVersion.", - }, - { - Name: "kind", - Type: provider_tftypes.String, - Required: true, - Description: "The resource kind.", - }, - { - Name: "object", - Type: provider_tftypes.DynamicPseudoType, - Optional: true, - Computed: true, - Description: "The response from the API server.", - }, - }, - BlockTypes: []*provider_tfprotov5.SchemaNestedBlock{ - { - TypeName: "metadata", - Nesting: provider_tfprotov5.SchemaNestedBlockNestingModeList, - MinItems: 1, - MaxItems: 1, - Block: &provider_tfprotov5.SchemaBlock{ - Description: "Metadata for the resource", - Attributes: []*provider_tfprotov5.SchemaAttribute{ - { - Name: "name", - Type: provider_tftypes.String, - Required: true, - Description: "The resource name.", - }, - { - Name: "namespace", - Type: provider_tftypes.String, - Optional: true, - Description: "The resource namespace.", - }, - }, - }, - }, - }, - }, - }, - } -} - -func GetProviderConfigSchema() *provider_config_tfprotov5.Schema { - b := provider_config_tfprotov5.SchemaBlock{ - - Attributes: []*provider_config_tfprotov5.SchemaAttribute{ - { - Name: 
"host", - Type: provider_config_tftypes.String, - Description: "The hostname (in form of URI) of Kubernetes master.", - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "username", - Type: provider_config_tftypes.String, - Description: "The username to use for HTTP basic authentication when accessing the Kubernetes master endpoint.", - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "password", - Type: provider_config_tftypes.String, - Description: "The password to use for HTTP basic authentication when accessing the Kubernetes master endpoint.", - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "insecure", - Type: provider_config_tftypes.Bool, - Description: "Whether server should be accessed without verifying the TLS certificate.", - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "client_certificate", - Type: provider_config_tftypes.String, - Description: "PEM-encoded client certificate for TLS authentication.", - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "client_key", - Type: provider_config_tftypes.String, - Description: "PEM-encoded client certificate key for TLS authentication.", - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "cluster_ca_certificate", - Type: provider_config_tftypes.String, - Description: "PEM-encoded root certificates bundle for TLS authentication.", - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "config_paths", - Type: 
provider_config_tftypes.List{ElementType: provider_config_tftypes.String}, - Description: "A list of paths to kube config files. Can be set with KUBE_CONFIG_PATHS environment variable.", - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "config_path", - Type: provider_config_tftypes.String, - Description: "Path to the kube config file. Can be set with KUBE_CONFIG_PATH.", - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "config_context", - Type: provider_config_tftypes.String, - Description: "", - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "config_context_auth_info", - Type: provider_config_tftypes.String, - Description: "", - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "config_context_cluster", - Type: provider_config_tftypes.String, - Description: "", - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "token", - Type: provider_config_tftypes.String, - Description: "Token to authenticate an service account", - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "proxy_url", - Type: provider_config_tftypes.String, - Description: "URL to the proxy to be used for all API requests", - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "ignore_annotations", - Type: provider_config_tftypes.List{ElementType: provider_config_tftypes.String}, - Description: "List of Kubernetes metadata annotations to ignore across all resources handled by this provider for situations where external 
systems are managing certain resource annotations. Each item is a regular expression.", - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "ignore_labels", - Type: provider_config_tftypes.List{ElementType: provider_config_tftypes.String}, - Description: "List of Kubernetes metadata labels to ignore across all resources handled by this provider for situations where external systems are managing certain resource labels. Each item is a regular expression.", - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - }, - BlockTypes: []*provider_config_tfprotov5.SchemaNestedBlock{ - { - TypeName: "exec", - Nesting: provider_config_tfprotov5.SchemaNestedBlockNestingModeList, - MinItems: 0, - MaxItems: 1, - Block: &provider_config_tfprotov5.SchemaBlock{ - Attributes: []*provider_config_tfprotov5.SchemaAttribute{ - { - Name: "api_version", - Type: provider_config_tftypes.String, - Required: true, - Optional: false, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "command", - Type: provider_config_tftypes.String, - Required: true, - Optional: false, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "env", - Type: provider_config_tftypes.Map{ElementType: provider_config_tftypes.String}, - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - { - Name: "args", - Type: provider_config_tftypes.List{ElementType: provider_config_tftypes.String}, - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - DescriptionKind: 0, - Deprecated: false, - }, - }, - }, - }, - { - TypeName: "experiments", - Nesting: provider_config_tfprotov5.SchemaNestedBlockNestingModeList, - MinItems: 0, - MaxItems: 1, - Block: &provider_config_tfprotov5.SchemaBlock{ - 
Description: "Enable and disable experimental features.", - Attributes: []*provider_config_tfprotov5.SchemaAttribute{ - { - Name: "manifest_resource", - Type: provider_config_tftypes.Bool, - Required: false, - Optional: true, - Computed: false, - Sensitive: false, - Description: "Enable the `kubernetes_manifest` resource.", - DescriptionKind: 0, - Deprecated: false, - }, - }, - }, - }, - }, - } - - return &provider_config_tfprotov5.Schema{ - Version: 0, - Block: &b, - } -} - -func (s *RawProviderServer) ReadResource(ctx read_context.Context, req *read_tfprotov5.ReadResourceRequest) (*read_tfprotov5.ReadResourceResponse, error) { - resp := &read_tfprotov5.ReadResourceResponse{} - - resp.Private = req.Private - - execDiag := s.canExecute() - if len(execDiag) > 0 { - resp.Diagnostics = append(resp.Diagnostics, execDiag...) - return resp, nil - } - - var resState map[string]read_tftypes.Value - var err error - rt, err := GetResourceType(req.TypeName) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &read_tfprotov5.Diagnostic{ - Severity: read_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to determine resource type", - Detail: err.Error(), - }) - return resp, nil - } - - currentState, err := req.CurrentState.Unmarshal(rt) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &read_tfprotov5.Diagnostic{ - Severity: read_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to decode current state", - Detail: err.Error(), - }) - return resp, nil - } - if currentState.IsNull() { - resp.Diagnostics = append(resp.Diagnostics, &read_tfprotov5.Diagnostic{ - Severity: read_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to read resource", - Detail: "Incomplete of missing state", - }) - return resp, nil - } - err = currentState.As(&resState) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &read_tfprotov5.Diagnostic{ - Severity: read_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to extract resource from current 
state", - Detail: err.Error(), - }) - return resp, nil - } - - co, hasOb := resState["object"] - if !hasOb || co.IsNull() { - resp.Diagnostics = append(resp.Diagnostics, &read_tfprotov5.Diagnostic{ - Severity: read_tfprotov5.DiagnosticSeverityError, - Summary: "Current state of resource has no 'object' attribute", - Detail: "This should not happen. The state may be incomplete or corrupted.\nIf this error is reproducible, plese report issue to provider maintainers.", - }) - return resp, nil - } - rm, err := s.getRestMapper() - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &read_tfprotov5.Diagnostic{ - Severity: read_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to get RESTMapper client", - Detail: err.Error(), - }) - return resp, nil - } - gvk, err := GVKFromTftypesObject(&co, rm) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &read_tfprotov5.Diagnostic{ - Severity: read_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to determine GroupVersionResource for manifest", - Detail: err.Error(), - }) - return resp, nil - } - - objectType, th, err := s.TFTypeFromOpenAPI(ctx, gvk, false) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &read_tfprotov5.Diagnostic{ - Severity: read_tfprotov5.DiagnosticSeverityError, - Summary: read_fmt.Sprintf("Failed to determine resource type from GVK: %s", gvk), - Detail: err.Error(), - }) - return resp, nil - } - - cu, err := read_payload.FromTFValue(co, th, read_tftypes.NewAttributePath()) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &read_tfprotov5.Diagnostic{ - Severity: read_tfprotov5.DiagnosticSeverityError, - Summary: "Failed encode 'object' attribute to Unstructured", - Detail: err.Error(), - }) - return resp, nil - } - s.logger.Trace("[ReadResource]", "[unstructured.FromTFValue]", dump(cu)) - - client, err := s.getDynamicClient() - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &read_tfprotov5.Diagnostic{ - Severity: 
read_tfprotov5.DiagnosticSeverityError, - Summary: "failed to get Dynamic client", - Detail: err.Error(), - }) - return resp, nil - } - - uo := read_unstructured.Unstructured{Object: cu.(map[string]interface{})} - cGVR, err := GVRFromUnstructured(&uo, rm) - if err != nil { - return resp, err - } - ns, err := IsResourceNamespaced(uo.GroupVersionKind(), rm) - if err != nil { - return resp, err - } - rcl := client.Resource(cGVR) - - rnamespace := uo.GetNamespace() - rname := uo.GetName() - - var ro *read_unstructured.Unstructured - if ns { - ro, err = rcl.Namespace(rnamespace).Get(ctx, rname, read_v1metav1.GetOptions{}) - } else { - ro, err = rcl.Get(ctx, rname, read_v1metav1.GetOptions{}) - } - if err != nil { - if read_errorsapierrors.IsNotFound(err) { - return resp, nil - } - d := read_tfprotov5.Diagnostic{ - Severity: read_tfprotov5.DiagnosticSeverityError, - Summary: read_fmt.Sprintf("Cannot GET resource %s", dump(co)), - Detail: err.Error(), - } - resp.Diagnostics = append(resp.Diagnostics, &d) - return resp, nil - } - - fo := RemoveServerSideFields(ro.Object) - nobj, err := read_payload.ToTFValue(fo, objectType, th, read_tftypes.NewAttributePath()) - if err != nil { - return resp, err - } - - nobj, err = read_morph.DeepUnknown(objectType, nobj, read_tftypes.NewAttributePath()) - if err != nil { - return resp, err - } - - rawState := make(map[string]read_tftypes.Value) - err = currentState.As(&rawState) - if err != nil { - return resp, err - } - rawState["object"] = read_morph.UnknownToNull(nobj) - - nsVal := read_tftypes.NewValue(currentState.Type(), rawState) - newState, err := read_tfprotov5.NewDynamicValue(nsVal.Type(), nsVal) - if err != nil { - return resp, err - } - resp.NewState = &newState - return resp, nil -} - -func GVRFromUnstructured(o *resource_unstructured.Unstructured, m resource_meta.RESTMapper) (resource_schema.GroupVersionResource, error) { - apv := o.GetAPIVersion() - kind := o.GetKind() - gv, err := resource_schema.ParseGroupVersion(apv) - 
if err != nil { - return resource_schema.GroupVersionResource{}, err - } - mapping, err := m.RESTMapping(gv.WithKind(kind).GroupKind(), gv.Version) - if err != nil { - return resource_schema.GroupVersionResource{}, err - } - return mapping.Resource, err -} - -func GVKFromTftypesObject(in *resource_tftypes.Value, m resource_meta.RESTMapper) (resource_schema.GroupVersionKind, error) { - var obj map[string]resource_tftypes.Value - err := in.As(&obj) - if err != nil { - return resource_schema.GroupVersionKind{}, err - } - var apv string - var kind string - err = obj["apiVersion"].As(&apv) - if err != nil { - return resource_schema.GroupVersionKind{}, err - } - err = obj["kind"].As(&kind) - if err != nil { - return resource_schema.GroupVersionKind{}, err - } - gv, err := resource_schema.ParseGroupVersion(apv) - if err != nil { - return resource_schema.GroupVersionKind{}, err - } - mappings, err := m.RESTMappings(gv.WithKind(kind).GroupKind()) - if err != nil { - return resource_schema.GroupVersionKind{}, err - } - for _, m := range mappings { - if m.GroupVersionKind.GroupVersion().String() == apv { - return m.GroupVersionKind, nil - } - } - return resource_schema.GroupVersionKind{}, resource_errors.New("cannot select exact GV from REST mapper") -} - -func IsResourceNamespaced(gvk resource_schema.GroupVersionKind, m resource_meta.RESTMapper) (bool, error) { - rm, err := m.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return false, err - } - if rm.Scope.Name() == resource_meta.RESTScopeNameNamespace { - return true, nil - } - return false, nil -} - -func (ps *RawProviderServer) TFTypeFromOpenAPI(ctx resource_context.Context, gvk resource_schema.GroupVersionKind, status bool) (resource_tftypes.Type, map[string]string, error) { - var tsch resource_tftypes.Type - var hints map[string]string - - oapi, err := ps.getOAPIv2Foundry() - if err != nil { - return nil, hints, resource_fmt.Errorf("cannot get OpenAPI foundry: %s", err) - } - - crdSchema, err := 
ps.lookUpGVKinCRDs(ctx, gvk) - if err != nil { - return nil, hints, resource_fmt.Errorf("failed to look up GVK [%s] among available CRDs: %s", gvk.String(), err) - } - if crdSchema != nil { - js, err := resource_json.Marshal(resource_openapi.SchemaToSpec("", crdSchema.(map[string]interface{}))) - if err != nil { - return nil, hints, resource_fmt.Errorf("CRD schema fails to marshal into JSON: %s", err) - } - oapiv3, err := resource_openapi.NewFoundryFromSpecV3(js) - if err != nil { - return nil, hints, err - } - tsch, hints, err = oapiv3.GetTypeByGVK(gvk) - if err != nil { - return nil, hints, resource_fmt.Errorf("failed to generate tftypes for GVK [%s] from CRD schema: %s", gvk.String(), err) - } - } - if tsch == nil { - - tsch, hints, err = oapi.GetTypeByGVK(gvk) - if err != nil { - return nil, hints, resource_fmt.Errorf("cannot get resource type from OpenAPI (%s): %s", gvk.String(), err) - } - } - - if tsch.Is(resource_tftypes.Object{}) && !status { - ot := tsch.(resource_tftypes.Object) - atts := make(map[string]resource_tftypes.Type) - for k, t := range ot.AttributeTypes { - if k != "status" { - atts[k] = t - } - } - - if _, ok := atts["apiVersion"]; !ok { - atts["apiVersion"] = resource_tftypes.String - } - if _, ok := atts["kind"]; !ok { - atts["kind"] = resource_tftypes.String - } - metaType, _, err := oapi.GetTypeByGVK(resource_openapi.ObjectMetaGVK) - if err != nil { - return nil, hints, resource_fmt.Errorf("failed to generate tftypes for v1.ObjectMeta: %s", err) - } - atts["metadata"] = metaType.(resource_tftypes.Object) - - tsch = resource_tftypes.Object{AttributeTypes: atts} - } - - return tsch, hints, nil -} - -func mapRemoveNulls(in map[string]interface{}) map[string]interface{} { - for k, v := range in { - switch tv := v.(type) { - case []interface{}: - in[k] = sliceRemoveNulls(tv) - case map[string]interface{}: - in[k] = mapRemoveNulls(tv) - default: - if v == nil { - delete(in, k) - } - } - } - return in -} - -func sliceRemoveNulls(in 
[]interface{}) []interface{} { - s := []interface{}{} - for _, v := range in { - switch tv := v.(type) { - case []interface{}: - s = append(s, sliceRemoveNulls(tv)) - case map[string]interface{}: - s = append(s, mapRemoveNulls(tv)) - default: - if v != nil { - s = append(s, v) - } - } - } - return s -} - -func RemoveServerSideFields(in map[string]interface{}) map[string]interface{} { - - delete(in, "status") - - resource_meta := in["metadata"].(map[string]interface{}) - - delete(resource_meta, "uid") - delete(resource_meta, "creationTimestamp") - delete(resource_meta, "resourceVersion") - delete(resource_meta, "generation") - delete(resource_meta, "selfLink") - - delete(resource_meta, "managedFields") - - return in -} - -func (ps *RawProviderServer) lookUpGVKinCRDs(ctx resource_context.Context, gvk resource_schema.GroupVersionKind) (interface{}, error) { - c, err := ps.getDynamicClient() - if err != nil { - return nil, err - } - m, err := ps.getRestMapper() - if err != nil { - return nil, err - } - - crd := resource_schema.GroupKind{Group: "apiextensions.k8s.io", Kind: "CustomResourceDefinition"} - crms, err := m.RESTMappings(crd) - if err != nil { - return nil, resource_fmt.Errorf("could not extract resource version mappings for apiextensions.k8s.io.CustomResourceDefinition: %s", err) - } - - for _, crm := range crms { - crdRes, err := c.Resource(crm.Resource).List(ctx, resource_v1v1.ListOptions{}) - if err != nil { - return nil, err - } - - for _, r := range crdRes.Items { - spec := r.Object["spec"].(map[string]interface{}) - if spec == nil { - continue - } - grp := spec["group"].(string) - if grp != gvk.Group { - continue - } - names := spec["names"] - if names == nil { - continue - } - kind := names.(map[string]interface{})["kind"] - if kind != gvk.Kind { - continue - } - ver := spec["versions"] - if ver == nil { - ver = spec["version"] - if ver == nil { - continue - } - } - for _, rv := range ver.([]interface{}) { - if rv == nil { - continue - } - v := 
rv.(map[string]interface{}) - if v["name"] == gvk.Version { - s, ok := v["schema"].(map[string]interface{}) - if !ok { - return nil, nil - } - return s["openAPIV3Schema"], nil - } - } - } - } - return nil, nil -} - -var privateStateSchema resource_tftypes.Object = resource_tftypes.Object{AttributeTypes: map[string]resource_tftypes.Type{ - "IsImported": resource_tftypes.Bool, -}} - -func getPrivateStateValue(p []byte) (ps map[string]resource_tftypes.Value, err error) { - if p == nil { - err = resource_errors.New("private state value is nil") - return - } - pv, err := resource_tftypes.ValueFromMsgPack(p, privateStateSchema) - err = pv.As(&ps) - return -} - -func init() { - server_install.Install(server_scheme.Scheme) -} - -type RawProviderServer struct { - logger server_hclog.Logger - clientConfig *server_rest.Config - dynamicClient server_dynamic.Interface - discoveryClient server_discovery.DiscoveryInterface - restMapper server_meta.RESTMapper - restClient server_rest.Interface - OAPIFoundry server_openapi.Foundry - - providerEnabled bool - hostTFVersion string -} - -func dump(v interface{}) server_hclog.Format { - return server_hclog.Fmt("%v", v) -} - -func (s *RawProviderServer) PrepareProviderConfig(ctx server_context.Context, req *server_tfprotov5.PrepareProviderConfigRequest) (*server_tfprotov5.PrepareProviderConfigResponse, error) { - s.logger.Trace("[PrepareProviderConfig][Request]\n%s\n", dump(*req)) - resp := &server_tfprotov5.PrepareProviderConfigResponse{} - return resp, nil -} - -func (s *RawProviderServer) ValidateDataSourceConfig(ctx server_context.Context, req *server_tfprotov5.ValidateDataSourceConfigRequest) (*server_tfprotov5.ValidateDataSourceConfigResponse, error) { - s.logger.Trace("[ValidateDataSourceConfig][Request]\n%s\n", dump(*req)) - resp := &server_tfprotov5.ValidateDataSourceConfigResponse{} - return resp, nil -} - -func (s *RawProviderServer) StopProvider(ctx server_context.Context, req *server_tfprotov5.StopProviderRequest) 
(*server_tfprotov5.StopProviderResponse, error) { - s.logger.Trace("[StopProvider][Request]\n%s\n", dump(*req)) - - return nil, server_status.Errorf(server_codes.Unimplemented, "method Stop not implemented") -} - -func (s *RawProviderServer) UpgradeResourceState(ctx upgrade_state_context.Context, req *upgrade_state_tfprotov5.UpgradeResourceStateRequest) (*upgrade_state_tfprotov5.UpgradeResourceStateResponse, error) { - resp := &upgrade_state_tfprotov5.UpgradeResourceStateResponse{} - resp.Diagnostics = []*upgrade_state_tfprotov5.Diagnostic{} - - sch := GetProviderResourceSchema() - rt := GetObjectTypeFromSchema(sch[req.TypeName]) - - rv, err := req.RawState.Unmarshal(rt) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &upgrade_state_tfprotov5.Diagnostic{ - Severity: upgrade_state_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to unmarshal old state during upgrade", - Detail: err.Error(), - }) - return resp, nil - } - - cd := s.checkValidCredentials(ctx) - if len(cd) > 0 { - us, err := upgrade_state_tfprotov5.NewDynamicValue(rt, rv) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &upgrade_state_tfprotov5.Diagnostic{ - Severity: upgrade_state_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to encode new state during upgrade", - Detail: err.Error(), - }) - } - resp.UpgradedState = &us - - return resp, nil - } - - var cs map[string]upgrade_state_tftypes.Value - err = rv.As(&cs) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &upgrade_state_tfprotov5.Diagnostic{ - Severity: upgrade_state_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to extract values from old state during upgrade", - Detail: err.Error(), - }) - return resp, nil - } - - obj, ok := cs["object"] - if !ok { - resp.Diagnostics = append(resp.Diagnostics, &upgrade_state_tfprotov5.Diagnostic{ - Severity: upgrade_state_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to find object value in existing resource state", - }) - return resp, nil 
- } - - m, err := s.getRestMapper() - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, - &upgrade_state_tfprotov5.Diagnostic{ - Severity: upgrade_state_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to retrieve Kubernetes RESTMapper client during state upgrade", - Detail: err.Error(), - }) - return resp, nil - } - - gvk, err := GVKFromTftypesObject(&obj, m) - if err != nil { - return resp, upgrade_state_fmt.Errorf("failed to determine resource GVK: %s", err) - } - - tsch, _, err := s.TFTypeFromOpenAPI(ctx, gvk, false) - if err != nil { - return resp, upgrade_state_fmt.Errorf("failed to determine resource type ID: %s", err) - } - - morphedObject, d := upgrade_state_morph.ValueToType(obj, tsch, upgrade_state_tftypes.NewAttributePath()) - if len(d) > 0 { - resp.Diagnostics = append(resp.Diagnostics, d...) - for i := range d { - if d[i].Severity == upgrade_state_tfprotov5.DiagnosticSeverityError { - return resp, nil - } - } - } - s.logger.Debug("[UpgradeResourceState]", "morphed object", dump(morphedObject)) - - cs["object"] = obj - - newStateVal := upgrade_state_tftypes.NewValue(rv.Type(), cs) - - us, err := upgrade_state_tfprotov5.NewDynamicValue(rt, newStateVal) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &upgrade_state_tfprotov5.Diagnostic{ - Severity: upgrade_state_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to encode new state during upgrade", - Detail: err.Error(), - }) - } - resp.UpgradedState = &us - - return resp, nil -} - -func (s *RawProviderServer) ValidateResourceTypeConfig(ctx validate_context.Context, req *validate_tfprotov5.ValidateResourceTypeConfigRequest) (*validate_tfprotov5.ValidateResourceTypeConfigResponse, error) { - resp := &validate_tfprotov5.ValidateResourceTypeConfigResponse{} - requiredKeys := []string{"apiVersion", "kind", "metadata"} - forbiddenKeys := []string{"status"} - - rt, err := GetResourceType(req.TypeName) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, 
&validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to determine resource type", - Detail: err.Error(), - }) - return resp, nil - } - - config, err := req.Config.Unmarshal(rt) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to unmarshal resource state", - Detail: err.Error(), - }) - return resp, nil - } - - att := validate_tftypes.NewAttributePath() - att = att.WithAttributeName("manifest") - - configVal := make(map[string]validate_tftypes.Value) - err = config.As(&configVal) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to extract resource state from SDK value", - Detail: err.Error(), - }) - return resp, nil - } - - manifest, ok := configVal["manifest"] - if !ok { - resp.Diagnostics = append(resp.Diagnostics, &validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityError, - Summary: "Manifest missing from resource configuration", - Detail: "A manifest attribute containing a valid Kubernetes resource configuration is required.", - Attribute: att, - }) - return resp, nil - } - - rawManifest := make(map[string]validate_tftypes.Value) - err = manifest.As(&rawManifest) - if err != nil { - if err.Error() == "unmarshaling unknown values is not supported" { - - return resp, nil - } - resp.Diagnostics = append(resp.Diagnostics, &validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityError, - Summary: `Failed to extract "manifest" attribute value from resource configuration`, - Detail: err.Error(), - Attribute: att, - }) - return resp, nil - } - - for _, key := range requiredKeys { - if _, present := rawManifest[key]; !present { - kp := att.WithAttributeName(key) - resp.Diagnostics = append(resp.Diagnostics, 
&validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityError, - Summary: `Attribute key missing from "manifest" value`, - Detail: validate_fmt.Sprintf("'%s' attribute key is missing from manifest configuration", key), - Attribute: kp, - }) - } - } - - for _, key := range forbiddenKeys { - if _, present := rawManifest[key]; present { - kp := att.WithAttributeName(key) - resp.Diagnostics = append(resp.Diagnostics, &validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityError, - Summary: `Forbidden attribute key in "manifest" value`, - Detail: validate_fmt.Sprintf("'%s' attribute key is not allowed in manifest configuration", key), - Attribute: kp, - }) - } - } - - timeouts := s.getTimeouts(configVal) - path := validate_tftypes.NewAttributePath().WithAttributeName("timeouts") - for k, v := range timeouts { - _, err := validate_time.ParseDuration(v) - if err != nil { - resp.Diagnostics = append(resp.Diagnostics, &validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityError, - Summary: validate_fmt.Sprintf("Error parsing timeout for %q", k), - Detail: err.Error(), - Attribute: path.WithAttributeName(k), - }) - } - } - - if wait, ok := configVal["wait"]; ok && !wait.IsNull() { - var waitBlock []validate_tftypes.Value - wait.As(&waitBlock) - if len(waitBlock) > 0 { - var w map[string]validate_tftypes.Value - waitBlock[0].As(&w) - waiters := []string{} - for k, ww := range w { - if !ww.IsNull() { - if k == "condition" { - var cb []validate_tftypes.Value - ww.As(&cb) - if len(cb) == 0 { - continue - } - } - waiters = append(waiters, k) - } - } - if len(waiters) > 1 { - resp.Diagnostics = append(resp.Diagnostics, &validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityError, - Summary: "Invalid wait configuration", - Detail: validate_fmt.Sprintf(`You may only set one of "%s".`, validate_strings.Join(waiters, "\", \"")), - Attribute: 
validate_tftypes.NewAttributePath().WithAttributeName("wait"), - }) - } - } - } - if waitFor, ok := configVal["wait_for"]; ok && !waitFor.IsNull() { - resp.Diagnostics = append(resp.Diagnostics, &validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityWarning, - Summary: "Deprecated Attribute", - Detail: `The "wait_for" attribute has been deprecated. Please use the "wait" block instead.`, - Attribute: validate_tftypes.NewAttributePath().WithAttributeName("wait_for"), - }) - } - - return resp, nil -} - -func (s *RawProviderServer) validateResourceOnline(manifest *validate_tftypes.Value) (diags []*validate_tfprotov5.Diagnostic) { - rm, err := s.getRestMapper() - if err != nil { - diags = append(diags, &validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to create K8s RESTMapper client", - Detail: err.Error(), - }) - return - } - gvk, err := GVKFromTftypesObject(manifest, rm) - if err != nil { - diags = append(diags, &validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityError, - Summary: "Failed to determine GroupVersionResource for manifest", - Detail: err.Error(), - }) - return - } - - ns, err := IsResourceNamespaced(gvk, rm) - if err != nil { - diags = append(diags, - &validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityError, - Detail: err.Error(), - Summary: validate_fmt.Sprintf("Failed to discover scope of resource '%s'", gvk.String()), - }) - return - } - nsPath := validate_tftypes.NewAttributePath() - nsPath = nsPath.WithAttributeName("metadata").WithAttributeName("namespace") - nsVal, restPath, err := validate_tftypes.WalkAttributePath(*manifest, nsPath) - if ns { - if err != nil || len(restPath.Steps()) > 0 { - diags = append(diags, - &validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityError, - Detail: validate_fmt.Sprintf("Resources of type '%s' require a namespace", gvk.String()), - Summary: 
"Namespace required", - }) - return - } - if nsVal.(validate_tftypes.Value).IsNull() { - diags = append(diags, - &validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityError, - Detail: validate_fmt.Sprintf("Namespace for resource '%s' cannot be nil", gvk.String()), - Summary: "Namespace required", - }) - } - var nsStr string - err := nsVal.(validate_tftypes.Value).As(&nsStr) - if nsStr == "" && err == nil { - diags = append(diags, - &validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityError, - Detail: validate_fmt.Sprintf("Namespace for resource '%s' cannot be empty", gvk.String()), - Summary: "Namespace required", - }) - } - } else { - if err == nil && len(restPath.Steps()) == 0 && !nsVal.(validate_tftypes.Value).IsNull() { - diags = append(diags, - &validate_tfprotov5.Diagnostic{ - Severity: validate_tfprotov5.DiagnosticSeverityError, - Detail: validate_fmt.Sprintf("Resources of type '%s' cannot have a namespace", gvk.String()), - Summary: "Cluster level resource cannot take namespace", - }) - } - } - return -} - -const waiterSleepTime = 1 * waiter_time.Second - -func (s *RawProviderServer) waitForCompletion(ctx waiter_context.Context, waitForBlock waiter_tftypes.Value, rs waiter_dynamic.ResourceInterface, rname string, rtype waiter_tftypes.Type, th map[string]string) error { - if waitForBlock.IsNull() || !waitForBlock.IsKnown() { - return nil - } - - waiter, err := NewResourceWaiter(rs, rname, rtype, th, waitForBlock, s.logger) - if err != nil { - return err - } - return waiter.Wait(ctx) -} - -type Waiter interface { - Wait(waiter_context.Context) error -} - -func NewResourceWaiter(resource waiter_dynamic.ResourceInterface, resourceName string, resourceType waiter_tftypes.Type, th map[string]string, waitForBlock waiter_tftypes.Value, hl waiter_hclog.Logger) (Waiter, error) { - var waitForBlockVal map[string]waiter_tftypes.Value - err := waitForBlock.As(&waitForBlockVal) - if err != nil { - return nil, err - } 
- - if v, ok := waitForBlockVal["rollout"]; ok { - var rollout bool - v.As(&rollout) - if rollout { - return &RolloutWaiter{ - resource, - resourceName, - hl, - }, nil - } - } - - if v, ok := waitForBlockVal["condition"]; ok { - var conditionsBlocks []waiter_tftypes.Value - v.As(&conditionsBlocks) - if len(conditionsBlocks) > 0 { - return &ConditionsWaiter{ - resource, - resourceName, - conditionsBlocks, - hl, - }, nil - } - } - - fields, ok := waitForBlockVal["fields"] - if !ok || fields.IsNull() || !fields.IsKnown() { - return &NoopWaiter{}, nil - } - - if !fields.Type().Is(waiter_tftypes.Map{}) { - return nil, waiter_fmt.Errorf(`"fields" should be a map of strings`) - } - - var vm map[string]waiter_tftypes.Value - fields.As(&vm) - var matchers []FieldMatcher - - for k, v := range vm { - var expr string - v.As(&expr) - var re *waiter_regexp.Regexp - if expr == "*" { - - re = waiter_regexp.MustCompile("(.*)?") - } else { - var err error - re, err = waiter_regexp.Compile(expr) - if err != nil { - return nil, waiter_fmt.Errorf("invalid regular expression: %q", expr) - } - } - - p, err := FieldPathToTftypesPath(k) - if err != nil { - return nil, err - } - matchers = append(matchers, FieldMatcher{p, re}) - } - - return &FieldWaiter{ - resource, - resourceName, - resourceType, - th, - matchers, - hl, - }, nil - -} - -type FieldMatcher struct { - path *waiter_tftypes.AttributePath - valueMatcher *waiter_regexp.Regexp -} - -type FieldWaiter struct { - resource waiter_dynamic.ResourceInterface - resourceName string - resourceType waiter_tftypes.Type - typeHints map[string]string - fieldMatchers []FieldMatcher - logger waiter_hclog.Logger -} - -func (w *FieldWaiter) Wait(ctx waiter_context.Context) error { - w.logger.Info("[ApplyResourceChange][Wait] Waiting until ready...\n") - for { - if deadline, ok := ctx.Deadline(); ok { - if waiter_time.Now().After(deadline) { - return waiter_context.DeadlineExceeded - } - } - - res, err := w.resource.Get(ctx, w.resourceName, 
waiter_v1v1.GetOptions{}) - if err != nil { - return err - } - if waiter_errors.IsGone(err) { - return waiter_fmt.Errorf("resource was deleted") - } - resObj := res.Object - meta := resObj["metadata"].(map[string]interface{}) - delete(meta, "managedFields") - - w.logger.Trace("[ApplyResourceChange][Wait]", "API Response", resObj) - - obj, err := waiter_payload.ToTFValue(resObj, w.resourceType, w.typeHints, waiter_tftypes.NewAttributePath()) - if err != nil { - return err - } - - done, err := func(obj waiter_tftypes.Value) (bool, error) { - for _, m := range w.fieldMatchers { - vi, rp, err := waiter_tftypes.WalkAttributePath(obj, m.path) - if err != nil { - return false, err - } - if len(rp.Steps()) > 0 { - return false, waiter_fmt.Errorf("attribute not present at path '%s'", m.path.String()) - } - - var s string - v := vi.(waiter_tftypes.Value) - switch { - case v.Type().Is(waiter_tftypes.String): - v.As(&s) - case v.Type().Is(waiter_tftypes.Bool): - var vb bool - v.As(&vb) - s = waiter_fmt.Sprintf("%t", vb) - case v.Type().Is(waiter_tftypes.Number): - var f waiter_big.Float - v.As(&f) - if f.IsInt() { - i, _ := f.Int64() - s = waiter_fmt.Sprintf("%d", i) - } else { - i, _ := f.Float64() - s = waiter_fmt.Sprintf("%f", i) - } - default: - return true, waiter_fmt.Errorf("wait_for: cannot match on type %q", v.Type().String()) - } - - if !m.valueMatcher.Match([]byte(s)) { - return false, nil - } - } - - return true, nil - }(obj) - - if done { - w.logger.Info("[ApplyResourceChange][Wait] Done waiting.\n") - return err - } - - waiter_time.Sleep(waiterSleepTime) - } -} - -type NoopWaiter struct{} - -func (w *NoopWaiter) Wait(_ waiter_context.Context) error { - return nil -} - -func FieldPathToTftypesPath(fieldPath string) (*waiter_tftypes.AttributePath, error) { - t, d := waiter_hclsyntax.ParseTraversalAbs([]byte(fieldPath), "", waiter_hclhcl.Pos{Line: 1, Column: 1}) - if d.HasErrors() { - return waiter_tftypes.NewAttributePath(), waiter_fmt.Errorf("invalid field path %q: 
%s", fieldPath, d.Error()) - } - - path := waiter_tftypes.NewAttributePath() - for _, p := range t { - switch p.(type) { - case waiter_hclhcl.TraverseRoot: - path = path.WithAttributeName(p.(waiter_hclhcl.TraverseRoot).Name) - case waiter_hclhcl.TraverseIndex: - indexKey := p.(waiter_hclhcl.TraverseIndex).Key - indexKeyType := indexKey.Type() - if indexKeyType.Equals(waiter_cty.String) { - path = path.WithElementKeyString(indexKey.AsString()) - } else if indexKeyType.Equals(waiter_cty.Number) { - f := indexKey.AsBigFloat() - if f.IsInt() { - i, _ := f.Int64() - path = path.WithElementKeyInt(int(i)) - } else { - return waiter_tftypes.NewAttributePath(), waiter_fmt.Errorf("index in field path must be an integer") - } - } else { - return waiter_tftypes.NewAttributePath(), waiter_fmt.Errorf("unsupported type in field path: %s", indexKeyType.FriendlyName()) - } - case waiter_hclhcl.TraverseAttr: - path = path.WithAttributeName(p.(waiter_hclhcl.TraverseAttr).Name) - case waiter_hclhcl.TraverseSplat: - return waiter_tftypes.NewAttributePath(), waiter_fmt.Errorf("splat is not supported") - } - } - - return path, nil -} - -type RolloutWaiter struct { - resource waiter_dynamic.ResourceInterface - resourceName string - logger waiter_hclog.Logger -} - -func (w *RolloutWaiter) Wait(ctx waiter_context.Context) error { - w.logger.Info("[ApplyResourceChange][Wait] Waiting until rollout complete...\n") - for { - if deadline, ok := ctx.Deadline(); ok { - if waiter_time.Now().After(deadline) { - return waiter_context.DeadlineExceeded - } - } - - res, err := w.resource.Get(ctx, w.resourceName, waiter_v1v1.GetOptions{}) - if err != nil { - return err - } - if waiter_errors.IsGone(err) { - return waiter_fmt.Errorf("resource was deleted") - } - - gk := res.GetObjectKind().GroupVersionKind().GroupKind() - statusViewer, err := waiter_polymorphichelpers.StatusViewerFor(gk) - if err != nil { - return waiter_fmt.Errorf("error getting resource status: %v", err) - } - - _, done, err := 
statusViewer.Status(res, 0) - if err != nil { - return waiter_fmt.Errorf("error getting resource status: %v", err) - } - - if done { - break - } - - waiter_time.Sleep(waiterSleepTime) - } - - w.logger.Info("[ApplyResourceChange][Wait] Rollout complete\n") - return nil -} - -type ConditionsWaiter struct { - resource waiter_dynamic.ResourceInterface - resourceName string - conditions []waiter_tftypes.Value - logger waiter_hclog.Logger -} - -func (w *ConditionsWaiter) Wait(ctx waiter_context.Context) error { - w.logger.Info("[ApplyResourceChange][Wait] Waiting for conditions...\n") - - for { - if deadline, ok := ctx.Deadline(); ok { - if waiter_time.Now().After(deadline) { - return waiter_context.DeadlineExceeded - } - } - - res, err := w.resource.Get(ctx, w.resourceName, waiter_v1v1.GetOptions{}) - if err != nil { - return err - } - if waiter_errors.IsGone(err) { - return waiter_fmt.Errorf("resource was deleted") - } - - status := res.Object["status"].(map[string]interface{}) - conditions := status["conditions"].([]interface{}) - conditionsMet := true - for _, c := range w.conditions { - var condition map[string]waiter_tftypes.Value - c.As(&condition) - var conditionType, conditionStatus string - condition["type"].As(&conditionType) - condition["status"].As(&conditionStatus) - conditionMet := false - for _, cc := range conditions { - ccc := cc.(map[string]interface{}) - if ccc["type"].(string) == conditionType { - conditionMet = ccc["status"].(string) == conditionStatus - break - } - } - conditionsMet = conditionsMet && conditionMet - } - - if conditionsMet { - break - } - - waiter_time.Sleep(waiterSleepTime) - } - - w.logger.Info("[ApplyResourceChange][Wait] All conditions met.\n") - return nil -} diff --git a/contrib/terraform-provider-kubeproxy/go.mod b/contrib/terraform-provider-kubeproxy/go.mod deleted file mode 100644 index 80c9dabd8d..0000000000 --- a/contrib/terraform-provider-kubeproxy/go.mod +++ /dev/null @@ -1,193 +0,0 @@ -module 
github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy - -go 1.21 - -require ( - github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 - github.com/hashicorp/go-hclog v1.4.0 - github.com/hashicorp/go-plugin v1.4.8 - github.com/hashicorp/hcl/v2 v2.15.0 - github.com/hashicorp/terraform-exec v0.17.3 - github.com/hashicorp/terraform-plugin-go v0.14.2 - github.com/hashicorp/terraform-plugin-log v0.7.0 - github.com/hashicorp/terraform-plugin-mux v0.7.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 - github.com/hashicorp/terraform-provider-kubernetes v1.13.4-0.20220907163743-48d1f3528353 - github.com/mitchellh/go-homedir v1.1.0 - github.com/mitchellh/go-testing-interface v1.14.1 - github.com/synapsecns/sanguine/contrib/tfcore v0.0.0-00010101000000-000000000000 - github.com/zclconf/go-cty v1.12.1 - golang.org/x/exp v0.0.0-20240213143201-ec583247a57a - golang.org/x/mod v0.15.0 - golang.org/x/tools v0.18.0 - google.golang.org/grpc v1.60.1 - k8s.io/apiextensions-apiserver v0.25.5 - k8s.io/apimachinery v0.25.5 - k8s.io/client-go v0.25.5 - k8s.io/kubectl v0.25.5 -) - -require ( - bitbucket.org/creachadair/stringset v0.0.8 // indirect - cloud.google.com/go v0.111.0 // indirect - cloud.google.com/go/bigtable v1.10.1 // indirect - cloud.google.com/go/compute v1.23.3 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.5 // indirect - cloud.google.com/go/longrunning v0.5.4 // indirect - dario.cat/mergo v1.0.0 // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.28 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.21 // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // 
indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/GoogleCloudPlatform/declarative-resource-client-library v0.0.0-20211027225138-ef28ca390518 // indirect - github.com/MakeNowJust/heredoc v1.0.0 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect - github.com/agext/levenshtein v1.2.3 // indirect - github.com/apparentlymart/go-cidr v1.1.0 // indirect - github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect - github.com/cenkalti/backoff v2.2.1+incompatible // indirect - github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/docker/cli v20.10.17+incompatible // indirect - github.com/docker/docker-credential-helpers v0.7.0 // indirect - github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a // indirect - github.com/emicklei/go-restful/v3 v3.10.1 // indirect - github.com/emirpasic/gods v1.18.1 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect - github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect - github.com/fatih/camelcase v1.0.0 // indirect - github.com/fatih/color v1.13.0 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 // indirect - github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 // indirect - github.com/gartnera/gcloud v0.0.15 // indirect - github.com/getkin/kin-openapi v0.76.0 // indirect - github.com/ghodss/yaml v1.0.0 // indirect - github.com/go-errors/errors v1.4.2 // indirect - github.com/go-git/gcfg 
v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.5.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/swag v0.22.3 // indirect - github.com/gogo/protobuf v1.3.3 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang/glog v1.1.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/btree v1.1.2 // indirect - github.com/google/gnostic v0.6.9 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect - github.com/google/s2a-go v0.1.7 // indirect - github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/uuid v1.5.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect - github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-checkpoint v0.5.0 // indirect - github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/hc-install v0.4.0 // indirect - github.com/hashicorp/logutils v1.0.0 // indirect - 
github.com/hashicorp/terraform-json v0.14.0 // indirect - github.com/hashicorp/terraform-provider-google/v4 v4.2.0 // indirect - github.com/hashicorp/terraform-registry-address v0.1.0 // indirect - github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect - github.com/imdario/mergo v0.3.13 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jinzhu/copier v0.2.9 // indirect - github.com/josharian/intern v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/hashstructure v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/spdystream v0.2.0 // indirect - github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect - github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - 
github.com/oklog/run v1.1.0 // indirect - github.com/onsi/ginkgo v1.16.5 // indirect - github.com/onsi/ginkgo/v2 v2.15.0 // indirect - github.com/onsi/gomega v1.30.0 // indirect - github.com/peterbourgon/diskv v2.0.1+incompatible // indirect - github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 // indirect - github.com/pjbgf/sha1cd v0.3.0 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/robfig/cron v1.2.0 // indirect - github.com/russross/blackfriday v1.6.0 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect - github.com/skeema/knownhosts v1.2.1 // indirect - github.com/spf13/cobra v1.6.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect - github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect - github.com/vmihailenco/tagparser v0.1.2 // indirect - github.com/xanzy/ssh-agent v0.3.3 // indirect - github.com/xlab/treeprint v1.2.0 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.23.1 // indirect - go.opentelemetry.io/otel/metric v1.23.1 // indirect - go.opentelemetry.io/otel/trace v1.23.1 // indirect - go.starlark.net v0.0.0-20221205180719-3fd0dac74452 // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/term v0.17.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.3.0 // indirect - google.golang.org/api v0.149.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 // indirect - google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240108191215-35c7eff3a6b1 // indirect - google.golang.org/protobuf v1.32.0 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.25.5 // indirect - k8s.io/cli-runtime v0.25.5 // indirect - k8s.io/component-base v0.25.5 // indirect - k8s.io/klog/v2 v2.80.1 // indirect - k8s.io/kube-aggregator v0.23.5 // indirect - k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 // indirect - k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.12.1 // indirect - sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect -) - -replace ( - github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 - github.com/synapsecns/sanguine/contrib/tfcore => ../tfcore - golang.org/x/oauth2 => golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 - k8s.io/kubectl => k8s.io/kubectl v0.24.2 -) diff --git a/contrib/terraform-provider-kubeproxy/go.sum b/contrib/terraform-provider-kubeproxy/go.sum deleted file mode 100644 index d2f53016b6..0000000000 --- a/contrib/terraform-provider-kubeproxy/go.sum +++ /dev/null @@ -1,1935 +0,0 @@ -4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= -bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= -bitbucket.org/creachadair/stringset v0.0.8 h1:gQqe4vs8XWgMyijfyKE6K8o4TcyGGrRXe0JvHgx5H+M= -bitbucket.org/creachadair/stringset v0.0.8/go.mod h1:AgthVMyMxC/6FK1KBJ2ALdqkZObGN8hOetgpwXyMn34= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod 
h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= -cloud.google.com/go v0.61.0/go.mod h1:XukKJg4Y7QsUu0Hxg3qQKUWR4VuWivmyMK2+rUyxAqw= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.82.0/go.mod h1:vlKccHJGuFBFufnAnuB08dfEH9Y3H7dzDzRECFdC2TA= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= 
-cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM= -cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigtable v1.10.1 h1:QKcRHeAsraxIlrdCZ3LLobXKBvITqcOEnSbHG2rzL9g= -cloud.google.com/go/bigtable v1.10.1/go.mod h1:cyHeKlx6dcZCO0oSQucYdauseD8kIENGuDOJPKMCVg8= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.23.3 
h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/longrunning v0.5.4 h1:w8xEcbZodnA2BbW6sVirkkoC+1gP8wS57EUUgGS0GVg= -cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= -cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod 
h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= -github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= -github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U= -github.com/Azure/go-autorest/autorest/date v0.3.0 
h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= -github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GoogleCloudPlatform/declarative-resource-client-library v0.0.0-20211027225138-ef28ca390518 h1:tFdFasG+VDpnn+BfVbZrfGcoH6pw6s7ODYlZlhTO3UM= -github.com/GoogleCloudPlatform/declarative-resource-client-library v0.0.0-20211027225138-ef28ca390518/go.mod h1:oEeBHikdF/NrnUy0ornVaY1OT+jGvTqm+LQS0+ZDKzU= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= -github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= -github.com/Masterminds/goutils v1.1.0/go.mod 
h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod 
h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= -github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= -github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/andybalholm/crlf v0.0.0-20171020200849-670099aa064f/go.mod h1:k8feO4+kXDxro6ErPXBRTJ/ro2mf0SsFG8s7doP9kJE= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod 
h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= -github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= -github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= -github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= -github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= -github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= -github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= -github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= -github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= -github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= -github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/ashanbrown/forbidigo v1.1.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= -github.com/ashanbrown/makezero v0.0.0-20210308000810-4155955488a0/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= -github.com/augustoroman/hexdump v0.0.0-20190827031536-6506f4163e93 h1:z6k1vb5L2wqLK4SIk3fpUiXnhNWSZ6Oyy8AaLqr0B+A= -github.com/augustoroman/hexdump v0.0.0-20190827031536-6506f4163e93/go.mod h1:ps2Vk8wMZarkeIPtUqW/FUvwVVdeRDbewMYz+EmuEgk= -github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= -github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod 
h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8= 
-github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= -github.com/charithe/durationcheck v0.0.6/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= -github.com/chavacava/garif v0.0.0-20210405163807-87a70f3d418b/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= -github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go 
v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod 
h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creachadair/staticfile v0.1.2/go.mod h1:a3qySzCIXEprDGxk6tSxSI+dBBdLzqeBOMhZ+o2d3pM= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= -github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= -github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M= -github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= -github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= -github.com/elazarl/goproxy 
v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= -github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= -github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= 
-github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/esimonov/ifshort v1.0.2/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= -github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= -github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= -github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= -github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= -github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= -github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color 
v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= -github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= -github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= -github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 h1:R+19WKQClnfMXS60cP5BmMe1wjZ4u0evY2p2Ar0ZTXo= -github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= -github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 h1:EipXK6U05IQ2wtuFRn4k3h0+2lXypzItoXGVyf4r9Io= -github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w= -github.com/gartnera/gcloud v0.0.15 
h1:/PkEnxPczVRS78MkMDz6wfdRR8YDDjzr0VF6ri6cGVs= -github.com/gartnera/gcloud v0.0.15/go.mod h1:i9wWa1ndPbE8AhduqRMX9nAv9X9HqN9xgqydfEdFLGo= -github.com/getkin/kin-openapi v0.76.0 h1:j77zg3Ec+k+r+GA3d8hBoXpAc6KX9TbBPrwQGBIy2sY= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-critic/go-critic v0.5.6/go.mod h1:cVjj0DfqewQVIlIAGexPCaGaZDAqGE29PYDDADIVNEo= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= -github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= -github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= -github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= -github.com/go-git/go-git-fixtures/v4 
v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= -github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= -github.com/go-git/go-git/v5 v5.1.0/go.mod h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM= -github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= -github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= -github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= 
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= -github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= -github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 
-github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock 
v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.40.1/go.mod h1:OyFTr1muxaWeGTcHQcL3B7C4rETnDphTKYenZDgH2/g= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= 
-github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= -github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof 
v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= 
-github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= -github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod 
h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/gookit/color v1.3.8/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= -github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= -github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod 
h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= -github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= -github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= -github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= -github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= -github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway 
v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= -github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= -github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= -github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= -github.com/hashicorp/go-getter v1.5.0/go.mod h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= 
-github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= -github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= -github.com/hashicorp/go-plugin v1.4.0/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= -github.com/hashicorp/go-plugin v1.4.8 h1:CHGwpxYDOttQOY7HOWgETU9dyVjOXzniXDqJcYJE1zM= -github.com/hashicorp/go-plugin v1.4.8/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.3 
h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= -github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hc-install v0.4.0 h1:cZkRFr1WVa0Ty6x5fTvL1TuO1flul231rWkGH92oYYk= -github.com/hashicorp/hc-install v0.4.0/go.mod h1:5d155H8EC5ewegao9A4PUTMNPZaq+TbOzkJJZ4vrXeI= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= -github.com/hashicorp/hcl/v2 v2.6.0/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= -github.com/hashicorp/hcl/v2 v2.15.0 h1:CPDXO6+uORPjKflkWCCwoWc9uRp+zSIPcCQ+BrxV7m8= -github.com/hashicorp/hcl/v2 v2.15.0/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= -github.com/hashicorp/logutils 
v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/terraform-exec v0.12.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= -github.com/hashicorp/terraform-exec v0.13.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= -github.com/hashicorp/terraform-exec v0.17.3 h1:MX14Kvnka/oWGmIkyuyvL6POx25ZmKrjlaclkx3eErU= -github.com/hashicorp/terraform-exec v0.17.3/go.mod h1:+NELG0EqQekJzhvikkeQsOAZpsw0cv/03rbeQJqscAI= -github.com/hashicorp/terraform-json v0.8.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE= -github.com/hashicorp/terraform-json v0.12.0/go.mod h1:pmbq9o4EuL43db5+0ogX10Yofv1nozM+wskr/bGFJpI= -github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= -github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= -github.com/hashicorp/terraform-plugin-go v0.1.0/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= -github.com/hashicorp/terraform-plugin-go v0.2.1/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= -github.com/hashicorp/terraform-plugin-go v0.14.2 h1:rhsVEOGCnY04msNymSvbUsXfRLKh9znXZmHlf5e8mhE= -github.com/hashicorp/terraform-plugin-go v0.14.2/go.mod h1:Q12UjumPNGiFsZffxOsA40Tlz1WVXt2Evh865Zj0+UA= -github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs= -github.com/hashicorp/terraform-plugin-log v0.7.0/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4= 
-github.com/hashicorp/terraform-plugin-mux v0.7.0 h1:wRbSYzg+v2sn5Mdee0UKm4YTt4wJG0LfSwtgNuBkglY= -github.com/hashicorp/terraform-plugin-mux v0.7.0/go.mod h1:Ae30Mc5lz4d1awtiCbHP0YyvgBeiQ00Q1nAq0U3lb+I= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0/go.mod h1:JBItawj+j8Ssla5Ib6BC/W9VQkOucBfnX7VRtyx1vw8= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.5.0/go.mod h1:z+cMZ0iswzZOahBJ3XmNWgWkVnAd2bl8g+FhyyuPDH4= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 h1:zHcMbxY0+rFO9gY99elV/XC/UnQVg7FhRCbj1i5b7vM= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1/go.mod h1:+tNlb0wkfdsDJ7JEiERLz4HzM19HyiuIoGzTsM7rPpw= -github.com/hashicorp/terraform-provider-google/v4 v4.2.0 h1:w0r/YEy7ZM5mTMAarRUpS7eyYrXTN5mazwHtLnEGAk8= -github.com/hashicorp/terraform-provider-google/v4 v4.2.0/go.mod h1:eUbSXbhfBMNiOuofFo688iPhk42O782vze8drAN2sPA= -github.com/hashicorp/terraform-provider-kubernetes v1.13.4-0.20220907163743-48d1f3528353 h1:+KJ8vXkPhoNXwB59lh4VB3U5kPvDavt0Y7KO0XWGLms= -github.com/hashicorp/terraform-provider-kubernetes v1.13.4-0.20220907163743-48d1f3528353/go.mod h1:u760UDsjsuqmeJpnioTfgPFywTKfD6PwghAePuZxnBM= -github.com/hashicorp/terraform-registry-address v0.1.0 h1:W6JkV9wbum+m516rCl5/NjKxCyTVaaUBbzYcMzBDO3U= -github.com/hashicorp/terraform-registry-address v0.1.0/go.mod h1:EnyO2jYO6j29DTHbJcm00E5nQTFeTtyZH3H5ycydQ5A= -github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= -github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux 
v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= -github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= -github.com/jbenet/go-context 
v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jgautheron/goconst v1.4.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= -github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= -github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= -github.com/jhump/protoreflect v1.14.1 h1:N88q7JkxTHWFEqReuTsYH1dPIwXxA0ITNQp7avLY10s= -github.com/jhump/protoreflect v1.14.1/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= -github.com/jingyugao/rowserrcheck v0.0.0-20210315055705-d907ca737bb1/go.mod h1:TOQpc2SLx6huPfoFGK3UOnEG+u02D3C1GeosjupAKCA= -github.com/jinzhu/copier v0.2.9 h1:v0Wnz+GKEI63FX7cU2LI20mFFlhTNl+36cjDIuhq3QY= -github.com/jinzhu/copier v0.2.9/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.0/go.mod 
h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/kevinburke/ssh_config 
v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= -github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= -github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f h1:dKccXx7xA56UNqOcFIbuqFjAWPVtP688j5QMgmo6OHU= -github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f/go.mod h1:4rEELDSfUAlBSyUjPG0JnaNGjf13JySHFeRdD/3dLP0= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty 
v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= -github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= -github.com/ldez/gomoddirectives v0.2.1/go.mod h1:sGicqkRgBOg//JfpXwkB9Hj0X5RyJ7mlACM5B9f6Me4= -github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= -github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de 
h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= -github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= -github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= -github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= 
-github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= 
-github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.0.6/go.mod h1:Lj5gIVxjBlH8REa3icEOkdfchwYc291nShzZ4QYWyMo= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= 
-github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.4/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= -github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= -github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= 
-github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= 
-github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= -github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= -github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= -github.com/mozilla/tls-observatory v0.0.0-20210209181001-cf43108d6880/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= -github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= 
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= -github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= -github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= -github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce/go.mod h1:uFMI8w+ref4v2r9jz+c9i1IfIttS/OkmLfrk1jne5hs= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= -github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= 
-github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= -github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= -github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= -github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v0.0.0-20210418123303-74da32850375/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod 
h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/procfs 
v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= -github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.4/go.mod h1:57FZgMnoo6jqxkYKmVj5Fc8vOt0rVzoE/UNAmFFIPqA= -github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.2/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20210203162857-b223e0831f88/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= -github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= -github.com/regen-network/protobuf v1.3.3-alpha.regen.1 
h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= -github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= -github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= -github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= -github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.2.0/go.mod h1:rNqbC4TOIdUDcVMSIpNNAzTbzXAZa6W5lnUepvuMMgQ= -github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= -github.com/ryanuber/columnize 
v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sanposhiho/wastedassign v1.0.0/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= -github.com/securego/gosec/v2 v2.7.0/go.mod h1:xNbGArrGUspJLuz3LS5XCY1EBW/0vABAl/LWfSklmiM= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.21.4/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= 
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= -github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= -github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= 
-github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod 
h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= -github.com/tetafro/godot 
v1.4.6/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= -github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek= -github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.1.0/go.mod h1:crK5eI4RGSUrb9duDTQ5GqcukbKZvi85vX6nbhsBAeI= -github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/tommy-muehle/go-mnd/v2 v2.3.2/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= 
-github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= -github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= -github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= -github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= -github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= -github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= -github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= -github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod 
h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= -github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= -github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark 
v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.5.1/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ= -github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= -github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= -go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= -go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= -go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod 
h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel v1.23.1 h1:Za4UzOqJYS+MUczKI320AtqZHZb7EqxO00jAHE0jmQY= -go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/metric v1.23.1 h1:PQJmqJ9u2QaJLBOELl1cxIdPcpbwzbkjfEyelTl2rlo= -go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk 
v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/otel/trace v1.23.1 h1:4LrmmEd8AU2rFvU1zegmvqW7+kWarxtNOPyeL6HmYY8= -go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= -go.starlark.net v0.0.0-20221205180719-3fd0dac74452 h1:JZtNuL6LPB+scU5yaQ6hqRlJFRiddZm2FwRt2AQqtHA= -go.starlark.net v0.0.0-20221205180719-3fd0dac74452/go.mod h1:kIVgS18CjmEC3PqMd5kaJSGEifyV/CeB9x506ZJ1Vbk= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap 
v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= 
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= -golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= 
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net 
v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys 
v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210601080250-7ecdf8ef093b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 
-golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod 
h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2-0.20210512205948-8287d5da45e4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= -golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= 
-golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= 
-google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= -google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod 
h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210601144548-a796c710e9b6/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto 
v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= 
-google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917/go.mod h1:pZqR+glSb11aJ+JQcczCvgf47+duRuzNSKqE8YAQnV0= -google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 h1:OPXtXn7fNMaXwO3JvOmF1QyTc00jsSFFz1vXXBOdCDo= -google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:B5xPO//w8qmBDjGReYLpR6UJPnkldGkCSMoH/2vxJeg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 h1:gphdwh0npgs8elJ4T6J+DQJHPVF7RsuJHCfwztUb4J4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc 
v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= 
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 
v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= -gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.4/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8= -k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= -k8s.io/api v0.25.5 h1:mqyHf7aoaYMpdvO87mqpol+Qnsmo+y09S0PMIXwiZKo= -k8s.io/api v0.25.5/go.mod h1:RzplZX0Z8rV/WhSTfEvnyd91bBhBQTRWo85qBQwRmb8= -k8s.io/apiextensions-apiserver v0.25.5 h1:iHkMyFGzRgXO8AQlCYPVTVsKLqXvruswirIW8hRji+g= -k8s.io/apiextensions-apiserver v0.25.5/go.mod h1:TWAHgFssGm050Oe6MhN+Jaeav+ISEl9M/qWsPzq2s3k= -k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apimachinery v0.25.5 
h1:SQomYHvv+aO43qdu3QKRf9YuI0oI8w3RrOQ1qPbAUGY= -k8s.io/apimachinery v0.25.5/go.mod h1:1S2i1QHkmxc8+EZCIxe/fX5hpldVXk4gvnJInMEb8D4= -k8s.io/apiserver v0.23.5/go.mod h1:7wvMtGJ42VRxzgVI7jkbKvMbuCbVbgsWFT7RyXiRNTw= -k8s.io/cli-runtime v0.24.2/go.mod h1:1LIhKL2RblkhfG4v5lZEt7FtgFG5mVb8wqv5lE9m5qY= -k8s.io/cli-runtime v0.25.5 h1:5Q37ITYtPtSw2JQcN6EBsdOQBnGvvo/D1g93Da4ceYI= -k8s.io/cli-runtime v0.25.5/go.mod h1:o7lT2rFyfbLrQOzTFsV828OyxKsTE/FmVc3ag1nx0IU= -k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4= -k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= -k8s.io/client-go v0.25.5 h1:7QWVK0Ph4bLn0UwotPTc2FTgm8shreQXyvXnnHDd8rE= -k8s.io/client-go v0.25.5/go.mod h1:bOeoaUUdpyz3WDFGo+Xm3nOQFh2KuYXRDwrvbAPtFQA= -k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= -k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= -k8s.io/component-base v0.23.5/go.mod h1:c5Nq44KZyt1aLl0IpHX82fhsn84Sb0jjzwjpcA42bY0= -k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM= -k8s.io/component-base v0.25.5 h1:tVni0kgpceq71MDMBSixp8Y621YGvTS/1zq3RABgX9A= -k8s.io/component-base v0.25.5/go.mod h1:9J+e9uIUwUOG2x5q5+aaOR0b8QI5OIqwqPAbeODkYpc= -k8s.io/component-helpers v0.24.2/go.mod h1:TRQPBQKfmqkmV6c0HAmUs8cXVNYYYLsXy4zu8eODi9g= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= 
-k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-aggregator v0.23.5 h1:UZ+qE3hGo6DcgKySf27Jg7d3X9/6JQkVLUiHZAoAfCY= -k8s.io/kube-aggregator v0.23.5/go.mod h1:3ynYx07Co6dzjpKPgipM+1/Mt2Jcm7dY++cRlKLr5s8= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= -k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 h1:tBEbstoM+K0FiBV5KGAKQ0kuvf54v/hwpldiJt69w1s= -k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/kubectl v0.24.2 h1:+RfQVhth8akUmIc2Ge8krMl/pt66V7210ka3RE/p0J4= -k8s.io/kubectl v0.24.2/go.mod h1:+HIFJc0bA6Tzu5O/YcuUt45APAxnNL8LeMuXwoiGsPg= -k8s.io/metrics v0.24.2/go.mod h1:5NWURxZ6Lz5gj8TFU83+vdWIVASx7W8lwPpHYCqopMo= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod 
h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= -rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI= -sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM= -sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s= -sigs.k8s.io/kustomize/cmd/config v0.10.6/go.mod h1:/S4A4nUANUa4bZJ/Edt7ZQTyKOY9WCER0uBS1SW2Rco= -sigs.k8s.io/kustomize/kustomize/v4 v4.5.4/go.mod h1:Zo/Xc5FKD6sHl0lilbrieeGeZHVYCA4BzxeAaLI05Bg= -sigs.k8s.io/kustomize/kyaml v0.13.6/go.mod h1:yHP031rn1QX1lr/Xd934Ri/xdVNG8BE2ECa78Ht/kEg= -sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk= -sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod 
h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/contrib/terraform-provider-kubeproxy/main.go b/contrib/terraform-provider-kubeproxy/main.go deleted file mode 100644 index 712c069667..0000000000 --- a/contrib/terraform-provider-kubeproxy/main.go +++ /dev/null @@ -1,97 +0,0 @@ -// Package main provides a Terraform wrapper around kubernetes for using an IAP (Identity-Aware Proxy) when interacting with GCP resources. -// The provider wraps the original provider (e.g Helm or Kubernetes) and adds the necessary fields and functionality for configuring and using the IAP proxy. -// This allows for more fine-grained authentication and authorization of access to resources, and is especially useful for short-lived Terraform resources. 
-package main - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy/provider" - "log" - "os" - "time" - - "github.com/hashicorp/go-plugin" - "github.com/hashicorp/terraform-exec/tfexec" - tf5server "github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server" - tf5muxserver "github.com/hashicorp/terraform-plugin-mux/tf5muxserver" -) - -const providerName = "registry.terraform.io/hashicorp/kubernetes" - -// Generate docs for website -// go:generate go run github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs - -func main() { - debugFlag := flag.Bool("debug", false, "Start provider in stand-alone debug mode.") - flag.Parse() - - mainProvider := provider.MainProvider().GRPCProvider - manifestProvider, err := provider.ManifestProvider() - if err != nil { - panic(err) - } - // note: manifest provider is not currently supported - - ctx := context.Background() - muxer, err := tf5muxserver.NewMuxServer(ctx, mainProvider, manifestProvider) - if err != nil { - log.Println(err.Error()) - os.Exit(1) - } - - opts := []tf5server.ServeOpt{} - if *debugFlag { - reattachConfigCh := make(chan *plugin.ReattachConfig) - go func() { - reattachConfig, err := waitForReattachConfig(reattachConfigCh) - if err != nil { - fmt.Printf("Error getting reattach config: %s\n", err) - return - } - printReattachConfig(reattachConfig) - }() - opts = append(opts, tf5server.WithDebug(ctx, reattachConfigCh, nil)) - } - - _ = tf5server.Serve(providerName, muxer.ProviderServer, opts...) -} - -// convertReattachConfig converts plugin.ReattachConfig to tfexec.ReattachConfig. 
-func convertReattachConfig(reattachConfig *plugin.ReattachConfig) tfexec.ReattachConfig { - return tfexec.ReattachConfig{ - Protocol: string(reattachConfig.Protocol), - Pid: reattachConfig.Pid, - Test: true, - Addr: tfexec.ReattachConfigAddr{ - Network: reattachConfig.Addr.Network(), - String: reattachConfig.Addr.String(), - }, - } -} - -// printReattachConfig prints the line the user needs to copy and paste -// to set the TF_REATTACH_PROVIDERS variable. -func printReattachConfig(config *plugin.ReattachConfig) { - reattachStr, err := json.Marshal(map[string]tfexec.ReattachConfig{ - "kubernetes": convertReattachConfig(config), - }) - if err != nil { - fmt.Printf("Error building reattach string: %s", err) - return - } - fmt.Printf("# Provider server started\nexport TF_REATTACH_PROVIDERS='%s'\n", string(reattachStr)) -} - -// waitForReattachConfig blocks until a ReattachConfig is received on the -// supplied channel or times out after 2 seconds. -func waitForReattachConfig(ch chan *plugin.ReattachConfig) (*plugin.ReattachConfig, error) { - select { - case config := <-ch: - return config, nil - case <-time.After(2 * time.Second): - return nil, fmt.Errorf("timeout while waiting for reattach configuration") - } -} diff --git a/contrib/terraform-provider-kubeproxy/provider/doc.go b/contrib/terraform-provider-kubeproxy/provider/doc.go deleted file mode 100644 index b2f6918232..0000000000 --- a/contrib/terraform-provider-kubeproxy/provider/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package provider provides the overwrriten kubeproxy providy -package provider diff --git a/contrib/terraform-provider-kubeproxy/provider/export_test.go b/contrib/terraform-provider-kubeproxy/provider/export_test.go deleted file mode 100644 index 39411bc445..0000000000 --- a/contrib/terraform-provider-kubeproxy/provider/export_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package provider - -import ( - "context" - "github.com/hashicorp/go-cty/cty" - 
"github.com/hashicorp/terraform-plugin-go/tfprotov5" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// ValidateConfigNulls is a wrapper around validateConfigNulls for testing. -func ValidateConfigNulls(ctx context.Context, v cty.Value, path cty.Path) []*tfprotov5.Diagnostic { - return validateConfigNulls(ctx, v, path) -} - -// CombinedSchema is a wrapper around combinedSchema for testing. -func (r *RawProviderServer) CombinedSchema() *tfprotov5.Schema { - return r.combinedSchema -} - -// GoogleProvider is a wrapper around googleProvider for testing. -func (r *RawProviderServer) GoogleProvider() *schema.Provider { - return r.googleProvider -} diff --git a/contrib/terraform-provider-kubeproxy/provider/main_provider.go b/contrib/terraform-provider-kubeproxy/provider/main_provider.go deleted file mode 100644 index d1d71c2f13..0000000000 --- a/contrib/terraform-provider-kubeproxy/provider/main_provider.go +++ /dev/null @@ -1,87 +0,0 @@ -package provider - -import ( - "context" - provider_diag "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-kubernetes/kubernetes" - "github.com/synapsecns/sanguine/contrib/tfcore/generated/google" - "github.com/synapsecns/sanguine/contrib/tfcore/utils" - "log" -) - -type configuredKubeProvider struct { - // googleIface is the google interface - googleIface *google.Config - // helmIface is the helm interface - kubeIface interface{} -} - -func (c configuredKubeProvider) GoogleProvider() interface{} { - return c.googleIface -} - -func (c configuredKubeProvider) UnderlyingProvider() interface{} { - return c.kubeIface -} - -var _ utils.WrappedProvider = &configuredKubeProvider{} - -// MainProvider creates the main provider for the iap tunnel. 
-func MainProvider() *schema.Provider { - combinedSchema := utils.CombineSchemas(google.Provider(), kubernetes.Provider(), "kubernetes", "kubeproxy") - underlyingGoogleProvider := google.Provider() - underlyingKubernetesProvider := kubernetes.Provider() - return &schema.Provider{ - Schema: combinedSchema.Schema, - ProviderMetaSchema: combinedSchema.MetaSchema, - ResourcesMap: combinedSchema.ResourceMap, - DataSourcesMap: combinedSchema.DataSourceMap, - ConfigureContextFunc: func(ctx context.Context, data *schema.ResourceData) (_ interface{}, dg provider_diag.Diagnostics) { - cp := &configuredKubeProvider{} - var gdg, hdg provider_diag.Diagnostics - var giface interface{} - var ok bool - - giface, gdg = underlyingGoogleProvider.ConfigureContextFunc(ctx, data) - if gdg.HasError() { - return nil, gdg - } - dg = append(dg, gdg...) - cp.googleIface, ok = giface.(*google.Config) - if !ok { - return nil, append(gdg, provider_diag.Diagnostic{ - Severity: provider_diag.Error, - Summary: "failed to cast google interface", - }) - } - - // TODO: the proxy_url needs to be set in here - proxyURL, err := utils.StartTunnel(ctx, data, cp.googleIface) - if err != nil { - return nil, append(gdg, provider_diag.FromErr(err)[0]) - } - - // set the proxy url - log.Printf("[INFO] setting proxy url to %s", proxyURL) - err = data.Set("proxy_url", proxyURL) - if err != nil { - return nil, append(gdg, provider_diag.FromErr(err)[0]) - } - - cp.kubeIface, hdg = underlyingKubernetesProvider.ConfigureContextFunc(ctx, data) - if hdg.HasError() { - return nil, hdg - } - dg = append(dg, hdg...) 
- if !ok { - return nil, append(hdg, provider_diag.Diagnostic{ - Severity: provider_diag.Error, - Summary: "failed to cast kubernetes interface", - }) - } - - return cp, dg - }, - } -} diff --git a/contrib/terraform-provider-kubeproxy/provider/manifest_provider.go b/contrib/terraform-provider-kubeproxy/provider/manifest_provider.go deleted file mode 100644 index 35d7ef46a2..0000000000 --- a/contrib/terraform-provider-kubeproxy/provider/manifest_provider.go +++ /dev/null @@ -1,262 +0,0 @@ -package provider - -import ( - "context" - gojson "encoding/json" - "fmt" - "github.com/hashicorp/go-cty/cty" - "github.com/hashicorp/go-cty/cty/json" - "github.com/hashicorp/go-cty/cty/msgpack" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/terraform-plugin-go/tfprotov5" - provider_diag "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/hashicorp/terraform-provider-kubernetes/manifest/test/logging" - "github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy/generated/configschema" - "github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy/generated/convert" - "github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy/generated/manifest" - "github.com/synapsecns/sanguine/contrib/tfcore/generated/google" - "github.com/synapsecns/sanguine/contrib/tfcore/utils" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" - "log" - "os" - "strconv" - "strings" - "sync" -) - -// ManifestProvider gets the manifest provider. 
-func ManifestProvider() (func() tfprotov5.ProviderServer, error) { - providerSchema, err := manifest.Provider()().GetProviderSchema(context.Background(), &tfprotov5.GetProviderSchemaRequest{}) - if err != nil { - return nil, fmt.Errorf("could not get provider schema: %w", err) - } - - rawProvider := makeRawProvider() - - rawProvider.combinedSchema, err = utils.CombineProtoSchemas(context.Background(), google.Provider(), providerSchema, "", "") - if err != nil { - return nil, fmt.Errorf("could not combine schemas: %w", err) - } - - rawProvider.googleProvider = google.Provider() - rawProvider.googleProvider.Schema = utils.UpdateSchemaWithDefaults(rawProvider.googleProvider.Schema) - - return func() tfprotov5.ProviderServer { - return rawProvider - }, nil -} - -// RawProviderServer is the raw provider server. -type RawProviderServer struct { - *manifest.RawProviderServer - combinedSchema *tfprotov5.Schema - googleProvider *schema.Provider - // used for stop context - stopMu sync.Mutex - // used for stop context - stopCh chan struct{} -} - -const ( - originalProviderPrefix = "kubernetes" - replacedProviderPrefix = "kubeproxy" -) - -// GetProviderSchema returns the provider schema. 
-func (r *RawProviderServer) GetProviderSchema(_ context.Context, _ *tfprotov5.GetProviderSchemaRequest) (*tfprotov5.GetProviderSchemaResponse, error) { - return &tfprotov5.GetProviderSchemaResponse{ - Provider: r.combinedSchema, - // TODO: keys must be rewritten - ResourceSchemas: replaceResourceKeys(manifest.GetProviderResourceSchema(), originalProviderPrefix, replacedProviderPrefix), - DataSourceSchemas: replaceResourceKeys(manifest.GetProviderDataSourceSchema(), originalProviderPrefix, replacedProviderPrefix), - }, nil -} - -func replaceResourceKeys(keyMap map[string]*tfprotov5.Schema, toReplace, replaceWith string) map[string]*tfprotov5.Schema { - newKeyMap := make(map[string]*tfprotov5.Schema) - for key, value := range keyMap { - newKeyMap[strings.Replace(key, toReplace, replaceWith, 1)] = value - } - return newKeyMap -} - -// ConfigureProvider configures the provider and sets up the tunnel. -func (r *RawProviderServer) ConfigureProvider(ctx context.Context, req *tfprotov5.ConfigureProviderRequest) (*tfprotov5.ConfigureProviderResponse, error) { - resp := &tfprotov5.ConfigureProviderResponse{} - - // we start by adding our custom fields to the google schema - updatedSchema := utils.UpdateSchemaWithDefaults(google.Provider().Schema) - // and then converting the google schema to an internal map. This can be used by the converter module - googleSchema := schema.InternalMap(updatedSchema).CoreConfigSchema() - - // we convert the schema to a config schema - combinedConfigSchema := convert.ProtoToConfigSchema(ctx, r.combinedSchema.Block) - - // we then use that to unmarshall the config - reqConfig, err := msgpack.Unmarshal(req.Config.MsgPack, combinedConfigSchema.ImpliedType()) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) - return resp, nil - } - - // Ensure there are no nulls that will cause helper/schema to panic. 
- if err := validateConfigNulls(ctx, reqConfig, nil); err != nil { - resp.Diagnostics = append(resp.Diagnostics, err...) - return resp, nil - } - - // shim the tfproto config into a terraform config - config := terraform.NewResourceConfigShimmed(reqConfig, googleSchema) - ctxHack := context.WithValue(ctx, schema.StopContextKey, r.StopContext(context.Background())) - - logging.HelperSchemaTrace(ctx, "Calling downstream configure google") - // configure the google provider - r.googleProvider.ConfigureContextFunc = googConfigureContextFunc - diag := r.googleProvider.Configure(ctxHack, config) - if diag.HasError() { - resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, diag) - return resp, nil - } - - logging.HelperSchemaTrace(ctx, "Called downstream configure google") - // remove extra fields - - marshalledRequest, err := removeRequestFields(ctx, reqConfig, combinedConfigSchema, maps.Keys(updatedSchema)) - if err != nil { - resp.Diagnostics = convert.AppendProtoDiag(ctx, resp.Diagnostics, err) - } - marshalledRequest.TerraformVersion = req.TerraformVersion - - // end remove extra fields - resp, err = r.RawProviderServer.ConfigureProvider(ctx, marshalledRequest) - if err != nil { - return nil, fmt.Errorf("could not configure provider: %w", err) - } - - return resp, nil -} - -// removeRequestFields removes google specified fields from the configure provider request -// represented as cty.Value and returns a new request that can be used for the provider. 
-func removeRequestFields(ctx context.Context, reqConfig cty.Value, combinedConfigSchema *configschema.Block, keysToPrune []string) (*tfprotov5.ConfigureProviderRequest, error) { - // we'll start by marshaling the config to json, we'll then remove extra keys and - // convert the config back to msgpack - jsonReq, err := json.Marshal(reqConfig, combinedConfigSchema.ImpliedType()) - if err != nil { - return nil, fmt.Errorf("could not marshal config: %w", err) - } - logging.HelperSchemaTrace(ctx, "pruning google fields from config") - logging.HelperSchemaTrace(ctx, string(jsonReq)) - - var objmap map[string]gojson.RawMessage - err = gojson.Unmarshal(jsonReq, &objmap) - if err != nil { - return nil, fmt.Errorf("could not unmarshal config: %w", err) - } - - // we'll remove the google fields - for field := range objmap { - if slices.Contains(keysToPrune, field) { - delete(objmap, field) - } - if field == "proxy_url" { - objmap["proxy_url"] = gojson.RawMessage(strconv.Quote(os.Getenv("KUBE_PROXY_URL"))) - } - } - - // we'll then marshal the config back to messagepack - jsonReq, err = gojson.Marshal(objmap) - if err != nil { - return nil, fmt.Errorf("could not marshal config: %w", err) - } - - logging.HelperSchemaTrace(ctx, "pruned google fields from config") - logging.HelperSchemaTrace(ctx, string(jsonReq)) - - req := &tfprotov5.ConfigureProviderRequest{} - req.Config = &tfprotov5.DynamicValue{ - JSON: jsonReq, - } - return req, nil -} - -// googConfigureContextFunc configures the context function for google. -func googConfigureContextFunc(ctx context.Context, d *schema.ResourceData) (_ interface{}, gdg provider_diag.Diagnostics) { - gface, googleDiagnostics := google.Provider().ConfigureContextFunc(ctx, d) - gdg = append(gdg, googleDiagnostics...) 
- if gdg.HasError() { - return nil, gdg - } - - googleConfig, ok := gface.(*google.Config) - if !ok { - return nil, append(gdg, provider_diag.Diagnostic{ - Severity: provider_diag.Error, - Summary: "failed to cast google interface", - }) - } - // TODO: the proxy_url needs to be set in here - proxyURL, err := utils.StartTunnel(ctx, d, googleConfig) - if err != nil { - return nil, append(gdg, provider_diag.FromErr(err)[0]) - } - - // set the proxy url - log.Printf("[INFO] setting proxy url to %s", proxyURL) - err = os.Setenv("KUBE_PROXY_URL", proxyURL) - if err != nil { - return nil, append(gdg, provider_diag.FromErr(err)[0]) - } - return gface, gdg -} - -// StopContext derives a new context from the passed in grpc context. -// It creates a goroutine to wait for the server stop and propagates -// cancellation to the derived grpc context. -func (r *RawProviderServer) StopContext(ctx context.Context) context.Context { - ctx = logging.InitContext(ctx) - r.stopMu.Lock() - defer r.stopMu.Unlock() - - stoppable, cancel := context.WithCancel(ctx) - go mergeStop(stoppable, cancel, r.stopCh) - return stoppable -} - -// mergeStop is called in a goroutine and waits for the global stop signal -// and propagates cancellation to the passed in ctx/cancel func. The ctx is -// also passed to this function and waited upon so no goroutine leak is caused. -func mergeStop(ctx context.Context, cancel context.CancelFunc, stopCh chan struct{}) { - select { - case <-ctx.Done(): - return - case <-stopCh: - cancel() - } -} - -// makeRawProvider makes a raw provider. 
-func makeRawProvider() *RawProviderServer { - var logLevel string - var ok = false - for _, ev := range []string{"TF_LOG_PROVIDER_KUBERNETES", "TF_LOG_PROVIDER", "TF_LOG"} { - logLevel, ok = os.LookupEnv(ev) - if ok { - break - } - } - if !ok { - logLevel = "off" - } - - rawProvider := &manifest.RawProviderServer{} - rawProvider.SetLogger(hclog.New(&hclog.LoggerOptions{ - Level: hclog.LevelFromString(logLevel), - Output: os.Stderr, - })) - - return &RawProviderServer{RawProviderServer: rawProvider} -} diff --git a/contrib/terraform-provider-kubeproxy/provider/provider_test.go b/contrib/terraform-provider-kubeproxy/provider/provider_test.go deleted file mode 100644 index 90e4ba0e8c..0000000000 --- a/contrib/terraform-provider-kubeproxy/provider/provider_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package provider_test - -import ( - "github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy/provider" - "testing" -) - -func TestManifestProvider(t *testing.T) { - fn, err := provider.ManifestProvider() - if err != nil { - t.Fatalf("error creating manifest prov: %v", err) - } - - server := fn() - - prov, ok := server.(*provider.RawProviderServer) - if !ok { - t.Fatalf("incorrect prov type: %T", server) - } - - if prov.CombinedSchema() == nil { - t.Fatalf("combined schema should not be nil") - } - if prov.GoogleProvider() == nil { - t.Fatalf("google prov should not be nil") - } - if prov.RawProviderServer == nil { - t.Fatalf("raw prov server should not be nil") - } -} diff --git a/contrib/terraform-provider-kubeproxy/provider/resource.go b/contrib/terraform-provider-kubeproxy/provider/resource.go deleted file mode 100644 index 5dc93ad648..0000000000 --- a/contrib/terraform-provider-kubeproxy/provider/resource.go +++ /dev/null @@ -1,65 +0,0 @@ -// Package provider is a shim for the Package method of the underlying provider. 
-// nolint: wrapcheck -package provider - -import ( - "context" - "github.com/hashicorp/terraform-plugin-go/tfprotov5" - "strings" -) - -// ReadDataSource is a shim for the ReadDataSource method of the underlying provider. -func (r *RawProviderServer) ReadDataSource(ctx context.Context, req *tfprotov5.ReadDataSourceRequest) (*tfprotov5.ReadDataSourceResponse, error) { - req.TypeName = strings.Replace(req.TypeName, replacedProviderPrefix, originalProviderPrefix, 1) - - return r.RawProviderServer.ReadDataSource(ctx, req) -} - -// ValidateDataSourceConfig is a shim for the ValidateDataSourceConfig method of the underlying provider. -func (r *RawProviderServer) ValidateDataSourceConfig(ctx context.Context, req *tfprotov5.ValidateDataSourceConfigRequest) (*tfprotov5.ValidateDataSourceConfigResponse, error) { - req.TypeName = strings.Replace(req.TypeName, replacedProviderPrefix, originalProviderPrefix, 1) - - return r.RawProviderServer.ValidateDataSourceConfig(ctx, req) -} - -// ValidateResourceTypeConfig is a shim for the ValidateProviderConfig method of the underlying provider. -func (r *RawProviderServer) ValidateResourceTypeConfig(ctx context.Context, req *tfprotov5.ValidateResourceTypeConfigRequest) (*tfprotov5.ValidateResourceTypeConfigResponse, error) { - req.TypeName = strings.Replace(req.TypeName, replacedProviderPrefix, originalProviderPrefix, 1) - - return r.RawProviderServer.ValidateResourceTypeConfig(ctx, req) -} - -// UpgradeResourceState is a shim for the UpgradeResourceState method of the underlying provider. -func (r *RawProviderServer) UpgradeResourceState(ctx context.Context, req *tfprotov5.UpgradeResourceStateRequest) (*tfprotov5.UpgradeResourceStateResponse, error) { - req.TypeName = strings.Replace(req.TypeName, replacedProviderPrefix, originalProviderPrefix, 1) - - return r.RawProviderServer.UpgradeResourceState(ctx, req) -} - -// ReadResource is a shim for the ReadResource method of the underlying provider. 
-func (r *RawProviderServer) ReadResource(ctx context.Context, req *tfprotov5.ReadResourceRequest) (*tfprotov5.ReadResourceResponse, error) { - req.TypeName = strings.Replace(req.TypeName, replacedProviderPrefix, originalProviderPrefix, 1) - - return r.RawProviderServer.ReadResource(ctx, req) -} - -// PlanResourceChange is a shim for the PlanResourceChange method of the underlying provider. -func (r *RawProviderServer) PlanResourceChange(ctx context.Context, req *tfprotov5.PlanResourceChangeRequest) (*tfprotov5.PlanResourceChangeResponse, error) { - req.TypeName = strings.Replace(req.TypeName, replacedProviderPrefix, originalProviderPrefix, 1) - - return r.RawProviderServer.PlanResourceChange(ctx, req) -} - -// ApplyResourceChange is a shim for the ApplyResourceChange method of the underlying provider. -func (r *RawProviderServer) ApplyResourceChange(ctx context.Context, req *tfprotov5.ApplyResourceChangeRequest) (*tfprotov5.ApplyResourceChangeResponse, error) { - req.TypeName = strings.Replace(req.TypeName, replacedProviderPrefix, originalProviderPrefix, 1) - - return r.RawProviderServer.ApplyResourceChange(ctx, req) -} - -// ImportResourceState is a shim for the ImportResourceState method of the underlying provider. 
-func (r *RawProviderServer) ImportResourceState(ctx context.Context, req *tfprotov5.ImportResourceStateRequest) (*tfprotov5.ImportResourceStateResponse, error) { - req.TypeName = strings.Replace(req.TypeName, replacedProviderPrefix, originalProviderPrefix, 1) - - return r.RawProviderServer.ImportResourceState(ctx, req) -} diff --git a/contrib/terraform-provider-kubeproxy/provider/validate.go b/contrib/terraform-provider-kubeproxy/provider/validate.go deleted file mode 100644 index a24636bcc3..0000000000 --- a/contrib/terraform-provider-kubeproxy/provider/validate.go +++ /dev/null @@ -1,69 +0,0 @@ -// Package provider is the Terraform provider for Kubernetes -// nolint -package provider - -import ( - "context" - "github.com/hashicorp/go-cty/cty" - "github.com/hashicorp/terraform-plugin-go/tfprotov5" - "github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy/generated/convert" -) - -// validateConfigNulls checks a config value for unsupported nulls before -// attempting to shim the value. While null values can mostly be ignored in the -// configuration, since they're not supported in HCL1, the case where a null -// appears in a list-like attribute (list, set, tuple) will present a nil value -// to helper/schema which can panic. Return an error to the user in this case, -// indicating the attribute with the null value. -// this function is copied from the grpc provider server with some modifications to avoid use of internals -func validateConfigNulls(ctx context.Context, v cty.Value, path cty.Path) []*tfprotov5.Diagnostic { - var diags []*tfprotov5.Diagnostic - if v.IsNull() || !v.IsKnown() { - return diags - } - - switch { - case v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType(): - it := v.ElementIterator() - for it.Next() { - kv, ev := it.Element() - if ev.IsNull() { - // if this is a set, the kv is also going to be null which - // isn't a valid path element, so we can't append it to the - // diagnostic. 
- p := path - if !kv.IsNull() { - p = append(p, cty.IndexStep{Key: kv}) - } - - diags = append(diags, &tfprotov5.Diagnostic{ - Severity: tfprotov5.DiagnosticSeverityError, - Summary: "Null value found in list", - Detail: "Null values are not allowed for this attribute value.", - Attribute: convert.PathToAttributePath(p), - }) - continue - } - - d := validateConfigNulls(ctx, ev, append(path, cty.IndexStep{Key: kv})) - diags = append(diags, d...) - } - - case v.Type().IsMapType() || v.Type().IsObjectType(): - it := v.ElementIterator() - for it.Next() { - kv, ev := it.Element() - var step cty.PathStep - switch { - case v.Type().IsMapType(): - step = cty.IndexStep{Key: kv} - case v.Type().IsObjectType(): - step = cty.GetAttrStep{Name: kv.AsString()} - } - d := validateConfigNulls(ctx, ev, append(path, step)) - diags = append(diags, d...) - } - } - - return diags -} diff --git a/contrib/terraform-provider-kubeproxy/provider/validate_test.go b/contrib/terraform-provider-kubeproxy/provider/validate_test.go deleted file mode 100644 index c3e5ffbc83..0000000000 --- a/contrib/terraform-provider-kubeproxy/provider/validate_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package provider_test - -import ( - "context" - "github.com/hashicorp/go-cty/cty" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy/generated/convert" - "github.com/synapsecns/sanguine/contrib/terraform-provider-kubeproxy/provider" - "strconv" - "testing" -) - -// Copied from grpc provider to test parity. 
-func TestValidateNulls(t *testing.T) { - for i, tc := range []struct { - Cfg cty.Value - Err bool - }{ - { - Cfg: cty.ObjectVal(map[string]cty.Value{ - "list": cty.ListVal([]cty.Value{ - cty.StringVal("string"), - cty.NullVal(cty.String), - }), - }), - Err: true, - }, - { - Cfg: cty.ObjectVal(map[string]cty.Value{ - "map": cty.MapVal(map[string]cty.Value{ - "string": cty.StringVal("string"), - "null": cty.NullVal(cty.String), - }), - }), - Err: false, - }, - { - Cfg: cty.ObjectVal(map[string]cty.Value{ - "object": cty.ObjectVal(map[string]cty.Value{ - "list": cty.ListVal([]cty.Value{ - cty.StringVal("string"), - cty.NullVal(cty.String), - }), - }), - }), - Err: true, - }, - { - Cfg: cty.ObjectVal(map[string]cty.Value{ - "object": cty.ObjectVal(map[string]cty.Value{ - "list": cty.ListVal([]cty.Value{ - cty.StringVal("string"), - cty.NullVal(cty.String), - }), - "list2": cty.ListVal([]cty.Value{ - cty.StringVal("string"), - cty.NullVal(cty.String), - }), - }), - }), - Err: true, - }, - { - Cfg: cty.ObjectVal(map[string]cty.Value{ - "object": cty.ObjectVal(map[string]cty.Value{ - "list": cty.SetVal([]cty.Value{ - cty.StringVal("string"), - cty.NullVal(cty.String), - }), - }), - }), - Err: true, - }, - } { - tc := tc // capture func literal - t.Run(strconv.Itoa(i), func(t *testing.T) { - d := provider.ValidateConfigNulls(context.Background(), tc.Cfg, nil) - diags := convert.ProtoToDiags(d) - switch { - case tc.Err: - if !diags.HasError() { - t.Fatal("expected error") - } - default: - for _, d := range diags { - if d.Severity == diag.Error { - t.Fatalf("unexpected error: %q", d) - } - } - } - }) - } -} diff --git a/contrib/terraform-provider-kubeproxy/scripts/add-tfmac.sh b/contrib/terraform-provider-kubeproxy/scripts/add-tfmac.sh deleted file mode 100755 index 4067060a39..0000000000 --- a/contrib/terraform-provider-kubeproxy/scripts/add-tfmac.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/zsh - -# if not already present in zshrc -if [ "$(grep -c -w "alias 
tfmac='TFENV_ARCH=arm64 TFENV_TERRAFORM_VERSION=latest:^1.3 terraform'" ~/.zshrc)" -le 0 ]; then - echo "adding tfmac command to zshrc. You might have to source ~/.zshrc or open a new tab" - echo "alias tfmac='TFENV_ARCH=arm64 TFENV_TERRAFORM_VERSION=latest:^1.3 terraform'" >> ~/.zshrc -fi diff --git a/contrib/terraform-provider-kubeproxy/scripts/build-tf.sh b/contrib/terraform-provider-kubeproxy/scripts/build-tf.sh deleted file mode 100755 index c459bee1c0..0000000000 --- a/contrib/terraform-provider-kubeproxy/scripts/build-tf.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# shellcheck disable=SC2086 - -# This gets the arch prefix we use when building a terraform plugin -TF_PREFIX=$( go version | awk '{print $NF}' | sed 's/\//_/') - -# define the plugin directory -PLUGIN_DIR=$(realpath -m ~/.terraform.d/plugins/example-kube.com/provider/kubeproxy/1.0.0/$TF_PREFIX) - -# fixes async problems on arm64 https://github.com/hashicorp/terraform-provider-aws/issues/20274#issuecomment-996795241 -# we don't need this for production builds, just darwinarm64. -GODEBUG=asyncpreemptoff=1 go build . 
- -# make the plugin directory if it doesn't exist -rm -rf $PLUGIN_DIR -mkdir -p $PLUGIN_DIR -cp terraform-provider-kubeproxy $PLUGIN_DIR diff --git a/contrib/terraform-provider-kubeproxy/terraform-registry-manifest.json b/contrib/terraform-provider-kubeproxy/terraform-registry-manifest.json deleted file mode 100644 index 295001a07f..0000000000 --- a/contrib/terraform-provider-kubeproxy/terraform-registry-manifest.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "version": 1, - "metadata": { - "protocol_versions": ["6.0"] - } -} diff --git a/contrib/tfcore/.goreleaser.yml b/contrib/tfcore/.goreleaser.yml deleted file mode 100644 index 4133ea2405..0000000000 --- a/contrib/tfcore/.goreleaser.yml +++ /dev/null @@ -1,45 +0,0 @@ -project_name: tfcore - -monorepo: - tag_prefix: contrib/tfcore/ - dir: contrib/tfcore/ - -builds: - - skip: true - -# add a source archive at release time -source: - enabled: true - -# Archives -archives: - - format: tar.gz - wrap_in_directory: true - format_overrides: - - goos: windows - format: zip - name_template: '{{.ProjectName}}-{{.Version}}_{{.Os}}_{{.Arch}}' - files: - - README.md - -checksum: - name_template: checksums.txt - -# Add a changelog -changelog: - sort: asc - -# track sizes -report_sizes: true - -# modified timestamps -metadata: - # Set the modified timestamp on the metadata files. - # - # Templates: allowed. 
- mod_timestamp: '{{ .CommitTimestamp }}' - -# produce software bill of lading -sboms: - - artifacts: archive - diff --git a/contrib/tfcore/Makefile b/contrib/tfcore/Makefile deleted file mode 120000 index 15e4536f4b..0000000000 --- a/contrib/tfcore/Makefile +++ /dev/null @@ -1 +0,0 @@ -../../make/go.Makefile \ No newline at end of file diff --git a/contrib/tfcore/README.md b/contrib/tfcore/README.md deleted file mode 100644 index 466544215b..0000000000 --- a/contrib/tfcore/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Terraform Core - -[![Go Reference](https://pkg.go.dev/badge/github.com/synapsecns/sanguine/contrib/tfcore.svg)](https://pkg.go.dev/github.com/synapsecns/sanguine/contrib/tfcore) -[![Go Report Card](https://goreportcard.com/badge/github.com/synapsecns/sanguine/contrib/tfcore)](https://goreportcard.com/report/github.com/synapsecns/sanguine/contrib/tfcore) - -This directory contains the common utilities used by different terraform providers diff --git a/contrib/tfcore/generated/google/doc.go b/contrib/tfcore/generated/google/doc.go deleted file mode 100644 index 6c2b695290..0000000000 --- a/contrib/tfcore/generated/google/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package google is a generated bundle file that copies the google module with some additional exports we need for our provider. 
-package google diff --git a/contrib/tfcore/generated/google/generate.go b/contrib/tfcore/generated/google/generate.go deleted file mode 100644 index 22a00002d8..0000000000 --- a/contrib/tfcore/generated/google/generate.go +++ /dev/null @@ -1,12 +0,0 @@ -package google - -import _ "golang.org/x/tools/benchmark/parse" - -// required by go:generate -import _ "golang.org/x/mod/semver" - -// required for copying the module -import _ "github.com/hashicorp/terraform-provider-google/v4/google" - -// Note: we can't actually exclude this module from codeanalysis since we import it -//go:generate go run github.com/synapsecns/sanguine/tools/bundle -prefix "" -pkg google -o google_gen.go github.com/hashicorp/terraform-provider-google/v4/google diff --git a/contrib/tfcore/generated/google/getters.go b/contrib/tfcore/generated/google/getters.go deleted file mode 100644 index e8b99e63b3..0000000000 --- a/contrib/tfcore/generated/google/getters.go +++ /dev/null @@ -1,15 +0,0 @@ -package google - -import ( - "context" - "golang.org/x/oauth2" -) - -// GetTokenSource exports the token source for use by the provider -func (c *Config) GetTokenSource() oauth2.TokenSource { - return c.tokenSource -} - -func (c *Config) GetContext() context.Context { - return c.context -} diff --git a/contrib/tfcore/generated/google/google_gen.go b/contrib/tfcore/generated/google/google_gen.go deleted file mode 100644 index 200202cf98..0000000000 --- a/contrib/tfcore/generated/google/google_gen.go +++ /dev/null @@ -1,255823 +0,0 @@ -// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. 
-// nolint -package google - -import ( - compute_operation_bytes "bytes" - data_source_storage_object_signed_url_bytes "bytes" - deployment_manager_operation_bytes "bytes" - resource_binary_authorization_policy_bytes "bytes" - resource_compute_backend_service_bytes "bytes" - resource_compute_firewall_bytes "bytes" - resource_compute_vpn_tunnel_bytes "bytes" - resource_dns_managed_zone_bytes "bytes" - resource_dns_policy_bytes "bytes" - resource_folder_access_approval_settings_bytes "bytes" - resource_sourcerepo_repository_bytes "bytes" - resource_storage_bucket_bytes "bytes" - resource_storage_bucket_object_bytes "bytes" - retry_transport_bytes "bytes" - sqladmin_operation_bytes "bytes" - transport_bytes "bytes" - batcher_context "context" - bigtable_client_factory_context "context" - compute_operation_context "context" - config_context "context" - container_operation_context "context" - mtls_util_context "context" - provider_context "context" - resource_app_engine_application_context "context" - resource_assured_workloads_workload_context "context" - resource_bigquery_data_transfer_config_context "context" - resource_bigquery_table_context "context" - resource_bigtable_gc_policy_context "context" - resource_bigtable_instance_context "context" - resource_bigtable_instance_migrate_context "context" - resource_bigtable_table_context "context" - resource_billing_budget_context "context" - resource_cloud_run_service_context "context" - resource_cloud_scheduler_job_context "context" - resource_cloudbuild_trigger_context "context" - resource_compute_disk_context "context" - resource_compute_firewall_context "context" - resource_compute_firewall_policy_association_context "context" - resource_compute_firewall_policy_context "context" - resource_compute_firewall_policy_rule_context "context" - resource_compute_forwarding_rule_context "context" - resource_compute_global_forwarding_rule_context "context" - resource_compute_health_check_context "context" - 
resource_compute_instance_context "context" - resource_compute_instance_template_context "context" - resource_compute_region_backend_service_context "context" - resource_compute_router_context "context" - resource_compute_router_nat_context "context" - resource_compute_security_policy_context "context" - resource_compute_ssl_policy_context "context" - resource_compute_subnetwork_context "context" - resource_container_cluster_context "context" - resource_dataflow_job_context "context" - resource_dataproc_workflow_template_context "context" - resource_deployment_manager_deployment_context "context" - resource_endpoints_service_context "context" - resource_eventarc_trigger_context "context" - resource_google_project_context "context" - resource_kms_crypto_key_context "context" - resource_logging_project_sink_context "context" - resource_monitoring_notification_channel_context "context" - resource_org_policy_policy_context "context" - resource_privateca_certificate_template_context "context" - resource_redis_instance_context "context" - resource_spanner_database_context "context" - resource_sql_database_instance_context "context" - resource_storage_bucket_acl_context "context" - resource_storage_bucket_context "context" - resource_storage_object_acl_context "context" - resource_tpu_node_context "context" - resource_workflows_workflow_context "context" - retry_transport_context "context" - data_source_storage_object_signed_url_crypto "crypto" - resource_storage_bucket_object_md5 "crypto/md5" - data_source_storage_object_signed_url_rand "crypto/rand" - data_source_storage_object_signed_url_rsa "crypto/rsa" - common_diff_suppress_sha256 "crypto/sha256" - data_source_storage_object_signed_url_sha256 "crypto/sha256" - resource_compute_instance_sha256 "crypto/sha256" - resource_storage_bucket_object_sha256 "crypto/sha256" - data_source_storage_object_signed_url_x509 "crypto/x509" - data_source_google_kms_secret_base64 "encoding/base64" - 
data_source_google_kms_secret_ciphertext_base64 "encoding/base64" - data_source_secret_manager_secret_version_base64 "encoding/base64" - data_source_storage_object_signed_url_base64 "encoding/base64" - resource_compute_instance_base64 "encoding/base64" - resource_endpoints_service_base64 "encoding/base64" - resource_endpoints_service_migration_base64 "encoding/base64" - resource_kms_secret_ciphertext_base64 "encoding/base64" - resource_secret_manager_secret_version_base64 "encoding/base64" - resource_storage_bucket_object_base64 "encoding/base64" - validation_base64 "encoding/base64" - common_diff_suppress_hex "encoding/hex" - access_context_manager_operation_json "encoding/json" - active_directory_operation_json "encoding/json" - apigee_operation_json "encoding/json" - appengine_operation_json "encoding/json" - convert_json "encoding/json" - data_source_google_iam_policy_json "encoding/json" - data_source_google_netblock_ip_ranges_json "encoding/json" - datastore_operation_json "encoding/json" - dialogflow_cx_operation_json "encoding/json" - filestore_operation_json "encoding/json" - firestore_operation_json "encoding/json" - game_services_operation_json "encoding/json" - gke_hub_operation_json "encoding/json" - iam_json "encoding/json" - memcache_operation_json "encoding/json" - ml_engine_operation_json "encoding/json" - network_management_operation_json "encoding/json" - notebooks_operation_json "encoding/json" - privateca_operation_json "encoding/json" - redis_operation_json "encoding/json" - resource_bigquery_routine_json "encoding/json" - resource_bigquery_table_json "encoding/json" - resource_data_catalog_entry_json "encoding/json" - resource_endpoints_service_json "encoding/json" - resource_firestore_document_json "encoding/json" - resource_google_folder_json "encoding/json" - resource_healthcare_hl7_v2_store_json "encoding/json" - resource_iam_policy_json "encoding/json" - resource_manager_operation_json "encoding/json" - service_usage_operation_json 
"encoding/json" - serviceusage_operation_json "encoding/json" - spanner_operation_json "encoding/json" - tags_operation_json "encoding/json" - tpu_operation_json "encoding/json" - transport_json "encoding/json" - vertex_ai_operation_json "encoding/json" - vpc_access_operation_json "encoding/json" - workflows_operation_json "encoding/json" - data_source_storage_object_signed_url_pem "encoding/pem" - compute_operation_errors "errors" - container_operation_errors "errors" - data_source_compute_network_endpoint_group_errors "errors" - data_source_google_compute_instance_group_errors "errors" - data_source_storage_object_signed_url_errors "errors" - iam_bigquery_dataset_errors "errors" - metadata_errors "errors" - resource_bigquery_table_errors "errors" - resource_compute_instance_errors "errors" - resource_dataproc_cluster_errors "errors" - resource_endpoints_service_errors "errors" - resource_iam_audit_config_errors "errors" - resource_iam_binding_errors "errors" - resource_iam_member_errors "errors" - resource_iam_policy_errors "errors" - resource_logging_project_sink_errors "errors" - resource_sql_database_instance_errors "errors" - resource_storage_bucket_errors "errors" - retry_transport_errors "errors" - self_link_helpers_errors "errors" - test_utils_errors "errors" - transport_errors "errors" - access_context_manager_operation_fmt "fmt" - active_directory_operation_fmt "fmt" - apigee_operation_fmt "fmt" - appengine_operation_fmt "fmt" - batcher_fmt "fmt" - cloudfunctions_operation_fmt "fmt" - cloudrun_polling_fmt "fmt" - common_operation_fmt "fmt" - common_polling_fmt "fmt" - composer_operation_fmt "fmt" - compute_instance_helpers_fmt "fmt" - compute_instance_network_interface_helpers_fmt "fmt" - compute_operation_fmt "fmt" - config_fmt "fmt" - container_operation_fmt "fmt" - data_google_game_services_game_server_deployment_rollout_fmt "fmt" - data_source_cloud_identity_group_memberships_fmt "fmt" - data_source_cloud_identity_groups_fmt "fmt" - 
data_source_cloud_run_locations_fmt "fmt" - data_source_cloud_run_service_fmt "fmt" - data_source_compute_lb_ip_ranges_fmt "fmt" - data_source_compute_network_endpoint_group_fmt "fmt" - data_source_container_registry_image_fmt "fmt" - data_source_container_registry_repository_fmt "fmt" - data_source_dns_keys_fmt "fmt" - data_source_dns_managed_zone_fmt "fmt" - data_source_google_active_folder_fmt "fmt" - data_source_google_app_engine_default_service_account_fmt "fmt" - data_source_google_bigquery_default_service_account_fmt "fmt" - data_source_google_billing_account_fmt "fmt" - data_source_google_client_config_fmt "fmt" - data_source_google_client_openid_userinfo_fmt "fmt" - data_source_google_composer_environment_fmt "fmt" - data_source_google_composer_image_versions_fmt "fmt" - data_source_google_compute_address_fmt "fmt" - data_source_google_compute_backend_bucket_fmt "fmt" - data_source_google_compute_backend_service_fmt "fmt" - data_source_google_compute_default_service_account_fmt "fmt" - data_source_google_compute_forwarding_rule_fmt "fmt" - data_source_google_compute_global_address_fmt "fmt" - data_source_google_compute_ha_vpn_gateway_fmt "fmt" - data_source_google_compute_image_fmt "fmt" - data_source_google_compute_instance_fmt "fmt" - data_source_google_compute_instance_group_fmt "fmt" - data_source_google_compute_instance_serial_port_fmt "fmt" - data_source_google_compute_instance_template_fmt "fmt" - data_source_google_compute_network_fmt "fmt" - data_source_google_compute_node_types_fmt "fmt" - data_source_google_compute_region_instance_group_fmt "fmt" - data_source_google_compute_region_ssl_certificate_fmt "fmt" - data_source_google_compute_regions_fmt "fmt" - data_source_google_compute_resource_policy_fmt "fmt" - data_source_google_compute_router_status_fmt "fmt" - data_source_google_compute_ssl_certificate_fmt "fmt" - data_source_google_compute_ssl_policy_fmt "fmt" - data_source_google_compute_subnetwork_fmt "fmt" - 
data_source_google_compute_vpn_gateway_fmt "fmt" - data_source_google_compute_zones_fmt "fmt" - data_source_google_container_engine_versions_fmt "fmt" - data_source_google_folder_fmt "fmt" - data_source_google_folder_organization_policy_fmt "fmt" - data_source_google_global_compute_forwarding_rule_fmt "fmt" - data_source_google_iam_policy_fmt "fmt" - data_source_google_iam_role_fmt "fmt" - data_source_google_iam_testable_permissions_fmt "fmt" - data_source_google_kms_crypto_key_version_fmt "fmt" - data_source_google_kms_secret_ciphertext_fmt "fmt" - data_source_google_kms_secret_fmt "fmt" - data_source_google_monitoring_uptime_check_ips_fmt "fmt" - data_source_google_netblock_ip_ranges_fmt "fmt" - data_source_google_organization_fmt "fmt" - data_source_google_project_fmt "fmt" - data_source_google_project_organization_policy_fmt "fmt" - data_source_google_projects_fmt "fmt" - data_source_google_service_account_access_token_fmt "fmt" - data_source_google_service_account_fmt "fmt" - data_source_google_service_account_id_token_fmt "fmt" - data_source_google_service_account_key_fmt "fmt" - data_source_google_sql_ca_certs_fmt "fmt" - data_source_google_storage_bucket_object_fmt "fmt" - data_source_google_storage_project_service_account_fmt "fmt" - data_source_google_storage_transfer_project_service_account_fmt "fmt" - data_source_iap_client_fmt "fmt" - data_source_monitoring_notification_channel_fmt "fmt" - data_source_monitoring_service_fmt "fmt" - data_source_pubsub_topic_fmt "fmt" - data_source_secret_manager_secret_fmt "fmt" - data_source_secret_manager_secret_version_fmt "fmt" - data_source_sourcerepo_repository_fmt "fmt" - data_source_spanner_instance_fmt "fmt" - data_source_sql_backup_run_fmt "fmt" - data_source_storage_bucket_object_content_fmt "fmt" - data_source_storage_object_signed_url_fmt "fmt" - data_source_tpu_tensorflow_versions_fmt "fmt" - dataproc_cluster_operation_fmt "fmt" - dataproc_job_operation_fmt "fmt" - datastore_operation_fmt "fmt" - 
dcl_logger_fmt "fmt" - deployment_manager_operation_fmt "fmt" - dialogflow_cx_operation_fmt "fmt" - error_retry_predicates_fmt "fmt" - field_helpers_fmt "fmt" - filestore_operation_fmt "fmt" - firestore_operation_fmt "fmt" - game_services_operation_fmt "fmt" - gke_hub_operation_fmt "fmt" - healthcare_utils_fmt "fmt" - iam_batching_fmt "fmt" - iam_bigquery_dataset_fmt "fmt" - iam_bigquery_table_fmt "fmt" - iam_bigtable_instance_fmt "fmt" - iam_bigtable_table_fmt "fmt" - iam_billing_account_fmt "fmt" - iam_binary_authorization_attestor_fmt "fmt" - iam_cloud_run_service_fmt "fmt" - iam_cloudfunctions_function_fmt "fmt" - iam_compute_disk_fmt "fmt" - iam_compute_image_fmt "fmt" - iam_compute_instance_fmt "fmt" - iam_compute_region_disk_fmt "fmt" - iam_compute_subnetwork_fmt "fmt" - iam_data_catalog_entry_group_fmt "fmt" - iam_data_catalog_tag_template_fmt "fmt" - iam_dataproc_cluster_fmt "fmt" - iam_dataproc_job_fmt "fmt" - iam_endpoints_service_fmt "fmt" - iam_fmt "fmt" - iam_folder_fmt "fmt" - iam_healthcare_consent_store_fmt "fmt" - iam_healthcare_dataset_fmt "fmt" - iam_healthcare_dicom_store_fmt "fmt" - iam_healthcare_fhir_store_fmt "fmt" - iam_healthcare_hl7_v2_store_fmt "fmt" - iam_iap_app_engine_service_fmt "fmt" - iam_iap_app_engine_version_fmt "fmt" - iam_iap_tunnel_fmt "fmt" - iam_iap_tunnel_instance_fmt "fmt" - iam_iap_web_backend_service_fmt "fmt" - iam_iap_web_fmt "fmt" - iam_iap_web_type_app_engine_fmt "fmt" - iam_iap_web_type_compute_fmt "fmt" - iam_kms_crypto_key_fmt "fmt" - iam_kms_key_ring_fmt "fmt" - iam_notebooks_instance_fmt "fmt" - iam_organization_fmt "fmt" - iam_privateca_ca_pool_fmt "fmt" - iam_project_fmt "fmt" - iam_pubsub_subscription_fmt "fmt" - iam_pubsub_topic_fmt "fmt" - iam_secret_manager_secret_fmt "fmt" - iam_service_account_fmt "fmt" - iam_sourcerepo_repository_fmt "fmt" - iam_spanner_database_fmt "fmt" - iam_spanner_instance_fmt "fmt" - iam_storage_bucket_fmt "fmt" - iam_tags_tag_key_fmt "fmt" - iam_tags_tag_value_fmt "fmt" - 
image_fmt "fmt" - import_fmt "fmt" - kms_utils_fmt "fmt" - logging_exclusion_billing_account_fmt "fmt" - logging_exclusion_folder_fmt "fmt" - logging_exclusion_organization_fmt "fmt" - logging_exclusion_project_fmt "fmt" - logging_utils_fmt "fmt" - memcache_operation_fmt "fmt" - metadata_fmt "fmt" - ml_engine_operation_fmt "fmt" - mtls_util_fmt "fmt" - network_management_operation_fmt "fmt" - network_services_operation_fmt "fmt" - notebooks_operation_fmt "fmt" - orgpolicy_utils_fmt "fmt" - privateca_operation_fmt "fmt" - provider_fmt "fmt" - pubsub_utils_fmt "fmt" - redis_operation_fmt "fmt" - regional_utils_fmt "fmt" - resource_access_context_manager_access_level_condition_fmt "fmt" - resource_access_context_manager_access_level_fmt "fmt" - resource_access_context_manager_access_levels_fmt "fmt" - resource_access_context_manager_access_policy_fmt "fmt" - resource_access_context_manager_gcp_user_access_binding_fmt "fmt" - resource_access_context_manager_service_perimeter_fmt "fmt" - resource_access_context_manager_service_perimeter_resource_fmt "fmt" - resource_access_context_manager_service_perimeters_fmt "fmt" - resource_active_directory_domain_fmt "fmt" - resource_active_directory_domain_trust_fmt "fmt" - resource_apigee_envgroup_attachment_fmt "fmt" - resource_apigee_envgroup_fmt "fmt" - resource_apigee_environment_fmt "fmt" - resource_apigee_instance_attachment_fmt "fmt" - resource_apigee_instance_fmt "fmt" - resource_apigee_organization_fmt "fmt" - resource_app_engine_application_fmt "fmt" - resource_app_engine_application_url_dispatch_rules_fmt "fmt" - resource_app_engine_domain_mapping_fmt "fmt" - resource_app_engine_firewall_rule_fmt "fmt" - resource_app_engine_flexible_app_version_fmt "fmt" - resource_app_engine_service_network_settings_fmt "fmt" - resource_app_engine_service_split_traffic_fmt "fmt" - resource_app_engine_standard_app_version_fmt "fmt" - resource_assured_workloads_workload_fmt "fmt" - resource_bigquery_data_transfer_config_fmt "fmt" - 
resource_bigquery_dataset_access_fmt "fmt" - resource_bigquery_dataset_fmt "fmt" - resource_bigquery_job_fmt "fmt" - resource_bigquery_reservation_fmt "fmt" - resource_bigquery_routine_fmt "fmt" - resource_bigquery_table_fmt "fmt" - resource_bigtable_app_profile_fmt "fmt" - resource_bigtable_gc_policy_fmt "fmt" - resource_bigtable_instance_fmt "fmt" - resource_bigtable_table_fmt "fmt" - resource_billing_budget_fmt "fmt" - resource_binary_authorization_attestor_fmt "fmt" - resource_binary_authorization_policy_fmt "fmt" - resource_cloud_asset_folder_feed_fmt "fmt" - resource_cloud_asset_organization_feed_fmt "fmt" - resource_cloud_asset_project_feed_fmt "fmt" - resource_cloud_identity_group_fmt "fmt" - resource_cloud_identity_group_membership_fmt "fmt" - resource_cloud_run_domain_mapping_fmt "fmt" - resource_cloud_run_service_fmt "fmt" - resource_cloud_scheduler_job_fmt "fmt" - resource_cloud_tasks_queue_fmt "fmt" - resource_cloudbuild_trigger_fmt "fmt" - resource_cloudfunctions_function_fmt "fmt" - resource_cloudiot_device_fmt "fmt" - resource_cloudiot_registry_fmt "fmt" - resource_composer_environment_fmt "fmt" - resource_compute_address_fmt "fmt" - resource_compute_attached_disk_fmt "fmt" - resource_compute_autoscaler_fmt "fmt" - resource_compute_backend_bucket_fmt "fmt" - resource_compute_backend_bucket_signed_url_key_fmt "fmt" - resource_compute_backend_service_fmt "fmt" - resource_compute_backend_service_signed_url_key_fmt "fmt" - resource_compute_disk_fmt "fmt" - resource_compute_disk_resource_policy_attachment_fmt "fmt" - resource_compute_external_vpn_gateway_fmt "fmt" - resource_compute_firewall_fmt "fmt" - resource_compute_firewall_migrate_fmt "fmt" - resource_compute_firewall_policy_association_fmt "fmt" - resource_compute_firewall_policy_fmt "fmt" - resource_compute_firewall_policy_rule_fmt "fmt" - resource_compute_forwarding_rule_fmt "fmt" - resource_compute_global_address_fmt "fmt" - resource_compute_global_forwarding_rule_fmt "fmt" - 
resource_compute_global_network_endpoint_fmt "fmt" - resource_compute_global_network_endpoint_group_fmt "fmt" - resource_compute_ha_vpn_gateway_fmt "fmt" - resource_compute_health_check_fmt "fmt" - resource_compute_http_health_check_fmt "fmt" - resource_compute_https_health_check_fmt "fmt" - resource_compute_image_fmt "fmt" - resource_compute_instance_fmt "fmt" - resource_compute_instance_from_template_fmt "fmt" - resource_compute_instance_group_fmt "fmt" - resource_compute_instance_group_manager_fmt "fmt" - resource_compute_instance_group_migrate_fmt "fmt" - resource_compute_instance_group_named_port_fmt "fmt" - resource_compute_instance_migrate_fmt "fmt" - resource_compute_instance_template_fmt "fmt" - resource_compute_instance_template_migrate_fmt "fmt" - resource_compute_interconnect_attachment_fmt "fmt" - resource_compute_managed_ssl_certificate_fmt "fmt" - resource_compute_network_endpoint_fmt "fmt" - resource_compute_network_endpoint_group_fmt "fmt" - resource_compute_network_fmt "fmt" - resource_compute_network_peering_fmt "fmt" - resource_compute_network_peering_routes_config_fmt "fmt" - resource_compute_node_group_fmt "fmt" - resource_compute_node_template_fmt "fmt" - resource_compute_packet_mirroring_fmt "fmt" - resource_compute_per_instance_config_fmt "fmt" - resource_compute_project_default_network_tier_fmt "fmt" - resource_compute_project_metadata_fmt "fmt" - resource_compute_project_metadata_item_fmt "fmt" - resource_compute_region_autoscaler_fmt "fmt" - resource_compute_region_backend_service_fmt "fmt" - resource_compute_region_disk_fmt "fmt" - resource_compute_region_disk_resource_policy_attachment_fmt "fmt" - resource_compute_region_health_check_fmt "fmt" - resource_compute_region_instance_group_manager_fmt "fmt" - resource_compute_region_network_endpoint_group_fmt "fmt" - resource_compute_region_per_instance_config_fmt "fmt" - resource_compute_region_ssl_certificate_fmt "fmt" - resource_compute_region_target_http_proxy_fmt "fmt" - 
resource_compute_region_target_https_proxy_fmt "fmt" - resource_compute_region_url_map_fmt "fmt" - resource_compute_reservation_fmt "fmt" - resource_compute_resource_policy_fmt "fmt" - resource_compute_route_fmt "fmt" - resource_compute_router_fmt "fmt" - resource_compute_router_interface_fmt "fmt" - resource_compute_router_nat_fmt "fmt" - resource_compute_router_peer_fmt "fmt" - resource_compute_security_policy_fmt "fmt" - resource_compute_service_attachment_fmt "fmt" - resource_compute_shared_vpc_host_project_fmt "fmt" - resource_compute_shared_vpc_service_project_fmt "fmt" - resource_compute_snapshot_fmt "fmt" - resource_compute_ssl_certificate_fmt "fmt" - resource_compute_ssl_policy_fmt "fmt" - resource_compute_subnetwork_fmt "fmt" - resource_compute_target_grpc_proxy_fmt "fmt" - resource_compute_target_http_proxy_fmt "fmt" - resource_compute_target_https_proxy_fmt "fmt" - resource_compute_target_instance_fmt "fmt" - resource_compute_target_pool_fmt "fmt" - resource_compute_target_ssl_proxy_fmt "fmt" - resource_compute_target_tcp_proxy_fmt "fmt" - resource_compute_url_map_fmt "fmt" - resource_compute_vpn_gateway_fmt "fmt" - resource_compute_vpn_tunnel_fmt "fmt" - resource_container_analysis_note_fmt "fmt" - resource_container_analysis_occurrence_fmt "fmt" - resource_container_cluster_fmt "fmt" - resource_container_cluster_migrate_fmt "fmt" - resource_container_node_pool_fmt "fmt" - resource_container_node_pool_migrate_fmt "fmt" - resource_container_registry_fmt "fmt" - resource_data_catalog_entry_fmt "fmt" - resource_data_catalog_entry_group_fmt "fmt" - resource_data_catalog_tag_fmt "fmt" - resource_data_catalog_tag_template_fmt "fmt" - resource_data_loss_prevention_deidentify_template_fmt "fmt" - resource_data_loss_prevention_inspect_template_fmt "fmt" - resource_data_loss_prevention_job_trigger_fmt "fmt" - resource_data_loss_prevention_stored_info_type_fmt "fmt" - resource_dataflow_job_fmt "fmt" - resource_dataproc_autoscaling_policy_fmt "fmt" - 
resource_dataproc_cluster_fmt "fmt" - resource_dataproc_job_fmt "fmt" - resource_dataproc_workflow_template_fmt "fmt" - resource_datastore_index_fmt "fmt" - resource_deployment_manager_deployment_fmt "fmt" - resource_dialogflow_agent_fmt "fmt" - resource_dialogflow_cx_agent_fmt "fmt" - resource_dialogflow_cx_entity_type_fmt "fmt" - resource_dialogflow_cx_environment_fmt "fmt" - resource_dialogflow_cx_flow_fmt "fmt" - resource_dialogflow_cx_intent_fmt "fmt" - resource_dialogflow_cx_page_fmt "fmt" - resource_dialogflow_cx_version_fmt "fmt" - resource_dialogflow_entity_type_fmt "fmt" - resource_dialogflow_fulfillment_fmt "fmt" - resource_dialogflow_intent_fmt "fmt" - resource_dns_managed_zone_fmt "fmt" - resource_dns_policy_fmt "fmt" - resource_dns_record_set_fmt "fmt" - resource_endpoints_service_fmt "fmt" - resource_endpoints_service_migration_fmt "fmt" - resource_essential_contacts_contact_fmt "fmt" - resource_eventarc_trigger_fmt "fmt" - resource_filestore_instance_fmt "fmt" - resource_firestore_document_fmt "fmt" - resource_firestore_index_fmt "fmt" - resource_folder_access_approval_settings_fmt "fmt" - resource_game_services_game_server_cluster_fmt "fmt" - resource_game_services_game_server_config_fmt "fmt" - resource_game_services_game_server_deployment_fmt "fmt" - resource_game_services_game_server_deployment_rollout_fmt "fmt" - resource_game_services_realm_fmt "fmt" - resource_gke_hub_membership_fmt "fmt" - resource_google_billing_subaccount_fmt "fmt" - resource_google_folder_fmt "fmt" - resource_google_folder_organization_policy_fmt "fmt" - resource_google_organization_iam_custom_role_fmt "fmt" - resource_google_organization_policy_fmt "fmt" - resource_google_project_default_service_accounts_fmt "fmt" - resource_google_project_fmt "fmt" - resource_google_project_iam_custom_role_fmt "fmt" - resource_google_project_migrate_fmt "fmt" - resource_google_project_organization_policy_fmt "fmt" - resource_google_project_service_fmt "fmt" - 
resource_google_service_account_fmt "fmt" - resource_google_service_account_key_fmt "fmt" - resource_google_service_networking_peered_dns_domain_fmt "fmt" - resource_healthcare_consent_store_fmt "fmt" - resource_healthcare_dataset_fmt "fmt" - resource_healthcare_dicom_store_fmt "fmt" - resource_healthcare_fhir_store_fmt "fmt" - resource_healthcare_hl7_v2_store_fmt "fmt" - resource_iam_audit_config_fmt "fmt" - resource_iam_binding_fmt "fmt" - resource_iam_member_fmt "fmt" - resource_iam_policy_fmt "fmt" - resource_iap_brand_fmt "fmt" - resource_iap_client_fmt "fmt" - resource_identity_platform_default_supported_idp_config_fmt "fmt" - resource_identity_platform_inbound_saml_config_fmt "fmt" - resource_identity_platform_oauth_idp_config_fmt "fmt" - resource_identity_platform_tenant_default_supported_idp_config_fmt "fmt" - resource_identity_platform_tenant_fmt "fmt" - resource_identity_platform_tenant_inbound_saml_config_fmt "fmt" - resource_identity_platform_tenant_oauth_idp_config_fmt "fmt" - resource_kms_crypto_key_fmt "fmt" - resource_kms_key_ring_fmt "fmt" - resource_kms_key_ring_import_job_fmt "fmt" - resource_kms_secret_ciphertext_fmt "fmt" - resource_logging_billing_account_bucket_config_fmt "fmt" - resource_logging_billing_account_sink_fmt "fmt" - resource_logging_bucket_config_fmt "fmt" - resource_logging_exclusion_fmt "fmt" - resource_logging_folder_bucket_config_fmt "fmt" - resource_logging_folder_sink_fmt "fmt" - resource_logging_metric_fmt "fmt" - resource_logging_organization_bucket_config_fmt "fmt" - resource_logging_organization_sink_fmt "fmt" - resource_logging_project_bucket_config_fmt "fmt" - resource_logging_project_sink_fmt "fmt" - resource_logging_sink_fmt "fmt" - resource_manager_operation_fmt "fmt" - resource_memcache_instance_fmt "fmt" - resource_ml_engine_model_fmt "fmt" - resource_monitoring_alert_policy_fmt "fmt" - resource_monitoring_custom_service_fmt "fmt" - resource_monitoring_dashboard_fmt "fmt" - resource_monitoring_group_fmt "fmt" - 
resource_monitoring_metric_descriptor_fmt "fmt" - resource_monitoring_notification_channel_fmt "fmt" - resource_monitoring_slo_fmt "fmt" - resource_monitoring_uptime_check_config_fmt "fmt" - resource_network_management_connectivity_test_resource_fmt "fmt" - resource_network_services_edge_cache_keyset_fmt "fmt" - resource_network_services_edge_cache_origin_fmt "fmt" - resource_network_services_edge_cache_service_fmt "fmt" - resource_notebooks_environment_fmt "fmt" - resource_notebooks_instance_fmt "fmt" - resource_notebooks_location_fmt "fmt" - resource_org_policy_policy_fmt "fmt" - resource_organization_access_approval_settings_fmt "fmt" - resource_os_config_patch_deployment_fmt "fmt" - resource_os_login_ssh_public_key_fmt "fmt" - resource_privateca_ca_pool_fmt "fmt" - resource_privateca_certificate_authority_fmt "fmt" - resource_privateca_certificate_fmt "fmt" - resource_privateca_certificate_template_fmt "fmt" - resource_project_access_approval_settings_fmt "fmt" - resource_pubsub_lite_reservation_fmt "fmt" - resource_pubsub_lite_subscription_fmt "fmt" - resource_pubsub_lite_topic_fmt "fmt" - resource_pubsub_schema_fmt "fmt" - resource_pubsub_subscription_fmt "fmt" - resource_pubsub_topic_fmt "fmt" - resource_redis_instance_fmt "fmt" - resource_resource_manager_lien_fmt "fmt" - resource_scc_notification_config_fmt "fmt" - resource_scc_source_fmt "fmt" - resource_secret_manager_secret_fmt "fmt" - resource_secret_manager_secret_version_fmt "fmt" - resource_service_networking_connection_fmt "fmt" - resource_sourcerepo_repository_fmt "fmt" - resource_spanner_database_fmt "fmt" - resource_spanner_instance_fmt "fmt" - resource_sql_database_fmt "fmt" - resource_sql_database_instance_fmt "fmt" - resource_sql_source_representation_instance_fmt "fmt" - resource_sql_ssl_cert_fmt "fmt" - resource_sql_user_fmt "fmt" - resource_sql_user_migrate_fmt "fmt" - resource_storage_bucket_access_control_fmt "fmt" - resource_storage_bucket_acl_fmt "fmt" - resource_storage_bucket_fmt 
"fmt" - resource_storage_bucket_object_fmt "fmt" - resource_storage_default_object_access_control_fmt "fmt" - resource_storage_default_object_acl_fmt "fmt" - resource_storage_hmac_key_fmt "fmt" - resource_storage_notification_fmt "fmt" - resource_storage_object_access_control_fmt "fmt" - resource_storage_object_acl_fmt "fmt" - resource_storage_transfer_job_fmt "fmt" - resource_tags_tag_binding_fmt "fmt" - resource_tags_tag_key_fmt "fmt" - resource_tags_tag_value_fmt "fmt" - resource_tpu_node_fmt "fmt" - resource_usage_export_bucket_fmt "fmt" - resource_vertex_ai_dataset_fmt "fmt" - resource_vpc_access_connector_fmt "fmt" - resource_workflows_workflow_fmt "fmt" - retry_transport_fmt "fmt" - self_link_helpers_fmt "fmt" - service_account_waiter_fmt "fmt" - service_usage_operation_fmt "fmt" - serviceman_operation_fmt "fmt" - serviceusage_batching_fmt "fmt" - spanner_operation_fmt "fmt" - sqladmin_operation_fmt "fmt" - stateful_mig_polling_fmt "fmt" - tags_operation_fmt "fmt" - test_utils_fmt "fmt" - tpgtools_utils_fmt "fmt" - tpu_operation_fmt "fmt" - transport_fmt "fmt" - utils_fmt "fmt" - validation_fmt "fmt" - vertex_ai_operation_fmt "fmt" - vpc_access_operation_fmt "fmt" - workflows_operation_fmt "fmt" - hashcode_crc32 "hash/crc32" - error_retry_predicates_io "io" - resource_storage_bucket_object_io "io" - data_source_google_netblock_ip_ranges_ioutil "io/ioutil" - data_source_storage_bucket_object_content_ioutil "io/ioutil" - path_or_contents_ioutil "io/ioutil" - resource_storage_bucket_object_ioutil "io/ioutil" - retry_transport_ioutil "io/ioutil" - batcher_log "log" - cloudrun_polling_log "log" - common_diff_suppress_log "log" - common_operation_log "log" - common_polling_log "log" - compute_operation_log "log" - config_log "log" - container_operation_log "log" - data_source_cloud_run_locations_log "log" - data_source_dns_keys_log "log" - data_source_google_composer_image_versions_log "log" - data_source_google_compute_image_log "log" - 
data_source_google_compute_node_types_log "log" - data_source_google_compute_region_instance_group_log "log" - data_source_google_compute_regions_log "log" - data_source_google_compute_zones_log "log" - data_source_google_kms_crypto_key_version_log "log" - data_source_google_kms_secret_ciphertext_log "log" - data_source_google_kms_secret_log "log" - data_source_google_service_account_access_token_log "log" - data_source_google_sql_ca_certs_log "log" - data_source_secret_manager_secret_version_log "log" - data_source_storage_object_signed_url_log "log" - data_source_tpu_tensorflow_versions_log "log" - dcl_logger_log "log" - error_retry_predicates_log "log" - iam_log "log" - import_log "log" - metadata_log "log" - mutexkv_log "log" - resource_access_context_manager_access_level_condition_log "log" - resource_access_context_manager_access_level_log "log" - resource_access_context_manager_access_levels_log "log" - resource_access_context_manager_access_policy_log "log" - resource_access_context_manager_gcp_user_access_binding_log "log" - resource_access_context_manager_service_perimeter_log "log" - resource_access_context_manager_service_perimeter_resource_log "log" - resource_access_context_manager_service_perimeters_log "log" - resource_active_directory_domain_log "log" - resource_active_directory_domain_trust_log "log" - resource_apigee_envgroup_attachment_log "log" - resource_apigee_envgroup_log "log" - resource_apigee_environment_log "log" - resource_apigee_instance_attachment_log "log" - resource_apigee_instance_log "log" - resource_apigee_organization_log "log" - resource_app_engine_application_log "log" - resource_app_engine_application_url_dispatch_rules_log "log" - resource_app_engine_domain_mapping_log "log" - resource_app_engine_firewall_rule_log "log" - resource_app_engine_flexible_app_version_log "log" - resource_app_engine_service_network_settings_log "log" - resource_app_engine_service_split_traffic_log "log" - 
resource_app_engine_standard_app_version_log "log" - resource_assured_workloads_workload_log "log" - resource_bigquery_data_transfer_config_log "log" - resource_bigquery_dataset_access_log "log" - resource_bigquery_dataset_log "log" - resource_bigquery_job_log "log" - resource_bigquery_reservation_log "log" - resource_bigquery_routine_log "log" - resource_bigquery_table_log "log" - resource_bigtable_app_profile_log "log" - resource_bigtable_gc_policy_log "log" - resource_bigtable_instance_log "log" - resource_bigtable_instance_migrate_log "log" - resource_bigtable_table_log "log" - resource_billing_budget_log "log" - resource_binary_authorization_attestor_log "log" - resource_binary_authorization_policy_log "log" - resource_cloud_asset_folder_feed_log "log" - resource_cloud_asset_organization_feed_log "log" - resource_cloud_asset_project_feed_log "log" - resource_cloud_identity_group_log "log" - resource_cloud_identity_group_membership_log "log" - resource_cloud_run_domain_mapping_log "log" - resource_cloud_run_service_log "log" - resource_cloud_scheduler_job_log "log" - resource_cloud_tasks_queue_log "log" - resource_cloudbuild_trigger_log "log" - resource_cloudfunctions_function_log "log" - resource_cloudiot_device_log "log" - resource_cloudiot_registry_log "log" - resource_composer_environment_log "log" - resource_compute_address_log "log" - resource_compute_attached_disk_log "log" - resource_compute_autoscaler_log "log" - resource_compute_backend_bucket_log "log" - resource_compute_backend_bucket_signed_url_key_log "log" - resource_compute_backend_service_log "log" - resource_compute_backend_service_signed_url_key_log "log" - resource_compute_disk_log "log" - resource_compute_disk_resource_policy_attachment_log "log" - resource_compute_external_vpn_gateway_log "log" - resource_compute_firewall_log "log" - resource_compute_firewall_migrate_log "log" - resource_compute_firewall_policy_association_log "log" - resource_compute_firewall_policy_log "log" - 
resource_compute_firewall_policy_rule_log "log" - resource_compute_forwarding_rule_log "log" - resource_compute_global_address_log "log" - resource_compute_global_forwarding_rule_log "log" - resource_compute_global_network_endpoint_group_log "log" - resource_compute_global_network_endpoint_log "log" - resource_compute_ha_vpn_gateway_log "log" - resource_compute_health_check_log "log" - resource_compute_http_health_check_log "log" - resource_compute_https_health_check_log "log" - resource_compute_image_log "log" - resource_compute_instance_from_template_log "log" - resource_compute_instance_group_log "log" - resource_compute_instance_group_manager_log "log" - resource_compute_instance_group_migrate_log "log" - resource_compute_instance_group_named_port_log "log" - resource_compute_instance_log "log" - resource_compute_instance_migrate_log "log" - resource_compute_instance_template_migrate_log "log" - resource_compute_interconnect_attachment_log "log" - resource_compute_managed_ssl_certificate_log "log" - resource_compute_network_endpoint_group_log "log" - resource_compute_network_endpoint_log "log" - resource_compute_network_log "log" - resource_compute_network_peering_log "log" - resource_compute_network_peering_routes_config_log "log" - resource_compute_node_group_log "log" - resource_compute_node_template_log "log" - resource_compute_packet_mirroring_log "log" - resource_compute_per_instance_config_log "log" - resource_compute_project_default_network_tier_log "log" - resource_compute_project_metadata_item_log "log" - resource_compute_project_metadata_log "log" - resource_compute_region_autoscaler_log "log" - resource_compute_region_backend_service_log "log" - resource_compute_region_disk_log "log" - resource_compute_region_disk_resource_policy_attachment_log "log" - resource_compute_region_health_check_log "log" - resource_compute_region_instance_group_manager_log "log" - resource_compute_region_network_endpoint_group_log "log" - 
resource_compute_region_per_instance_config_log "log" - resource_compute_region_ssl_certificate_log "log" - resource_compute_region_target_http_proxy_log "log" - resource_compute_region_target_https_proxy_log "log" - resource_compute_region_url_map_log "log" - resource_compute_reservation_log "log" - resource_compute_resource_policy_log "log" - resource_compute_route_log "log" - resource_compute_router_interface_log "log" - resource_compute_router_log "log" - resource_compute_router_nat_log "log" - resource_compute_router_peer_log "log" - resource_compute_security_policy_log "log" - resource_compute_service_attachment_log "log" - resource_compute_shared_vpc_host_project_log "log" - resource_compute_shared_vpc_service_project_log "log" - resource_compute_snapshot_log "log" - resource_compute_ssl_certificate_log "log" - resource_compute_ssl_policy_log "log" - resource_compute_subnetwork_log "log" - resource_compute_target_grpc_proxy_log "log" - resource_compute_target_http_proxy_log "log" - resource_compute_target_https_proxy_log "log" - resource_compute_target_instance_log "log" - resource_compute_target_pool_log "log" - resource_compute_target_ssl_proxy_log "log" - resource_compute_target_tcp_proxy_log "log" - resource_compute_url_map_log "log" - resource_compute_vpn_gateway_log "log" - resource_compute_vpn_tunnel_log "log" - resource_container_analysis_note_log "log" - resource_container_analysis_occurrence_log "log" - resource_container_cluster_log "log" - resource_container_cluster_migrate_log "log" - resource_container_node_pool_log "log" - resource_container_node_pool_migrate_log "log" - resource_container_registry_log "log" - resource_data_catalog_entry_group_log "log" - resource_data_catalog_entry_log "log" - resource_data_catalog_tag_log "log" - resource_data_catalog_tag_template_log "log" - resource_data_loss_prevention_deidentify_template_log "log" - resource_data_loss_prevention_inspect_template_log "log" - resource_data_loss_prevention_job_trigger_log 
"log" - resource_data_loss_prevention_stored_info_type_log "log" - resource_dataflow_job_log "log" - resource_dataproc_autoscaling_policy_log "log" - resource_dataproc_cluster_log "log" - resource_dataproc_job_log "log" - resource_dataproc_workflow_template_log "log" - resource_datastore_index_log "log" - resource_deployment_manager_deployment_log "log" - resource_dialogflow_agent_log "log" - resource_dialogflow_cx_agent_log "log" - resource_dialogflow_cx_entity_type_log "log" - resource_dialogflow_cx_environment_log "log" - resource_dialogflow_cx_flow_log "log" - resource_dialogflow_cx_intent_log "log" - resource_dialogflow_cx_page_log "log" - resource_dialogflow_cx_version_log "log" - resource_dialogflow_entity_type_log "log" - resource_dialogflow_fulfillment_log "log" - resource_dialogflow_intent_log "log" - resource_dns_managed_zone_log "log" - resource_dns_policy_log "log" - resource_dns_record_set_log "log" - resource_endpoints_service_log "log" - resource_endpoints_service_migration_log "log" - resource_essential_contacts_contact_log "log" - resource_eventarc_trigger_log "log" - resource_filestore_instance_log "log" - resource_firestore_document_log "log" - resource_firestore_index_log "log" - resource_folder_access_approval_settings_log "log" - resource_game_services_game_server_cluster_log "log" - resource_game_services_game_server_config_log "log" - resource_game_services_game_server_deployment_log "log" - resource_game_services_game_server_deployment_rollout_log "log" - resource_game_services_realm_log "log" - resource_gke_hub_membership_log "log" - resource_google_project_default_service_accounts_log "log" - resource_google_project_log "log" - resource_google_project_migrate_log "log" - resource_google_project_service_log "log" - resource_google_service_account_key_log "log" - resource_google_service_networking_peered_dns_domain_log "log" - resource_healthcare_consent_store_log "log" - resource_healthcare_dataset_log "log" - 
resource_healthcare_dicom_store_log "log" - resource_healthcare_fhir_store_log "log" - resource_healthcare_hl7_v2_store_log "log" - resource_iam_audit_config_log "log" - resource_iam_binding_log "log" - resource_iam_member_log "log" - resource_iap_brand_log "log" - resource_iap_client_log "log" - resource_identity_platform_default_supported_idp_config_log "log" - resource_identity_platform_inbound_saml_config_log "log" - resource_identity_platform_oauth_idp_config_log "log" - resource_identity_platform_tenant_default_supported_idp_config_log "log" - resource_identity_platform_tenant_inbound_saml_config_log "log" - resource_identity_platform_tenant_log "log" - resource_identity_platform_tenant_oauth_idp_config_log "log" - resource_kms_crypto_key_log "log" - resource_kms_key_ring_import_job_log "log" - resource_kms_key_ring_log "log" - resource_kms_secret_ciphertext_log "log" - resource_logging_bucket_config_log "log" - resource_logging_metric_log "log" - resource_memcache_instance_log "log" - resource_ml_engine_model_log "log" - resource_monitoring_alert_policy_log "log" - resource_monitoring_custom_service_log "log" - resource_monitoring_group_log "log" - resource_monitoring_metric_descriptor_log "log" - resource_monitoring_notification_channel_log "log" - resource_monitoring_slo_log "log" - resource_monitoring_uptime_check_config_log "log" - resource_network_management_connectivity_test_resource_log "log" - resource_network_services_edge_cache_keyset_log "log" - resource_network_services_edge_cache_origin_log "log" - resource_network_services_edge_cache_service_log "log" - resource_notebooks_environment_log "log" - resource_notebooks_instance_log "log" - resource_notebooks_location_log "log" - resource_org_policy_policy_log "log" - resource_organization_access_approval_settings_log "log" - resource_os_config_patch_deployment_log "log" - resource_os_login_ssh_public_key_log "log" - resource_privateca_ca_pool_log "log" - resource_privateca_certificate_authority_log 
"log" - resource_privateca_certificate_log "log" - resource_privateca_certificate_template_log "log" - resource_project_access_approval_settings_log "log" - resource_pubsub_lite_reservation_log "log" - resource_pubsub_lite_subscription_log "log" - resource_pubsub_lite_topic_log "log" - resource_pubsub_schema_log "log" - resource_pubsub_subscription_log "log" - resource_pubsub_topic_log "log" - resource_redis_instance_log "log" - resource_resource_manager_lien_log "log" - resource_scc_notification_config_log "log" - resource_scc_source_log "log" - resource_secret_manager_secret_log "log" - resource_secret_manager_secret_version_log "log" - resource_service_networking_connection_log "log" - resource_sourcerepo_repository_log "log" - resource_spanner_database_log "log" - resource_spanner_instance_log "log" - resource_sql_database_instance_log "log" - resource_sql_database_log "log" - resource_sql_source_representation_instance_log "log" - resource_sql_ssl_cert_log "log" - resource_sql_user_log "log" - resource_sql_user_migrate_log "log" - resource_storage_bucket_access_control_log "log" - resource_storage_bucket_acl_log "log" - resource_storage_bucket_log "log" - resource_storage_bucket_object_log "log" - resource_storage_default_object_access_control_log "log" - resource_storage_hmac_key_log "log" - resource_storage_object_access_control_log "log" - resource_storage_transfer_job_log "log" - resource_tags_tag_binding_log "log" - resource_tags_tag_key_log "log" - resource_tags_tag_value_log "log" - resource_tpu_node_log "log" - resource_usage_export_bucket_log "log" - resource_vertex_ai_dataset_log "log" - resource_vpc_access_connector_log "log" - resource_workflows_workflow_log "log" - retry_transport_log "log" - retry_utils_log "log" - service_usage_operation_log "log" - serviceusage_batching_log "log" - sql_utils_log "log" - sqladmin_operation_log "log" - tpgtools_utils_log "log" - utils_log "log" - resource_storage_bucket_math "math" - common_diff_suppress_net 
"net" - error_retry_predicates_net "net" - resource_compute_subnetwork_net "net" - resource_compute_vpn_tunnel_net "net" - resource_dns_record_set_net "net" - validation_net "net" - config_http "net/http" - data_source_google_netblock_ip_ranges_http "net/http" - data_source_storage_bucket_object_content_http "net/http" - dataproc_job_operation_http "net/http" - header_transport_http "net/http" - resource_google_project_http "net/http" - resource_storage_bucket_object_http "net/http" - retry_transport_http "net/http" - transport_http "net/http" - retry_transport_httputil "net/http/httputil" - data_source_google_storage_bucket_object_url "net/url" - data_source_monitoring_service_urlneturl "net/url" - data_source_storage_object_signed_url_url "net/url" - error_retry_predicates_url "net/url" - mtls_util_url "net/url" - resource_cloudfunctions_function_url "net/url" - resource_service_networking_connection_url "net/url" - self_link_helpers_url "net/url" - transport_url "net/url" - bigtable_client_factory_os "os" - data_source_storage_object_signed_url_os "os" - path_or_contents_os "os" - provider_os "os" - resource_storage_bucket_object_os "os" - utils_os "os" - common_diff_suppress_reflect "reflect" - compute_instance_helpers_reflect "reflect" - convert_reflect "reflect" - iam_reflect "reflect" - resource_access_context_manager_access_level_condition_reflect "reflect" - resource_access_context_manager_access_level_reflect "reflect" - resource_access_context_manager_access_levels_reflect "reflect" - resource_access_context_manager_access_policy_reflect "reflect" - resource_access_context_manager_gcp_user_access_binding_reflect "reflect" - resource_access_context_manager_service_perimeter_reflect "reflect" - resource_access_context_manager_service_perimeter_resource_reflect "reflect" - resource_access_context_manager_service_perimeters_reflect "reflect" - resource_active_directory_domain_reflect "reflect" - resource_active_directory_domain_trust_reflect "reflect" - 
resource_apigee_envgroup_attachment_reflect "reflect" - resource_apigee_envgroup_reflect "reflect" - resource_apigee_environment_reflect "reflect" - resource_apigee_instance_attachment_reflect "reflect" - resource_apigee_instance_reflect "reflect" - resource_apigee_organization_reflect "reflect" - resource_app_engine_application_url_dispatch_rules_reflect "reflect" - resource_app_engine_domain_mapping_reflect "reflect" - resource_app_engine_firewall_rule_reflect "reflect" - resource_app_engine_flexible_app_version_reflect "reflect" - resource_app_engine_service_network_settings_reflect "reflect" - resource_app_engine_service_split_traffic_reflect "reflect" - resource_app_engine_standard_app_version_reflect "reflect" - resource_bigquery_data_transfer_config_reflect "reflect" - resource_bigquery_dataset_access_reflect "reflect" - resource_bigquery_dataset_reflect "reflect" - resource_bigquery_job_reflect "reflect" - resource_bigquery_reservation_reflect "reflect" - resource_bigquery_routine_reflect "reflect" - resource_bigtable_app_profile_reflect "reflect" - resource_billing_budget_reflect "reflect" - resource_binary_authorization_attestor_reflect "reflect" - resource_binary_authorization_policy_reflect "reflect" - resource_cloud_asset_folder_feed_reflect "reflect" - resource_cloud_asset_organization_feed_reflect "reflect" - resource_cloud_asset_project_feed_reflect "reflect" - resource_cloud_identity_group_membership_reflect "reflect" - resource_cloud_identity_group_reflect "reflect" - resource_cloud_run_domain_mapping_reflect "reflect" - resource_cloud_run_service_reflect "reflect" - resource_cloud_scheduler_job_reflect "reflect" - resource_cloud_tasks_queue_reflect "reflect" - resource_cloudbuild_trigger_reflect "reflect" - resource_cloudiot_device_reflect "reflect" - resource_cloudiot_registry_reflect "reflect" - resource_compute_address_reflect "reflect" - resource_compute_autoscaler_reflect "reflect" - resource_compute_backend_bucket_reflect "reflect" - 
resource_compute_backend_bucket_signed_url_key_reflect "reflect" - resource_compute_backend_service_reflect "reflect" - resource_compute_backend_service_signed_url_key_reflect "reflect" - resource_compute_disk_reflect "reflect" - resource_compute_disk_resource_policy_attachment_reflect "reflect" - resource_compute_external_vpn_gateway_reflect "reflect" - resource_compute_firewall_reflect "reflect" - resource_compute_global_address_reflect "reflect" - resource_compute_global_network_endpoint_group_reflect "reflect" - resource_compute_global_network_endpoint_reflect "reflect" - resource_compute_ha_vpn_gateway_reflect "reflect" - resource_compute_health_check_reflect "reflect" - resource_compute_http_health_check_reflect "reflect" - resource_compute_https_health_check_reflect "reflect" - resource_compute_image_reflect "reflect" - resource_compute_instance_group_named_port_reflect "reflect" - resource_compute_instance_template_reflect "reflect" - resource_compute_interconnect_attachment_reflect "reflect" - resource_compute_managed_ssl_certificate_reflect "reflect" - resource_compute_network_endpoint_group_reflect "reflect" - resource_compute_network_endpoint_reflect "reflect" - resource_compute_network_peering_routes_config_reflect "reflect" - resource_compute_network_reflect "reflect" - resource_compute_node_group_reflect "reflect" - resource_compute_node_template_reflect "reflect" - resource_compute_packet_mirroring_reflect "reflect" - resource_compute_per_instance_config_reflect "reflect" - resource_compute_region_autoscaler_reflect "reflect" - resource_compute_region_backend_service_reflect "reflect" - resource_compute_region_disk_reflect "reflect" - resource_compute_region_disk_resource_policy_attachment_reflect "reflect" - resource_compute_region_health_check_reflect "reflect" - resource_compute_region_network_endpoint_group_reflect "reflect" - resource_compute_region_per_instance_config_reflect "reflect" - resource_compute_region_ssl_certificate_reflect 
"reflect" - resource_compute_region_target_http_proxy_reflect "reflect" - resource_compute_region_target_https_proxy_reflect "reflect" - resource_compute_region_url_map_reflect "reflect" - resource_compute_reservation_reflect "reflect" - resource_compute_resource_policy_reflect "reflect" - resource_compute_route_reflect "reflect" - resource_compute_router_nat_reflect "reflect" - resource_compute_router_peer_reflect "reflect" - resource_compute_router_reflect "reflect" - resource_compute_service_attachment_reflect "reflect" - resource_compute_snapshot_reflect "reflect" - resource_compute_ssl_certificate_reflect "reflect" - resource_compute_ssl_policy_reflect "reflect" - resource_compute_subnetwork_reflect "reflect" - resource_compute_target_grpc_proxy_reflect "reflect" - resource_compute_target_http_proxy_reflect "reflect" - resource_compute_target_https_proxy_reflect "reflect" - resource_compute_target_instance_reflect "reflect" - resource_compute_target_ssl_proxy_reflect "reflect" - resource_compute_target_tcp_proxy_reflect "reflect" - resource_compute_url_map_reflect "reflect" - resource_compute_vpn_gateway_reflect "reflect" - resource_compute_vpn_tunnel_reflect "reflect" - resource_container_analysis_note_reflect "reflect" - resource_container_analysis_occurrence_reflect "reflect" - resource_container_cluster_reflect "reflect" - resource_data_catalog_entry_group_reflect "reflect" - resource_data_catalog_entry_reflect "reflect" - resource_data_catalog_tag_reflect "reflect" - resource_data_catalog_tag_template_reflect "reflect" - resource_data_loss_prevention_deidentify_template_reflect "reflect" - resource_data_loss_prevention_inspect_template_reflect "reflect" - resource_data_loss_prevention_job_trigger_reflect "reflect" - resource_data_loss_prevention_stored_info_type_reflect "reflect" - resource_dataproc_autoscaling_policy_reflect "reflect" - resource_datastore_index_reflect "reflect" - resource_deployment_manager_deployment_reflect "reflect" - 
resource_dialogflow_agent_reflect "reflect" - resource_dialogflow_cx_agent_reflect "reflect" - resource_dialogflow_cx_entity_type_reflect "reflect" - resource_dialogflow_cx_environment_reflect "reflect" - resource_dialogflow_cx_flow_reflect "reflect" - resource_dialogflow_cx_intent_reflect "reflect" - resource_dialogflow_cx_page_reflect "reflect" - resource_dialogflow_cx_version_reflect "reflect" - resource_dialogflow_entity_type_reflect "reflect" - resource_dialogflow_fulfillment_reflect "reflect" - resource_dialogflow_intent_reflect "reflect" - resource_dns_managed_zone_reflect "reflect" - resource_dns_policy_reflect "reflect" - resource_essential_contacts_contact_reflect "reflect" - resource_filestore_instance_reflect "reflect" - resource_firestore_document_reflect "reflect" - resource_firestore_index_reflect "reflect" - resource_folder_access_approval_settings_reflect "reflect" - resource_game_services_game_server_cluster_reflect "reflect" - resource_game_services_game_server_config_reflect "reflect" - resource_game_services_game_server_deployment_reflect "reflect" - resource_game_services_game_server_deployment_rollout_reflect "reflect" - resource_game_services_realm_reflect "reflect" - resource_gke_hub_membership_reflect "reflect" - resource_healthcare_consent_store_reflect "reflect" - resource_healthcare_dataset_reflect "reflect" - resource_healthcare_dicom_store_reflect "reflect" - resource_healthcare_fhir_store_reflect "reflect" - resource_healthcare_hl7_v2_store_reflect "reflect" - resource_iap_brand_reflect "reflect" - resource_iap_client_reflect "reflect" - resource_identity_platform_default_supported_idp_config_reflect "reflect" - resource_identity_platform_inbound_saml_config_reflect "reflect" - resource_identity_platform_oauth_idp_config_reflect "reflect" - resource_identity_platform_tenant_default_supported_idp_config_reflect "reflect" - resource_identity_platform_tenant_inbound_saml_config_reflect "reflect" - 
resource_identity_platform_tenant_oauth_idp_config_reflect "reflect" - resource_identity_platform_tenant_reflect "reflect" - resource_kms_crypto_key_reflect "reflect" - resource_kms_key_ring_import_job_reflect "reflect" - resource_kms_key_ring_reflect "reflect" - resource_kms_secret_ciphertext_reflect "reflect" - resource_logging_metric_reflect "reflect" - resource_memcache_instance_reflect "reflect" - resource_ml_engine_model_reflect "reflect" - resource_monitoring_alert_policy_reflect "reflect" - resource_monitoring_custom_service_reflect "reflect" - resource_monitoring_dashboard_reflect "reflect" - resource_monitoring_group_reflect "reflect" - resource_monitoring_metric_descriptor_reflect "reflect" - resource_monitoring_notification_channel_reflect "reflect" - resource_monitoring_slo_reflect "reflect" - resource_monitoring_uptime_check_config_reflect "reflect" - resource_network_management_connectivity_test_resource_reflect "reflect" - resource_network_services_edge_cache_keyset_reflect "reflect" - resource_network_services_edge_cache_origin_reflect "reflect" - resource_network_services_edge_cache_service_reflect "reflect" - resource_notebooks_environment_reflect "reflect" - resource_notebooks_instance_reflect "reflect" - resource_notebooks_location_reflect "reflect" - resource_organization_access_approval_settings_reflect "reflect" - resource_os_config_patch_deployment_reflect "reflect" - resource_os_login_ssh_public_key_reflect "reflect" - resource_privateca_ca_pool_reflect "reflect" - resource_privateca_certificate_authority_reflect "reflect" - resource_privateca_certificate_reflect "reflect" - resource_project_access_approval_settings_reflect "reflect" - resource_pubsub_lite_reservation_reflect "reflect" - resource_pubsub_lite_subscription_reflect "reflect" - resource_pubsub_lite_topic_reflect "reflect" - resource_pubsub_schema_reflect "reflect" - resource_pubsub_subscription_reflect "reflect" - resource_pubsub_topic_reflect "reflect" - 
resource_redis_instance_reflect "reflect" - resource_resource_manager_lien_reflect "reflect" - resource_scc_notification_config_reflect "reflect" - resource_scc_source_reflect "reflect" - resource_secret_manager_secret_reflect "reflect" - resource_secret_manager_secret_version_reflect "reflect" - resource_sourcerepo_repository_reflect "reflect" - resource_spanner_database_reflect "reflect" - resource_spanner_instance_reflect "reflect" - resource_sql_database_reflect "reflect" - resource_sql_source_representation_instance_reflect "reflect" - resource_storage_bucket_access_control_reflect "reflect" - resource_storage_default_object_access_control_reflect "reflect" - resource_storage_hmac_key_reflect "reflect" - resource_storage_object_access_control_reflect "reflect" - resource_tags_tag_binding_reflect "reflect" - resource_tags_tag_key_reflect "reflect" - resource_tags_tag_value_reflect "reflect" - resource_tpu_node_reflect "reflect" - resource_vertex_ai_dataset_reflect "reflect" - resource_vpc_access_connector_reflect "reflect" - resource_workflows_workflow_reflect "reflect" - test_utils_reflect "reflect" - transport_reflect "reflect" - appengine_operation_regexp "regexp" - config_regexp "regexp" - data_source_google_compute_address_regexp "regexp" - data_source_google_iam_policy_regexp "regexp" - data_source_google_service_account_key_regexp "regexp" - data_source_secret_manager_secret_version_regexp "regexp" - field_helpers_regexp "regexp" - healthcare_utils_regexp "regexp" - iam_sourcerepo_repository_regexp "regexp" - iam_spanner_instance_regexp "regexp" - image_regexp "regexp" - import_regexp "regexp" - kms_utils_regexp "regexp" - logging_utils_regexp "regexp" - pubsub_utils_regexp "regexp" - resource_bigquery_dataset_regexp "regexp" - resource_bigquery_job_regexp "regexp" - resource_bigquery_table_regexp "regexp" - resource_binary_authorization_attestor_regexp "regexp" - resource_binary_authorization_policy_regexp "regexp" - 
resource_cloud_asset_folder_feed_regexp "regexp" - resource_cloud_asset_organization_feed_regexp "regexp" - resource_cloud_identity_group_membership_regexp "regexp" - resource_cloud_run_service_regexp "regexp" - resource_cloud_scheduler_job_regexp "regexp" - resource_cloudfunctions_function_regexp "regexp" - resource_cloudiot_registry_regexp "regexp" - resource_composer_environment_regexp "regexp" - resource_compute_backend_service_regexp "regexp" - resource_compute_node_group_regexp "regexp" - resource_compute_snapshot_regexp "regexp" - resource_compute_target_pool_regexp "regexp" - resource_container_cluster_regexp "regexp" - resource_container_node_pool_regexp "regexp" - resource_data_catalog_entry_group_regexp "regexp" - resource_data_catalog_entry_regexp "regexp" - resource_data_catalog_tag_regexp "regexp" - resource_data_catalog_tag_template_regexp "regexp" - resource_dataproc_cluster_regexp "regexp" - resource_endpoints_service_regexp "regexp" - resource_firestore_document_regexp "regexp" - resource_google_project_regexp "regexp" - resource_iam_binding_regexp "regexp" - resource_iam_member_regexp "regexp" - resource_kms_crypto_key_regexp "regexp" - resource_kms_secret_ciphertext_regexp "regexp" - resource_logging_bucket_config_regexp "regexp" - resource_logging_exclusion_regexp "regexp" - resource_pubsub_lite_subscription_regexp "regexp" - resource_pubsub_subscription_regexp "regexp" - resource_redis_instance_regexp "regexp" - resource_secret_manager_secret_version_regexp "regexp" - resource_service_networking_connection_regexp "regexp" - resource_spanner_instance_regexp "regexp" - resource_tpu_node_regexp "regexp" - self_link_helpers_regexp "regexp" - source_repo_utils_regexp "regexp" - transport_regexp "regexp" - validation_regexp "regexp" - resource_storage_bucket_runtime "runtime" - data_source_cloud_run_locations_sort "sort" - data_source_google_compute_instance_template_sort "sort" - data_source_google_compute_node_types_sort "sort" - 
data_source_google_compute_regions_sort "sort" - data_source_google_compute_zones_sort "sort" - data_source_google_iam_policy_sort "sort" - data_source_storage_object_signed_url_sort "sort" - data_source_tpu_tensorflow_versions_sort "sort" - iam_sort "sort" - metadata_sort "sort" - resource_bigquery_table_sort "sort" - resource_compute_firewall_migrate_sort "sort" - resource_compute_firewall_sort "sort" - resource_compute_network_peering_sort "sort" - utils_sort "sort" - common_diff_suppress_strconv "strconv" - data_source_google_compute_image_strconv "strconv" - data_source_google_iam_policy_strconv "strconv" - data_source_google_kms_crypto_key_version_strconv "strconv" - data_source_storage_object_signed_url_strconv "strconv" - import_strconv "strconv" - kms_utils_strconv "strconv" - privateca_utils_strconv "strconv" - resource_app_engine_firewall_rule_strconv "strconv" - resource_app_engine_flexible_app_version_strconv "strconv" - resource_app_engine_standard_app_version_strconv "strconv" - resource_bigquery_data_transfer_config_strconv "strconv" - resource_bigquery_dataset_strconv "strconv" - resource_bigquery_job_strconv "strconv" - resource_bigquery_reservation_strconv "strconv" - resource_bigquery_routine_strconv "strconv" - resource_bigquery_table_strconv "strconv" - resource_billing_budget_strconv "strconv" - resource_cloud_run_domain_mapping_strconv "strconv" - resource_cloud_run_service_strconv "strconv" - resource_cloud_scheduler_job_strconv "strconv" - resource_cloud_tasks_queue_strconv "strconv" - resource_cloudbuild_trigger_strconv "strconv" - resource_cloudfunctions_function_strconv "strconv" - resource_cloudiot_device_strconv "strconv" - resource_compute_address_strconv "strconv" - resource_compute_autoscaler_strconv "strconv" - resource_compute_backend_bucket_strconv "strconv" - resource_compute_backend_service_strconv "strconv" - resource_compute_disk_strconv "strconv" - resource_compute_external_vpn_gateway_strconv "strconv" - 
resource_compute_firewall_migrate_strconv "strconv" - resource_compute_firewall_strconv "strconv" - resource_compute_global_address_strconv "strconv" - resource_compute_global_network_endpoint_group_strconv "strconv" - resource_compute_ha_vpn_gateway_strconv "strconv" - resource_compute_health_check_strconv "strconv" - resource_compute_http_health_check_strconv "strconv" - resource_compute_https_health_check_strconv "strconv" - resource_compute_image_strconv "strconv" - resource_compute_instance_group_migrate_strconv "strconv" - resource_compute_instance_group_named_port_strconv "strconv" - resource_compute_instance_migrate_strconv "strconv" - resource_compute_instance_strconv "strconv" - resource_compute_interconnect_attachment_strconv "strconv" - resource_compute_managed_ssl_certificate_strconv "strconv" - resource_compute_network_endpoint_group_strconv "strconv" - resource_compute_network_strconv "strconv" - resource_compute_node_group_strconv "strconv" - resource_compute_packet_mirroring_strconv "strconv" - resource_compute_region_autoscaler_strconv "strconv" - resource_compute_region_backend_service_strconv "strconv" - resource_compute_region_disk_strconv "strconv" - resource_compute_region_health_check_strconv "strconv" - resource_compute_region_ssl_certificate_strconv "strconv" - resource_compute_region_target_http_proxy_strconv "strconv" - resource_compute_region_target_https_proxy_strconv "strconv" - resource_compute_region_url_map_strconv "strconv" - resource_compute_reservation_strconv "strconv" - resource_compute_resource_policy_strconv "strconv" - resource_compute_route_strconv "strconv" - resource_compute_router_nat_strconv "strconv" - resource_compute_router_peer_strconv "strconv" - resource_compute_router_strconv "strconv" - resource_compute_service_attachment_strconv "strconv" - resource_compute_snapshot_strconv "strconv" - resource_compute_ssl_certificate_strconv "strconv" - resource_compute_target_http_proxy_strconv "strconv" - 
resource_compute_target_https_proxy_strconv "strconv" - resource_compute_target_ssl_proxy_strconv "strconv" - resource_compute_target_tcp_proxy_strconv "strconv" - resource_compute_url_map_strconv "strconv" - resource_compute_vpn_gateway_strconv "strconv" - resource_compute_vpn_tunnel_strconv "strconv" - resource_container_cluster_migrate_strconv "strconv" - resource_data_catalog_entry_strconv "strconv" - resource_data_catalog_tag_strconv "strconv" - resource_data_catalog_tag_template_strconv "strconv" - resource_data_loss_prevention_deidentify_template_strconv "strconv" - resource_data_loss_prevention_inspect_template_strconv "strconv" - resource_data_loss_prevention_job_trigger_strconv "strconv" - resource_dataproc_autoscaling_policy_strconv "strconv" - resource_dataproc_cluster_strconv "strconv" - resource_dialogflow_cx_intent_strconv "strconv" - resource_dialogflow_intent_strconv "strconv" - resource_dns_managed_zone_strconv "strconv" - resource_endpoints_service_strconv "strconv" - resource_filestore_instance_strconv "strconv" - resource_google_project_strconv "strconv" - resource_google_service_networking_peered_dns_domain_strconv "strconv" - resource_healthcare_fhir_store_strconv "strconv" - resource_logging_metric_strconv "strconv" - resource_memcache_instance_strconv "strconv" - resource_monitoring_alert_policy_strconv "strconv" - resource_monitoring_uptime_check_config_strconv "strconv" - resource_network_management_connectivity_test_resource_strconv "strconv" - resource_network_services_edge_cache_origin_strconv "strconv" - resource_notebooks_instance_strconv "strconv" - resource_os_config_patch_deployment_strconv "strconv" - resource_pubsub_lite_reservation_strconv "strconv" - resource_pubsub_lite_topic_strconv "strconv" - resource_pubsub_subscription_strconv "strconv" - resource_redis_instance_strconv "strconv" - resource_resource_manager_lien_strconv "strconv" - resource_sourcerepo_repository_strconv "strconv" - resource_spanner_instance_strconv 
"strconv" - resource_sql_source_representation_instance_strconv "strconv" - resource_storage_bucket_acl_strconv "strconv" - resource_storage_bucket_strconv "strconv" - resource_storage_default_object_access_control_strconv "strconv" - resource_storage_object_access_control_strconv "strconv" - resource_tpu_node_strconv "strconv" - resource_vpc_access_connector_strconv "strconv" - validation_strconv "strconv" - common_diff_suppress_strings "strings" - config_strings "strings" - data_source_container_registry_image_strings "strings" - data_source_container_registry_repository_strings "strings" - data_source_google_billing_account_strings "strings" - data_source_google_compute_address_strings "strings" - data_source_google_compute_zones_strings "strings" - data_source_google_container_engine_versions_strings "strings" - data_source_google_folder_strings "strings" - data_source_google_iam_testable_permissions_strings "strings" - data_source_google_kms_crypto_key_version_strings "strings" - data_source_google_organization_strings "strings" - data_source_google_service_account_access_token_strings "strings" - data_source_google_service_account_id_token_strings "strings" - data_source_google_service_account_strings "strings" - data_source_google_storage_bucket_object_strings "strings" - data_source_monitoring_notification_channel_strings "strings" - data_source_storage_object_signed_url_strings "strings" - error_retry_predicates_strings "strings" - healthcare_utils_strings "strings" - iam_bigquery_dataset_strings "strings" - iam_folder_strings "strings" - iam_iap_web_type_app_engine_strings "strings" - iam_spanner_instance_strings "strings" - iam_strings "strings" - image_strings "strings" - import_strings "strings" - kms_utils_strings "strings" - mtls_util_strings "strings" - regional_utils_strings "strings" - resource_access_context_manager_access_level_strings "strings" - resource_access_context_manager_access_policy_strings "strings" - 
resource_access_context_manager_gcp_user_access_binding_strings "strings" - resource_access_context_manager_service_perimeter_strings "strings" - resource_active_directory_domain_strings "strings" - resource_apigee_envgroup_strings "strings" - resource_apigee_environment_strings "strings" - resource_apigee_instance_strings "strings" - resource_apigee_organization_strings "strings" - resource_app_engine_domain_mapping_strings "strings" - resource_app_engine_firewall_rule_strings "strings" - resource_app_engine_service_network_settings_strings "strings" - resource_app_engine_service_split_traffic_strings "strings" - resource_bigquery_data_transfer_config_strings "strings" - resource_bigquery_dataset_access_strings "strings" - resource_bigquery_reservation_strings "strings" - resource_bigquery_table_strings "strings" - resource_bigtable_app_profile_strings "strings" - resource_billing_budget_strings "strings" - resource_cloud_asset_folder_feed_strings "strings" - resource_cloud_asset_organization_feed_strings "strings" - resource_cloud_asset_project_feed_strings "strings" - resource_cloud_identity_group_strings "strings" - resource_cloud_run_domain_mapping_strings "strings" - resource_cloud_run_service_strings "strings" - resource_cloud_scheduler_job_strings "strings" - resource_cloud_tasks_queue_strings "strings" - resource_cloudfunctions_function_strings "strings" - resource_cloudiot_device_strings "strings" - resource_cloudiot_registry_strings "strings" - resource_composer_environment_strings "strings" - resource_compute_attached_disk_strings "strings" - resource_compute_disk_strings "strings" - resource_compute_firewall_migrate_strings "strings" - resource_compute_firewall_strings "strings" - resource_compute_health_check_strings "strings" - resource_compute_instance_group_manager_strings "strings" - resource_compute_instance_group_migrate_strings "strings" - resource_compute_instance_group_strings "strings" - resource_compute_instance_migrate_strings "strings" - 
resource_compute_instance_strings "strings" - resource_compute_instance_template_strings "strings" - resource_compute_network_peering_strings "strings" - resource_compute_region_disk_strings "strings" - resource_compute_region_instance_group_manager_strings "strings" - resource_compute_router_interface_strings "strings" - resource_compute_router_nat_strings "strings" - resource_compute_router_peer_strings "strings" - resource_compute_shared_vpc_service_project_strings "strings" - resource_compute_snapshot_strings "strings" - resource_compute_target_instance_strings "strings" - resource_compute_target_pool_strings "strings" - resource_compute_url_map_strings "strings" - resource_compute_vpn_tunnel_strings "strings" - resource_container_analysis_note_strings "strings" - resource_container_analysis_occurrence_strings "strings" - resource_container_cluster_migrate_strings "strings" - resource_container_cluster_strings "strings" - resource_container_node_pool_strings "strings" - resource_container_registry_strings "strings" - resource_data_catalog_entry_group_strings "strings" - resource_data_catalog_entry_strings "strings" - resource_data_catalog_tag_strings "strings" - resource_data_catalog_tag_template_strings "strings" - resource_data_loss_prevention_deidentify_template_strings "strings" - resource_data_loss_prevention_inspect_template_strings "strings" - resource_data_loss_prevention_job_trigger_strings "strings" - resource_data_loss_prevention_stored_info_type_strings "strings" - resource_dataflow_job_strings "strings" - resource_dataproc_cluster_strings "strings" - resource_dataproc_job_strings "strings" - resource_dialogflow_cx_agent_strings "strings" - resource_dialogflow_cx_entity_type_strings "strings" - resource_dialogflow_cx_environment_strings "strings" - resource_dialogflow_cx_flow_strings "strings" - resource_dialogflow_cx_intent_strings "strings" - resource_dialogflow_cx_page_strings "strings" - resource_dialogflow_cx_version_strings "strings" - 
resource_dialogflow_entity_type_strings "strings" - resource_dialogflow_fulfillment_strings "strings" - resource_dialogflow_intent_strings "strings" - resource_dns_managed_zone_strings "strings" - resource_dns_policy_strings "strings" - resource_dns_record_set_strings "strings" - resource_endpoints_service_strings "strings" - resource_essential_contacts_contact_strings "strings" - resource_filestore_instance_strings "strings" - resource_firestore_index_strings "strings" - resource_folder_access_approval_settings_strings "strings" - resource_game_services_game_server_cluster_strings "strings" - resource_game_services_game_server_deployment_rollout_strings "strings" - resource_game_services_game_server_deployment_strings "strings" - resource_game_services_realm_strings "strings" - resource_gke_hub_membership_strings "strings" - resource_google_billing_subaccount_strings "strings" - resource_google_folder_strings "strings" - resource_google_organization_policy_strings "strings" - resource_google_project_default_service_accounts_strings "strings" - resource_google_project_iam_custom_role_strings "strings" - resource_google_project_service_strings "strings" - resource_google_project_strings "strings" - resource_google_service_account_strings "strings" - resource_google_service_networking_peered_dns_domain_strings "strings" - resource_healthcare_consent_store_strings "strings" - resource_healthcare_dataset_strings "strings" - resource_healthcare_dicom_store_strings "strings" - resource_healthcare_fhir_store_strings "strings" - resource_healthcare_hl7_v2_store_strings "strings" - resource_iam_audit_config_strings "strings" - resource_iam_binding_strings "strings" - resource_iam_member_strings "strings" - resource_iap_brand_strings "strings" - resource_iap_client_strings "strings" - resource_identity_platform_default_supported_idp_config_strings "strings" - resource_identity_platform_inbound_saml_config_strings "strings" - 
resource_identity_platform_oauth_idp_config_strings "strings" - resource_identity_platform_tenant_default_supported_idp_config_strings "strings" - resource_identity_platform_tenant_inbound_saml_config_strings "strings" - resource_identity_platform_tenant_oauth_idp_config_strings "strings" - resource_identity_platform_tenant_strings "strings" - resource_kms_crypto_key_strings "strings" - resource_kms_key_ring_import_job_strings "strings" - resource_logging_billing_account_bucket_config_strings "strings" - resource_logging_bucket_config_strings "strings" - resource_logging_exclusion_strings "strings" - resource_logging_folder_bucket_config_strings "strings" - resource_logging_folder_sink_strings "strings" - resource_logging_organization_bucket_config_strings "strings" - resource_logging_organization_sink_strings "strings" - resource_logging_project_bucket_config_strings "strings" - resource_logging_sink_strings "strings" - resource_memcache_instance_strings "strings" - resource_monitoring_alert_policy_strings "strings" - resource_monitoring_custom_service_strings "strings" - resource_monitoring_slo_strings "strings" - resource_monitoring_uptime_check_config_strings "strings" - resource_network_management_connectivity_test_resource_strings "strings" - resource_network_services_edge_cache_keyset_strings "strings" - resource_network_services_edge_cache_origin_strings "strings" - resource_network_services_edge_cache_service_strings "strings" - resource_notebooks_instance_strings "strings" - resource_organization_access_approval_settings_strings "strings" - resource_os_login_ssh_public_key_strings "strings" - resource_privateca_ca_pool_strings "strings" - resource_project_access_approval_settings_strings "strings" - resource_pubsub_lite_reservation_strings "strings" - resource_pubsub_lite_subscription_strings "strings" - resource_pubsub_lite_topic_strings "strings" - resource_pubsub_subscription_strings "strings" - resource_pubsub_topic_strings "strings" - 
resource_redis_instance_strings "strings" - resource_resource_manager_lien_strings "strings" - resource_scc_notification_config_strings "strings" - resource_scc_source_strings "strings" - resource_secret_manager_secret_strings "strings" - resource_secret_manager_secret_version_strings "strings" - resource_service_networking_connection_strings "strings" - resource_sourcerepo_repository_strings "strings" - resource_spanner_instance_strings "strings" - resource_sql_database_instance_strings "strings" - resource_sql_source_representation_instance_strings "strings" - resource_sql_user_strings "strings" - resource_storage_bucket_acl_strings "strings" - resource_storage_bucket_object_strings "strings" - resource_storage_bucket_strings "strings" - resource_storage_notification_strings "strings" - resource_storage_object_acl_strings "strings" - resource_storage_transfer_job_strings "strings" - resource_tags_tag_binding_strings "strings" - resource_tags_tag_key_strings "strings" - resource_tags_tag_value_strings "strings" - resource_vertex_ai_dataset_strings "strings" - resource_workflows_workflow_strings "strings" - self_link_helpers_strings "strings" - service_usage_operation_strings "strings" - sql_utils_strings "strings" - stateful_mig_polling_strings "strings" - transport_strings "strings" - utils_strings "strings" - validation_strings "strings" - batcher_sync "sync" - common_polling_sync "sync" - mutexkv_sync "sync" - access_context_manager_operation_time "time" - active_directory_operation_time "time" - apigee_operation_time "time" - appengine_operation_time "time" - batcher_time "time" - cloudfunctions_operation_time "time" - common_diff_suppress_time "time" - common_operation_time "time" - common_polling_time "time" - composer_operation_time "time" - compute_operation_time "time" - config_time "time" - container_operation_time "time" - data_source_cloud_identity_group_memberships_time "time" - data_source_cloud_identity_groups_time "time" - 
data_source_google_container_engine_versions_time "time" - data_source_storage_object_signed_url_time "time" - dataproc_cluster_operation_time "time" - dataproc_job_operation_time "time" - datastore_operation_time "time" - deployment_manager_operation_time "time" - dialogflow_cx_operation_time "time" - dns_change_time "time" - filestore_operation_time "time" - firestore_operation_time "time" - game_services_operation_time "time" - gke_hub_operation_time "time" - iam_batching_time "time" - iam_time "time" - kms_utils_time "time" - memcache_operation_time "time" - ml_engine_operation_time "time" - network_management_operation_time "time" - network_services_operation_time "time" - notebooks_operation_time "time" - privateca_operation_time "time" - provider_dcl_client_creation_time "time" - provider_time "time" - redis_operation_time "time" - resource_access_context_manager_access_level_condition_time "time" - resource_access_context_manager_access_level_time "time" - resource_access_context_manager_access_levels_time "time" - resource_access_context_manager_access_policy_time "time" - resource_access_context_manager_gcp_user_access_binding_time "time" - resource_access_context_manager_service_perimeter_resource_time "time" - resource_access_context_manager_service_perimeter_time "time" - resource_access_context_manager_service_perimeters_time "time" - resource_active_directory_domain_time "time" - resource_active_directory_domain_trust_time "time" - resource_apigee_envgroup_attachment_time "time" - resource_apigee_envgroup_time "time" - resource_apigee_environment_time "time" - resource_apigee_instance_attachment_time "time" - resource_apigee_instance_time "time" - resource_apigee_organization_time "time" - resource_app_engine_application_time "time" - resource_app_engine_application_url_dispatch_rules_time "time" - resource_app_engine_domain_mapping_time "time" - resource_app_engine_firewall_rule_time "time" - resource_app_engine_flexible_app_version_time "time" - 
resource_app_engine_service_network_settings_time "time" - resource_app_engine_service_split_traffic_time "time" - resource_app_engine_standard_app_version_time "time" - resource_assured_workloads_workload_time "time" - resource_bigquery_data_transfer_config_time "time" - resource_bigquery_dataset_access_time "time" - resource_bigquery_dataset_time "time" - resource_bigquery_job_time "time" - resource_bigquery_reservation_time "time" - resource_bigquery_routine_time "time" - resource_bigtable_app_profile_time "time" - resource_bigtable_gc_policy_time "time" - resource_billing_budget_time "time" - resource_binary_authorization_attestor_time "time" - resource_binary_authorization_policy_time "time" - resource_cloud_asset_folder_feed_time "time" - resource_cloud_asset_organization_feed_time "time" - resource_cloud_asset_project_feed_time "time" - resource_cloud_identity_group_membership_time "time" - resource_cloud_identity_group_time "time" - resource_cloud_run_domain_mapping_time "time" - resource_cloud_run_service_time "time" - resource_cloud_scheduler_job_time "time" - resource_cloud_tasks_queue_time "time" - resource_cloudbuild_trigger_time "time" - resource_cloudfunctions_function_time "time" - resource_cloudiot_device_time "time" - resource_cloudiot_registry_time "time" - resource_composer_environment_time "time" - resource_compute_address_time "time" - resource_compute_attached_disk_time "time" - resource_compute_autoscaler_time "time" - resource_compute_backend_bucket_signed_url_key_time "time" - resource_compute_backend_bucket_time "time" - resource_compute_backend_service_signed_url_key_time "time" - resource_compute_backend_service_time "time" - resource_compute_disk_resource_policy_attachment_time "time" - resource_compute_disk_time "time" - resource_compute_external_vpn_gateway_time "time" - resource_compute_firewall_policy_association_time "time" - resource_compute_firewall_policy_rule_time "time" - resource_compute_firewall_policy_time "time" - 
resource_compute_firewall_time "time" - resource_compute_forwarding_rule_time "time" - resource_compute_global_address_time "time" - resource_compute_global_forwarding_rule_time "time" - resource_compute_global_network_endpoint_group_time "time" - resource_compute_global_network_endpoint_time "time" - resource_compute_ha_vpn_gateway_time "time" - resource_compute_health_check_time "time" - resource_compute_http_health_check_time "time" - resource_compute_https_health_check_time "time" - resource_compute_image_time "time" - resource_compute_instance_group_manager_time "time" - resource_compute_instance_group_named_port_time "time" - resource_compute_instance_group_time "time" - resource_compute_instance_template_time "time" - resource_compute_instance_time "time" - resource_compute_interconnect_attachment_time "time" - resource_compute_managed_ssl_certificate_time "time" - resource_compute_network_endpoint_group_time "time" - resource_compute_network_endpoint_time "time" - resource_compute_network_peering_routes_config_time "time" - resource_compute_network_peering_time "time" - resource_compute_network_time "time" - resource_compute_node_group_time "time" - resource_compute_node_template_time "time" - resource_compute_packet_mirroring_time "time" - resource_compute_per_instance_config_time "time" - resource_compute_project_default_network_tier_time "time" - resource_compute_project_metadata_item_time "time" - resource_compute_project_metadata_time "time" - resource_compute_region_autoscaler_time "time" - resource_compute_region_backend_service_time "time" - resource_compute_region_disk_resource_policy_attachment_time "time" - resource_compute_region_disk_time "time" - resource_compute_region_health_check_time "time" - resource_compute_region_instance_group_manager_time "time" - resource_compute_region_network_endpoint_group_time "time" - resource_compute_region_per_instance_config_time "time" - resource_compute_region_ssl_certificate_time "time" - 
resource_compute_region_target_http_proxy_time "time" - resource_compute_region_target_https_proxy_time "time" - resource_compute_region_url_map_time "time" - resource_compute_reservation_time "time" - resource_compute_resource_policy_time "time" - resource_compute_route_time "time" - resource_compute_router_interface_time "time" - resource_compute_router_nat_time "time" - resource_compute_router_peer_time "time" - resource_compute_router_time "time" - resource_compute_security_policy_time "time" - resource_compute_service_attachment_time "time" - resource_compute_shared_vpc_host_project_time "time" - resource_compute_shared_vpc_service_project_time "time" - resource_compute_snapshot_time "time" - resource_compute_ssl_certificate_time "time" - resource_compute_ssl_policy_time "time" - resource_compute_subnetwork_time "time" - resource_compute_target_grpc_proxy_time "time" - resource_compute_target_http_proxy_time "time" - resource_compute_target_https_proxy_time "time" - resource_compute_target_instance_time "time" - resource_compute_target_pool_time "time" - resource_compute_target_ssl_proxy_time "time" - resource_compute_target_tcp_proxy_time "time" - resource_compute_url_map_time "time" - resource_compute_vpn_gateway_time "time" - resource_compute_vpn_tunnel_time "time" - resource_container_analysis_note_time "time" - resource_container_analysis_occurrence_time "time" - resource_container_cluster_time "time" - resource_container_node_pool_time "time" - resource_data_catalog_entry_group_time "time" - resource_data_catalog_entry_time "time" - resource_data_catalog_tag_template_time "time" - resource_data_catalog_tag_time "time" - resource_data_loss_prevention_deidentify_template_time "time" - resource_data_loss_prevention_inspect_template_time "time" - resource_data_loss_prevention_job_trigger_time "time" - resource_data_loss_prevention_stored_info_type_time "time" - resource_dataflow_job_time "time" - resource_dataproc_autoscaling_policy_time "time" - 
resource_dataproc_cluster_time "time" - resource_dataproc_job_time "time" - resource_dataproc_workflow_template_time "time" - resource_datastore_index_time "time" - resource_deployment_manager_deployment_time "time" - resource_dialogflow_agent_time "time" - resource_dialogflow_cx_agent_time "time" - resource_dialogflow_cx_entity_type_time "time" - resource_dialogflow_cx_environment_time "time" - resource_dialogflow_cx_flow_time "time" - resource_dialogflow_cx_intent_time "time" - resource_dialogflow_cx_page_time "time" - resource_dialogflow_cx_version_time "time" - resource_dialogflow_entity_type_time "time" - resource_dialogflow_fulfillment_time "time" - resource_dialogflow_intent_time "time" - resource_dns_managed_zone_time "time" - resource_dns_policy_time "time" - resource_endpoints_service_time "time" - resource_essential_contacts_contact_time "time" - resource_eventarc_trigger_time "time" - resource_filestore_instance_time "time" - resource_firestore_document_time "time" - resource_firestore_index_time "time" - resource_folder_access_approval_settings_time "time" - resource_game_services_game_server_cluster_time "time" - resource_game_services_game_server_config_time "time" - resource_game_services_game_server_deployment_rollout_time "time" - resource_game_services_game_server_deployment_time "time" - resource_game_services_realm_time "time" - resource_gke_hub_membership_time "time" - resource_google_billing_subaccount_time "time" - resource_google_folder_organization_policy_time "time" - resource_google_folder_time "time" - resource_google_organization_policy_time "time" - resource_google_project_default_service_accounts_time "time" - resource_google_project_organization_policy_time "time" - resource_google_project_service_time "time" - resource_google_project_time "time" - resource_google_service_account_key_time "time" - resource_google_service_account_time "time" - resource_google_service_networking_peered_dns_domain_time "time" - 
resource_healthcare_consent_store_time "time" - resource_healthcare_dataset_time "time" - resource_healthcare_dicom_store_time "time" - resource_healthcare_fhir_store_time "time" - resource_healthcare_hl7_v2_store_time "time" - resource_iap_brand_time "time" - resource_iap_client_time "time" - resource_identity_platform_default_supported_idp_config_time "time" - resource_identity_platform_inbound_saml_config_time "time" - resource_identity_platform_oauth_idp_config_time "time" - resource_identity_platform_tenant_default_supported_idp_config_time "time" - resource_identity_platform_tenant_inbound_saml_config_time "time" - resource_identity_platform_tenant_oauth_idp_config_time "time" - resource_identity_platform_tenant_time "time" - resource_kms_crypto_key_time "time" - resource_kms_key_ring_import_job_time "time" - resource_kms_key_ring_time "time" - resource_kms_secret_ciphertext_time "time" - resource_logging_metric_time "time" - resource_manager_operation_time "time" - resource_memcache_instance_time "time" - resource_ml_engine_model_time "time" - resource_monitoring_alert_policy_time "time" - resource_monitoring_custom_service_time "time" - resource_monitoring_dashboard_time "time" - resource_monitoring_group_time "time" - resource_monitoring_metric_descriptor_time "time" - resource_monitoring_notification_channel_time "time" - resource_monitoring_slo_time "time" - resource_monitoring_uptime_check_config_time "time" - resource_network_management_connectivity_test_resource_time "time" - resource_network_services_edge_cache_keyset_time "time" - resource_network_services_edge_cache_origin_time "time" - resource_network_services_edge_cache_service_time "time" - resource_notebooks_environment_time "time" - resource_notebooks_instance_time "time" - resource_notebooks_location_time "time" - resource_org_policy_policy_time "time" - resource_organization_access_approval_settings_time "time" - resource_os_config_patch_deployment_time "time" - 
resource_os_login_ssh_public_key_time "time" - resource_privateca_ca_pool_time "time" - resource_privateca_certificate_authority_time "time" - resource_privateca_certificate_template_time "time" - resource_privateca_certificate_time "time" - resource_project_access_approval_settings_time "time" - resource_pubsub_lite_reservation_time "time" - resource_pubsub_lite_subscription_time "time" - resource_pubsub_lite_topic_time "time" - resource_pubsub_schema_time "time" - resource_pubsub_subscription_time "time" - resource_pubsub_topic_time "time" - resource_redis_instance_time "time" - resource_resource_manager_lien_time "time" - resource_scc_notification_config_time "time" - resource_scc_source_time "time" - resource_secret_manager_secret_time "time" - resource_secret_manager_secret_version_time "time" - resource_service_networking_connection_time "time" - resource_sourcerepo_repository_time "time" - resource_spanner_database_time "time" - resource_spanner_instance_time "time" - resource_sql_database_instance_time "time" - resource_sql_database_time "time" - resource_sql_source_representation_instance_time "time" - resource_sql_ssl_cert_time "time" - resource_sql_user_time "time" - resource_storage_bucket_access_control_time "time" - resource_storage_bucket_object_time "time" - resource_storage_bucket_time "time" - resource_storage_default_object_access_control_time "time" - resource_storage_hmac_key_time "time" - resource_storage_object_access_control_time "time" - resource_storage_transfer_job_time "time" - resource_tags_tag_binding_time "time" - resource_tags_tag_key_time "time" - resource_tags_tag_value_time "time" - resource_tpu_node_time "time" - resource_usage_export_bucket_time "time" - resource_vertex_ai_dataset_time "time" - resource_vpc_access_connector_time "time" - resource_workflows_workflow_time "time" - retry_transport_time "time" - retry_utils_time "time" - service_account_waiter_time "time" - service_networking_operation_time "time" - 
service_usage_operation_time "time" - serviceman_operation_time "time" - serviceusage_batching_time "time" - serviceusage_operation_time "time" - spanner_operation_time "time" - sqladmin_operation_time "time" - tags_operation_time "time" - test_utils_time "time" - tpu_operation_time "time" - transport_time "time" - utils_time "time" - validation_time "time" - vertex_ai_operation_time "time" - vpc_access_operation_time "time" - workflows_operation_time "time" - - bigtable_client_factory_bigtable "cloud.google.com/go/bigtable" - resource_bigtable_gc_policy_bigtable "cloud.google.com/go/bigtable" - resource_bigtable_instance_bigtable "cloud.google.com/go/bigtable" - dcl_dcldcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - provider_dcl_client_creation_dcldcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - resource_assured_workloads_workload_dcldcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - resource_compute_firewall_policy_association_dcldcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - resource_compute_firewall_policy_dcldcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - resource_compute_firewall_policy_rule_dcldcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - resource_compute_forwarding_rule_dcldcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - resource_compute_global_forwarding_rule_dcldcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - resource_dataproc_workflow_template_dcldcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - resource_eventarc_trigger_dcldcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - resource_org_policy_policy_dcldcl 
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - resource_privateca_certificate_template_dcldcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - tpgtools_utils_dcldcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" - provider_dcl_client_creation_assuredworkloadsassuredworkloads "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads" - resource_assured_workloads_workload_assuredworkloadsassuredworkloads "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/assuredworkloads" - provider_dcl_client_creation_cloudresourcemanagercloudresourcemanager "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudresourcemanager" - provider_dcl_client_creation_computecompute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" - resource_compute_firewall_policy_association_computecompute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" - resource_compute_firewall_policy_computecompute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" - resource_compute_firewall_policy_rule_computecompute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" - resource_compute_forwarding_rule_computecompute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" - resource_compute_global_forwarding_rule_computecompute "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute" - provider_dcl_client_creation_dataprocdataproc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc" - resource_dataproc_workflow_template_dataprocdataproc 
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dataproc" - provider_dcl_client_creation_eventarceventarc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc" - resource_eventarc_trigger_eventarceventarc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc" - provider_dcl_client_creation_orgpolicyorgpolicy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/orgpolicy" - resource_org_policy_policy_orgpolicyorgpolicy "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/orgpolicy" - provider_dcl_client_creation_privatecaprivateca "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca" - resource_privateca_certificate_template_privatecaprivateca "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/privateca" - resource_compute_subnetwork_cidr "github.com/apparentlymart/go-cidr/cidr" - iam_spew "github.com/davecgh/go-spew/spew" - resource_iam_binding_spew "github.com/davecgh/go-spew/spew" - resource_iam_member_spew "github.com/davecgh/go-spew/spew" - resource_storage_bucket_workerpool "github.com/gammazero/workerpool" - batcher_errwrap "github.com/hashicorp/errwrap" - cloudrun_polling_errwrap "github.com/hashicorp/errwrap" - compute_instance_network_interface_helpers_errwrap "github.com/hashicorp/errwrap" - data_source_storage_object_signed_url_errwrap "github.com/hashicorp/errwrap" - iam_bigquery_dataset_errwrap "github.com/hashicorp/errwrap" - iam_bigquery_table_errwrap "github.com/hashicorp/errwrap" - iam_bigtable_instance_errwrap "github.com/hashicorp/errwrap" - iam_bigtable_table_errwrap "github.com/hashicorp/errwrap" - iam_billing_account_errwrap 
"github.com/hashicorp/errwrap" - iam_binary_authorization_attestor_errwrap "github.com/hashicorp/errwrap" - iam_cloud_run_service_errwrap "github.com/hashicorp/errwrap" - iam_cloudfunctions_function_errwrap "github.com/hashicorp/errwrap" - iam_compute_disk_errwrap "github.com/hashicorp/errwrap" - iam_compute_image_errwrap "github.com/hashicorp/errwrap" - iam_compute_instance_errwrap "github.com/hashicorp/errwrap" - iam_compute_region_disk_errwrap "github.com/hashicorp/errwrap" - iam_compute_subnetwork_errwrap "github.com/hashicorp/errwrap" - iam_data_catalog_entry_group_errwrap "github.com/hashicorp/errwrap" - iam_data_catalog_tag_template_errwrap "github.com/hashicorp/errwrap" - iam_dataproc_cluster_errwrap "github.com/hashicorp/errwrap" - iam_dataproc_job_errwrap "github.com/hashicorp/errwrap" - iam_endpoints_service_errwrap "github.com/hashicorp/errwrap" - iam_errwrap "github.com/hashicorp/errwrap" - iam_folder_errwrap "github.com/hashicorp/errwrap" - iam_healthcare_consent_store_errwrap "github.com/hashicorp/errwrap" - iam_healthcare_dataset_errwrap "github.com/hashicorp/errwrap" - iam_healthcare_dicom_store_errwrap "github.com/hashicorp/errwrap" - iam_healthcare_fhir_store_errwrap "github.com/hashicorp/errwrap" - iam_healthcare_hl7_v2_store_errwrap "github.com/hashicorp/errwrap" - iam_iap_app_engine_service_errwrap "github.com/hashicorp/errwrap" - iam_iap_app_engine_version_errwrap "github.com/hashicorp/errwrap" - iam_iap_tunnel_errwrap "github.com/hashicorp/errwrap" - iam_iap_tunnel_instance_errwrap "github.com/hashicorp/errwrap" - iam_iap_web_backend_service_errwrap "github.com/hashicorp/errwrap" - iam_iap_web_errwrap "github.com/hashicorp/errwrap" - iam_iap_web_type_app_engine_errwrap "github.com/hashicorp/errwrap" - 
iam_iap_web_type_compute_errwrap "github.com/hashicorp/errwrap" - iam_kms_crypto_key_errwrap "github.com/hashicorp/errwrap" - iam_kms_key_ring_errwrap "github.com/hashicorp/errwrap" - iam_notebooks_instance_errwrap "github.com/hashicorp/errwrap" - iam_organization_errwrap "github.com/hashicorp/errwrap" - iam_privateca_ca_pool_errwrap "github.com/hashicorp/errwrap" - iam_project_errwrap "github.com/hashicorp/errwrap" - iam_pubsub_subscription_errwrap "github.com/hashicorp/errwrap" - iam_pubsub_topic_errwrap "github.com/hashicorp/errwrap" - iam_secret_manager_secret_errwrap "github.com/hashicorp/errwrap" - iam_service_account_errwrap "github.com/hashicorp/errwrap" - iam_sourcerepo_repository_errwrap "github.com/hashicorp/errwrap" - iam_spanner_database_errwrap "github.com/hashicorp/errwrap" - iam_spanner_instance_errwrap "github.com/hashicorp/errwrap" - iam_storage_bucket_errwrap "github.com/hashicorp/errwrap" - iam_tags_tag_key_errwrap "github.com/hashicorp/errwrap" - iam_tags_tag_value_errwrap "github.com/hashicorp/errwrap" - logging_exclusion_billing_account_errwrap "github.com/hashicorp/errwrap" - logging_exclusion_folder_errwrap "github.com/hashicorp/errwrap" - logging_exclusion_organization_errwrap "github.com/hashicorp/errwrap" - logging_exclusion_project_errwrap "github.com/hashicorp/errwrap" - resource_compute_backend_service_errwrap "github.com/hashicorp/errwrap" - resource_compute_instance_errwrap "github.com/hashicorp/errwrap" - resource_compute_instance_template_errwrap "github.com/hashicorp/errwrap" - resource_compute_security_policy_errwrap "github.com/hashicorp/errwrap" - resource_container_cluster_errwrap "github.com/hashicorp/errwrap" - resource_google_project_errwrap "github.com/hashicorp/errwrap" - 
resource_service_networking_connection_errwrap "github.com/hashicorp/errwrap" - retry_utils_errwrap "github.com/hashicorp/errwrap" - sql_utils_errwrap "github.com/hashicorp/errwrap" - tpgtools_utils_errwrap "github.com/hashicorp/errwrap" - utils_errwrap "github.com/hashicorp/errwrap" - config_cleanhttp "github.com/hashicorp/go-cleanhttp" - resource_composer_environment_version "github.com/hashicorp/go-version" - resource_container_cluster_version "github.com/hashicorp/go-version" - provider_diag "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - resource_app_engine_application_customdiff "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - resource_bigquery_table_customdiff "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - resource_bigtable_instance_customdiff "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - resource_compute_disk_customdiff "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - resource_compute_firewall_customdiff "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - resource_compute_instance_customdiff "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - resource_compute_instance_template_customdiff "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - resource_compute_region_disk_customdiff "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - resource_compute_subnetwork_customdiff "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - resource_container_cluster_customdiff "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - resource_container_node_pool_customdiff "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - resource_dataflow_job_customdiff "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - 
resource_redis_instance_customdiff "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - resource_sql_database_instance_customdiff "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - resource_storage_bucket_customdiff "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - config_logging "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" - common_operation_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - common_polling_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - dns_change_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - resource_compute_instance_group_manager_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - resource_compute_instance_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - resource_compute_instance_template_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - resource_compute_interconnect_attachment_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - resource_compute_region_instance_group_manager_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - resource_compute_region_ssl_certificate_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - resource_compute_ssl_certificate_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - resource_container_cluster_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - resource_container_node_pool_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - resource_dataflow_job_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - resource_spanner_instance_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - 
resource_sql_database_instance_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - resource_storage_bucket_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - resource_storage_transfer_job_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - resource_workflows_workflow_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - retry_transport_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - retry_utils_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - service_account_waiter_resource "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - common_diff_suppress_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - compute_instance_helpers_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - compute_instance_network_interface_helpers_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_google_game_services_game_server_deployment_rollout_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_cloud_identity_group_memberships_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_cloud_identity_groups_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_cloud_run_locations_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_cloud_run_service_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_compute_health_check_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_compute_lb_ip_ranges_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_compute_network_endpoint_group_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - 
data_source_container_registry_image_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_container_registry_repository_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_dns_keys_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_dns_managed_zone_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_active_folder_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_app_engine_default_service_account_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_bigquery_default_service_account_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_billing_account_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_client_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_client_openid_userinfo_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_cloudfunctions_function_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_composer_environment_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_composer_image_versions_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_address_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_backend_bucket_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_backend_service_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_default_service_account_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
- data_source_google_compute_forwarding_rule_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_global_address_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_ha_vpn_gateway_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_image_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_instance_group_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_instance_serial_port_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_instance_template_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_network_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_node_types_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_region_instance_group_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_region_ssl_certificate_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_regions_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_resource_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_router_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_router_status_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_ssl_certificate_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_ssl_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_subnetwork_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_vpn_gateway_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_compute_zones_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_container_cluster_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_container_engine_versions_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_folder_organization_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_folder_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_global_compute_forwarding_rule_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_iam_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_iam_role_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_iam_testable_permissions_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_kms_crypto_key_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_kms_crypto_key_version_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_kms_key_ring_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_kms_secret_ciphertext_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_kms_secret_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_monitoring_uptime_check_ips_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_netblock_ip_ranges_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_organization_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_project_organization_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_project_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_projects_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_service_account_access_token_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_service_account_id_token_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_service_account_key_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_service_account_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_service_networking_peered_dns_domain_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_sql_ca_certs_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_storage_bucket_object_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_storage_bucket_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_storage_project_service_account_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_google_storage_transfer_project_service_account_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - 
data_source_iap_client_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_monitoring_istio_canonical_service_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_monitoring_notification_channel_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_monitoring_service_app_engine_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_monitoring_service_cluster_istio_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_monitoring_service_mesh_istio_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_monitoring_service_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_pubsub_topic_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_redis_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_secret_manager_secret_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_secret_manager_secret_version_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_sourcerepo_repository_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_spanner_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_sql_backup_run_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_sql_database_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_storage_bucket_object_content_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_storage_object_signed_url_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - data_source_tpu_tensorflow_versions_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - datasource_helpers_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - expanders_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_bigquery_dataset_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_bigquery_table_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_bigtable_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_bigtable_table_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_billing_account_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_binary_authorization_attestor_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_cloud_run_service_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_cloudfunctions_function_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_compute_disk_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_compute_image_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_compute_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_compute_region_disk_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_compute_subnetwork_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_data_catalog_entry_group_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_data_catalog_tag_template_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_dataproc_cluster_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_dataproc_job_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_endpoints_service_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_folder_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_healthcare_consent_store_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_healthcare_dataset_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_healthcare_dicom_store_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_healthcare_fhir_store_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_healthcare_hl7_v2_store_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_iap_app_engine_service_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_iap_app_engine_version_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_iap_tunnel_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_iap_tunnel_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_iap_web_backend_service_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_iap_web_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_iap_web_type_app_engine_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_iap_web_type_compute_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_kms_crypto_key_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_kms_key_ring_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_notebooks_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_organization_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_privateca_ca_pool_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_project_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_pubsub_subscription_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_pubsub_topic_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_secret_manager_secret_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_service_account_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_sourcerepo_repository_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_spanner_database_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_spanner_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_storage_bucket_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_tags_tag_key_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - iam_tags_tag_value_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - kms_utils_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - logging_exclusion_billing_account_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - logging_exclusion_folder_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - logging_exclusion_organization_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - logging_exclusion_project_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - node_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - orgpolicy_utils_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - privateca_utils_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - provider_dcl_endpoints_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - provider_handwritten_endpoint_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - provider_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_access_context_manager_access_level_condition_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_access_context_manager_access_level_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_access_context_manager_access_levels_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_access_context_manager_access_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_access_context_manager_gcp_user_access_binding_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_access_context_manager_service_perimeter_resource_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_access_context_manager_service_perimeter_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_access_context_manager_service_perimeters_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_active_directory_domain_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_active_directory_domain_trust_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_apigee_envgroup_attachment_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_apigee_envgroup_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_apigee_environment_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_apigee_instance_attachment_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_apigee_instance_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_apigee_organization_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_app_engine_application_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_app_engine_application_url_dispatch_rules_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_app_engine_domain_mapping_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_app_engine_firewall_rule_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_app_engine_flexible_app_version_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_app_engine_service_network_settings_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_app_engine_service_split_traffic_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_app_engine_standard_app_version_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_assured_workloads_workload_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_bigquery_data_transfer_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_bigquery_dataset_access_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_bigquery_dataset_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_bigquery_job_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_bigquery_reservation_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_bigquery_routine_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_bigquery_table_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - 
resource_bigtable_app_profile_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_bigtable_gc_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_bigtable_instance_migrate_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_bigtable_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_bigtable_table_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_billing_budget_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_binary_authorization_attestor_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_binary_authorization_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_cloud_asset_folder_feed_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_cloud_asset_organization_feed_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_cloud_asset_project_feed_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_cloud_identity_group_membership_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_cloud_identity_group_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_cloud_run_domain_mapping_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_cloud_run_service_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_cloud_scheduler_job_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_cloud_tasks_queue_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_cloudbuild_trigger_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - 
resource_cloudfunctions_function_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_cloudiot_device_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_cloudiot_registry_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_composer_environment_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_address_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_attached_disk_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_autoscaler_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_backend_bucket_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_backend_bucket_signed_url_key_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_backend_service_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_backend_service_signed_url_key_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_disk_resource_policy_attachment_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_disk_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_external_vpn_gateway_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_firewall_policy_association_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_firewall_policy_rule_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_firewall_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_firewall_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_forwarding_rule_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_global_address_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_global_forwarding_rule_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_global_network_endpoint_group_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_global_network_endpoint_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_ha_vpn_gateway_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_health_check_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_http_health_check_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_https_health_check_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_image_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_instance_from_template_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_instance_group_manager_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_instance_group_migrate_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_instance_group_named_port_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_instance_group_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_instance_template_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
- resource_compute_interconnect_attachment_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_managed_ssl_certificate_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_network_endpoint_group_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_network_endpoint_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_network_peering_routes_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_network_peering_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_network_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_node_group_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_node_template_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_packet_mirroring_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_per_instance_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_project_default_network_tier_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_project_metadata_item_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_project_metadata_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_region_autoscaler_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_region_backend_service_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_region_disk_resource_policy_attachment_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - 
resource_compute_region_disk_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_region_health_check_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_region_instance_group_manager_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_region_network_endpoint_group_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_region_per_instance_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_region_ssl_certificate_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_region_target_http_proxy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_region_target_https_proxy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_region_url_map_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_reservation_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_resource_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_route_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_router_interface_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_router_nat_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_router_peer_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_router_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_security_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_service_attachment_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_shared_vpc_host_project_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_shared_vpc_service_project_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_snapshot_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_ssl_certificate_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_ssl_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_subnetwork_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_target_grpc_proxy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_target_http_proxy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_target_https_proxy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_target_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_target_pool_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_target_ssl_proxy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_target_tcp_proxy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_url_map_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_vpn_gateway_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_compute_vpn_tunnel_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_container_analysis_note_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_container_analysis_occurrence_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_container_cluster_migrate_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_container_cluster_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_container_node_pool_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_container_registry_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_data_catalog_entry_group_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_data_catalog_entry_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_data_catalog_tag_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_data_catalog_tag_template_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_data_loss_prevention_deidentify_template_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_data_loss_prevention_inspect_template_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_data_loss_prevention_job_trigger_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_data_loss_prevention_stored_info_type_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dataflow_job_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dataproc_autoscaling_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dataproc_cluster_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dataproc_job_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dataproc_workflow_template_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_datastore_index_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_deployment_manager_deployment_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dialogflow_agent_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dialogflow_cx_agent_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dialogflow_cx_entity_type_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dialogflow_cx_environment_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dialogflow_cx_flow_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dialogflow_cx_intent_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dialogflow_cx_page_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dialogflow_cx_version_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dialogflow_entity_type_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dialogflow_fulfillment_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dialogflow_intent_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dns_managed_zone_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dns_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_dns_record_set_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_endpoints_service_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_essential_contacts_contact_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_eventarc_trigger_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_filestore_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_firestore_document_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_firestore_index_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_folder_access_approval_settings_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_game_services_game_server_cluster_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_game_services_game_server_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_game_services_game_server_deployment_rollout_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_game_services_game_server_deployment_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_game_services_realm_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_gke_hub_membership_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_google_billing_subaccount_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_google_folder_organization_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_google_folder_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_google_organization_iam_custom_role_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_google_organization_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_google_project_default_service_accounts_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_google_project_iam_custom_role_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_google_project_organization_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_google_project_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_google_project_service_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_google_service_account_key_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_google_service_account_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_google_service_networking_peered_dns_domain_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_healthcare_consent_store_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_healthcare_dataset_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_healthcare_dicom_store_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_healthcare_fhir_store_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_healthcare_hl7_v2_store_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_iam_audit_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_iam_binding_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_iam_member_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_iam_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_iap_brand_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_iap_client_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_identity_platform_default_supported_idp_config_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_identity_platform_inbound_saml_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_identity_platform_oauth_idp_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_identity_platform_tenant_default_supported_idp_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_identity_platform_tenant_inbound_saml_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_identity_platform_tenant_oauth_idp_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_identity_platform_tenant_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_kms_crypto_key_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_kms_key_ring_import_job_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_kms_key_ring_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_kms_secret_ciphertext_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_logging_billing_account_bucket_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_logging_billing_account_sink_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_logging_bucket_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_logging_exclusion_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_logging_folder_bucket_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_logging_folder_sink_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_logging_metric_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_logging_organization_bucket_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_logging_organization_sink_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_logging_project_bucket_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_logging_project_sink_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_logging_sink_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_memcache_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_ml_engine_model_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_monitoring_alert_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_monitoring_custom_service_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_monitoring_dashboard_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_monitoring_group_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_monitoring_metric_descriptor_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_monitoring_notification_channel_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_monitoring_slo_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_monitoring_uptime_check_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_network_management_connectivity_test_resource_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_network_services_edge_cache_keyset_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - 
resource_network_services_edge_cache_origin_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_network_services_edge_cache_service_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_notebooks_environment_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_notebooks_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_notebooks_location_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_org_policy_policy_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_organization_access_approval_settings_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_os_config_patch_deployment_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_os_login_ssh_public_key_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_privateca_ca_pool_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_privateca_certificate_authority_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_privateca_certificate_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_privateca_certificate_template_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_project_access_approval_settings_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_pubsub_lite_reservation_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_pubsub_lite_subscription_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_pubsub_lite_topic_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_pubsub_schema_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_pubsub_subscription_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_pubsub_topic_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_redis_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_resource_manager_lien_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_scc_notification_config_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_scc_source_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_secret_manager_secret_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_secret_manager_secret_version_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_service_networking_connection_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_sourcerepo_repository_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_spanner_database_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_spanner_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_sql_database_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_sql_database_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_sql_source_representation_instance_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_sql_ssl_cert_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_sql_user_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_storage_bucket_access_control_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
- resource_storage_bucket_acl_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_storage_bucket_object_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_storage_bucket_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_storage_default_object_access_control_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_storage_default_object_acl_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_storage_hmac_key_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_storage_notification_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_storage_object_access_control_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_storage_object_acl_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_storage_transfer_job_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_tags_tag_binding_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_tags_tag_key_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_tags_tag_value_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_tpu_node_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_usage_export_bucket_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_vertex_ai_dataset_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_vpc_access_connector_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_workflows_workflow_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - self_link_helpers_schema 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - service_scope_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - serviceusage_batching_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - stateful_mig_polling_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - tpgtools_utils_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - utils_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - validation_schema "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - resource_bigquery_routine_structure "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" - resource_bigquery_table_structure "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" - resource_data_catalog_entry_structure "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" - resource_firestore_document_structure "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" - resource_healthcare_hl7_v2_store_structure "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" - resource_monitoring_dashboard_structure "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" - compute_instance_helpers_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - data_source_google_compute_regions_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - data_source_google_compute_zones_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - data_source_google_iam_policy_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - data_source_google_iam_testable_permissions_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - data_source_google_service_account_key_validation 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - data_source_storage_object_signed_url_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - node_config_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_access_context_manager_access_level_condition_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_access_context_manager_access_level_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_access_context_manager_access_levels_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_access_context_manager_service_perimeter_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_access_context_manager_service_perimeters_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_active_directory_domain_trust_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_apigee_instance_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_apigee_organization_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_app_engine_application_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_app_engine_domain_mapping_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_app_engine_firewall_rule_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_app_engine_flexible_app_version_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_app_engine_service_network_settings_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_app_engine_service_split_traffic_validation 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_app_engine_standard_app_version_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_bigquery_job_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_bigquery_routine_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_bigquery_table_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_bigtable_gc_policy_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_bigtable_instance_migrate_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_bigtable_instance_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_billing_budget_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_binary_authorization_policy_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_cloud_asset_folder_feed_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_cloud_asset_organization_feed_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_cloud_asset_project_feed_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_cloud_identity_group_membership_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_cloud_identity_group_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_cloud_run_domain_mapping_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_cloudbuild_trigger_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_cloudfunctions_function_validation 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_cloudiot_device_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_cloudiot_registry_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_composer_environment_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_address_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_attached_disk_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_autoscaler_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_backend_bucket_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_backend_service_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_external_vpn_gateway_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_firewall_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_global_address_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_global_network_endpoint_group_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_health_check_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_image_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_instance_group_manager_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_instance_template_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_instance_validation 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_interconnect_attachment_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_managed_ssl_certificate_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_network_endpoint_group_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_network_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_node_group_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_node_template_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_packet_mirroring_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_per_instance_config_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_project_default_network_tier_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_region_autoscaler_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_region_backend_service_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_region_health_check_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_region_instance_group_manager_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_region_network_endpoint_group_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_region_per_instance_config_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_region_url_map_validation 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_reservation_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_resource_policy_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_router_nat_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_router_peer_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_router_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_security_policy_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_ssl_policy_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_subnetwork_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_target_https_proxy_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_target_instance_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_target_ssl_proxy_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_target_tcp_proxy_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_url_map_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_container_cluster_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_container_node_pool_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_data_catalog_entry_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_data_catalog_tag_template_validation 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_data_loss_prevention_deidentify_template_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_data_loss_prevention_inspect_template_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_data_loss_prevention_job_trigger_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_dataflow_job_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_dataproc_cluster_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_dataproc_job_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_datastore_index_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_deployment_manager_deployment_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_dialogflow_agent_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_dialogflow_cx_agent_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_dialogflow_cx_entity_type_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_dialogflow_cx_environment_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_dialogflow_cx_flow_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_dialogflow_cx_intent_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_dialogflow_cx_page_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_dialogflow_cx_version_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - 
resource_dialogflow_entity_type_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_dialogflow_fulfillment_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_dialogflow_intent_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_dns_managed_zone_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_dns_policy_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_filestore_instance_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_firestore_document_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_firestore_index_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_folder_access_approval_settings_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_google_billing_subaccount_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_google_organization_iam_custom_role_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_google_project_default_service_accounts_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_google_project_iam_custom_role_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_google_service_account_key_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_google_service_account_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_healthcare_fhir_store_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_healthcare_hl7_v2_store_validation 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_iam_binding_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_iam_member_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_kms_crypto_key_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_kms_key_ring_import_job_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_logging_metric_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_memcache_instance_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_monitoring_alert_policy_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_monitoring_dashboard_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_monitoring_metric_descriptor_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_monitoring_slo_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_monitoring_uptime_check_config_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_network_management_connectivity_test_resource_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_network_services_edge_cache_origin_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_network_services_edge_cache_service_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_notebooks_instance_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_organization_access_approval_settings_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - 
resource_os_config_patch_deployment_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_privateca_ca_pool_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_privateca_certificate_authority_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_privateca_certificate_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_project_access_approval_settings_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_pubsub_lite_subscription_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_pubsub_schema_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_pubsub_topic_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_redis_instance_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_scc_notification_config_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_scc_source_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_sourcerepo_repository_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_sql_database_instance_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_sql_source_representation_instance_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_sql_user_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_storage_bucket_access_control_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_storage_bucket_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - 
resource_storage_default_object_access_control_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_storage_hmac_key_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_storage_notification_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_storage_object_access_control_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_storage_object_acl_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_storage_transfer_job_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_tags_tag_key_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_tags_tag_value_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_vpc_access_connector_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - validation_validation "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - resource_compute_firewall_migrate_terraform "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - resource_compute_instance_group_migrate_terraform "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - resource_compute_instance_migrate_terraform "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - resource_compute_instance_template_migrate_terraform "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - resource_container_cluster_migrate_terraform "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - resource_container_node_pool_migrate_terraform "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - resource_endpoints_service_migration_terraform "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - resource_google_project_migrate_terraform 
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - resource_sql_user_migrate_terraform "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - test_utils_terraform "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - utils_terraform "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - provider_version "github.com/hashicorp/terraform-provider-google/v4/version" - path_or_contents_homedir "github.com/mitchellh/go-homedir" - resource_compute_instance_hashstructure "github.com/mitchellh/hashstructure" - data_source_google_service_account_id_token_context "golang.org/x/net/context" - bigtable_client_factory_oauth2 "golang.org/x/oauth2" - config_oauth2 "golang.org/x/oauth2" - config_googlegoogleoauth "golang.org/x/oauth2/google" - data_source_storage_object_signed_url_google "golang.org/x/oauth2/google" - provider_googlegoogleoauth "golang.org/x/oauth2/google" - data_source_storage_object_signed_url_jwt "golang.org/x/oauth2/jwt" - appengine_operation_appengine "google.golang.org/api/appengine/v1" - config_appengineappengine "google.golang.org/api/appengine/v1" - resource_app_engine_application_appengineappengine "google.golang.org/api/appengine/v1" - config_bigquery "google.golang.org/api/bigquery/v2" - resource_bigquery_table_bigquery "google.golang.org/api/bigquery/v2" - config_bigtableadmin "google.golang.org/api/bigtableadmin/v2" - iam_bigtable_instance_bigtableadmin "google.golang.org/api/bigtableadmin/v2" - iam_bigtable_table_bigtableadmin "google.golang.org/api/bigtableadmin/v2" - resource_bigtable_app_profile_bigtableadmin "google.golang.org/api/bigtableadmin/v2" - config_cloudbilling "google.golang.org/api/cloudbilling/v1" - data_source_google_billing_account_cloudbilling "google.golang.org/api/cloudbilling/v1" - iam_billing_account_cloudbilling "google.golang.org/api/cloudbilling/v1" - resource_google_billing_subaccount_cloudbilling "google.golang.org/api/cloudbilling/v1" - 
resource_google_project_cloudbilling "google.golang.org/api/cloudbilling/v1" - config_cloudbuild "google.golang.org/api/cloudbuild/v1" - cloudfunctions_operation_cloudfunctions "google.golang.org/api/cloudfunctions/v1" - config_cloudfunctions "google.golang.org/api/cloudfunctions/v1" - resource_cloudfunctions_function_cloudfunctions "google.golang.org/api/cloudfunctions/v1" - config_cloudidentity "google.golang.org/api/cloudidentity/v1" - data_source_cloud_identity_group_memberships_cloudidentity "google.golang.org/api/cloudidentity/v1" - data_source_cloud_identity_groups_cloudidentity "google.golang.org/api/cloudidentity/v1" - config_cloudiot "google.golang.org/api/cloudiot/v1" - config_cloudkms "google.golang.org/api/cloudkms/v1" - data_source_google_kms_secret_ciphertext_cloudkms "google.golang.org/api/cloudkms/v1" - data_source_google_kms_secret_cloudkms "google.golang.org/api/cloudkms/v1" - iam_kms_crypto_key_cloudkms "google.golang.org/api/cloudkms/v1" - iam_kms_key_ring_cloudkms "google.golang.org/api/cloudkms/v1" - kms_utils_cloudkms "google.golang.org/api/cloudkms/v1" - common_operation_cloudresourcemanagercloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - config_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - data_source_google_iam_policy_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - data_source_google_organization_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_batching_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_bigquery_dataset_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_bigquery_table_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_bigtable_instance_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_bigtable_table_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_billing_account_cloudresourcemanager 
"google.golang.org/api/cloudresourcemanager/v1" - iam_binary_authorization_attestor_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_cloud_run_service_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_cloudfunctions_function_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_compute_disk_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_compute_image_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_compute_instance_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_compute_region_disk_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_compute_subnetwork_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_data_catalog_entry_group_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_data_catalog_tag_template_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_dataproc_cluster_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_dataproc_job_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_endpoints_service_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_folder_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_healthcare_consent_store_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_healthcare_dataset_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_healthcare_dicom_store_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_healthcare_fhir_store_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_healthcare_hl7_v2_store_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_iap_app_engine_service_cloudresourcemanager 
"google.golang.org/api/cloudresourcemanager/v1" - iam_iap_app_engine_version_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_iap_tunnel_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_iap_tunnel_instance_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_iap_web_backend_service_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_iap_web_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_iap_web_type_app_engine_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_iap_web_type_compute_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_kms_crypto_key_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_kms_key_ring_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_notebooks_instance_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_organization_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_privateca_ca_pool_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_project_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_pubsub_subscription_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_pubsub_topic_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_secret_manager_secret_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_service_account_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_sourcerepo_repository_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_spanner_database_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_spanner_instance_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_storage_bucket_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - 
iam_tags_tag_key_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - iam_tags_tag_value_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - resource_google_folder_organization_policy_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - resource_google_organization_policy_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - resource_google_project_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - resource_google_project_default_service_accounts_cloudresourcemanagercloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - resource_google_project_migrate_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - resource_google_project_organization_policy_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - resource_iam_audit_config_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - resource_iam_binding_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - resource_iam_member_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - resource_iam_policy_cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" - config_cloudresourcemanagerresourceManagerV2 "google.golang.org/api/cloudresourcemanager/v2" - data_source_google_active_folder_cloudresourcemanagerresourceManagerV2 "google.golang.org/api/cloudresourcemanager/v2" - iam_folder_cloudresourcemanagerresourceManagerV2 "google.golang.org/api/cloudresourcemanager/v2" - resource_google_folder_cloudresourcemanagerresourceManagerV2 "google.golang.org/api/cloudresourcemanager/v2" - composer_operation_composer "google.golang.org/api/composer/v1" - config_composer "google.golang.org/api/composer/v1" - resource_composer_environment_composer "google.golang.org/api/composer/v1" - compute_backend_service_helpers_compute "google.golang.org/api/compute/v1" - compute_instance_helpers_compute "google.golang.org/api/compute/v1" - 
compute_instance_network_interface_helpers_compute "google.golang.org/api/compute/v1" - compute_operation_compute "google.golang.org/api/compute/v1" - config_compute "google.golang.org/api/compute/v1" - data_source_google_compute_image_compute "google.golang.org/api/compute/v1" - data_source_google_compute_instance_template_compute "google.golang.org/api/compute/v1" - data_source_google_compute_node_types_compute "google.golang.org/api/compute/v1" - data_source_google_compute_region_instance_group_compute "google.golang.org/api/compute/v1" - data_source_google_compute_regions_compute "google.golang.org/api/compute/v1" - data_source_google_compute_router_status_compute "google.golang.org/api/compute/v1" - data_source_google_compute_subnetwork_compute "google.golang.org/api/compute/v1" - data_source_google_compute_vpn_gateway_compute "google.golang.org/api/compute/v1" - data_source_google_compute_zones_compute "google.golang.org/api/compute/v1" - deployment_manager_operation_compute "google.golang.org/api/compute/v1" - metadata_compute "google.golang.org/api/compute/v1" - resource_compute_attached_disk_compute "google.golang.org/api/compute/v1" - resource_compute_instance_compute "google.golang.org/api/compute/v1" - resource_compute_instance_from_template_compute "google.golang.org/api/compute/v1" - resource_compute_instance_group_compute "google.golang.org/api/compute/v1" - resource_compute_instance_group_manager_compute "google.golang.org/api/compute/v1" - resource_compute_instance_migrate_compute "google.golang.org/api/compute/v1" - resource_compute_instance_template_compute "google.golang.org/api/compute/v1" - resource_compute_network_peering_compute "google.golang.org/api/compute/v1" - resource_compute_project_default_network_tier_compute "google.golang.org/api/compute/v1" - resource_compute_project_metadata_compute "google.golang.org/api/compute/v1" - resource_compute_project_metadata_item_compute "google.golang.org/api/compute/v1" - 
resource_compute_region_instance_group_manager_compute "google.golang.org/api/compute/v1" - resource_compute_router_interface_compute "google.golang.org/api/compute/v1" - resource_compute_security_policy_compute "google.golang.org/api/compute/v1" - resource_compute_shared_vpc_service_project_compute "google.golang.org/api/compute/v1" - resource_compute_target_pool_compute "google.golang.org/api/compute/v1" - resource_service_networking_connection_compute "google.golang.org/api/compute/v1" - resource_usage_export_bucket_compute "google.golang.org/api/compute/v1" - config_container "google.golang.org/api/container/v1" - container_operation_container "google.golang.org/api/container/v1" - node_config_container "google.golang.org/api/container/v1" - resource_container_cluster_container "google.golang.org/api/container/v1" - resource_container_node_pool_container "google.golang.org/api/container/v1" - config_dataflowdataflow "google.golang.org/api/dataflow/v1b3" - resource_dataflow_job_dataflowdataflow "google.golang.org/api/dataflow/v1b3" - config_dataproc "google.golang.org/api/dataproc/v1" - dataproc_cluster_operation_dataproc "google.golang.org/api/dataproc/v1" - dataproc_job_operation_dataproc "google.golang.org/api/dataproc/v1" - iam_dataproc_cluster_dataproc "google.golang.org/api/dataproc/v1" - iam_dataproc_job_dataproc "google.golang.org/api/dataproc/v1" - resource_dataproc_cluster_dataproc "google.golang.org/api/dataproc/v1" - resource_dataproc_job_dataproc "google.golang.org/api/dataproc/v1" - config_dns "google.golang.org/api/dns/v1" - data_source_dns_keys_dns "google.golang.org/api/dns/v1" - dns_change_dns "google.golang.org/api/dns/v1" - resource_dns_managed_zone_dns "google.golang.org/api/dns/v1" - resource_dns_record_set_dns "google.golang.org/api/dns/v1" - compute_instance_helpers_googleapi "google.golang.org/api/googleapi" - data_source_google_compute_region_instance_group_googleapi "google.golang.org/api/googleapi" - error_retry_predicates_googleapi 
"google.golang.org/api/googleapi" - image_googleapi "google.golang.org/api/googleapi" - resource_access_context_manager_access_level_condition_googleapi "google.golang.org/api/googleapi" - resource_access_context_manager_service_perimeter_resource_googleapi "google.golang.org/api/googleapi" - resource_bigquery_dataset_access_googleapi "google.golang.org/api/googleapi" - resource_bigquery_dataset_googleapi "google.golang.org/api/googleapi" - resource_bigquery_job_googleapi "google.golang.org/api/googleapi" - resource_bigquery_routine_googleapi "google.golang.org/api/googleapi" - resource_cloud_run_domain_mapping_googleapi "google.golang.org/api/googleapi" - resource_cloud_run_service_googleapi "google.golang.org/api/googleapi" - resource_compute_backend_service_googleapi "google.golang.org/api/googleapi" - resource_compute_disk_googleapi "google.golang.org/api/googleapi" - resource_compute_instance_group_googleapi "google.golang.org/api/googleapi" - resource_compute_instance_group_named_port_googleapi "google.golang.org/api/googleapi" - resource_compute_network_googleapi "google.golang.org/api/googleapi" - resource_compute_network_peering_googleapi "google.golang.org/api/googleapi" - resource_compute_region_backend_service_googleapi "google.golang.org/api/googleapi" - resource_compute_region_disk_googleapi "google.golang.org/api/googleapi" - resource_compute_router_interface_googleapi "google.golang.org/api/googleapi" - resource_compute_router_nat_googleapi "google.golang.org/api/googleapi" - resource_compute_router_peer_googleapi "google.golang.org/api/googleapi" - resource_compute_shared_vpc_service_project_googleapi "google.golang.org/api/googleapi" - resource_compute_target_pool_googleapi "google.golang.org/api/googleapi" - resource_data_loss_prevention_stored_info_type_googleapi "google.golang.org/api/googleapi" - resource_dataflow_job_googleapi "google.golang.org/api/googleapi" - resource_google_project_googleapi "google.golang.org/api/googleapi" - 
resource_google_project_service_googleapi "google.golang.org/api/googleapi" - resource_monitoring_slo_googleapi "google.golang.org/api/googleapi" - resource_secret_manager_secret_version_googleapi "google.golang.org/api/googleapi" - resource_sql_database_instance_googleapi "google.golang.org/api/googleapi" - resource_sql_source_representation_instance_googleapi "google.golang.org/api/googleapi" - resource_storage_bucket_googleapi "google.golang.org/api/googleapi" - resource_storage_bucket_object_googleapi "google.golang.org/api/googleapi" - resource_storage_hmac_key_googleapi "google.golang.org/api/googleapi" - retry_transport_googleapi "google.golang.org/api/googleapi" - service_account_waiter_googleapi "google.golang.org/api/googleapi" - service_usage_operation_googleapi "google.golang.org/api/googleapi" - serviceman_operation_googleapi "google.golang.org/api/googleapi" - serviceusage_operation_googleapi "google.golang.org/api/googleapi" - sql_utils_googleapi "google.golang.org/api/googleapi" - transport_googleapi "google.golang.org/api/googleapi" - utils_googleapi "google.golang.org/api/googleapi" - config_healthcarehealthcare "google.golang.org/api/healthcare/v1" - iam_healthcare_dataset_healthcarehealthcare "google.golang.org/api/healthcare/v1" - iam_healthcare_dicom_store_healthcarehealthcare "google.golang.org/api/healthcare/v1" - iam_healthcare_fhir_store_healthcarehealthcare "google.golang.org/api/healthcare/v1" - iam_healthcare_hl7_v2_store_healthcarehealthcare "google.golang.org/api/healthcare/v1" - config_iam "google.golang.org/api/iam/v1" - iam_service_account_iam "google.golang.org/api/iam/v1" - resource_google_organization_iam_custom_role_iam "google.golang.org/api/iam/v1" - resource_google_project_default_service_accounts_iam "google.golang.org/api/iam/v1" - resource_google_project_iam_custom_role_iam "google.golang.org/api/iam/v1" - resource_google_service_account_iam "google.golang.org/api/iam/v1" - resource_google_service_account_key_iam 
"google.golang.org/api/iam/v1" - service_account_waiter_iam "google.golang.org/api/iam/v1" - config_iamcredentialsiamcredentials "google.golang.org/api/iamcredentials/v1" - data_source_google_service_account_access_token_iamcredentialsiamcredentials "google.golang.org/api/iamcredentials/v1" - data_source_google_service_account_id_token_iamcredentialsiamcredentials "google.golang.org/api/iamcredentials/v1" - data_source_google_service_account_id_token_idtoken "google.golang.org/api/idtoken" - config_loggingcloudlogging "google.golang.org/api/logging/v2" - logging_exclusion_billing_account_logging "google.golang.org/api/logging/v2" - logging_exclusion_folder_logging "google.golang.org/api/logging/v2" - logging_exclusion_organization_logging "google.golang.org/api/logging/v2" - logging_exclusion_project_logging "google.golang.org/api/logging/v2" - resource_logging_exclusion_logging "google.golang.org/api/logging/v2" - resource_logging_sink_logging "google.golang.org/api/logging/v2" - bigtable_client_factory_option "google.golang.org/api/option" - config_option "google.golang.org/api/option" - data_source_google_service_account_id_token_option "google.golang.org/api/option" - mtls_util_internaloption "google.golang.org/api/option/internaloption" - config_pubsub "google.golang.org/api/pubsub/v1" - iam_pubsub_subscription_pubsub "google.golang.org/api/pubsub/v1" - config_servicemanagement "google.golang.org/api/servicemanagement/v1" - resource_endpoints_service_servicemanagement "google.golang.org/api/servicemanagement/v1" - serviceman_operation_servicemanagement "google.golang.org/api/servicemanagement/v1" - config_servicenetworking "google.golang.org/api/servicenetworking/v1" - resource_google_service_networking_peered_dns_domain_servicenetworking "google.golang.org/api/servicenetworking/v1" - resource_service_networking_connection_servicenetworking "google.golang.org/api/servicenetworking/v1" - service_networking_operation_servicenetworking 
"google.golang.org/api/servicenetworking/v1" - config_serviceusage "google.golang.org/api/serviceusage/v1" - resource_google_project_service_serviceusage "google.golang.org/api/serviceusage/v1" - resource_google_project_serviceusage "google.golang.org/api/serviceusage/v1" - serviceusage_operation_serviceusage "google.golang.org/api/serviceusage/v1" - config_sourcerepo "google.golang.org/api/sourcerepo/v1" - config_spanner "google.golang.org/api/spanner/v1" - iam_spanner_database_spanner "google.golang.org/api/spanner/v1" - iam_spanner_instance_spannerspanner "google.golang.org/api/spanner/v1" - config_sqladminsqladmin "google.golang.org/api/sqladmin/v1beta4" - data_source_sql_backup_run_sqladminsqladmin "google.golang.org/api/sqladmin/v1beta4" - error_retry_predicates_sqladminsqladmin "google.golang.org/api/sqladmin/v1beta4" - resource_sql_database_instance_sqladminsqladmin "google.golang.org/api/sqladmin/v1beta4" - resource_sql_ssl_cert_sqladminsqladmin "google.golang.org/api/sqladmin/v1beta4" - resource_sql_user_sqladminsqladmin "google.golang.org/api/sqladmin/v1beta4" - sqladmin_operation_sqladminsqladmin "google.golang.org/api/sqladmin/v1beta4" - config_storage "google.golang.org/api/storage/v1" - data_source_storage_bucket_object_content_storage "google.golang.org/api/storage/v1" - resource_storage_bucket_acl_storage "google.golang.org/api/storage/v1" - resource_storage_bucket_object_storage "google.golang.org/api/storage/v1" - resource_storage_bucket_storage "google.golang.org/api/storage/v1" - resource_storage_default_object_acl_storage "google.golang.org/api/storage/v1" - resource_storage_notification_storage "google.golang.org/api/storage/v1" - resource_storage_object_acl_storage "google.golang.org/api/storage/v1" - config_storagetransfer "google.golang.org/api/storagetransfer/v1" - resource_storage_transfer_job_storagetransfer "google.golang.org/api/storagetransfer/v1" - config_transport "google.golang.org/api/transport" - mtls_util_transport 
"google.golang.org/api/transport" - error_retry_predicates_status "google.golang.org/grpc/status" -) - -type AccessContextManagerOperationWaiter struct { - Config *Config - UserAgent string - CommonOperationWaiter -} - -func (w *AccessContextManagerOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, access_context_manager_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := access_context_manager_operation_fmt.Sprintf("https://accesscontextmanager.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", "", url, w.UserAgent, nil) -} - -func createAccessContextManagerWaiter(config *Config, op map[string]interface{}, activity, userAgent string) (*AccessContextManagerOperationWaiter, error) { - w := &AccessContextManagerOperationWaiter{ - Config: config, - UserAgent: userAgent, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func accessContextManagerOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, activity, userAgent string, timeout access_context_manager_operation_time.Duration) error { - w, err := createAccessContextManagerWaiter(config, op, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return access_context_manager_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func accessContextManagerOperationWaitTime(config *Config, op map[string]interface{}, activity, userAgent string, timeout access_context_manager_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createAccessContextManagerWaiter(config, op, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -type 
ActiveDirectoryOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *ActiveDirectoryOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, active_directory_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := active_directory_operation_fmt.Sprintf("https://managedidentities.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createActiveDirectoryWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*ActiveDirectoryOperationWaiter, error) { - w := &ActiveDirectoryOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func activeDirectoryOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout active_directory_operation_time.Duration) error { - w, err := createActiveDirectoryWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return active_directory_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func activeDirectoryOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout active_directory_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createActiveDirectoryWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -type ApigeeOperationWaiter struct { - Config *Config - UserAgent string - CommonOperationWaiter -} - -func (w 
*ApigeeOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, apigee_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := apigee_operation_fmt.Sprintf("https://apigee.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", "", url, w.UserAgent, nil) -} - -func createApigeeWaiter(config *Config, op map[string]interface{}, activity, userAgent string) (*ApigeeOperationWaiter, error) { - w := &ApigeeOperationWaiter{ - Config: config, - UserAgent: userAgent, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func apigeeOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, activity, userAgent string, timeout apigee_operation_time.Duration) error { - w, err := createApigeeWaiter(config, op, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return apigee_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func apigeeOperationWaitTime(config *Config, op map[string]interface{}, activity, userAgent string, timeout apigee_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createApigeeWaiter(config, op, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -var ( - appEngineOperationIdRegexp = appengine_operation_regexp.MustCompile(appengine_operation_fmt.Sprintf("apps/%s/operations/(.*)", ProjectRegex)) -) - -type AppEngineOperationWaiter struct { - Service *appengine_operation_appengine.APIService - AppId string - CommonOperationWaiter -} - -func (w *AppEngineOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, appengine_operation_fmt.Errorf("Cannot query operation, it's unset 
or nil.") - } - matches := appEngineOperationIdRegexp.FindStringSubmatch(w.Op.Name) - if len(matches) != 2 { - return nil, appengine_operation_fmt.Errorf("Expected %d results of parsing operation name, got %d from %s", 2, len(matches), w.Op.Name) - } - return w.Service.Apps.Operations.Get(w.AppId, matches[1]).Do() -} - -func appEngineOperationWaitTimeWithResponse(config *Config, res interface{}, response *map[string]interface{}, appId, activity, userAgent string, timeout appengine_operation_time.Duration) error { - op := &appengine_operation_appengine.Operation{} - err := Convert(res, op) - if err != nil { - return err - } - - w := &AppEngineOperationWaiter{ - Service: config.NewAppEngineClient(userAgent), - AppId: appId, - } - - if err := w.SetOp(op); err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return appengine_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func appEngineOperationWaitTime(config *Config, res interface{}, appId, activity, userAgent string, timeout appengine_operation_time.Duration) error { - op := &appengine_operation_appengine.Operation{} - err := Convert(res, op) - if err != nil { - return err - } - - w := &AppEngineOperationWaiter{ - Service: config.NewAppEngineClient(userAgent), - AppId: appId, - } - - if err := w.SetOp(op); err != nil { - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -const defaultBatchSendIntervalSec = 3 - -type RequestBatcher struct { - batcher_sync.Mutex - - *batchingConfig - parentCtx batcher_context.Context - batches map[string]*startedBatch - debugId string -} - -type ( - BatchRequest struct { - ResourceName string - - Body interface{} - - CombineF BatcherCombineFunc - - SendF BatcherSendFunc - - DebugId string - } - - BatcherCombineFunc func(body interface{}, toAdd interface{}) (interface{}, error) - - BatcherSendFunc func(resourceName string, body 
interface{}) (interface{}, error) -) - -type batchResponse struct { - body interface{} - err error -} - -func (br *batchResponse) IsError() bool { - return br.err != nil -} - -type startedBatch struct { - batchKey string - - *BatchRequest - - subscribers []batchSubscriber - - timer *batcher_time.Timer -} - -type batchSubscriber struct { - singleRequest *BatchRequest - - respCh chan batchResponse -} - -type batchingConfig struct { - sendAfter batcher_time.Duration - enableBatching bool -} - -func NewRequestBatcher(debugId string, ctx batcher_context.Context, config *batchingConfig) *RequestBatcher { - batcher := &RequestBatcher{ - debugId: debugId, - parentCtx: ctx, - batchingConfig: config, - batches: make(map[string]*startedBatch), - } - - go func(b *RequestBatcher) { - - <-b.parentCtx.Done() - - batcher_log.Printf("[DEBUG] parent context canceled, cleaning up batcher batches") - b.stop() - }(batcher) - - return batcher -} - -func (b *RequestBatcher) stop() { - b.Lock() - defer b.Unlock() - - batcher_log.Printf("[DEBUG] Stopping batcher %q", b.debugId) - for batchKey, batch := range b.batches { - batcher_log.Printf("[DEBUG] Cancelling started batch for batchKey %q", batchKey) - batch.timer.Stop() - for _, l := range batch.subscribers { - close(l.respCh) - } - } -} - -func (b *RequestBatcher) SendRequestWithTimeout(batchKey string, request *BatchRequest, timeout batcher_time.Duration) (interface{}, error) { - if request == nil { - return nil, batcher_fmt.Errorf("error, cannot request batching for nil BatchRequest") - } - if request.CombineF == nil { - return nil, batcher_fmt.Errorf("error, cannot request batching for BatchRequest with nil CombineF") - } - if request.SendF == nil { - return nil, batcher_fmt.Errorf("error, cannot request batching for BatchRequest with nil SendF") - } - if !b.enableBatching { - batcher_log.Printf("[DEBUG] Batching is disabled, sending single request for %q", request.DebugId) - return request.SendF(request.ResourceName, request.Body) - 
} - - respCh, err := b.registerBatchRequest(batchKey, request) - if err != nil { - return nil, batcher_fmt.Errorf("error adding request to batch: %s", err) - } - - ctx, cancel := batcher_context.WithTimeout(b.parentCtx, timeout) - defer cancel() - - select { - case resp := <-respCh: - if resp.err != nil { - return nil, batcher_errwrap.Wrapf( - batcher_fmt.Sprintf("Request `%s` returned error: {{err}}", request.DebugId), - resp.err) - } - return resp.body, nil - case <-ctx.Done(): - break - } - if b.parentCtx.Err() != nil { - switch b.parentCtx.Err() { - case batcher_context.Canceled: - return nil, batcher_fmt.Errorf("Parent context of request %s canceled", batchKey) - case batcher_context.DeadlineExceeded: - return nil, batcher_fmt.Errorf("Parent context of request %s timed out", batchKey) - default: - return nil, batcher_fmt.Errorf("Parent context of request %s encountered an error: %v", batchKey, ctx.Err()) - } - } - switch ctx.Err() { - case batcher_context.Canceled: - return nil, batcher_fmt.Errorf("Request %s canceled", batchKey) - case batcher_context.DeadlineExceeded: - return nil, batcher_fmt.Errorf("Request %s timed out after %v", batchKey, timeout) - default: - return nil, batcher_fmt.Errorf("Error making request %s: %v", batchKey, ctx.Err()) - } -} - -func (b *RequestBatcher) registerBatchRequest(batchKey string, newRequest *BatchRequest) (<-chan batchResponse, error) { - b.Lock() - defer b.Unlock() - - if batch, ok := b.batches[batchKey]; ok { - return batch.addRequest(newRequest) - } - - batcher_log.Printf("[DEBUG] Creating new batch %q from request %q", newRequest.DebugId, batchKey) - - respCh := make(chan batchResponse, 1) - sub := batchSubscriber{ - singleRequest: newRequest, - respCh: respCh, - } - - b.batches[batchKey] = &startedBatch{ - BatchRequest: &BatchRequest{ - ResourceName: newRequest.ResourceName, - Body: newRequest.Body, - CombineF: newRequest.CombineF, - SendF: newRequest.SendF, - DebugId: batcher_fmt.Sprintf("Combined batch for started 
batch %q", batchKey), - }, - batchKey: batchKey, - subscribers: []batchSubscriber{sub}, - } - - b.batches[batchKey].timer = batcher_time.AfterFunc(b.sendAfter, func() { - batch := b.popBatch(batchKey) - if batch == nil { - batcher_log.Printf("[ERROR] batch should have been added to saved batches - just run as single request %q", newRequest.DebugId) - respCh <- newRequest.send() - close(respCh) - } else { - b.sendBatchWithSingleRetry(batchKey, batch) - } - }) - - return respCh, nil -} - -func (b *RequestBatcher) sendBatchWithSingleRetry(batchKey string, batch *startedBatch) { - batcher_log.Printf("[DEBUG] Sending batch %q combining %d requests)", batchKey, len(batch.subscribers)) - resp := batch.send() - - if resp.IsError() && len(batch.subscribers) > 1 { - batcher_log.Printf("[DEBUG] Batch failed with error: %v", resp.err) - batcher_log.Printf("[DEBUG] Sending each request in batch separately") - for _, sub := range batch.subscribers { - batcher_log.Printf("[DEBUG] Retrying single request %q", sub.singleRequest.DebugId) - singleResp := sub.singleRequest.send() - batcher_log.Printf("[DEBUG] Retried single request %q returned response: %v", sub.singleRequest.DebugId, singleResp) - - if singleResp.IsError() { - singleResp.err = batcher_errwrap.Wrapf( - batcher_fmt.Sprintf("Batch request and retried single request %q both failed. 
Final error: {{err}}", sub.singleRequest.DebugId), - singleResp.err) - } - sub.respCh <- singleResp - close(sub.respCh) - } - } else { - - for _, sub := range batch.subscribers { - sub.respCh <- resp - close(sub.respCh) - } - } -} - -func (b *RequestBatcher) popBatch(batchKey string) *startedBatch { - b.Lock() - defer b.Unlock() - - batch, ok := b.batches[batchKey] - if !ok { - batcher_log.Printf("[DEBUG] Batch with ID %q not found in batcher", batchKey) - return nil - } - - delete(b.batches, batchKey) - return batch -} - -func (batch *startedBatch) addRequest(newRequest *BatchRequest) (<-chan batchResponse, error) { - batcher_log.Printf("[DEBUG] Adding batch request %q to existing batch %q", newRequest.DebugId, batch.batchKey) - if batch.CombineF == nil { - return nil, batcher_fmt.Errorf("Provider Error: unable to add request %q to batch %q with no CombineF", newRequest.DebugId, batch.batchKey) - } - newBody, err := batch.CombineF(batch.Body, newRequest.Body) - if err != nil { - return nil, batcher_fmt.Errorf("Provider Error: Unable to combine request %q data into existing batch %q: %v", newRequest.DebugId, batch.batchKey, err) - } - batch.Body = newBody - - batcher_log.Printf("[DEBUG] Added batch request %q to batch. 
New batch body: %v", newRequest.DebugId, batch.Body) - - respCh := make(chan batchResponse, 1) - sub := batchSubscriber{ - singleRequest: newRequest, - respCh: respCh, - } - batch.subscribers = append(batch.subscribers, sub) - return respCh, nil -} - -func (req *BatchRequest) send() batchResponse { - if req.SendF == nil { - return batchResponse{ - err: batcher_fmt.Errorf("provider error: Batch request has no SendBatch function"), - } - } - v, err := req.SendF(req.ResourceName, req.Body) - return batchResponse{v, err} -} - -type BigtableClientFactory struct { - UserAgent string - TokenSource bigtable_client_factory_oauth2.TokenSource - BillingProject string - UserProjectOverride bool -} - -func (s BigtableClientFactory) NewInstanceAdminClient(project string) (*bigtable_client_factory_bigtable.InstanceAdminClient, error) { - var opts []bigtable_client_factory_option.ClientOption - if requestReason := bigtable_client_factory_os.Getenv("CLOUDSDK_CORE_REQUEST_REASON"); requestReason != "" { - opts = append(opts, bigtable_client_factory_option.WithRequestReason(requestReason)) - } - - if s.UserProjectOverride && s.BillingProject != "" { - opts = append(opts, bigtable_client_factory_option.WithQuotaProject(s.BillingProject)) - } - - opts = append(opts, bigtable_client_factory_option.WithTokenSource(s.TokenSource), bigtable_client_factory_option.WithUserAgent(s.UserAgent)) - return bigtable_client_factory_bigtable.NewInstanceAdminClient(bigtable_client_factory_context.Background(), project, opts...) 
-} - -func (s BigtableClientFactory) NewAdminClient(project, instance string) (*bigtable_client_factory_bigtable.AdminClient, error) { - var opts []bigtable_client_factory_option.ClientOption - if requestReason := bigtable_client_factory_os.Getenv("CLOUDSDK_CORE_REQUEST_REASON"); requestReason != "" { - opts = append(opts, bigtable_client_factory_option.WithRequestReason(requestReason)) - } - - if s.UserProjectOverride && s.BillingProject != "" { - opts = append(opts, bigtable_client_factory_option.WithQuotaProject(s.BillingProject)) - } - - opts = append(opts, bigtable_client_factory_option.WithTokenSource(s.TokenSource), bigtable_client_factory_option.WithUserAgent(s.UserAgent)) - return bigtable_client_factory_bigtable.NewAdminClient(bigtable_client_factory_context.Background(), project, instance, opts...) -} - -type CloudFunctionsOperationWaiter struct { - Service *cloudfunctions_operation_cloudfunctions.Service - CommonOperationWaiter -} - -func (w *CloudFunctionsOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, cloudfunctions_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - return w.Service.Operations.Get(w.Op.Name).Do() -} - -func cloudFunctionsOperationWait(config *Config, op *cloudfunctions_operation_cloudfunctions.Operation, activity, userAgent string, timeout cloudfunctions_operation_time.Duration) error { - w := &CloudFunctionsOperationWaiter{ - Service: config.NewCloudFunctionsClient(userAgent), - } - if err := w.SetOp(op); err != nil { - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -const readyStatusType string = "Ready" - -const pendingCertificateReason string = "CertificatePending" - -type Condition struct { - Type string - Status string - Reason string - Message string -} - -type KnativeStatus struct { - Metadata struct { - Name string - Namespace string - SelfLink string - } - Status struct { - Conditions []Condition - ObservedGeneration float64 - } -} - 
-func getGeneration(res map[string]interface{}) (int, error) { - metadata, ok := res["metadata"] - if !ok { - return 0, cloudrun_polling_fmt.Errorf("Unable to find knative metadata") - } - m, ok := metadata.(map[string]interface{}) - if !ok { - return 0, cloudrun_polling_fmt.Errorf("Unable to find generation in knative metadata") - } - gen, ok := m["generation"] - if !ok { - return 0, cloudrun_polling_fmt.Errorf("Unable to find generation in knative metadata") - } - return int(gen.(float64)), nil -} - -func PollCheckKnativeStatusFunc(knativeRestResponse map[string]interface{}) func(resp map[string]interface{}, respErr error) PollResult { - return func(resp map[string]interface{}, respErr error) PollResult { - if respErr != nil { - return ErrorPollResult(respErr) - } - s := KnativeStatus{} - if err := Convert(resp, &s); err != nil { - return ErrorPollResult(cloudrun_polling_errwrap.Wrapf("unable to get KnativeStatus: {{err}}", err)) - } - - gen, err := getGeneration(knativeRestResponse) - if err != nil { - return ErrorPollResult(cloudrun_polling_errwrap.Wrapf("unable to find Knative generation: {{err}}", err)) - } - if int(s.Status.ObservedGeneration) != gen { - return PendingStatusPollResult("waiting for observed generation to match") - } - for _, condition := range s.Status.Conditions { - if condition.Type == readyStatusType { - cloudrun_polling_log.Printf("[DEBUG] checking KnativeStatus Ready condition %s: %s", condition.Status, condition.Message) - switch condition.Status { - case "True": - - return SuccessPollResult() - case "Unknown": - - if condition.Reason == pendingCertificateReason { - return SuccessPollResult() - } - return PendingStatusPollResult(cloudrun_polling_fmt.Sprintf("%s:%s", condition.Status, condition.Message)) - case "False": - return ErrorPollResult(cloudrun_polling_fmt.Errorf(`resource is in failed state "Ready:False", message: %s`, condition.Message)) - } - } - } - return PendingStatusPollResult("no status yet") - } -} - -func 
optionalPrefixSuppress(prefix string) common_diff_suppress_schema.SchemaDiffSuppressFunc { - return func(k, old, new string, d *common_diff_suppress_schema.ResourceData) bool { - return prefix+old == new || prefix+new == old - } -} - -func ignoreMissingKeyInMap(key string) common_diff_suppress_schema.SchemaDiffSuppressFunc { - return func(k, old, new string, d *common_diff_suppress_schema.ResourceData) bool { - common_diff_suppress_log.Printf("[DEBUG] - suppressing diff %q with old %q, new %q", k, old, new) - if common_diff_suppress_strings.HasSuffix(k, ".%") { - oldNum, err := common_diff_suppress_strconv.Atoi(old) - if err != nil { - common_diff_suppress_log.Printf("[ERROR] could not parse %q as number, no longer attempting diff suppress", old) - return false - } - newNum, err := common_diff_suppress_strconv.Atoi(new) - if err != nil { - common_diff_suppress_log.Printf("[ERROR] could not parse %q as number, no longer attempting diff suppress", new) - return false - } - return oldNum+1 == newNum - } else if common_diff_suppress_strings.HasSuffix(k, "."+key) { - return old == "" - } - return false - } -} - -func optionalSurroundingSpacesSuppress(k, old, new string, d *common_diff_suppress_schema.ResourceData) bool { - return common_diff_suppress_strings.TrimSpace(old) == common_diff_suppress_strings.TrimSpace(new) -} - -func emptyOrDefaultStringSuppress(defaultVal string) common_diff_suppress_schema.SchemaDiffSuppressFunc { - return func(k, old, new string, d *common_diff_suppress_schema.ResourceData) bool { - return (old == "" && new == defaultVal) || (new == "" && old == defaultVal) - } -} - -func ipCidrRangeDiffSuppress(k, old, new string, d *common_diff_suppress_schema.ResourceData) bool { - - if len(new) > 0 && new[0] == '/' { - oldNetmaskStartPos := common_diff_suppress_strings.LastIndex(old, "/") - - if oldNetmaskStartPos != -1 { - oldNetmask := old[common_diff_suppress_strings.LastIndex(old, "/"):] - if oldNetmask == new { - return true - } - } - } - - 
return false -} - -func sha256DiffSuppress(_, old, new string, _ *common_diff_suppress_schema.ResourceData) bool { - return common_diff_suppress_hex.EncodeToString(common_diff_suppress_sha256.New().Sum([]byte(old))) == new -} - -func caseDiffSuppress(_, old, new string, _ *common_diff_suppress_schema.ResourceData) bool { - return common_diff_suppress_strings.ToUpper(old) == common_diff_suppress_strings.ToUpper(new) -} - -func portRangeDiffSuppress(k, old, new string, d *common_diff_suppress_schema.ResourceData) bool { - return old == new+"-"+new -} - -func rfc3339TimeDiffSuppress(k, old, new string, d *common_diff_suppress_schema.ResourceData) bool { - if (len(old) == 4 && "0"+old == new) || (len(new) == 4 && "0"+new == old) { - return true - } - return false -} - -func emptyOrUnsetBlockDiffSuppress(k, old, new string, d *common_diff_suppress_schema.ResourceData) bool { - o, n := d.GetChange(common_diff_suppress_strings.TrimSuffix(k, ".#")) - var l []interface{} - if old == "0" && new == "1" { - l = n.([]interface{}) - } else if new == "0" && old == "1" { - l = o.([]interface{}) - } else { - - return false - } - - contents := l[0].(map[string]interface{}) - for _, v := range contents { - if !isEmptyValue(common_diff_suppress_reflect.ValueOf(v)) { - return false - } - } - return true -} - -func locationDiffSuppress(k, old, new string, d *common_diff_suppress_schema.ResourceData) bool { - return locationDiffSuppressHelper(old, new) || locationDiffSuppressHelper(new, old) -} - -func locationDiffSuppressHelper(a, b string) bool { - return common_diff_suppress_strings.Replace(a, "/locations/", "/regions/", 1) == b || - common_diff_suppress_strings.Replace(a, "/locations/", "/zones/", 1) == b -} - -func absoluteDomainSuppress(k, old, new string, _ *common_diff_suppress_schema.ResourceData) bool { - if common_diff_suppress_strings.HasPrefix(k, "managed.0.domains.") { - return old == common_diff_suppress_strings.TrimRight(new, ".") || new == 
common_diff_suppress_strings.TrimRight(old, ".") - } - return false -} - -func timestampDiffSuppress(format string) common_diff_suppress_schema.SchemaDiffSuppressFunc { - return func(_, old, new string, _ *common_diff_suppress_schema.ResourceData) bool { - oldT, err := common_diff_suppress_time.Parse(format, old) - if err != nil { - return false - } - - newT, err := common_diff_suppress_time.Parse(format, new) - if err != nil { - return false - } - - return oldT == newT - } -} - -func internalIpDiffSuppress(_, old, new string, _ *common_diff_suppress_schema.ResourceData) bool { - return (common_diff_suppress_net.ParseIP(old) != nil) && (common_diff_suppress_net.ParseIP(new) == nil) -} - -func durationDiffSuppress(k, old, new string, d *common_diff_suppress_schema.ResourceData) bool { - oDuration, err := common_diff_suppress_time.ParseDuration(old) - if err != nil { - return false - } - nDuration, err := common_diff_suppress_time.ParseDuration(new) - if err != nil { - return false - } - return oDuration == nDuration -} - -func compareIpAddressOrSelfLinkOrResourceName(_, old, new string, _ *common_diff_suppress_schema.ResourceData) bool { - - if common_diff_suppress_net.ParseIP(new) != nil { - return new == old - } - - return compareSelfLinkOrResourceName("", old, new, nil) -} - -func compareOptionalSubnet(_, old, new string, _ *common_diff_suppress_schema.ResourceData) bool { - if isEmptyValue(common_diff_suppress_reflect.ValueOf(new)) { - return true - } - - return compareSelfLinkOrResourceName("", old, new, nil) -} - -type CommonOpError struct { - *common_operation_cloudresourcemanagercloudresourcemanager.Status -} - -func (e *CommonOpError) Error() string { - return common_operation_fmt.Sprintf("Error code %v, message: %s", e.Code, e.Message) -} - -type Waiter interface { - State() string - - Error() error - - IsRetryable(error) bool - - SetOp(interface{}) error - - QueryOp() (interface{}, error) - - OpName() string - - PendingStates() []string - - TargetStates() 
[]string -} - -type CommonOperationWaiter struct { - Op CommonOperation -} - -func (w *CommonOperationWaiter) State() string { - if w == nil { - return common_operation_fmt.Sprintf("Operation is nil!") - } - - return common_operation_fmt.Sprintf("done: %v", w.Op.Done) -} - -func (w *CommonOperationWaiter) Error() error { - if w != nil && w.Op.Error != nil { - return &CommonOpError{w.Op.Error} - } - return nil -} - -func (w *CommonOperationWaiter) IsRetryable(error) bool { - return false -} - -func (w *CommonOperationWaiter) SetOp(op interface{}) error { - if err := Convert(op, &w.Op); err != nil { - return err - } - return nil -} - -func (w *CommonOperationWaiter) OpName() string { - if w == nil { - return "" - } - - return w.Op.Name -} - -func (w *CommonOperationWaiter) PendingStates() []string { - return []string{"done: false"} -} - -func (w *CommonOperationWaiter) TargetStates() []string { - return []string{"done: true"} -} - -func OperationDone(w Waiter) bool { - for _, s := range w.TargetStates() { - if s == w.State() { - return true - } - } - return false -} - -func CommonRefreshFunc(w Waiter) common_operation_resource.StateRefreshFunc { - return func() (interface{}, string, error) { - op, err := w.QueryOp() - if err != nil { - - if isRetryableError(err, isNotFoundRetryableError("GET operation"), isOperationReadQuotaError) { - common_operation_log.Printf("[DEBUG] Dismissed retryable error on GET operation %q: %s", w.OpName(), err) - return nil, "done: false", nil - } - return nil, "", common_operation_fmt.Errorf("error while retrieving operation: %s", err) - } - - if err = w.SetOp(op); err != nil { - return nil, "", common_operation_fmt.Errorf("Cannot continue, unable to use operation: %s", err) - } - - if err = w.Error(); err != nil { - if w.IsRetryable(err) { - common_operation_log.Printf("[DEBUG] Retrying operation GET based on retryable err: %s", err) - return nil, w.State(), nil - } - return nil, "", err - } - - common_operation_log.Printf("[DEBUG] Got 
%v while polling for operation %s's status", w.State(), w.OpName()) - return op, w.State(), nil - } -} - -func OperationWait(w Waiter, activity string, timeout common_operation_time.Duration, pollInterval common_operation_time.Duration) error { - if OperationDone(w) { - if w.Error() != nil { - return w.Error() - } - return nil - } - - c := &common_operation_resource.StateChangeConf{ - Pending: w.PendingStates(), - Target: w.TargetStates(), - Refresh: CommonRefreshFunc(w), - Timeout: timeout, - MinTimeout: 2 * common_operation_time.Second, - PollInterval: pollInterval, - } - opRaw, err := c.WaitForState() - if err != nil { - return common_operation_fmt.Errorf("Error waiting for %s: %s", activity, err) - } - - err = w.SetOp(opRaw) - if err != nil { - return err - } - if w.Error() != nil { - return w.Error() - } - - return nil -} - -type CommonOperation common_operation_cloudresourcemanagercloudresourcemanager.Operation - -type ( - PollReadFunc func() (resp map[string]interface{}, respErr error) - - PollCheckResponseFunc func(resp map[string]interface{}, respErr error) PollResult - - PollResult *common_polling_resource.RetryError -) - -func ErrorPollResult(err error) PollResult { - return common_polling_resource.NonRetryableError(err) -} - -func PendingStatusPollResult(status string) PollResult { - return common_polling_resource.RetryableError(common_polling_fmt.Errorf("got pending status %q", status)) -} - -func SuccessPollResult() PollResult { - return nil -} - -func PollingWaitTime(pollF PollReadFunc, checkResponse PollCheckResponseFunc, activity string, - timeout common_polling_time.Duration, targetOccurrences int) error { - common_polling_log.Printf("[DEBUG] %s: Polling until expected state is read", activity) - common_polling_log.Printf("[DEBUG] Target occurrences: %d", targetOccurrences) - if targetOccurrences == 1 { - return common_polling_resource.Retry(timeout, func() *common_polling_resource.RetryError { - readResp, readErr := pollF() - return 
checkResponse(readResp, readErr) - }) - } - return RetryWithTargetOccurrences(timeout, targetOccurrences, func() *common_polling_resource.RetryError { - readResp, readErr := pollF() - return checkResponse(readResp, readErr) - }) -} - -func RetryWithTargetOccurrences(timeout common_polling_time.Duration, targetOccurrences int, - f common_polling_resource.RetryFunc) error { - - var resultErr error - var resultErrMu common_polling_sync.Mutex - - c := &common_polling_resource.StateChangeConf{ - Pending: []string{"retryableerror"}, - Target: []string{"success"}, - Timeout: timeout, - MinTimeout: 500 * common_polling_time.Millisecond, - ContinuousTargetOccurence: targetOccurrences, - Refresh: func() (interface{}, string, error) { - rerr := f() - - resultErrMu.Lock() - defer resultErrMu.Unlock() - - if rerr == nil { - resultErr = nil - return 42, "success", nil - } - - resultErr = rerr.Err - - if rerr.Retryable { - return 42, "retryableerror", nil - } - return nil, "quit", rerr.Err - }, - } - - _, waitErr := c.WaitForState() - - resultErrMu.Lock() - defer resultErrMu.Unlock() - - if resultErr == nil { - return waitErr - } - - return resultErr -} - -func PollCheckForExistence(_ map[string]interface{}, respErr error) PollResult { - if respErr != nil { - if isGoogleApiErrorWithCode(respErr, 404) { - return PendingStatusPollResult("not found") - } - return ErrorPollResult(respErr) - } - return SuccessPollResult() -} - -func PollCheckForExistenceWith403(_ map[string]interface{}, respErr error) PollResult { - if respErr != nil { - if isGoogleApiErrorWithCode(respErr, 404) || isGoogleApiErrorWithCode(respErr, 403) { - return PendingStatusPollResult("not found") - } - return ErrorPollResult(respErr) - } - return SuccessPollResult() -} - -func PollCheckForAbsence(_ map[string]interface{}, respErr error) PollResult { - if respErr != nil { - if isGoogleApiErrorWithCode(respErr, 404) { - return SuccessPollResult() - } - return ErrorPollResult(respErr) - } - return 
PendingStatusPollResult("found") -} - -type ComposerOperationWaiter struct { - Service *composer_operation_composer.ProjectsLocationsService - CommonOperationWaiter -} - -func (w *ComposerOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, composer_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - return w.Service.Operations.Get(w.Op.Name).Do() -} - -func composerOperationWaitTime(config *Config, op *composer_operation_composer.Operation, project, activity, userAgent string, timeout composer_operation_time.Duration) error { - w := &ComposerOperationWaiter{ - Service: config.NewComposerClient(userAgent).Projects.Locations, - } - if err := w.SetOp(op); err != nil { - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -func emptySecurityPolicyReference() *compute_backend_service_helpers_compute.SecurityPolicyReference { - return &compute_backend_service_helpers_compute.SecurityPolicyReference{} -} - -func instanceSchedulingNodeAffinitiesElemSchema() *compute_instance_helpers_schema.Resource { - return &compute_instance_helpers_schema.Resource{ - Schema: map[string]*compute_instance_helpers_schema.Schema{ - "key": { - Type: compute_instance_helpers_schema.TypeString, - Required: true, - }, - "operator": { - Type: compute_instance_helpers_schema.TypeString, - Required: true, - ValidateFunc: compute_instance_helpers_validation.StringInSlice([]string{"IN", "NOT_IN"}, false), - }, - "values": { - Type: compute_instance_helpers_schema.TypeSet, - Required: true, - Elem: &compute_instance_helpers_schema.Schema{Type: compute_instance_helpers_schema.TypeString}, - Set: compute_instance_helpers_schema.HashString, - }, - }, - } -} - -func expandAliasIpRanges(ranges []interface{}) []*compute_instance_helpers_compute.AliasIpRange { - ipRanges := make([]*compute_instance_helpers_compute.AliasIpRange, 0, len(ranges)) - for _, raw := range ranges { - data := raw.(map[string]interface{}) - ipRanges = 
append(ipRanges, &compute_instance_helpers_compute.AliasIpRange{ - IpCidrRange: data["ip_cidr_range"].(string), - SubnetworkRangeName: data["subnetwork_range_name"].(string), - }) - } - return ipRanges -} - -func flattenAliasIpRange(ranges []*compute_instance_helpers_compute.AliasIpRange) []map[string]interface{} { - rangesSchema := make([]map[string]interface{}, 0, len(ranges)) - for _, ipRange := range ranges { - rangesSchema = append(rangesSchema, map[string]interface{}{ - "ip_cidr_range": ipRange.IpCidrRange, - "subnetwork_range_name": ipRange.SubnetworkRangeName, - }) - } - return rangesSchema -} - -func expandScheduling(v interface{}) (*compute_instance_helpers_compute.Scheduling, error) { - if v == nil { - - return &compute_instance_helpers_compute.Scheduling{ - AutomaticRestart: compute_instance_helpers_googleapi.Bool(true), - }, nil - } - - ls := v.([]interface{}) - if len(ls) == 0 { - - return &compute_instance_helpers_compute.Scheduling{ - AutomaticRestart: compute_instance_helpers_googleapi.Bool(true), - }, nil - } - - if len(ls) > 1 || ls[0] == nil { - return nil, compute_instance_helpers_fmt.Errorf("expected exactly one scheduling block") - } - - original := ls[0].(map[string]interface{}) - scheduling := &compute_instance_helpers_compute.Scheduling{ - ForceSendFields: make([]string, 0, 4), - } - - if v, ok := original["automatic_restart"]; ok { - scheduling.AutomaticRestart = compute_instance_helpers_googleapi.Bool(v.(bool)) - scheduling.ForceSendFields = append(scheduling.ForceSendFields, "AutomaticRestart") - } - - if v, ok := original["preemptible"]; ok { - scheduling.Preemptible = v.(bool) - scheduling.ForceSendFields = append(scheduling.ForceSendFields, "Preemptible") - } - - if v, ok := original["on_host_maintenance"]; ok { - scheduling.OnHostMaintenance = v.(string) - scheduling.ForceSendFields = append(scheduling.ForceSendFields, "OnHostMaintenance") - } - - if v, ok := original["node_affinities"]; ok && v != nil { - naSet := 
v.(*compute_instance_helpers_schema.Set).List() - scheduling.NodeAffinities = make([]*compute_instance_helpers_compute.SchedulingNodeAffinity, len(ls)) - scheduling.ForceSendFields = append(scheduling.ForceSendFields, "NodeAffinities") - for _, nodeAffRaw := range naSet { - if nodeAffRaw == nil { - continue - } - nodeAff := nodeAffRaw.(map[string]interface{}) - transformed := &compute_instance_helpers_compute.SchedulingNodeAffinity{ - Key: nodeAff["key"].(string), - Operator: nodeAff["operator"].(string), - Values: convertStringArr(nodeAff["values"].(*compute_instance_helpers_schema.Set).List()), - } - scheduling.NodeAffinities = append(scheduling.NodeAffinities, transformed) - } - } - - if v, ok := original["min_node_cpus"]; ok { - scheduling.MinNodeCpus = int64(v.(int)) - } - - return scheduling, nil -} - -func flattenScheduling(resp *compute_instance_helpers_compute.Scheduling) []map[string]interface{} { - schedulingMap := map[string]interface{}{ - "on_host_maintenance": resp.OnHostMaintenance, - "preemptible": resp.Preemptible, - "min_node_cpus": resp.MinNodeCpus, - } - - if resp.AutomaticRestart != nil { - schedulingMap["automatic_restart"] = *resp.AutomaticRestart - } - - nodeAffinities := compute_instance_helpers_schema.NewSet(compute_instance_helpers_schema.HashResource(instanceSchedulingNodeAffinitiesElemSchema()), nil) - for _, na := range resp.NodeAffinities { - nodeAffinities.Add(map[string]interface{}{ - "key": na.Key, - "operator": na.Operator, - "values": compute_instance_helpers_schema.NewSet(compute_instance_helpers_schema.HashString, convertStringArrToInterface(na.Values)), - }) - } - schedulingMap["node_affinities"] = nodeAffinities - - return []map[string]interface{}{schedulingMap} -} - -func flattenAccessConfigs(accessConfigs []*compute_instance_helpers_compute.AccessConfig) ([]map[string]interface{}, string) { - flattened := make([]map[string]interface{}, len(accessConfigs)) - natIP := "" - for i, ac := range accessConfigs { - flattened[i] = 
map[string]interface{}{ - "nat_ip": ac.NatIP, - "network_tier": ac.NetworkTier, - } - if ac.SetPublicPtr { - flattened[i]["public_ptr_domain_name"] = ac.PublicPtrDomainName - } - if natIP == "" { - natIP = ac.NatIP - } - } - return flattened, natIP -} - -func flattenIpv6AccessConfigs(ipv6AccessConfigs []*compute_instance_helpers_compute.AccessConfig) []map[string]interface{} { - flattened := make([]map[string]interface{}, len(ipv6AccessConfigs)) - for i, ac := range ipv6AccessConfigs { - flattened[i] = map[string]interface{}{ - "network_tier": ac.NetworkTier, - } - flattened[i]["public_ptr_domain_name"] = ac.PublicPtrDomainName - } - return flattened -} - -func flattenNetworkInterfaces(d *compute_instance_helpers_schema.ResourceData, config *Config, networkInterfaces []*compute_instance_helpers_compute.NetworkInterface) ([]map[string]interface{}, string, string, string, error) { - flattened := make([]map[string]interface{}, len(networkInterfaces)) - var region, internalIP, externalIP string - - for i, iface := range networkInterfaces { - var ac []map[string]interface{} - ac, externalIP = flattenAccessConfigs(iface.AccessConfigs) - - subnet, err := ParseSubnetworkFieldValue(iface.Subnetwork, d, config) - if err != nil { - return nil, "", "", "", err - } - region = subnet.Region - - flattened[i] = map[string]interface{}{ - "network_ip": iface.NetworkIP, - "network": ConvertSelfLinkToV1(iface.Network), - "subnetwork": ConvertSelfLinkToV1(iface.Subnetwork), - "subnetwork_project": subnet.Project, - "access_config": ac, - "alias_ip_range": flattenAliasIpRange(iface.AliasIpRanges), - "nic_type": iface.NicType, - "stack_type": iface.StackType, - "ipv6_access_config": flattenIpv6AccessConfigs(iface.Ipv6AccessConfigs), - "queue_count": iface.QueueCount, - } - - if iface.Name != "" { - flattened[i]["name"] = iface.Name - } - if internalIP == "" { - internalIP = iface.NetworkIP - } - } - return flattened, region, internalIP, externalIP, nil -} - -func 
expandAccessConfigs(configs []interface{}) []*compute_instance_helpers_compute.AccessConfig { - acs := make([]*compute_instance_helpers_compute.AccessConfig, len(configs)) - for i, raw := range configs { - acs[i] = &compute_instance_helpers_compute.AccessConfig{} - acs[i].Type = "ONE_TO_ONE_NAT" - if raw != nil { - data := raw.(map[string]interface{}) - acs[i].NatIP = data["nat_ip"].(string) - acs[i].NetworkTier = data["network_tier"].(string) - if ptr, ok := data["public_ptr_domain_name"]; ok && ptr != "" { - acs[i].SetPublicPtr = true - acs[i].PublicPtrDomainName = ptr.(string) - } - } - } - return acs -} - -func expandIpv6AccessConfigs(configs []interface{}) []*compute_instance_helpers_compute.AccessConfig { - iacs := make([]*compute_instance_helpers_compute.AccessConfig, len(configs)) - for i, raw := range configs { - iacs[i] = &compute_instance_helpers_compute.AccessConfig{} - if raw != nil { - data := raw.(map[string]interface{}) - iacs[i].NetworkTier = data["network_tier"].(string) - if ptr, ok := data["public_ptr_domain_name"]; ok && ptr != "" { - iacs[i].PublicPtrDomainName = ptr.(string) - } - iacs[i].Type = "DIRECT_IPV6" - } - } - return iacs -} - -func expandNetworkInterfaces(d TerraformResourceData, config *Config) ([]*compute_instance_helpers_compute.NetworkInterface, error) { - configs := d.Get("network_interface").([]interface{}) - ifaces := make([]*compute_instance_helpers_compute.NetworkInterface, len(configs)) - for i, raw := range configs { - data := raw.(map[string]interface{}) - - network := data["network"].(string) - subnetwork := data["subnetwork"].(string) - if network == "" && subnetwork == "" { - return nil, compute_instance_helpers_fmt.Errorf("exactly one of network or subnetwork must be provided") - } - - nf, err := ParseNetworkFieldValue(network, d, config) - if err != nil { - return nil, compute_instance_helpers_fmt.Errorf("cannot determine self_link for network %q: %s", network, err) - } - - subnetProjectField := 
compute_instance_helpers_fmt.Sprintf("network_interface.%d.subnetwork_project", i) - sf, err := ParseSubnetworkFieldValueWithProjectField(subnetwork, subnetProjectField, d, config) - if err != nil { - return nil, compute_instance_helpers_fmt.Errorf("cannot determine self_link for subnetwork %q: %s", subnetwork, err) - } - - ifaces[i] = &compute_instance_helpers_compute.NetworkInterface{ - NetworkIP: data["network_ip"].(string), - Network: nf.RelativeLink(), - Subnetwork: sf.RelativeLink(), - AccessConfigs: expandAccessConfigs(data["access_config"].([]interface{})), - AliasIpRanges: expandAliasIpRanges(data["alias_ip_range"].([]interface{})), - NicType: data["nic_type"].(string), - StackType: data["stack_type"].(string), - QueueCount: int64(data["queue_count"].(int)), - Ipv6AccessConfigs: expandIpv6AccessConfigs(data["ipv6_access_config"].([]interface{})), - } - } - return ifaces, nil -} - -func flattenServiceAccounts(serviceAccounts []*compute_instance_helpers_compute.ServiceAccount) []map[string]interface{} { - result := make([]map[string]interface{}, len(serviceAccounts)) - for i, serviceAccount := range serviceAccounts { - result[i] = map[string]interface{}{ - "email": serviceAccount.Email, - "scopes": compute_instance_helpers_schema.NewSet(stringScopeHashcode, convertStringArrToInterface(serviceAccount.Scopes)), - } - } - return result -} - -func expandServiceAccounts(configs []interface{}) []*compute_instance_helpers_compute.ServiceAccount { - accounts := make([]*compute_instance_helpers_compute.ServiceAccount, len(configs)) - for i, raw := range configs { - data := raw.(map[string]interface{}) - - accounts[i] = &compute_instance_helpers_compute.ServiceAccount{ - Email: data["email"].(string), - Scopes: canonicalizeServiceScopes(convertStringSet(data["scopes"].(*compute_instance_helpers_schema.Set))), - } - - if accounts[i].Email == "" { - accounts[i].Email = "default" - } - } - return accounts -} - -func flattenGuestAccelerators(accelerators 
[]*compute_instance_helpers_compute.AcceleratorConfig) []map[string]interface{} { - acceleratorsSchema := make([]map[string]interface{}, len(accelerators)) - for i, accelerator := range accelerators { - acceleratorsSchema[i] = map[string]interface{}{ - "count": accelerator.AcceleratorCount, - "type": accelerator.AcceleratorType, - } - } - return acceleratorsSchema -} - -func resourceInstanceTags(d TerraformResourceData) *compute_instance_helpers_compute.Tags { - - var tags *compute_instance_helpers_compute.Tags - if v := d.Get("tags"); v != nil { - vs := v.(*compute_instance_helpers_schema.Set) - tags = new(compute_instance_helpers_compute.Tags) - tags.Items = make([]string, vs.Len()) - for i, v := range vs.List() { - tags.Items[i] = v.(string) - } - - tags.Fingerprint = d.Get("tags_fingerprint").(string) - } - - return tags -} - -func expandShieldedVmConfigs(d TerraformResourceData) *compute_instance_helpers_compute.ShieldedInstanceConfig { - if _, ok := d.GetOk("shielded_instance_config"); !ok { - return nil - } - - prefix := "shielded_instance_config.0" - return &compute_instance_helpers_compute.ShieldedInstanceConfig{ - EnableSecureBoot: d.Get(prefix + ".enable_secure_boot").(bool), - EnableVtpm: d.Get(prefix + ".enable_vtpm").(bool), - EnableIntegrityMonitoring: d.Get(prefix + ".enable_integrity_monitoring").(bool), - ForceSendFields: []string{"EnableSecureBoot", "EnableVtpm", "EnableIntegrityMonitoring"}, - } -} - -func expandConfidentialInstanceConfig(d TerraformResourceData) *compute_instance_helpers_compute.ConfidentialInstanceConfig { - if _, ok := d.GetOk("confidential_instance_config"); !ok { - return nil - } - - prefix := "confidential_instance_config.0" - return &compute_instance_helpers_compute.ConfidentialInstanceConfig{ - EnableConfidentialCompute: d.Get(prefix + ".enable_confidential_compute").(bool), - } -} - -func flattenConfidentialInstanceConfig(ConfidentialInstanceConfig *compute_instance_helpers_compute.ConfidentialInstanceConfig) 
[]map[string]bool { - if ConfidentialInstanceConfig == nil { - return nil - } - - return []map[string]bool{{ - "enable_confidential_compute": ConfidentialInstanceConfig.EnableConfidentialCompute, - }} -} - -func expandAdvancedMachineFeatures(d TerraformResourceData) *compute_instance_helpers_compute.AdvancedMachineFeatures { - if _, ok := d.GetOk("advanced_machine_features"); !ok { - return nil - } - - prefix := "advanced_machine_features.0" - return &compute_instance_helpers_compute.AdvancedMachineFeatures{ - EnableNestedVirtualization: d.Get(prefix + ".enable_nested_virtualization").(bool), - ThreadsPerCore: int64(d.Get(prefix + ".threads_per_core").(int)), - } -} - -func flattenAdvancedMachineFeatures(AdvancedMachineFeatures *compute_instance_helpers_compute.AdvancedMachineFeatures) []map[string]interface{} { - if AdvancedMachineFeatures == nil { - return nil - } - return []map[string]interface{}{{ - "enable_nested_virtualization": AdvancedMachineFeatures.EnableNestedVirtualization, - "threads_per_core": AdvancedMachineFeatures.ThreadsPerCore, - }} -} - -func flattenShieldedVmConfig(shieldedVmConfig *compute_instance_helpers_compute.ShieldedInstanceConfig) []map[string]bool { - if shieldedVmConfig == nil { - return nil - } - - return []map[string]bool{{ - "enable_secure_boot": shieldedVmConfig.EnableSecureBoot, - "enable_vtpm": shieldedVmConfig.EnableVtpm, - "enable_integrity_monitoring": shieldedVmConfig.EnableIntegrityMonitoring, - }} -} - -func expandDisplayDevice(d TerraformResourceData) *compute_instance_helpers_compute.DisplayDevice { - if _, ok := d.GetOk("enable_display"); !ok { - return nil - } - return &compute_instance_helpers_compute.DisplayDevice{ - EnableDisplay: d.Get("enable_display").(bool), - ForceSendFields: []string{"EnableDisplay"}, - } -} - -func flattenEnableDisplay(displayDevice *compute_instance_helpers_compute.DisplayDevice) interface{} { - if displayDevice == nil { - return nil - } - - return displayDevice.EnableDisplay -} - -func 
schedulingHasChangeRequiringReboot(d *compute_instance_helpers_schema.ResourceData) bool { - o, n := d.GetChange("scheduling") - oScheduling := o.([]interface{})[0].(map[string]interface{}) - newScheduling := n.([]interface{})[0].(map[string]interface{}) - - return hasNodeAffinitiesChanged(oScheduling, newScheduling) -} - -func schedulingHasChangeWithoutReboot(d *compute_instance_helpers_schema.ResourceData) bool { - if !d.HasChange("scheduling") { - - return false - } - o, n := d.GetChange("scheduling") - oScheduling := o.([]interface{})[0].(map[string]interface{}) - newScheduling := n.([]interface{})[0].(map[string]interface{}) - - if schedulingHasChangeRequiringReboot(d) { - return false - } - - if oScheduling["automatic_restart"] != newScheduling["automatic_restart"] { - return true - } - - if oScheduling["preemptible"] != newScheduling["preemptible"] { - return true - } - - if oScheduling["on_host_maintenance"] != newScheduling["on_host_maintenance"] { - return true - } - - if oScheduling["min_node_cpus"] != newScheduling["min_node_cpus"] { - return true - } - - return false -} - -func hasNodeAffinitiesChanged(oScheduling, newScheduling map[string]interface{}) bool { - oldNAs := oScheduling["node_affinities"].(*compute_instance_helpers_schema.Set).List() - newNAs := newScheduling["node_affinities"].(*compute_instance_helpers_schema.Set).List() - if len(oldNAs) != len(newNAs) { - return true - } - for i := range oldNAs { - oldNodeAffinity := oldNAs[i].(map[string]interface{}) - newNodeAffinity := newNAs[i].(map[string]interface{}) - if oldNodeAffinity["key"] != newNodeAffinity["key"] { - return true - } - if oldNodeAffinity["operator"] != newNodeAffinity["operator"] { - return true - } - - if !compute_instance_helpers_reflect.DeepEqual(convertStringSet(oldNodeAffinity["values"].(*compute_instance_helpers_schema.Set)), convertStringSet(newNodeAffinity["values"].(*compute_instance_helpers_schema.Set))) { - return true - } - } - - return false -} - -func 
expandReservationAffinity(d *compute_instance_helpers_schema.ResourceData) (*compute_instance_helpers_compute.ReservationAffinity, error) { - _, ok := d.GetOk("reservation_affinity") - if !ok { - return nil, nil - } - - prefix := "reservation_affinity.0" - reservationAffinityType := d.Get(prefix + ".type").(string) - - affinity := compute_instance_helpers_compute.ReservationAffinity{ - ConsumeReservationType: reservationAffinityType, - ForceSendFields: []string{"ConsumeReservationType"}, - } - - _, hasSpecificReservation := d.GetOk(prefix + ".specific_reservation") - if (reservationAffinityType == "SPECIFIC_RESERVATION") != hasSpecificReservation { - return nil, compute_instance_helpers_fmt.Errorf("specific_reservation must be set when reservation_affinity is SPECIFIC_RESERVATION, and not set otherwise") - } - - prefix = prefix + ".specific_reservation.0" - if hasSpecificReservation { - affinity.Key = d.Get(prefix + ".key").(string) - affinity.ForceSendFields = append(affinity.ForceSendFields, "Key", "Values") - - for _, v := range d.Get(prefix + ".values").([]interface{}) { - affinity.Values = append(affinity.Values, v.(string)) - } - } - - return &affinity, nil -} - -func flattenReservationAffinity(affinity *compute_instance_helpers_compute.ReservationAffinity) []map[string]interface{} { - if affinity == nil { - return nil - } - - flattened := map[string]interface{}{ - "type": affinity.ConsumeReservationType, - } - - if affinity.ConsumeReservationType == "SPECIFIC_RESERVATION" { - flattened["specific_reservation"] = []map[string]interface{}{{ - "key": affinity.Key, - "values": affinity.Values, - }} - } - - return []map[string]interface{}{flattened} -} - -func computeInstanceDeleteAccessConfigs(d *compute_instance_network_interface_helpers_schema.ResourceData, config *Config, instNetworkInterface *compute_instance_network_interface_helpers_compute.NetworkInterface, project, zone, userAgent, instanceName string) error { - - for _, ac := range 
instNetworkInterface.AccessConfigs { - op, err := config.NewComputeClient(userAgent).Instances.DeleteAccessConfig( - project, zone, instanceName, ac.Name, instNetworkInterface.Name).Do() - if err != nil { - return compute_instance_network_interface_helpers_fmt.Errorf("Error deleting old access_config: %s", err) - } - opErr := computeOperationWaitTime(config, op, project, "old access_config to delete", userAgent, d.Timeout(compute_instance_network_interface_helpers_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - return nil -} - -func computeInstanceAddAccessConfigs(d *compute_instance_network_interface_helpers_schema.ResourceData, config *Config, instNetworkInterface *compute_instance_network_interface_helpers_compute.NetworkInterface, accessConfigs []*compute_instance_network_interface_helpers_compute.AccessConfig, project, zone, userAgent, instanceName string) error { - - for _, ac := range accessConfigs { - op, err := config.NewComputeClient(userAgent).Instances.AddAccessConfig(project, zone, instanceName, instNetworkInterface.Name, ac).Do() - if err != nil { - return compute_instance_network_interface_helpers_fmt.Errorf("Error adding new access_config: %s", err) - } - opErr := computeOperationWaitTime(config, op, project, "new access_config to add", userAgent, d.Timeout(compute_instance_network_interface_helpers_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - return nil -} - -func computeInstanceCreateUpdateWhileStoppedCall(d *compute_instance_network_interface_helpers_schema.ResourceData, config *Config, networkInterfacePatchObj *compute_instance_network_interface_helpers_compute.NetworkInterface, accessConfigs []*compute_instance_network_interface_helpers_compute.AccessConfig, accessConfigsHaveChanged bool, index int, project, zone, userAgent, instanceName string) func(inst *compute_instance_network_interface_helpers_compute.Instance) error { - - return func(instance 
*compute_instance_network_interface_helpers_compute.Instance) error { - - instNetworkInterface := instance.NetworkInterfaces[index] - networkInterfacePatchObj.Fingerprint = instNetworkInterface.Fingerprint - - if accessConfigsHaveChanged { - err := computeInstanceDeleteAccessConfigs(d, config, instNetworkInterface, project, zone, userAgent, instanceName) - if err != nil { - return err - } - } - - op, err := config.NewComputeClient(userAgent).Instances.UpdateNetworkInterface(project, zone, instanceName, instNetworkInterface.Name, networkInterfacePatchObj).Do() - if err != nil { - return compute_instance_network_interface_helpers_errwrap.Wrapf("Error updating network interface: {{err}}", err) - } - opErr := computeOperationWaitTime(config, op, project, "network interface to update", userAgent, d.Timeout(compute_instance_network_interface_helpers_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - - if accessConfigsHaveChanged { - err := computeInstanceAddAccessConfigs(d, config, instNetworkInterface, accessConfigs, project, zone, userAgent, instanceName) - if err != nil { - return err - } - } - return nil - } -} - -type ComputeOperationWaiter struct { - Service *compute_operation_compute.Service - Op *compute_operation_compute.Operation - Context compute_operation_context.Context - Project string -} - -func (w *ComputeOperationWaiter) State() string { - if w == nil || w.Op == nil { - return "" - } - - return w.Op.Status -} - -func (w *ComputeOperationWaiter) Error() error { - if w != nil && w.Op != nil && w.Op.Error != nil { - return ComputeOperationError(*w.Op.Error) - } - return nil -} - -func (w *ComputeOperationWaiter) IsRetryable(err error) bool { - if oe, ok := err.(ComputeOperationError); ok { - for _, e := range oe.Errors { - if e.Code == "RESOURCE_NOT_READY" { - return true - } - } - } - return false -} - -func (w *ComputeOperationWaiter) SetOp(op interface{}) error { - var ok bool - w.Op, ok = op.(*compute_operation_compute.Operation) - if !ok { 
- return compute_operation_fmt.Errorf("Unable to set operation. Bad type!") - } - return nil -} - -func (w *ComputeOperationWaiter) QueryOp() (interface{}, error) { - if w == nil || w.Op == nil { - return nil, compute_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - if w.Context != nil { - select { - case <-w.Context.Done(): - compute_operation_log.Println("[WARN] request has been cancelled early") - return w.Op, compute_operation_errors.New("unable to finish polling, context has been cancelled") - default: - - } - } - if w.Op.Zone != "" { - zone := GetResourceNameFromSelfLink(w.Op.Zone) - return w.Service.ZoneOperations.Get(w.Project, zone, w.Op.Name).Do() - } else if w.Op.Region != "" { - region := GetResourceNameFromSelfLink(w.Op.Region) - return w.Service.RegionOperations.Get(w.Project, region, w.Op.Name).Do() - } - return w.Service.GlobalOperations.Get(w.Project, w.Op.Name).Do() -} - -func (w *ComputeOperationWaiter) OpName() string { - if w == nil || w.Op == nil { - return " Compute Op" - } - - return w.Op.Name -} - -func (w *ComputeOperationWaiter) PendingStates() []string { - return []string{"PENDING", "RUNNING"} -} - -func (w *ComputeOperationWaiter) TargetStates() []string { - return []string{"DONE"} -} - -func computeOperationWaitTime(config *Config, res interface{}, project, activity, userAgent string, timeout compute_operation_time.Duration) error { - op := &compute_operation_compute.Operation{} - err := Convert(res, op) - if err != nil { - return err - } - - w := &ComputeOperationWaiter{ - Service: config.NewComputeClient(userAgent), - Context: config.context, - Op: op, - Project: project, - } - - if err := w.SetOp(op); err != nil { - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -type ComputeOperationError compute_operation_compute.OperationError - -func (e ComputeOperationError) Error() string { - var buf compute_operation_bytes.Buffer - for _, err := range e.Errors { - 
buf.WriteString(err.Message + "\n") - } - - return buf.String() -} - -type providerMeta struct { - ModuleName string `cty:"module_name"` -} - -type Config struct { - AccessToken string - Credentials string - ImpersonateServiceAccount string - ImpersonateServiceAccountDelegates []string - Project string - Region string - BillingProject string - Zone string - Scopes []string - BatchingConfig *batchingConfig - UserProjectOverride bool - RequestReason string - RequestTimeout config_time.Duration - - PollInterval config_time.Duration - - client *config_http.Client - context config_context.Context - userAgent string - - tokenSource config_oauth2.TokenSource - - AccessApprovalBasePath string - AccessContextManagerBasePath string - ActiveDirectoryBasePath string - ApigeeBasePath string - AppEngineBasePath string - BigQueryBasePath string - BigqueryDataTransferBasePath string - BigqueryReservationBasePath string - BigtableBasePath string - BillingBasePath string - BinaryAuthorizationBasePath string - CloudAssetBasePath string - CloudBuildBasePath string - CloudFunctionsBasePath string - CloudIdentityBasePath string - CloudIotBasePath string - CloudRunBasePath string - CloudSchedulerBasePath string - CloudTasksBasePath string - ComputeBasePath string - ContainerAnalysisBasePath string - DataCatalogBasePath string - DataLossPreventionBasePath string - DataprocBasePath string - DatastoreBasePath string - DeploymentManagerBasePath string - DialogflowBasePath string - DialogflowCXBasePath string - DNSBasePath string - EssentialContactsBasePath string - FilestoreBasePath string - FirestoreBasePath string - GameServicesBasePath string - GKEHubBasePath string - HealthcareBasePath string - IapBasePath string - IdentityPlatformBasePath string - KMSBasePath string - LoggingBasePath string - MemcacheBasePath string - MLEngineBasePath string - MonitoringBasePath string - NetworkManagementBasePath string - NetworkServicesBasePath string - NotebooksBasePath string - OSConfigBasePath 
string - OSLoginBasePath string - PrivatecaBasePath string - PubsubBasePath string - PubsubLiteBasePath string - RedisBasePath string - ResourceManagerBasePath string - SecretManagerBasePath string - SecurityCenterBasePath string - ServiceManagementBasePath string - ServiceUsageBasePath string - SourceRepoBasePath string - SpannerBasePath string - SQLBasePath string - StorageBasePath string - TagsBasePath string - TPUBasePath string - VertexAIBasePath string - VPCAccessBasePath string - WorkflowsBasePath string - - CloudBillingBasePath string - ComposerBasePath string - ContainerBasePath string - DataflowBasePath string - IamCredentialsBasePath string - ResourceManagerV2BasePath string - IAMBasePath string - CloudIoTBasePath string - ServiceNetworkingBasePath string - StorageTransferBasePath string - BigtableAdminBasePath string - - requestBatcherServiceUsage *RequestBatcher - requestBatcherIam *RequestBatcher - - AssuredWorkloadsBasePath string - CloudResourceManagerBasePath string - EventarcBasePath string - GkeHubBasePath string - OrgPolicyBasePath string -} - -const AccessApprovalBasePathKey = "AccessApproval" - -const AccessContextManagerBasePathKey = "AccessContextManager" - -const ActiveDirectoryBasePathKey = "ActiveDirectory" - -const ApigeeBasePathKey = "Apigee" - -const AppEngineBasePathKey = "AppEngine" - -const BigQueryBasePathKey = "BigQuery" - -const BigqueryDataTransferBasePathKey = "BigqueryDataTransfer" - -const BigqueryReservationBasePathKey = "BigqueryReservation" - -const BigtableBasePathKey = "Bigtable" - -const BillingBasePathKey = "Billing" - -const BinaryAuthorizationBasePathKey = "BinaryAuthorization" - -const CloudAssetBasePathKey = "CloudAsset" - -const CloudBuildBasePathKey = "CloudBuild" - -const CloudFunctionsBasePathKey = "CloudFunctions" - -const CloudIdentityBasePathKey = "CloudIdentity" - -const CloudIotBasePathKey = "CloudIot" - -const CloudRunBasePathKey = "CloudRun" - -const CloudSchedulerBasePathKey = "CloudScheduler" - -const 
CloudTasksBasePathKey = "CloudTasks" - -const ComputeBasePathKey = "Compute" - -const ContainerAnalysisBasePathKey = "ContainerAnalysis" - -const DataCatalogBasePathKey = "DataCatalog" - -const DataLossPreventionBasePathKey = "DataLossPrevention" - -const DataprocBasePathKey = "Dataproc" - -const DatastoreBasePathKey = "Datastore" - -const DeploymentManagerBasePathKey = "DeploymentManager" - -const DialogflowBasePathKey = "Dialogflow" - -const DialogflowCXBasePathKey = "DialogflowCX" - -const DNSBasePathKey = "DNS" - -const EssentialContactsBasePathKey = "EssentialContacts" - -const FilestoreBasePathKey = "Filestore" - -const FirestoreBasePathKey = "Firestore" - -const GameServicesBasePathKey = "GameServices" - -const GKEHubBasePathKey = "GKEHub" - -const HealthcareBasePathKey = "Healthcare" - -const IapBasePathKey = "Iap" - -const IdentityPlatformBasePathKey = "IdentityPlatform" - -const KMSBasePathKey = "KMS" - -const LoggingBasePathKey = "Logging" - -const MemcacheBasePathKey = "Memcache" - -const MLEngineBasePathKey = "MLEngine" - -const MonitoringBasePathKey = "Monitoring" - -const NetworkManagementBasePathKey = "NetworkManagement" - -const NetworkServicesBasePathKey = "NetworkServices" - -const NotebooksBasePathKey = "Notebooks" - -const OSConfigBasePathKey = "OSConfig" - -const OSLoginBasePathKey = "OSLogin" - -const PrivatecaBasePathKey = "Privateca" - -const PubsubBasePathKey = "Pubsub" - -const PubsubLiteBasePathKey = "PubsubLite" - -const RedisBasePathKey = "Redis" - -const ResourceManagerBasePathKey = "ResourceManager" - -const SecretManagerBasePathKey = "SecretManager" - -const SecurityCenterBasePathKey = "SecurityCenter" - -const ServiceManagementBasePathKey = "ServiceManagement" - -const ServiceUsageBasePathKey = "ServiceUsage" - -const SourceRepoBasePathKey = "SourceRepo" - -const SpannerBasePathKey = "Spanner" - -const SQLBasePathKey = "SQL" - -const StorageBasePathKey = "Storage" - -const TagsBasePathKey = "Tags" - -const TPUBasePathKey = "TPU" - 
-const VertexAIBasePathKey = "VertexAI" - -const VPCAccessBasePathKey = "VPCAccess" - -const WorkflowsBasePathKey = "Workflows" - -const CloudBillingBasePathKey = "CloudBilling" - -const ComposerBasePathKey = "Composer" - -const ContainerBasePathKey = "Container" - -const DataflowBasePathKey = "Dataflow" - -const IAMBasePathKey = "IAM" - -const IamCredentialsBasePathKey = "IamCredentials" - -const ResourceManagerV2BasePathKey = "ResourceManagerV2" - -const ServiceNetworkingBasePathKey = "ServiceNetworking" - -const StorageTransferBasePathKey = "StorageTransfer" - -const BigtableAdminBasePathKey = "BigtableAdmin" - -const GkeHubFeatureBasePathKey = "GkeHubFeatureBasePathKey" - -var DefaultBasePaths = map[string]string{ - AccessApprovalBasePathKey: "https://accessapproval.googleapis.com/v1/", - AccessContextManagerBasePathKey: "https://accesscontextmanager.googleapis.com/v1/", - ActiveDirectoryBasePathKey: "https://managedidentities.googleapis.com/v1/", - ApigeeBasePathKey: "https://apigee.googleapis.com/v1/", - AppEngineBasePathKey: "https://appengine.googleapis.com/v1/", - BigQueryBasePathKey: "https://bigquery.googleapis.com/bigquery/v2/", - BigqueryDataTransferBasePathKey: "https://bigquerydatatransfer.googleapis.com/v1/", - BigqueryReservationBasePathKey: "https://bigqueryreservation.googleapis.com/v1/", - BigtableBasePathKey: "https://bigtableadmin.googleapis.com/v2/", - BillingBasePathKey: "https://billingbudgets.googleapis.com/v1/", - BinaryAuthorizationBasePathKey: "https://binaryauthorization.googleapis.com/v1/", - CloudAssetBasePathKey: "https://cloudasset.googleapis.com/v1/", - CloudBuildBasePathKey: "https://cloudbuild.googleapis.com/v1/", - CloudFunctionsBasePathKey: "https://cloudfunctions.googleapis.com/v1/", - CloudIdentityBasePathKey: "https://cloudidentity.googleapis.com/v1/", - CloudIotBasePathKey: "https://cloudiot.googleapis.com/v1/", - CloudRunBasePathKey: "https://{{location}}-run.googleapis.com/", - CloudSchedulerBasePathKey: 
"https://cloudscheduler.googleapis.com/v1/", - CloudTasksBasePathKey: "https://cloudtasks.googleapis.com/v2/", - ComputeBasePathKey: "https://compute.googleapis.com/compute/v1/", - ContainerAnalysisBasePathKey: "https://containeranalysis.googleapis.com/v1/", - DataCatalogBasePathKey: "https://datacatalog.googleapis.com/v1/", - DataLossPreventionBasePathKey: "https://dlp.googleapis.com/v2/", - DataprocBasePathKey: "https://dataproc.googleapis.com/v1/", - DatastoreBasePathKey: "https://datastore.googleapis.com/v1/", - DeploymentManagerBasePathKey: "https://www.googleapis.com/deploymentmanager/v2/", - DialogflowBasePathKey: "https://dialogflow.googleapis.com/v2/", - DialogflowCXBasePathKey: "https://dialogflow.googleapis.com/v3/", - DNSBasePathKey: "https://dns.googleapis.com/dns/v1/", - EssentialContactsBasePathKey: "https://essentialcontacts.googleapis.com/v1/", - FilestoreBasePathKey: "https://file.googleapis.com/v1/", - FirestoreBasePathKey: "https://firestore.googleapis.com/v1/", - GameServicesBasePathKey: "https://gameservices.googleapis.com/v1/", - GKEHubBasePathKey: "https://gkehub.googleapis.com/v1/", - HealthcareBasePathKey: "https://healthcare.googleapis.com/v1/", - IapBasePathKey: "https://iap.googleapis.com/v1/", - IdentityPlatformBasePathKey: "https://identitytoolkit.googleapis.com/v2/", - KMSBasePathKey: "https://cloudkms.googleapis.com/v1/", - LoggingBasePathKey: "https://logging.googleapis.com/v2/", - MemcacheBasePathKey: "https://memcache.googleapis.com/v1/", - MLEngineBasePathKey: "https://ml.googleapis.com/v1/", - MonitoringBasePathKey: "https://monitoring.googleapis.com/", - NetworkManagementBasePathKey: "https://networkmanagement.googleapis.com/v1/", - NetworkServicesBasePathKey: "https://networkservices.googleapis.com/v1/", - NotebooksBasePathKey: "https://notebooks.googleapis.com/v1/", - OSConfigBasePathKey: "https://osconfig.googleapis.com/v1/", - OSLoginBasePathKey: "https://oslogin.googleapis.com/v1/", - PrivatecaBasePathKey: 
"https://privateca.googleapis.com/v1/", - PubsubBasePathKey: "https://pubsub.googleapis.com/v1/", - PubsubLiteBasePathKey: "https://{{region}}-pubsublite.googleapis.com/v1/admin/", - RedisBasePathKey: "https://redis.googleapis.com/v1/", - ResourceManagerBasePathKey: "https://cloudresourcemanager.googleapis.com/v1/", - SecretManagerBasePathKey: "https://secretmanager.googleapis.com/v1/", - SecurityCenterBasePathKey: "https://securitycenter.googleapis.com/v1/", - ServiceManagementBasePathKey: "https://servicemanagement.googleapis.com/v1/", - ServiceUsageBasePathKey: "https://serviceusage.googleapis.com/v1/", - SourceRepoBasePathKey: "https://sourcerepo.googleapis.com/v1/", - SpannerBasePathKey: "https://spanner.googleapis.com/v1/", - SQLBasePathKey: "https://sqladmin.googleapis.com/sql/v1beta4/", - StorageBasePathKey: "https://storage.googleapis.com/storage/v1/", - TagsBasePathKey: "https://cloudresourcemanager.googleapis.com/v3/", - TPUBasePathKey: "https://tpu.googleapis.com/v1/", - VertexAIBasePathKey: "https://{{region}}-aiplatform.googleapis.com/v1/", - VPCAccessBasePathKey: "https://vpcaccess.googleapis.com/v1/", - WorkflowsBasePathKey: "https://workflows.googleapis.com/v1/", - CloudBillingBasePathKey: "https://cloudbilling.googleapis.com/v1/", - ComposerBasePathKey: "https://composer.googleapis.com/v1/", - ContainerBasePathKey: "https://container.googleapis.com/v1/", - DataflowBasePathKey: "https://dataflow.googleapis.com/v1b3/", - IAMBasePathKey: "https://iam.googleapis.com/v1/", - IamCredentialsBasePathKey: "https://iamcredentials.googleapis.com/v1/", - ResourceManagerV2BasePathKey: "https://cloudresourcemanager.googleapis.com/v2/", - ServiceNetworkingBasePathKey: "https://servicenetworking.googleapis.com/v1/", - StorageTransferBasePathKey: "https://storagetransfer.googleapis.com/v1/", - BigtableAdminBasePathKey: "https://bigtableadmin.googleapis.com/v2/", - GkeHubFeatureBasePathKey: "https://gkehub.googleapis.com/v1beta/", -} - -var DefaultClientScopes = 
[]string{ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/userinfo.email", -} - -func (c *Config) LoadAndValidate(ctx config_context.Context) error { - if len(c.Scopes) == 0 { - c.Scopes = DefaultClientScopes - } - - c.context = ctx - - tokenSource, err := c.getTokenSource(c.Scopes, false) - if err != nil { - return err - } - - c.tokenSource = tokenSource - - cleanCtx := config_context.WithValue(ctx, config_oauth2.HTTPClient, config_cleanhttp.DefaultClient()) - - client, _, err := config_transport.NewHTTPClient(cleanCtx, config_option.WithTokenSource(tokenSource)) - if err != nil { - return err - } - - err = c.logGoogleIdentities() - if err != nil { - return err - } - - loggingTransport := config_logging.NewTransport("Google", client.Transport) - - retryTransport := NewTransportWithDefaultRetries(loggingTransport) - - headerTransport := newTransportWithHeaders(retryTransport) - if c.RequestReason != "" { - headerTransport.Set("X-Goog-Request-Reason", c.RequestReason) - } - - if c.UserProjectOverride && c.BillingProject != "" { - headerTransport.Set("X-Goog-User-Project", c.BillingProject) - } - - client.Transport = headerTransport - - client.Timeout = c.synchronousTimeout() - - c.client = client - c.context = ctx - c.Region = GetRegionFromRegionSelfLink(c.Region) - c.requestBatcherServiceUsage = NewRequestBatcher("Service Usage", ctx, c.BatchingConfig) - c.requestBatcherIam = NewRequestBatcher("IAM", ctx, c.BatchingConfig) - c.PollInterval = 10 * config_time.Second - - return nil -} - -func expandProviderBatchingConfig(v interface{}) (*batchingConfig, error) { - config := &batchingConfig{ - sendAfter: config_time.Second * defaultBatchSendIntervalSec, - enableBatching: true, - } - - if v == nil { - return config, nil - } - ls := v.([]interface{}) - if len(ls) == 0 || ls[0] == nil { - return config, nil - } - - cfgV := ls[0].(map[string]interface{}) - if sendAfterV, ok := cfgV["send_after"]; ok { - sendAfter, err := 
config_time.ParseDuration(sendAfterV.(string)) - if err != nil { - return nil, config_fmt.Errorf("unable to parse duration from 'send_after' value %q", sendAfterV) - } - config.sendAfter = sendAfter - } - - if enable, ok := cfgV["enable_batching"]; ok { - config.enableBatching = enable.(bool) - } - - return config, nil -} - -func (c *Config) synchronousTimeout() config_time.Duration { - if c.RequestTimeout == 0 { - return 120 * config_time.Second - } - return c.RequestTimeout -} - -func (c *Config) logGoogleIdentities() error { - if c.ImpersonateServiceAccount == "" { - - tokenSource, err := c.getTokenSource(c.Scopes, true) - if err != nil { - return err - } - c.client = config_oauth2.NewClient(c.context, tokenSource) - - email, err := GetCurrentUserEmail(c, c.userAgent) - if err != nil { - config_log.Printf("[INFO] error retrieving userinfo for your provider credentials. have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err) - } - - config_log.Printf("[INFO] Terraform is using this identity: %s", email) - - return nil - - } - - tokenSource, err := c.getTokenSource(c.Scopes, true) - if err != nil { - return err - } - c.client = config_oauth2.NewClient(c.context, tokenSource) - - email, err := GetCurrentUserEmail(c, c.userAgent) - if err != nil { - config_log.Printf("[INFO] error retrieving userinfo for your provider credentials. have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? 
error: %s", err) - } - - config_log.Printf("[INFO] Terraform is configured with service account impersonation, original identity: %s, impersonated identity: %s", email, c.ImpersonateServiceAccount) - - tokenSource, err = c.getTokenSource(c.Scopes, false) - if err != nil { - return err - } - c.client = config_oauth2.NewClient(c.context, tokenSource) - - return nil -} - -func (c *Config) getTokenSource(clientScopes []string, initialCredentialsOnly bool) (config_oauth2.TokenSource, error) { - creds, err := c.GetCredentials(clientScopes, initialCredentialsOnly) - if err != nil { - return nil, config_fmt.Errorf("%s", err) - } - return creds.TokenSource, nil -} - -func (c *Config) NewComputeClient(userAgent string) *config_compute.Service { - config_log.Printf("[INFO] Instantiating GCE client for path %s", c.ComputeBasePath) - clientCompute, err := config_compute.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client compute: %s", err) - return nil - } - clientCompute.UserAgent = userAgent - clientCompute.BasePath = c.ComputeBasePath - - return clientCompute -} - -func (c *Config) NewContainerClient(userAgent string) *config_container.Service { - containerClientBasePath := removeBasePathVersion(c.ContainerBasePath) - config_log.Printf("[INFO] Instantiating GKE client for path %s", containerClientBasePath) - clientContainer, err := config_container.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client container: %s", err) - return nil - } - clientContainer.UserAgent = userAgent - clientContainer.BasePath = containerClientBasePath - - return clientContainer -} - -func (c *Config) NewDnsClient(userAgent string) *config_dns.Service { - dnsClientBasePath := removeBasePathVersion(c.DNSBasePath) - dnsClientBasePath = config_strings.ReplaceAll(dnsClientBasePath, "/dns/", "") - config_log.Printf("[INFO] Instantiating Google Cloud 
DNS client for path %s", dnsClientBasePath) - clientDns, err := config_dns.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client dns: %s", err) - return nil - } - clientDns.UserAgent = userAgent - clientDns.BasePath = dnsClientBasePath - - return clientDns -} - -func (c *Config) NewKmsClientWithCtx(ctx config_context.Context, userAgent string) *config_cloudkms.Service { - kmsClientBasePath := removeBasePathVersion(c.KMSBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud KMS client for path %s", kmsClientBasePath) - clientKms, err := config_cloudkms.NewService(ctx, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client kms: %s", err) - return nil - } - clientKms.UserAgent = userAgent - clientKms.BasePath = kmsClientBasePath - - return clientKms -} - -func (c *Config) NewKmsClient(userAgent string) *config_cloudkms.Service { - return c.NewKmsClientWithCtx(c.context, userAgent) -} - -func (c *Config) NewLoggingClient(userAgent string) *config_loggingcloudlogging.Service { - loggingClientBasePath := removeBasePathVersion(c.LoggingBasePath) - config_log.Printf("[INFO] Instantiating Google Stackdriver Logging client for path %s", loggingClientBasePath) - clientLogging, err := config_loggingcloudlogging.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client logging: %s", err) - return nil - } - clientLogging.UserAgent = userAgent - clientLogging.BasePath = loggingClientBasePath - - return clientLogging -} - -func (c *Config) NewStorageClient(userAgent string) *config_storage.Service { - storageClientBasePath := c.StorageBasePath - config_log.Printf("[INFO] Instantiating Google Storage client for path %s", storageClientBasePath) - clientStorage, err := config_storage.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - 
config_log.Printf("[WARN] Error creating client storage: %s", err) - return nil - } - clientStorage.UserAgent = userAgent - clientStorage.BasePath = storageClientBasePath - - return clientStorage -} - -func (c *Config) NewStorageClientWithTimeoutOverride(userAgent string, timeout config_time.Duration) *config_storage.Service { - storageClientBasePath := c.StorageBasePath - config_log.Printf("[INFO] Instantiating Google Storage client for path %s", storageClientBasePath) - - httpClient := &config_http.Client{ - Transport: c.client.Transport, - CheckRedirect: c.client.CheckRedirect, - Jar: c.client.Jar, - Timeout: timeout, - } - clientStorage, err := config_storage.NewService(c.context, config_option.WithHTTPClient(httpClient)) - if err != nil { - config_log.Printf("[WARN] Error creating client storage: %s", err) - return nil - } - clientStorage.UserAgent = userAgent - clientStorage.BasePath = storageClientBasePath - - return clientStorage -} - -func (c *Config) NewSqlAdminClient(userAgent string) *config_sqladminsqladmin.Service { - sqlClientBasePath := removeBasePathVersion(removeBasePathVersion(c.SQLBasePath)) - config_log.Printf("[INFO] Instantiating Google SqlAdmin client for path %s", sqlClientBasePath) - clientSqlAdmin, err := config_sqladminsqladmin.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client storage: %s", err) - return nil - } - clientSqlAdmin.UserAgent = userAgent - clientSqlAdmin.BasePath = sqlClientBasePath - - return clientSqlAdmin -} - -func (c *Config) NewPubsubClient(userAgent string) *config_pubsub.Service { - pubsubClientBasePath := removeBasePathVersion(c.PubsubBasePath) - config_log.Printf("[INFO] Instantiating Google Pubsub client for path %s", pubsubClientBasePath) - wrappedPubsubClient := ClientWithAdditionalRetries(c.client, pubsubTopicProjectNotReady) - clientPubsub, err := config_pubsub.NewService(c.context, 
config_option.WithHTTPClient(wrappedPubsubClient)) - if err != nil { - config_log.Printf("[WARN] Error creating client pubsub: %s", err) - return nil - } - clientPubsub.UserAgent = userAgent - clientPubsub.BasePath = pubsubClientBasePath - - return clientPubsub -} - -func (c *Config) NewDataflowClient(userAgent string) *config_dataflowdataflow.Service { - dataflowClientBasePath := removeBasePathVersion(c.DataflowBasePath) - config_log.Printf("[INFO] Instantiating Google Dataflow client for path %s", dataflowClientBasePath) - clientDataflow, err := config_dataflowdataflow.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client dataflow: %s", err) - return nil - } - clientDataflow.UserAgent = userAgent - clientDataflow.BasePath = dataflowClientBasePath - - return clientDataflow -} - -func (c *Config) NewResourceManagerClient(userAgent string) *config_cloudresourcemanager.Service { - resourceManagerBasePath := removeBasePathVersion(c.ResourceManagerBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud ResourceManager client for path %s", resourceManagerBasePath) - clientResourceManager, err := config_cloudresourcemanager.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client resource manager: %s", err) - return nil - } - clientResourceManager.UserAgent = userAgent - clientResourceManager.BasePath = resourceManagerBasePath - - return clientResourceManager -} - -func (c *Config) NewResourceManagerV2Client(userAgent string) *config_cloudresourcemanagerresourceManagerV2.Service { - resourceManagerV2BasePath := removeBasePathVersion(c.ResourceManagerV2BasePath) - config_log.Printf("[INFO] Instantiating Google Cloud ResourceManager V client for path %s", resourceManagerV2BasePath) - clientResourceManagerV2, err := config_cloudresourcemanagerresourceManagerV2.NewService(c.context, 
config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client resource manager v2: %s", err) - return nil - } - clientResourceManagerV2.UserAgent = userAgent - clientResourceManagerV2.BasePath = resourceManagerV2BasePath - - return clientResourceManagerV2 -} - -func (c *Config) NewIamClient(userAgent string) *config_iam.Service { - iamClientBasePath := removeBasePathVersion(c.IAMBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud IAM client for path %s", iamClientBasePath) - clientIAM, err := config_iam.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client iam: %s", err) - return nil - } - clientIAM.UserAgent = userAgent - clientIAM.BasePath = iamClientBasePath - - return clientIAM -} - -func (c *Config) NewIamCredentialsClient(userAgent string) *config_iamcredentialsiamcredentials.Service { - iamCredentialsClientBasePath := removeBasePathVersion(c.IamCredentialsBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud IAMCredentials client for path %s", iamCredentialsClientBasePath) - clientIamCredentials, err := config_iamcredentialsiamcredentials.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client iam credentials: %s", err) - return nil - } - clientIamCredentials.UserAgent = userAgent - clientIamCredentials.BasePath = iamCredentialsClientBasePath - - return clientIamCredentials -} - -func (c *Config) NewServiceManClient(userAgent string) *config_servicemanagement.APIService { - serviceManagementClientBasePath := removeBasePathVersion(c.ServiceManagementBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud Service Management client for path %s", serviceManagementClientBasePath) - clientServiceMan, err := config_servicemanagement.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] 
Error creating client service management: %s", err) - return nil - } - clientServiceMan.UserAgent = userAgent - clientServiceMan.BasePath = serviceManagementClientBasePath - - return clientServiceMan -} - -func (c *Config) NewServiceUsageClient(userAgent string) *config_serviceusage.Service { - serviceUsageClientBasePath := removeBasePathVersion(c.ServiceUsageBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud Service Usage client for path %s", serviceUsageClientBasePath) - clientServiceUsage, err := config_serviceusage.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client service usage: %s", err) - return nil - } - clientServiceUsage.UserAgent = userAgent - clientServiceUsage.BasePath = serviceUsageClientBasePath - - return clientServiceUsage -} - -func (c *Config) NewBillingClient(userAgent string) *config_cloudbilling.APIService { - cloudBillingClientBasePath := removeBasePathVersion(c.CloudBillingBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud Billing client for path %s", cloudBillingClientBasePath) - clientBilling, err := config_cloudbilling.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client billing: %s", err) - return nil - } - clientBilling.UserAgent = userAgent - clientBilling.BasePath = cloudBillingClientBasePath - - return clientBilling -} - -func (c *Config) NewBuildClient(userAgent string) *config_cloudbuild.Service { - cloudBuildClientBasePath := removeBasePathVersion(c.CloudBuildBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud Build client for path %s", cloudBuildClientBasePath) - clientBuild, err := config_cloudbuild.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client build: %s", err) - return nil - } - clientBuild.UserAgent = userAgent - clientBuild.BasePath = 
cloudBuildClientBasePath - - return clientBuild -} - -func (c *Config) NewCloudFunctionsClient(userAgent string) *config_cloudfunctions.Service { - cloudFunctionsClientBasePath := removeBasePathVersion(c.CloudFunctionsBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud CloudFunctions Client for path %s", cloudFunctionsClientBasePath) - clientCloudFunctions, err := config_cloudfunctions.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client cloud functions: %s", err) - return nil - } - clientCloudFunctions.UserAgent = userAgent - clientCloudFunctions.BasePath = cloudFunctionsClientBasePath - - return clientCloudFunctions -} - -func (c *Config) NewSourceRepoClient(userAgent string) *config_sourcerepo.Service { - sourceRepoClientBasePath := removeBasePathVersion(c.SourceRepoBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud Source Repo client for path %s", sourceRepoClientBasePath) - clientSourceRepo, err := config_sourcerepo.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client source repo: %s", err) - return nil - } - clientSourceRepo.UserAgent = userAgent - clientSourceRepo.BasePath = sourceRepoClientBasePath - - return clientSourceRepo -} - -func (c *Config) NewBigQueryClient(userAgent string) *config_bigquery.Service { - bigQueryClientBasePath := c.BigQueryBasePath - config_log.Printf("[INFO] Instantiating Google Cloud BigQuery client for path %s", bigQueryClientBasePath) - wrappedBigQueryClient := ClientWithAdditionalRetries(c.client, iamMemberMissing) - clientBigQuery, err := config_bigquery.NewService(c.context, config_option.WithHTTPClient(wrappedBigQueryClient)) - if err != nil { - config_log.Printf("[WARN] Error creating client big query: %s", err) - return nil - } - clientBigQuery.UserAgent = userAgent - clientBigQuery.BasePath = bigQueryClientBasePath - - return clientBigQuery -} 
- -func (c *Config) NewSpannerClient(userAgent string) *config_spanner.Service { - spannerClientBasePath := removeBasePathVersion(c.SpannerBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud Spanner client for path %s", spannerClientBasePath) - clientSpanner, err := config_spanner.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client source repo: %s", err) - return nil - } - clientSpanner.UserAgent = userAgent - clientSpanner.BasePath = spannerClientBasePath - - return clientSpanner -} - -func (c *Config) NewDataprocClient(userAgent string) *config_dataproc.Service { - dataprocClientBasePath := removeBasePathVersion(c.DataprocBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud Dataproc client for path %s", dataprocClientBasePath) - clientDataproc, err := config_dataproc.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client dataproc: %s", err) - return nil - } - clientDataproc.UserAgent = userAgent - clientDataproc.BasePath = dataprocClientBasePath - - return clientDataproc -} - -func (c *Config) NewCloudIoTClient(userAgent string) *config_cloudiot.Service { - cloudIoTClientBasePath := removeBasePathVersion(c.CloudIoTBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud IoT Core client for path %s", cloudIoTClientBasePath) - clientCloudIoT, err := config_cloudiot.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client cloud iot: %s", err) - return nil - } - clientCloudIoT.UserAgent = userAgent - clientCloudIoT.BasePath = cloudIoTClientBasePath - - return clientCloudIoT -} - -func (c *Config) NewAppEngineClient(userAgent string) *config_appengineappengine.APIService { - appEngineClientBasePath := removeBasePathVersion(c.AppEngineBasePath) - config_log.Printf("[INFO] Instantiating App Engine client for path 
%s", appEngineClientBasePath) - clientAppEngine, err := config_appengineappengine.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client appengine: %s", err) - return nil - } - clientAppEngine.UserAgent = userAgent - clientAppEngine.BasePath = appEngineClientBasePath - - return clientAppEngine -} - -func (c *Config) NewComposerClient(userAgent string) *config_composer.Service { - composerClientBasePath := removeBasePathVersion(c.ComposerBasePath) - config_log.Printf("[INFO] Instantiating Cloud Composer client for path %s", composerClientBasePath) - clientComposer, err := config_composer.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client composer: %s", err) - return nil - } - clientComposer.UserAgent = userAgent - clientComposer.BasePath = composerClientBasePath - - return clientComposer -} - -func (c *Config) NewServiceNetworkingClient(userAgent string) *config_servicenetworking.APIService { - serviceNetworkingClientBasePath := removeBasePathVersion(c.ServiceNetworkingBasePath) - config_log.Printf("[INFO] Instantiating Service Networking client for path %s", serviceNetworkingClientBasePath) - clientServiceNetworking, err := config_servicenetworking.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client service networking: %s", err) - return nil - } - clientServiceNetworking.UserAgent = userAgent - clientServiceNetworking.BasePath = serviceNetworkingClientBasePath - - return clientServiceNetworking -} - -func (c *Config) NewStorageTransferClient(userAgent string) *config_storagetransfer.Service { - storageTransferClientBasePath := removeBasePathVersion(c.StorageTransferBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud Storage Transfer client for path %s", storageTransferClientBasePath) - clientStorageTransfer, err := 
config_storagetransfer.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client storage transfer: %s", err) - return nil - } - clientStorageTransfer.UserAgent = userAgent - clientStorageTransfer.BasePath = storageTransferClientBasePath - - return clientStorageTransfer -} - -func (c *Config) NewHealthcareClient(userAgent string) *config_healthcarehealthcare.Service { - healthcareClientBasePath := removeBasePathVersion(c.HealthcareBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud Healthcare client for path %s", healthcareClientBasePath) - clientHealthcare, err := config_healthcarehealthcare.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client healthcare: %s", err) - return nil - } - clientHealthcare.UserAgent = userAgent - clientHealthcare.BasePath = healthcareClientBasePath - - return clientHealthcare -} - -func (c *Config) NewCloudIdentityClient(userAgent string) *config_cloudidentity.Service { - cloudidentityClientBasePath := removeBasePathVersion(c.CloudIdentityBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud CloudIdentity client for path %s", cloudidentityClientBasePath) - clientCloudIdentity, err := config_cloudidentity.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client cloud identity: %s", err) - return nil - } - clientCloudIdentity.UserAgent = userAgent - clientCloudIdentity.BasePath = cloudidentityClientBasePath - - return clientCloudIdentity -} - -func (c *Config) BigTableClientFactory(userAgent string) *BigtableClientFactory { - bigtableClientFactory := &BigtableClientFactory{ - UserAgent: userAgent, - TokenSource: c.tokenSource, - BillingProject: c.BillingProject, - UserProjectOverride: c.UserProjectOverride, - } - - return bigtableClientFactory -} - -func (c *Config) 
NewBigTableProjectsInstancesClient(userAgent string) *config_bigtableadmin.ProjectsInstancesService { - bigtableAdminBasePath := removeBasePathVersion(c.BigtableAdminBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud BigtableAdmin for path %s", bigtableAdminBasePath) - clientBigtable, err := config_bigtableadmin.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client big table projects instances: %s", err) - return nil - } - clientBigtable.UserAgent = userAgent - clientBigtable.BasePath = bigtableAdminBasePath - clientBigtableProjectsInstances := config_bigtableadmin.NewProjectsInstancesService(clientBigtable) - - return clientBigtableProjectsInstances -} - -func (c *Config) NewBigTableProjectsInstancesTablesClient(userAgent string) *config_bigtableadmin.ProjectsInstancesTablesService { - bigtableAdminBasePath := removeBasePathVersion(c.BigtableAdminBasePath) - config_log.Printf("[INFO] Instantiating Google Cloud BigtableAdmin for path %s", bigtableAdminBasePath) - clientBigtable, err := config_bigtableadmin.NewService(c.context, config_option.WithHTTPClient(c.client)) - if err != nil { - config_log.Printf("[WARN] Error creating client projects instances tables: %s", err) - return nil - } - clientBigtable.UserAgent = userAgent - clientBigtable.BasePath = bigtableAdminBasePath - clientBigtableProjectsInstancesTables := config_bigtableadmin.NewProjectsInstancesTablesService(clientBigtable) - - return clientBigtableProjectsInstancesTables -} - -type staticTokenSource struct { - config_oauth2.TokenSource -} - -func (c *Config) GetCredentials(clientScopes []string, initialCredentialsOnly bool) (config_googlegoogleoauth.Credentials, error) { - if c.AccessToken != "" { - contents, _, err := pathOrContents(c.AccessToken) - if err != nil { - return config_googlegoogleoauth.Credentials{}, config_fmt.Errorf("Error loading access token: %s", err) - } - - token := 
&config_oauth2.Token{AccessToken: contents} - if c.ImpersonateServiceAccount != "" && !initialCredentialsOnly { - opts := []config_option.ClientOption{config_option.WithTokenSource(config_oauth2.StaticTokenSource(token)), config_option.ImpersonateCredentials(c.ImpersonateServiceAccount, c.ImpersonateServiceAccountDelegates...), config_option.WithScopes(clientScopes...)} - creds, err := config_transport.Creds(config_context.TODO(), opts...) - if err != nil { - return config_googlegoogleoauth.Credentials{}, err - } - return *creds, nil - } - - config_log.Printf("[INFO] Authenticating using configured Google JSON 'access_token'...") - config_log.Printf("[INFO] -- Scopes: %s", clientScopes) - return config_googlegoogleoauth.Credentials{ - TokenSource: staticTokenSource{config_oauth2.StaticTokenSource(token)}, - }, nil - } - - if c.Credentials != "" { - contents, _, err := pathOrContents(c.Credentials) - if err != nil { - return config_googlegoogleoauth.Credentials{}, config_fmt.Errorf("error loading credentials: %s", err) - } - - if c.ImpersonateServiceAccount != "" && !initialCredentialsOnly { - opts := []config_option.ClientOption{config_option.WithCredentialsJSON([]byte(contents)), config_option.ImpersonateCredentials(c.ImpersonateServiceAccount, c.ImpersonateServiceAccountDelegates...), config_option.WithScopes(clientScopes...)} - creds, err := config_transport.Creds(config_context.TODO(), opts...) - if err != nil { - return config_googlegoogleoauth.Credentials{}, err - } - return *creds, nil - } - - creds, err := config_googlegoogleoauth.CredentialsFromJSON(c.context, []byte(contents), clientScopes...) 
- if err != nil { - return config_googlegoogleoauth.Credentials{}, config_fmt.Errorf("unable to parse credentials from '%s': %s", contents, err) - } - - config_log.Printf("[INFO] Authenticating using configured Google JSON 'credentials'...") - config_log.Printf("[INFO] -- Scopes: %s", clientScopes) - return *creds, nil - } - - if c.ImpersonateServiceAccount != "" && !initialCredentialsOnly { - opts := config_option.ImpersonateCredentials(c.ImpersonateServiceAccount, c.ImpersonateServiceAccountDelegates...) - creds, err := config_transport.Creds(config_context.TODO(), opts, config_option.WithScopes(clientScopes...)) - if err != nil { - return config_googlegoogleoauth.Credentials{}, err - } - - return *creds, nil - } - - config_log.Printf("[INFO] Authenticating using DefaultClient...") - config_log.Printf("[INFO] -- Scopes: %s", clientScopes) - defaultTS, err := config_googlegoogleoauth.DefaultTokenSource(config_context.Background(), clientScopes...) - if err != nil { - return config_googlegoogleoauth.Credentials{}, config_fmt.Errorf("Attempted to load application default credentials since neither `credentials` nor `access_token` was set in the provider block. No credentials loaded. To use your gcloud credentials, run 'gcloud auth application-default login'. 
Original error: %w", err) - } - - return config_googlegoogleoauth.Credentials{ - TokenSource: defaultTS, - }, err -} - -func removeBasePathVersion(url string) string { - re := config_regexp.MustCompile(`(?Phttp[s]://.*)(?P/[^/]+?/$)`) - return re.ReplaceAllString(url, "$1/") -} - -func ConfigureBasePaths(c *Config) { - - c.AccessApprovalBasePath = DefaultBasePaths[AccessApprovalBasePathKey] - c.AccessContextManagerBasePath = DefaultBasePaths[AccessContextManagerBasePathKey] - c.ActiveDirectoryBasePath = DefaultBasePaths[ActiveDirectoryBasePathKey] - c.ApigeeBasePath = DefaultBasePaths[ApigeeBasePathKey] - c.AppEngineBasePath = DefaultBasePaths[AppEngineBasePathKey] - c.BigQueryBasePath = DefaultBasePaths[BigQueryBasePathKey] - c.BigqueryDataTransferBasePath = DefaultBasePaths[BigqueryDataTransferBasePathKey] - c.BigqueryReservationBasePath = DefaultBasePaths[BigqueryReservationBasePathKey] - c.BigtableBasePath = DefaultBasePaths[BigtableBasePathKey] - c.BillingBasePath = DefaultBasePaths[BillingBasePathKey] - c.BinaryAuthorizationBasePath = DefaultBasePaths[BinaryAuthorizationBasePathKey] - c.CloudAssetBasePath = DefaultBasePaths[CloudAssetBasePathKey] - c.CloudBuildBasePath = DefaultBasePaths[CloudBuildBasePathKey] - c.CloudFunctionsBasePath = DefaultBasePaths[CloudFunctionsBasePathKey] - c.CloudIdentityBasePath = DefaultBasePaths[CloudIdentityBasePathKey] - c.CloudIotBasePath = DefaultBasePaths[CloudIotBasePathKey] - c.CloudRunBasePath = DefaultBasePaths[CloudRunBasePathKey] - c.CloudSchedulerBasePath = DefaultBasePaths[CloudSchedulerBasePathKey] - c.CloudTasksBasePath = DefaultBasePaths[CloudTasksBasePathKey] - c.ComputeBasePath = DefaultBasePaths[ComputeBasePathKey] - c.ContainerAnalysisBasePath = DefaultBasePaths[ContainerAnalysisBasePathKey] - c.DataCatalogBasePath = DefaultBasePaths[DataCatalogBasePathKey] - c.DataLossPreventionBasePath = DefaultBasePaths[DataLossPreventionBasePathKey] - c.DataprocBasePath = DefaultBasePaths[DataprocBasePathKey] - 
c.DatastoreBasePath = DefaultBasePaths[DatastoreBasePathKey] - c.DeploymentManagerBasePath = DefaultBasePaths[DeploymentManagerBasePathKey] - c.DialogflowBasePath = DefaultBasePaths[DialogflowBasePathKey] - c.DialogflowCXBasePath = DefaultBasePaths[DialogflowCXBasePathKey] - c.DNSBasePath = DefaultBasePaths[DNSBasePathKey] - c.EssentialContactsBasePath = DefaultBasePaths[EssentialContactsBasePathKey] - c.FilestoreBasePath = DefaultBasePaths[FilestoreBasePathKey] - c.FirestoreBasePath = DefaultBasePaths[FirestoreBasePathKey] - c.GameServicesBasePath = DefaultBasePaths[GameServicesBasePathKey] - c.GKEHubBasePath = DefaultBasePaths[GKEHubBasePathKey] - c.HealthcareBasePath = DefaultBasePaths[HealthcareBasePathKey] - c.IapBasePath = DefaultBasePaths[IapBasePathKey] - c.IdentityPlatformBasePath = DefaultBasePaths[IdentityPlatformBasePathKey] - c.KMSBasePath = DefaultBasePaths[KMSBasePathKey] - c.LoggingBasePath = DefaultBasePaths[LoggingBasePathKey] - c.MemcacheBasePath = DefaultBasePaths[MemcacheBasePathKey] - c.MLEngineBasePath = DefaultBasePaths[MLEngineBasePathKey] - c.MonitoringBasePath = DefaultBasePaths[MonitoringBasePathKey] - c.NetworkManagementBasePath = DefaultBasePaths[NetworkManagementBasePathKey] - c.NetworkServicesBasePath = DefaultBasePaths[NetworkServicesBasePathKey] - c.NotebooksBasePath = DefaultBasePaths[NotebooksBasePathKey] - c.OSConfigBasePath = DefaultBasePaths[OSConfigBasePathKey] - c.OSLoginBasePath = DefaultBasePaths[OSLoginBasePathKey] - c.PrivatecaBasePath = DefaultBasePaths[PrivatecaBasePathKey] - c.PubsubBasePath = DefaultBasePaths[PubsubBasePathKey] - c.PubsubLiteBasePath = DefaultBasePaths[PubsubLiteBasePathKey] - c.RedisBasePath = DefaultBasePaths[RedisBasePathKey] - c.ResourceManagerBasePath = DefaultBasePaths[ResourceManagerBasePathKey] - c.SecretManagerBasePath = DefaultBasePaths[SecretManagerBasePathKey] - c.SecurityCenterBasePath = DefaultBasePaths[SecurityCenterBasePathKey] - c.ServiceManagementBasePath = 
DefaultBasePaths[ServiceManagementBasePathKey] - c.ServiceUsageBasePath = DefaultBasePaths[ServiceUsageBasePathKey] - c.SourceRepoBasePath = DefaultBasePaths[SourceRepoBasePathKey] - c.SpannerBasePath = DefaultBasePaths[SpannerBasePathKey] - c.SQLBasePath = DefaultBasePaths[SQLBasePathKey] - c.StorageBasePath = DefaultBasePaths[StorageBasePathKey] - c.TagsBasePath = DefaultBasePaths[TagsBasePathKey] - c.TPUBasePath = DefaultBasePaths[TPUBasePathKey] - c.VertexAIBasePath = DefaultBasePaths[VertexAIBasePathKey] - c.VPCAccessBasePath = DefaultBasePaths[VPCAccessBasePathKey] - c.WorkflowsBasePath = DefaultBasePaths[WorkflowsBasePathKey] - - c.CloudBillingBasePath = DefaultBasePaths[CloudBillingBasePathKey] - c.ComposerBasePath = DefaultBasePaths[ComposerBasePathKey] - c.ContainerBasePath = DefaultBasePaths[ContainerBasePathKey] - c.DataprocBasePath = DefaultBasePaths[DataprocBasePathKey] - c.DataflowBasePath = DefaultBasePaths[DataflowBasePathKey] - c.IamCredentialsBasePath = DefaultBasePaths[IamCredentialsBasePathKey] - c.ResourceManagerV2BasePath = DefaultBasePaths[ResourceManagerV2BasePathKey] - c.IAMBasePath = DefaultBasePaths[IAMBasePathKey] - c.ServiceNetworkingBasePath = DefaultBasePaths[ServiceNetworkingBasePathKey] - c.BigQueryBasePath = DefaultBasePaths[BigQueryBasePathKey] - c.StorageTransferBasePath = DefaultBasePaths[StorageTransferBasePathKey] - c.BigtableAdminBasePath = DefaultBasePaths[BigtableAdminBasePathKey] -} - -type ContainerOperationWaiter struct { - Service *container_operation_container.Service - Context container_operation_context.Context - Op *container_operation_container.Operation - Project string - Location string - UserProjectOverride bool -} - -func (w *ContainerOperationWaiter) State() string { - if w == nil || w.Op == nil { - return "" - } - return w.Op.Status -} - -func (w *ContainerOperationWaiter) Error() error { - if w == nil || w.Op == nil { - return nil - } - - for _, pending := range w.PendingStates() { - if w.Op.Status == 
pending { - return nil - } - } - - if w.Op.StatusMessage != "" { - return container_operation_fmt.Errorf(w.Op.StatusMessage) - } - - return nil -} - -func (w *ContainerOperationWaiter) IsRetryable(error) bool { - return false -} - -func (w *ContainerOperationWaiter) SetOp(op interface{}) error { - var ok bool - w.Op, ok = op.(*container_operation_container.Operation) - if !ok { - return container_operation_fmt.Errorf("Unable to set operation. Bad type!") - } - return nil -} - -func (w *ContainerOperationWaiter) QueryOp() (interface{}, error) { - if w == nil || w.Op == nil { - return nil, container_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - name := container_operation_fmt.Sprintf("projects/%s/locations/%s/operations/%s", - w.Project, w.Location, w.Op.Name) - - var op *container_operation_container.Operation - select { - case <-w.Context.Done(): - container_operation_log.Println("[WARN] request has been cancelled early") - return op, container_operation_errors.New("unable to finish polling, context has been cancelled") - default: - - } - err := retryTimeDuration(func() (opErr error) { - opGetCall := w.Service.Projects.Locations.Operations.Get(name) - if w.UserProjectOverride { - opGetCall.Header().Add("X-Goog-User-Project", w.Project) - } - op, opErr = opGetCall.Do() - return opErr - }, DefaultRequestTimeout) - - return op, err -} - -func (w *ContainerOperationWaiter) OpName() string { - if w == nil || w.Op == nil { - return "" - } - return w.Op.Name -} - -func (w *ContainerOperationWaiter) PendingStates() []string { - return []string{"PENDING", "RUNNING"} -} - -func (w *ContainerOperationWaiter) TargetStates() []string { - return []string{"DONE"} -} - -func containerOperationWait(config *Config, op *container_operation_container.Operation, project, location, activity, userAgent string, timeout container_operation_time.Duration) error { - w := &ContainerOperationWaiter{ - Service: config.NewContainerClient(userAgent), - Context: 
config.context, - Op: op, - Project: project, - Location: location, - UserProjectOverride: config.UserProjectOverride, - } - - if err := w.SetOp(op); err != nil { - return err - } - - return OperationWait(w, activity, timeout, config.PollInterval) -} - -func Convert(item, out interface{}) error { - bytes, err := convert_json.Marshal(item) - if err != nil { - return err - } - - err = convert_json.Unmarshal(bytes, out) - if err != nil { - return err - } - - if _, ok := item.(map[string]interface{}); !ok { - setOmittedFields(item, out) - } - - return nil -} - -func ConvertToMap(item interface{}) (map[string]interface{}, error) { - out := make(map[string]interface{}) - bytes, err := convert_json.Marshal(item) - if err != nil { - return nil, err - } - - err = convert_json.Unmarshal(bytes, &out) - if err != nil { - return nil, err - } - - return out, nil -} - -func setOmittedFields(item, out interface{}) { - - iVal := convert_reflect.ValueOf(item).Elem() - oVal := convert_reflect.ValueOf(out).Elem() - - for i := 0; i < iVal.NumField(); i++ { - iField := iVal.Field(i) - if isEmptyValue(iField) { - continue - } - - fieldInfo := iVal.Type().Field(i) - oField := oVal.FieldByName(fieldInfo.Name) - - if !oField.IsValid() { - continue - } - - if fieldInfo.Tag.Get("json") == "-" { - oField.Set(iField) - } - - if iField.Kind() == convert_reflect.Struct { - setOmittedFields(iField.Addr().Interface(), oField.Addr().Interface()) - } - if iField.Kind() == convert_reflect.Ptr && iField.Type().Elem().Kind() == convert_reflect.Struct { - setOmittedFields(iField.Interface(), oField.Interface()) - } - if iField.Kind() == convert_reflect.Slice && iField.Type().Elem().Kind() == convert_reflect.Struct { - for j := 0; j < iField.Len(); j++ { - setOmittedFields(iField.Index(j).Addr().Interface(), oField.Index(j).Addr().Interface()) - } - } - if iField.Kind() == convert_reflect.Slice && iField.Type().Elem().Kind() == convert_reflect.Ptr && - iField.Type().Elem().Elem().Kind() == 
convert_reflect.Struct { - for j := 0; j < iField.Len(); j++ { - setOmittedFields(iField.Index(j).Interface(), oField.Index(j).Interface()) - } - } - } -} - -func dataSourceGameServicesGameServerDeploymentRollout() *data_google_game_services_game_server_deployment_rollout_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceGameServicesGameServerDeploymentRollout().Schema) - addRequiredFieldsToSchema(dsSchema, "deployment_id") - - return &data_google_game_services_game_server_deployment_rollout_schema.Resource{ - Read: dataSourceGameServicesGameServerDeploymentRolloutRead, - Schema: dsSchema, - } -} - -func dataSourceGameServicesGameServerDeploymentRolloutRead(d *data_google_game_services_game_server_deployment_rollout_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout") - if err != nil { - return data_google_game_services_game_server_deployment_rollout_fmt.Errorf("Error constructing id: %s", err) - } - - d.SetId(id) - - return resourceGameServicesGameServerDeploymentRolloutRead(d, meta) -} - -func dataSourceGoogleCloudIdentityGroupMemberships() *data_source_cloud_identity_group_memberships_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceCloudIdentityGroupMembership().Schema) - - return &data_source_cloud_identity_group_memberships_schema.Resource{ - Read: dataSourceGoogleCloudIdentityGroupMembershipsRead, - - Schema: map[string]*data_source_cloud_identity_group_memberships_schema.Schema{ - "memberships": { - Type: data_source_cloud_identity_group_memberships_schema.TypeList, - Computed: true, - Description: `List of Cloud Identity group memberships.`, - Elem: &data_source_cloud_identity_group_memberships_schema.Resource{ - Schema: dsSchema, - }, - }, - "group": { - Type: data_source_cloud_identity_group_memberships_schema.TypeString, - Required: true, - ForceNew: 
true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the Group to get memberships from.`, - }, - }, - } -} - -func dataSourceGoogleCloudIdentityGroupMembershipsRead(d *data_source_cloud_identity_group_memberships_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - result := []map[string]interface{}{} - membershipsCall := config.NewCloudIdentityClient(userAgent).Groups.Memberships.List(d.Get("group").(string)).View("FULL") - if config.UserProjectOverride { - billingProject := "" - - if project, err := getProject(d, config); err == nil { - billingProject = project - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - if billingProject != "" { - membershipsCall.Header().Set("X-Goog-User-Project", billingProject) - } - } - - err = membershipsCall.Pages(config.context, func(resp *data_source_cloud_identity_group_memberships_cloudidentity.ListMembershipsResponse) error { - for _, member := range resp.Memberships { - result = append(result, map[string]interface{}{ - "name": member.Name, - "roles": flattenCloudIdentityGroupMembershipsRoles(member.Roles), - "preferred_member_key": flattenCloudIdentityGroupsEntityKey(member.PreferredMemberKey), - }) - } - - return nil - }) - if err != nil { - return handleNotFoundError(err, d, data_source_cloud_identity_group_memberships_fmt.Sprintf("CloudIdentityGroupMemberships %q", d.Id())) - } - - if err := d.Set("memberships", result); err != nil { - return data_source_cloud_identity_group_memberships_fmt.Errorf("Error setting memberships: %s", err) - } - d.SetId(data_source_cloud_identity_group_memberships_time.Now().UTC().String()) - return nil -} - -func flattenCloudIdentityGroupMembershipsRoles(roles []*data_source_cloud_identity_group_memberships_cloudidentity.MembershipRole) []interface{} { - transformed := []interface{}{} - - for _, 
role := range roles { - transformed = append(transformed, map[string]interface{}{ - "name": role.Name, - }) - } - return transformed -} - -func dataSourceGoogleCloudIdentityGroups() *data_source_cloud_identity_groups_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceCloudIdentityGroup().Schema) - - return &data_source_cloud_identity_groups_schema.Resource{ - Read: dataSourceGoogleCloudIdentityGroupsRead, - - Schema: map[string]*data_source_cloud_identity_groups_schema.Schema{ - "groups": { - Type: data_source_cloud_identity_groups_schema.TypeList, - Computed: true, - Description: `List of Cloud Identity groups.`, - Elem: &data_source_cloud_identity_groups_schema.Resource{ - Schema: dsSchema, - }, - }, - "parent": { - Type: data_source_cloud_identity_groups_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name of the entity under which this Group resides in the -Cloud Identity resource hierarchy. - -Must be of the form identitysources/{identity_source_id} for external-identity-mapped -groups or customers/{customer_id} for Google Groups.`, - }, - }, - } -} - -func dataSourceGoogleCloudIdentityGroupsRead(d *data_source_cloud_identity_groups_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - result := []map[string]interface{}{} - groupsCall := config.NewCloudIdentityClient(userAgent).Groups.List().Parent(d.Get("parent").(string)).View("FULL") - if config.UserProjectOverride { - billingProject := "" - - if project, err := getProject(d, config); err == nil { - billingProject = project - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - if billingProject != "" { - groupsCall.Header().Set("X-Goog-User-Project", billingProject) - } - } - err = groupsCall.Pages(config.context, func(resp 
*data_source_cloud_identity_groups_cloudidentity.ListGroupsResponse) error { - for _, group := range resp.Groups { - result = append(result, map[string]interface{}{ - "name": group.Name, - "display_name": group.DisplayName, - "labels": group.Labels, - "description": group.Description, - "group_key": flattenCloudIdentityGroupsEntityKey(group.GroupKey), - }) - } - - return nil - }) - if err != nil { - return handleNotFoundError(err, d, data_source_cloud_identity_groups_fmt.Sprintf("CloudIdentityGroups %q", d.Id())) - } - - if err := d.Set("groups", result); err != nil { - return data_source_cloud_identity_groups_fmt.Errorf("Error setting groups: %s", err) - } - d.SetId(data_source_cloud_identity_groups_time.Now().UTC().String()) - return nil -} - -func flattenCloudIdentityGroupsEntityKey(entityKey *data_source_cloud_identity_groups_cloudidentity.EntityKey) []interface{} { - transformed := map[string]interface{}{ - "id": entityKey.Id, - "namespace": entityKey.Namespace, - } - return []interface{}{transformed} -} - -func dataSourceGoogleCloudRunLocations() *data_source_cloud_run_locations_schema.Resource { - return &data_source_cloud_run_locations_schema.Resource{ - Read: dataSourceGoogleCloudRunLocationsRead, - Schema: map[string]*data_source_cloud_run_locations_schema.Schema{ - "project": { - Type: data_source_cloud_run_locations_schema.TypeString, - Optional: true, - Computed: true, - }, - "locations": { - Type: data_source_cloud_run_locations_schema.TypeList, - Computed: true, - Elem: &data_source_cloud_run_locations_schema.Schema{Type: data_source_cloud_run_locations_schema.TypeString}, - }, - }, - } -} - -func dataSourceGoogleCloudRunLocationsRead(d *data_source_cloud_run_locations_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - url, err := replaceVars(d, 
config, "https://run.googleapis.com/v1/projects/{{project}}/locations") - if err != nil { - return err - } - - res, err := sendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return data_source_cloud_run_locations_fmt.Errorf("Error listing Cloud Run Locations : %s", err) - } - - locationsRaw := flattenCloudRunLocations(res) - - locations := make([]string, len(locationsRaw)) - for i, loc := range locationsRaw { - locations[i] = loc.(string) - } - data_source_cloud_run_locations_sort.Strings(locations) - - data_source_cloud_run_locations_log.Printf("[DEBUG] Received Google Cloud Run Locations: %q", locations) - - if err := d.Set("project", project); err != nil { - return data_source_cloud_run_locations_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("locations", locations); err != nil { - return data_source_cloud_run_locations_fmt.Errorf("Error setting location: %s", err) - } - - d.SetId(data_source_cloud_run_locations_fmt.Sprintf("projects/%s", project)) - - return nil -} - -func flattenCloudRunLocations(resp map[string]interface{}) []interface{} { - regionList := resp["locations"].([]interface{}) - regions := make([]interface{}, len(regionList)) - for i, v := range regionList { - regionObj := v.(map[string]interface{}) - regions[i] = regionObj["locationId"] - } - return regions -} - -func dataSourceGoogleCloudRunService() *data_source_cloud_run_service_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceCloudRunService().Schema) - addRequiredFieldsToSchema(dsSchema, "name", "location") - addOptionalFieldsToSchema(dsSchema, "project") - - return &data_source_cloud_run_service_schema.Resource{ - Read: dataSourceGoogleCloudRunServiceRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleCloudRunServiceRead(d *data_source_cloud_run_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, 
"locations/{{location}}/namespaces/{{project}}/services/{{name}}") - if err != nil { - return data_source_cloud_run_service_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - return resourceCloudRunServiceRead(d, meta) -} - -func dataSourceGoogleComputeHealthCheck() *data_source_compute_health_check_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceComputeHealthCheck().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - addOptionalFieldsToSchema(dsSchema, "project") - - return &data_source_compute_health_check_schema.Resource{ - Read: dataSourceGoogleComputeHealthCheckRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleComputeHealthCheckRead(d *data_source_compute_health_check_schema.ResourceData, meta interface{}) error { - id, err := replaceVars(d, meta.(*Config), "projects/{{project}}/global/healthChecks/{{name}}") - if err != nil { - return err - } - d.SetId(id) - - return resourceComputeHealthCheckRead(d, meta) -} - -func dataSourceGoogleComputeLbIpRanges() *data_source_compute_lb_ip_ranges_schema.Resource { - return &data_source_compute_lb_ip_ranges_schema.Resource{ - Read: dataSourceGoogleComputeLbIpRangesRead, - - Schema: map[string]*data_source_compute_lb_ip_ranges_schema.Schema{ - "network": { - Type: data_source_compute_lb_ip_ranges_schema.TypeList, - Elem: &data_source_compute_lb_ip_ranges_schema.Schema{Type: data_source_compute_lb_ip_ranges_schema.TypeString}, - Computed: true, - }, - "http_ssl_tcp_internal": { - Type: data_source_compute_lb_ip_ranges_schema.TypeList, - Elem: &data_source_compute_lb_ip_ranges_schema.Schema{Type: data_source_compute_lb_ip_ranges_schema.TypeString}, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleComputeLbIpRangesRead(d *data_source_compute_lb_ip_ranges_schema.ResourceData, meta interface{}) error { - d.SetId("compute-lb-ip-ranges") - - networkIpRanges := []string{ - "209.85.152.0/22", - "209.85.204.0/22", - "35.191.0.0/16", - } - if err := d.Set("network", 
networkIpRanges); err != nil { - return data_source_compute_lb_ip_ranges_fmt.Errorf("Error setting network: %s", err) - } - - httpSslTcpInternalRanges := []string{ - "130.211.0.0/22", - "35.191.0.0/16", - } - if err := d.Set("http_ssl_tcp_internal", httpSslTcpInternalRanges); err != nil { - return data_source_compute_lb_ip_ranges_fmt.Errorf("Error setting http_ssl_tcp_internal: %s", err) - } - - return nil -} - -func dataSourceGoogleComputeNetworkEndpointGroup() *data_source_compute_network_endpoint_group_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceComputeNetworkEndpointGroup().Schema) - - addOptionalFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "zone") - addOptionalFieldsToSchema(dsSchema, "project") - addOptionalFieldsToSchema(dsSchema, "self_link") - - return &data_source_compute_network_endpoint_group_schema.Resource{ - Read: dataSourceComputeNetworkEndpointGroupRead, - Schema: dsSchema, - } -} - -func dataSourceComputeNetworkEndpointGroupRead(d *data_source_compute_network_endpoint_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - if name, ok := d.GetOk("name"); ok { - project, err := getProject(d, config) - if err != nil { - return err - } - zone, err := getZone(d, config) - if err != nil { - return err - } - d.SetId(data_source_compute_network_endpoint_group_fmt.Sprintf("projects/%s/zones/%s/networkEndpointGroups/%s", project, zone, name.(string))) - } else if selfLink, ok := d.GetOk("self_link"); ok { - parsed, err := ParseNetworkEndpointGroupFieldValue(selfLink.(string), d, config) - if err != nil { - return err - } - if err := d.Set("name", parsed.Name); err != nil { - return data_source_compute_network_endpoint_group_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("zone", parsed.Zone); err != nil { - return data_source_compute_network_endpoint_group_fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("project", parsed.Project); err != nil 
{ - return data_source_compute_network_endpoint_group_fmt.Errorf("Error setting project: %s", err) - } - d.SetId(data_source_compute_network_endpoint_group_fmt.Sprintf("projects/%s/zones/%s/networkEndpointGroups/%s", parsed.Project, parsed.Zone, parsed.Name)) - } else { - return data_source_compute_network_endpoint_group_errors.New("Must provide either `self_link` or `zone/name`") - } - - return resourceComputeNetworkEndpointGroupRead(d, meta) -} - -func dataSourceGoogleContainerImage() *data_source_container_registry_image_schema.Resource { - return &data_source_container_registry_image_schema.Resource{ - Read: containerRegistryImageRead, - Schema: map[string]*data_source_container_registry_image_schema.Schema{ - "name": { - Type: data_source_container_registry_image_schema.TypeString, - Required: true, - }, - "tag": { - Type: data_source_container_registry_image_schema.TypeString, - Optional: true, - }, - "digest": { - Type: data_source_container_registry_image_schema.TypeString, - Optional: true, - }, - "region": { - Type: data_source_container_registry_image_schema.TypeString, - Optional: true, - }, - "project": { - Type: data_source_container_registry_image_schema.TypeString, - Optional: true, - Computed: true, - }, - "image_url": { - Type: data_source_container_registry_image_schema.TypeString, - Computed: true, - }, - }, - } -} - -func containerRegistryImageRead(d *data_source_container_registry_image_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - if err := d.Set("project", project); err != nil { - return data_source_container_registry_image_fmt.Errorf("Error setting project: %s", err) - } - region, ok := d.GetOk("region") - var url_base string - escapedProject := data_source_container_registry_image_strings.Replace(project, ":", "/", -1) - if ok && region != nil && region != "" { - url_base = 
data_source_container_registry_image_fmt.Sprintf("%s.gcr.io/%s", region, escapedProject) - } else { - url_base = data_source_container_registry_image_fmt.Sprintf("gcr.io/%s", escapedProject) - } - tag, t_ok := d.GetOk("tag") - digest, d_ok := d.GetOk("digest") - if t_ok && tag != nil && tag != "" { - if err := d.Set("image_url", data_source_container_registry_image_fmt.Sprintf("%s/%s:%s", url_base, d.Get("name").(string), tag)); err != nil { - return data_source_container_registry_image_fmt.Errorf("Error setting image_url: %s", err) - } - } else if d_ok && digest != nil && digest != "" { - if err := d.Set("image_url", data_source_container_registry_image_fmt.Sprintf("%s/%s@%s", url_base, d.Get("name").(string), digest)); err != nil { - return data_source_container_registry_image_fmt.Errorf("Error setting image_url: %s", err) - } - } else { - if err := d.Set("image_url", data_source_container_registry_image_fmt.Sprintf("%s/%s", url_base, d.Get("name").(string))); err != nil { - return data_source_container_registry_image_fmt.Errorf("Error setting image_url: %s", err) - } - } - d.SetId(d.Get("image_url").(string)) - return nil -} - -func dataSourceGoogleContainerRepo() *data_source_container_registry_repository_schema.Resource { - return &data_source_container_registry_repository_schema.Resource{ - Read: containerRegistryRepoRead, - Schema: map[string]*data_source_container_registry_repository_schema.Schema{ - "region": { - Type: data_source_container_registry_repository_schema.TypeString, - Optional: true, - }, - "project": { - Type: data_source_container_registry_repository_schema.TypeString, - Optional: true, - Computed: true, - }, - "repository_url": { - Type: data_source_container_registry_repository_schema.TypeString, - Computed: true, - }, - }, - } -} - -func containerRegistryRepoRead(d *data_source_container_registry_repository_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - 
return err - } - if err := d.Set("project", project); err != nil { - return data_source_container_registry_repository_fmt.Errorf("Error setting project: %s", err) - } - region, ok := d.GetOk("region") - escapedProject := data_source_container_registry_repository_strings.Replace(project, ":", "/", -1) - if ok && region != nil && region != "" { - if err := d.Set("repository_url", data_source_container_registry_repository_fmt.Sprintf("%s.gcr.io/%s", region, escapedProject)); err != nil { - return data_source_container_registry_repository_fmt.Errorf("Error setting repository_url: %s", err) - } - } else { - if err := d.Set("repository_url", data_source_container_registry_repository_fmt.Sprintf("gcr.io/%s", escapedProject)); err != nil { - return data_source_container_registry_repository_fmt.Errorf("Error setting repository_url: %s", err) - } - } - d.SetId(d.Get("repository_url").(string)) - return nil -} - -var dnssecAlgoNums = map[string]int{ - "rsasha1": 5, - "rsasha256": 8, - "rsasha512": 10, - "ecdsap256sha256": 13, - "ecdsap384sha384": 14, -} - -var dnssecDigestType = map[string]int{ - "sha1": 1, - "sha256": 2, - "sha384": 4, -} - -func dataSourceDNSKeys() *data_source_dns_keys_schema.Resource { - return &data_source_dns_keys_schema.Resource{ - Read: dataSourceDNSKeysRead, - - Schema: map[string]*data_source_dns_keys_schema.Schema{ - "managed_zone": { - Type: data_source_dns_keys_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - "project": { - Type: data_source_dns_keys_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "key_signing_keys": { - Type: data_source_dns_keys_schema.TypeList, - Computed: true, - Elem: kskResource(), - }, - "zone_signing_keys": { - Type: data_source_dns_keys_schema.TypeList, - Computed: true, - Elem: dnsKeyResource(), - }, - }, - } -} - -func dnsKeyResource() *data_source_dns_keys_schema.Resource { - return &data_source_dns_keys_schema.Resource{ - Schema: 
map[string]*data_source_dns_keys_schema.Schema{ - "algorithm": { - Type: data_source_dns_keys_schema.TypeString, - Computed: true, - }, - "creation_time": { - Type: data_source_dns_keys_schema.TypeString, - Computed: true, - }, - "description": { - Type: data_source_dns_keys_schema.TypeString, - Computed: true, - }, - "digests": { - Type: data_source_dns_keys_schema.TypeList, - Computed: true, - Elem: &data_source_dns_keys_schema.Resource{ - Schema: map[string]*data_source_dns_keys_schema.Schema{ - "digest": { - Type: data_source_dns_keys_schema.TypeString, - Optional: true, - }, - "type": { - Type: data_source_dns_keys_schema.TypeString, - Optional: true, - }, - }, - }, - }, - "id": { - Type: data_source_dns_keys_schema.TypeString, - Computed: true, - }, - "is_active": { - Type: data_source_dns_keys_schema.TypeBool, - Computed: true, - }, - "key_length": { - Type: data_source_dns_keys_schema.TypeInt, - Computed: true, - }, - "key_tag": { - Type: data_source_dns_keys_schema.TypeInt, - Computed: true, - }, - "public_key": { - Type: data_source_dns_keys_schema.TypeString, - Computed: true, - }, - }, - } -} - -func kskResource() *data_source_dns_keys_schema.Resource { - resource := dnsKeyResource() - - resource.Schema["ds_record"] = &data_source_dns_keys_schema.Schema{ - Type: data_source_dns_keys_schema.TypeString, - Computed: true, - } - - return resource -} - -func generateDSRecord(signingKey *data_source_dns_keys_dns.DnsKey) (string, error) { - algoNum, found := dnssecAlgoNums[signingKey.Algorithm] - if !found { - return "", data_source_dns_keys_fmt.Errorf("DNSSEC Algorithm number for %s not found", signingKey.Algorithm) - } - - digestType, found := dnssecDigestType[signingKey.Digests[0].Type] - if !found { - return "", data_source_dns_keys_fmt.Errorf("DNSSEC Digest type for %s not found", signingKey.Digests[0].Type) - } - - return data_source_dns_keys_fmt.Sprintf("%d %d %d %s", - signingKey.KeyTag, - algoNum, - digestType, - signingKey.Digests[0].Digest), nil -} 
- -func flattenSigningKeys(signingKeys []*data_source_dns_keys_dns.DnsKey, keyType string) []map[string]interface{} { - var keys []map[string]interface{} - - for _, signingKey := range signingKeys { - if signingKey != nil && signingKey.Type == keyType { - data := map[string]interface{}{ - "algorithm": signingKey.Algorithm, - "creation_time": signingKey.CreationTime, - "description": signingKey.Description, - "digests": flattenDigests(signingKey.Digests), - "id": signingKey.Id, - "is_active": signingKey.IsActive, - "key_length": signingKey.KeyLength, - "key_tag": signingKey.KeyTag, - "public_key": signingKey.PublicKey, - } - - if signingKey.Type == "keySigning" && len(signingKey.Digests) > 0 { - dsRecord, err := generateDSRecord(signingKey) - if err == nil { - data["ds_record"] = dsRecord - } - } - - keys = append(keys, data) - } - } - - return keys -} - -func flattenDigests(dnsKeyDigests []*data_source_dns_keys_dns.DnsKeyDigest) []map[string]interface{} { - var digests []map[string]interface{} - - for _, dnsKeyDigest := range dnsKeyDigests { - if dnsKeyDigest != nil { - data := map[string]interface{}{ - "digest": dnsKeyDigest.Digest, - "type": dnsKeyDigest.Type, - } - - digests = append(digests, data) - } - } - - return digests -} - -func dataSourceDNSKeysRead(d *data_source_dns_keys_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - fv, err := parseProjectFieldValue("managedZones", d.Get("managed_zone").(string), "project", d, config, false) - if err != nil { - return err - } - project := fv.Project - managedZone := fv.Name - - if err := d.Set("project", project); err != nil { - return data_source_dns_keys_fmt.Errorf("Error setting project: %s", err) - } - d.SetId(data_source_dns_keys_fmt.Sprintf("projects/%s/managedZones/%s", project, managedZone)) - - data_source_dns_keys_log.Printf("[DEBUG] Fetching DNS keys from managed zone %s", 
managedZone) - - response, err := config.NewDnsClient(userAgent).DnsKeys.List(project, managedZone).Do() - if err != nil && !isGoogleApiErrorWithCode(err, 404) { - return data_source_dns_keys_fmt.Errorf("error retrieving DNS keys: %s", err) - } else if isGoogleApiErrorWithCode(err, 404) { - return nil - } - - data_source_dns_keys_log.Printf("[DEBUG] Fetched DNS keys from managed zone %s", managedZone) - - if err := d.Set("key_signing_keys", flattenSigningKeys(response.DnsKeys, "keySigning")); err != nil { - return data_source_dns_keys_fmt.Errorf("Error setting key_signing_keys: %s", err) - } - if err := d.Set("zone_signing_keys", flattenSigningKeys(response.DnsKeys, "zoneSigning")); err != nil { - return data_source_dns_keys_fmt.Errorf("Error setting zone_signing_keys: %s", err) - } - - return nil -} - -func dataSourceDnsManagedZone() *data_source_dns_managed_zone_schema.Resource { - return &data_source_dns_managed_zone_schema.Resource{ - Read: dataSourceDnsManagedZoneRead, - - Schema: map[string]*data_source_dns_managed_zone_schema.Schema{ - "dns_name": { - Type: data_source_dns_managed_zone_schema.TypeString, - Computed: true, - }, - - "name": { - Type: data_source_dns_managed_zone_schema.TypeString, - Required: true, - }, - - "description": { - Type: data_source_dns_managed_zone_schema.TypeString, - Computed: true, - }, - - "name_servers": { - Type: data_source_dns_managed_zone_schema.TypeList, - Computed: true, - Elem: &data_source_dns_managed_zone_schema.Schema{ - Type: data_source_dns_managed_zone_schema.TypeString, - }, - }, - - "visibility": { - Type: data_source_dns_managed_zone_schema.TypeString, - Computed: true, - }, - - "project": { - Type: data_source_dns_managed_zone_schema.TypeString, - Optional: true, - }, - }, - } -} - -func dataSourceDnsManagedZoneRead(d *data_source_dns_managed_zone_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return 
err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - d.SetId(data_source_dns_managed_zone_fmt.Sprintf("projects/%s/managedZones/%s", project, name)) - - zone, err := config.NewDnsClient(userAgent).ManagedZones.Get( - project, name).Do() - if err != nil { - return handleNotFoundError(err, d, data_source_dns_managed_zone_fmt.Sprintf("dataSourceDnsManagedZone %q", name)) - } - - if err := d.Set("name_servers", zone.NameServers); err != nil { - return data_source_dns_managed_zone_fmt.Errorf("Error setting name_servers: %s", err) - } - if err := d.Set("name", zone.Name); err != nil { - return data_source_dns_managed_zone_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("dns_name", zone.DnsName); err != nil { - return data_source_dns_managed_zone_fmt.Errorf("Error setting dns_name: %s", err) - } - if err := d.Set("description", zone.Description); err != nil { - return data_source_dns_managed_zone_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("visibility", zone.Visibility); err != nil { - return data_source_dns_managed_zone_fmt.Errorf("Error setting visibility: %s", err) - } - if err := d.Set("project", project); err != nil { - return data_source_dns_managed_zone_fmt.Errorf("Error setting project: %s", err) - } - - return nil -} - -func dataSourceGoogleActiveFolder() *data_source_google_active_folder_schema.Resource { - return &data_source_google_active_folder_schema.Resource{ - Read: dataSourceGoogleActiveFolderRead, - - Schema: map[string]*data_source_google_active_folder_schema.Schema{ - "parent": { - Type: data_source_google_active_folder_schema.TypeString, - Required: true, - }, - "display_name": { - Type: data_source_google_active_folder_schema.TypeString, - Required: true, - }, - "name": { - Type: data_source_google_active_folder_schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleActiveFolderRead(d 
*data_source_google_active_folder_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - parent := d.Get("parent").(string) - displayName := d.Get("display_name").(string) - - queryString := data_source_google_active_folder_fmt.Sprintf("lifecycleState=ACTIVE AND parent=%s AND displayName=\"%s\"", parent, displayName) - searchRequest := &data_source_google_active_folder_cloudresourcemanagerresourceManagerV2.SearchFoldersRequest{ - Query: queryString, - } - searchResponse, err := config.NewResourceManagerV2Client(userAgent).Folders.Search(searchRequest).Do() - if err != nil { - return handleNotFoundError(err, d, data_source_google_active_folder_fmt.Sprintf("Folder Not Found : %s", displayName)) - } - - for _, folder := range searchResponse.Folders { - if folder.DisplayName == displayName { - d.SetId(folder.Name) - if err := d.Set("name", folder.Name); err != nil { - return data_source_google_active_folder_fmt.Errorf("Error setting folder name: %s", err) - } - return nil - } - } - return data_source_google_active_folder_fmt.Errorf("Folder not found") -} - -func dataSourceGoogleAppEngineDefaultServiceAccount() *data_source_google_app_engine_default_service_account_schema.Resource { - return &data_source_google_app_engine_default_service_account_schema.Resource{ - Read: dataSourceGoogleAppEngineDefaultServiceAccountRead, - Schema: map[string]*data_source_google_app_engine_default_service_account_schema.Schema{ - "project": { - Type: data_source_google_app_engine_default_service_account_schema.TypeString, - Optional: true, - Computed: true, - }, - "email": { - Type: data_source_google_app_engine_default_service_account_schema.TypeString, - Computed: true, - }, - "unique_id": { - Type: data_source_google_app_engine_default_service_account_schema.TypeString, - Computed: true, - }, - "name": { - Type: 
data_source_google_app_engine_default_service_account_schema.TypeString, - Computed: true, - }, - "display_name": { - Type: data_source_google_app_engine_default_service_account_schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleAppEngineDefaultServiceAccountRead(d *data_source_google_app_engine_default_service_account_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - serviceAccountEmail := data_source_google_app_engine_default_service_account_fmt.Sprintf("%s@appspot.gserviceaccount.com", project) - - serviceAccountName, err := serviceAccountFQN(serviceAccountEmail, d, config) - if err != nil { - return err - } - - sa, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Get(serviceAccountName).Do() - if err != nil { - return handleNotFoundError(err, d, data_source_google_app_engine_default_service_account_fmt.Sprintf("Service Account %q", serviceAccountName)) - } - - d.SetId(sa.Name) - if err := d.Set("email", sa.Email); err != nil { - return data_source_google_app_engine_default_service_account_fmt.Errorf("Error setting email: %s", err) - } - if err := d.Set("unique_id", sa.UniqueId); err != nil { - return data_source_google_app_engine_default_service_account_fmt.Errorf("Error setting unique_id: %s", err) - } - if err := d.Set("project", sa.ProjectId); err != nil { - return data_source_google_app_engine_default_service_account_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("name", sa.Name); err != nil { - return data_source_google_app_engine_default_service_account_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("display_name", sa.DisplayName); err != nil { - return data_source_google_app_engine_default_service_account_fmt.Errorf("Error setting display_name: %s", err) - } - - return nil -} 
- -func dataSourceGoogleBigqueryDefaultServiceAccount() *data_source_google_bigquery_default_service_account_schema.Resource { - return &data_source_google_bigquery_default_service_account_schema.Resource{ - Read: dataSourceGoogleBigqueryDefaultServiceAccountRead, - Schema: map[string]*data_source_google_bigquery_default_service_account_schema.Schema{ - "email": { - Type: data_source_google_bigquery_default_service_account_schema.TypeString, - Computed: true, - }, - "project": { - Type: data_source_google_bigquery_default_service_account_schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleBigqueryDefaultServiceAccountRead(d *data_source_google_bigquery_default_service_account_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - projectResource, err := config.NewBigQueryClient(userAgent).Projects.GetServiceAccount(project).Do() - if err != nil { - return handleNotFoundError(err, d, "BigQuery service account not found") - } - - d.SetId(projectResource.Email) - if err := d.Set("email", projectResource.Email); err != nil { - return data_source_google_bigquery_default_service_account_fmt.Errorf("Error setting email: %s", err) - } - if err := d.Set("project", project); err != nil { - return data_source_google_bigquery_default_service_account_fmt.Errorf("Error setting project: %s", err) - } - return nil -} - -func dataSourceGoogleBillingAccount() *data_source_google_billing_account_schema.Resource { - return &data_source_google_billing_account_schema.Resource{ - Read: dataSourceBillingAccountRead, - Schema: map[string]*data_source_google_billing_account_schema.Schema{ - "billing_account": { - Type: data_source_google_billing_account_schema.TypeString, - Optional: true, - ConflictsWith: []string{"display_name"}, - }, - 
"display_name": { - Type: data_source_google_billing_account_schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"billing_account"}, - }, - "open": { - Type: data_source_google_billing_account_schema.TypeBool, - Optional: true, - Computed: true, - }, - "name": { - Type: data_source_google_billing_account_schema.TypeString, - Computed: true, - }, - "project_ids": { - Type: data_source_google_billing_account_schema.TypeSet, - Computed: true, - Elem: &data_source_google_billing_account_schema.Schema{ - Type: data_source_google_billing_account_schema.TypeString, - }, - }, - }, - } -} - -func dataSourceBillingAccountRead(d *data_source_google_billing_account_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - open, openOk := d.GetOkExists("open") - - var billingAccount *data_source_google_billing_account_cloudbilling.BillingAccount - if v, ok := d.GetOk("billing_account"); ok { - resp, err := config.NewBillingClient(userAgent).BillingAccounts.Get(canonicalBillingAccountName(v.(string))).Do() - if err != nil { - return handleNotFoundError(err, d, data_source_google_billing_account_fmt.Sprintf("Billing Account Not Found : %s", v)) - } - - if openOk && resp.Open != open.(bool) { - return data_source_google_billing_account_fmt.Errorf("Billing account not found: %s", v) - } - - billingAccount = resp - } else if v, ok := d.GetOk("display_name"); ok { - token := "" - for paginate := true; paginate; { - resp, err := config.NewBillingClient(userAgent).BillingAccounts.List().PageToken(token).Do() - if err != nil { - return data_source_google_billing_account_fmt.Errorf("Error reading billing accounts: %s", err) - } - - for _, ba := range resp.BillingAccounts { - if ba.DisplayName == v.(string) { - if openOk && ba.Open != open.(bool) { - continue - } - if billingAccount != nil { - return 
data_source_google_billing_account_fmt.Errorf("More than one matching billing account found") - } - billingAccount = ba - } - } - - token = resp.NextPageToken - paginate = token != "" - } - - if billingAccount == nil { - return data_source_google_billing_account_fmt.Errorf("Billing account not found: %s", v) - } - } else { - return data_source_google_billing_account_fmt.Errorf("one of billing_account or display_name must be set") - } - - resp, err := config.NewBillingClient(userAgent).BillingAccounts.Projects.List(billingAccount.Name).Do() - if err != nil { - return data_source_google_billing_account_fmt.Errorf("Error reading billing account projects: %s", err) - } - projectIds := flattenBillingProjects(resp.ProjectBillingInfo) - - d.SetId(GetResourceNameFromSelfLink(billingAccount.Name)) - if err := d.Set("name", billingAccount.Name); err != nil { - return data_source_google_billing_account_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("display_name", billingAccount.DisplayName); err != nil { - return data_source_google_billing_account_fmt.Errorf("Error setting display_name: %s", err) - } - if err := d.Set("open", billingAccount.Open); err != nil { - return data_source_google_billing_account_fmt.Errorf("Error setting open: %s", err) - } - if err := d.Set("project_ids", projectIds); err != nil { - return data_source_google_billing_account_fmt.Errorf("Error setting project_ids: %s", err) - } - - return nil -} - -func canonicalBillingAccountName(ba string) string { - if data_source_google_billing_account_strings.HasPrefix(ba, "billingAccounts/") { - return ba - } - - return "billingAccounts/" + ba -} - -func flattenBillingProjects(billingProjects []*data_source_google_billing_account_cloudbilling.ProjectBillingInfo) []string { - projectIds := make([]string, len(billingProjects)) - for i, billingProject := range billingProjects { - projectIds[i] = billingProject.ProjectId - } - - return projectIds -} - -func dataSourceGoogleClientConfig() 
*data_source_google_client_config_schema.Resource { - return &data_source_google_client_config_schema.Resource{ - Read: dataSourceClientConfigRead, - Schema: map[string]*data_source_google_client_config_schema.Schema{ - "project": { - Type: data_source_google_client_config_schema.TypeString, - Computed: true, - }, - - "region": { - Type: data_source_google_client_config_schema.TypeString, - Computed: true, - }, - - "zone": { - Type: data_source_google_client_config_schema.TypeString, - Computed: true, - }, - - "access_token": { - Type: data_source_google_client_config_schema.TypeString, - Computed: true, - Sensitive: true, - }, - }, - } -} - -func dataSourceClientConfigRead(d *data_source_google_client_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - d.SetId(data_source_google_client_config_fmt.Sprintf("projects/%s/regions/%s/zones/%s", config.Project, config.Region, config.Zone)) - if err := d.Set("project", config.Project); err != nil { - return data_source_google_client_config_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", config.Region); err != nil { - return data_source_google_client_config_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("zone", config.Zone); err != nil { - return data_source_google_client_config_fmt.Errorf("Error setting zone: %s", err) - } - - token, err := config.tokenSource.Token() - if err != nil { - return err - } - if err := d.Set("access_token", token.AccessToken); err != nil { - return data_source_google_client_config_fmt.Errorf("Error setting access_token: %s", err) - } - - return nil -} - -func dataSourceGoogleClientOpenIDUserinfo() *data_source_google_client_openid_userinfo_schema.Resource { - return &data_source_google_client_openid_userinfo_schema.Resource{ - Read: dataSourceGoogleClientOpenIDUserinfoRead, - Schema: map[string]*data_source_google_client_openid_userinfo_schema.Schema{ - "email": { - Type: 
data_source_google_client_openid_userinfo_schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleClientOpenIDUserinfoRead(d *data_source_google_client_openid_userinfo_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - email, err := GetCurrentUserEmail(config, userAgent) - if err != nil { - return err - } - d.SetId(email) - if err := d.Set("email", email); err != nil { - return data_source_google_client_openid_userinfo_fmt.Errorf("Error setting email: %s", err) - } - return nil -} - -func dataSourceGoogleCloudFunctionsFunction() *data_source_google_cloudfunctions_function_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceCloudFunctionsFunction().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - addOptionalFieldsToSchema(dsSchema, "project", "region") - - return &data_source_google_cloudfunctions_function_schema.Resource{ - Read: dataSourceGoogleCloudFunctionsFunctionRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleCloudFunctionsFunctionRead(d *data_source_google_cloudfunctions_function_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - cloudFuncId := &cloudFunctionId{ - Project: project, - Region: region, - Name: d.Get("name").(string), - } - - d.SetId(cloudFuncId.cloudFunctionId()) - - err = resourceCloudFunctionsRead(d, meta) - if err != nil { - return err - } - - return nil -} - -func dataSourceGoogleComposerEnvironment() *data_source_google_composer_environment_schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(resourceComposerEnvironment().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - addOptionalFieldsToSchema(dsSchema, "project", "region") - - return 
&data_source_google_composer_environment_schema.Resource{ - Read: dataSourceGoogleComposerEnvironmentRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleComposerEnvironmentRead(d *data_source_google_composer_environment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - region, err := getRegion(d, config) - if err != nil { - return err - } - envName := d.Get("name").(string) - - d.SetId(data_source_google_composer_environment_fmt.Sprintf("projects/%s/locations/%s/environments/%s", project, region, envName)) - - return resourceComposerEnvironmentRead(d, meta) -} - -func dataSourceGoogleComposerImageVersions() *data_source_google_composer_image_versions_schema.Resource { - return &data_source_google_composer_image_versions_schema.Resource{ - Read: dataSourceGoogleComposerImageVersionsRead, - Schema: map[string]*data_source_google_composer_image_versions_schema.Schema{ - "project": { - Type: data_source_google_composer_image_versions_schema.TypeString, - Optional: true, - Computed: true, - }, - "region": { - Type: data_source_google_composer_image_versions_schema.TypeString, - Optional: true, - Computed: true, - }, - "image_versions": { - Type: data_source_google_composer_image_versions_schema.TypeList, - Computed: true, - Elem: &data_source_google_composer_image_versions_schema.Resource{ - Schema: map[string]*data_source_google_composer_image_versions_schema.Schema{ - "image_version_id": { - Type: data_source_google_composer_image_versions_schema.TypeString, - Computed: true, - }, - "supported_python_versions": { - Type: data_source_google_composer_image_versions_schema.TypeList, - Computed: true, - Elem: &data_source_google_composer_image_versions_schema.Schema{Type: data_source_google_composer_image_versions_schema.TypeString}, - }, - }, - }, - }, - }, - } -} - -func dataSourceGoogleComposerImageVersionsRead(d 
*data_source_google_composer_image_versions_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComposerBasePath}}projects/{{project}}/locations/{{region}}/imageVersions") - if err != nil { - return err - } - - versions, err := paginatedListRequest(project, url, userAgent, config, flattenGoogleComposerImageVersions) - if err != nil { - return data_source_google_composer_image_versions_fmt.Errorf("Error listing Composer image versions: %s", err) - } - - data_source_google_composer_image_versions_log.Printf("[DEBUG] Received Composer Image Versions: %q", versions) - - if err := d.Set("image_versions", versions); err != nil { - return data_source_google_composer_image_versions_fmt.Errorf("Error setting image_versions: %s", err) - } - if err := d.Set("region", region); err != nil { - return data_source_google_composer_image_versions_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("project", project); err != nil { - return data_source_google_composer_image_versions_fmt.Errorf("Error setting project: %s", err) - } - d.SetId(data_source_google_composer_image_versions_fmt.Sprintf("projects/%s/regions/%s", project, region)) - - return nil -} - -func flattenGoogleComposerImageVersions(resp map[string]interface{}) []interface{} { - verObjList := resp["imageVersions"].([]interface{}) - versions := make([]interface{}, len(verObjList)) - for i, v := range verObjList { - verObj := v.(map[string]interface{}) - versions[i] = map[string]interface{}{ - "image_version_id": verObj["imageVersionId"], - "supported_python_versions": verObj["supportedPythonVersions"], - } - } - return versions -} - -var ( - computeAddressIdTemplate = 
"projects/%s/regions/%s/addresses/%s" - computeAddressLinkRegex = data_source_google_compute_address_regexp.MustCompile("projects/(.+)/regions/(.+)/addresses/(.+)$") -) - -func dataSourceGoogleComputeAddress() *data_source_google_compute_address_schema.Resource { - return &data_source_google_compute_address_schema.Resource{ - Read: dataSourceGoogleComputeAddressRead, - - Schema: map[string]*data_source_google_compute_address_schema.Schema{ - "name": { - Type: data_source_google_compute_address_schema.TypeString, - Required: true, - }, - - "address": { - Type: data_source_google_compute_address_schema.TypeString, - Computed: true, - }, - - "status": { - Type: data_source_google_compute_address_schema.TypeString, - Computed: true, - }, - - "self_link": { - Type: data_source_google_compute_address_schema.TypeString, - Computed: true, - }, - - "region": { - Type: data_source_google_compute_address_schema.TypeString, - Computed: true, - Optional: true, - }, - - "project": { - Type: data_source_google_compute_address_schema.TypeString, - Computed: true, - Optional: true, - }, - }, - } -} - -func dataSourceGoogleComputeAddressRead(d *data_source_google_compute_address_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - region, err := getRegion(d, config) - if err != nil { - return err - } - name := d.Get("name").(string) - - address, err := config.NewComputeClient(userAgent).Addresses.Get(project, region, name).Do() - if err != nil { - return handleNotFoundError(err, d, data_source_google_compute_address_fmt.Sprintf("Address Not Found : %s", name)) - } - - if err := d.Set("address", address.Address); err != nil { - return data_source_google_compute_address_fmt.Errorf("Error setting address: %s", err) - } - if err := d.Set("status", address.Status); err != nil { - return 
data_source_google_compute_address_fmt.Errorf("Error setting status: %s", err) - } - if err := d.Set("self_link", address.SelfLink); err != nil { - return data_source_google_compute_address_fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("project", project); err != nil { - return data_source_google_compute_address_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", region); err != nil { - return data_source_google_compute_address_fmt.Errorf("Error setting region: %s", err) - } - - d.SetId(data_source_google_compute_address_fmt.Sprintf("projects/%s/regions/%s/addresses/%s", project, region, name)) - return nil -} - -type computeAddressId struct { - Project string - Region string - Name string -} - -func (s computeAddressId) canonicalId() string { - return data_source_google_compute_address_fmt.Sprintf(computeAddressIdTemplate, s.Project, s.Region, s.Name) -} - -func parseComputeAddressId(id string, config *Config) (*computeAddressId, error) { - var parts []string - if computeAddressLinkRegex.MatchString(id) { - parts = computeAddressLinkRegex.FindStringSubmatch(id) - - return &computeAddressId{ - Project: parts[1], - Region: parts[2], - Name: parts[3], - }, nil - } else { - parts = data_source_google_compute_address_strings.Split(id, "/") - } - - if len(parts) == 3 { - return &computeAddressId{ - Project: parts[0], - Region: parts[1], - Name: parts[2], - }, nil - } else if len(parts) == 2 { - - if config.Project == "" { - return nil, data_source_google_compute_address_fmt.Errorf("The default project for the provider must be set when using the `{region}/{name}` id format.") - } - - return &computeAddressId{ - Project: config.Project, - Region: parts[0], - Name: parts[1], - }, nil - } else if len(parts) == 1 { - - if config.Project == "" { - return nil, data_source_google_compute_address_fmt.Errorf("The default project for the provider must be set when using the `{name}` id format.") - } - if config.Region == "" { - return 
nil, data_source_google_compute_address_fmt.Errorf("The default region for the provider must be set when using the `{name}` id format.") - } - - return &computeAddressId{ - Project: config.Project, - Region: config.Region, - Name: parts[0], - }, nil - } - - return nil, data_source_google_compute_address_fmt.Errorf("Invalid compute address id. Expecting resource link, `{project}/{region}/{name}`, `{region}/{name}` or `{name}` format.") -} - -func dataSourceGoogleComputeBackendBucket() *data_source_google_compute_backend_bucket_schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(resourceComputeBackendBucket().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - addOptionalFieldsToSchema(dsSchema, "project") - - return &data_source_google_compute_backend_bucket_schema.Resource{ - Read: dataSourceComputeBackendBucketRead, - Schema: dsSchema, - } -} - -func dataSourceComputeBackendBucketRead(d *data_source_google_compute_backend_bucket_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - backendBucketName := d.Get("name").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - - d.SetId(data_source_google_compute_backend_bucket_fmt.Sprintf("projects/%s/global/backendBuckets/%s", project, backendBucketName)) - - return resourceComputeBackendBucketRead(d, meta) -} - -func dataSourceGoogleComputeBackendService() *data_source_google_compute_backend_service_schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(resourceComputeBackendService().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - addOptionalFieldsToSchema(dsSchema, "project") - - return &data_source_google_compute_backend_service_schema.Resource{ - Read: dataSourceComputeBackendServiceRead, - Schema: dsSchema, - } -} - -func dataSourceComputeBackendServiceRead(d *data_source_google_compute_backend_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - serviceName := 
d.Get("name").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - - d.SetId(data_source_google_compute_backend_service_fmt.Sprintf("projects/%s/global/backendServices/%s", project, serviceName)) - - return resourceComputeBackendServiceRead(d, meta) -} - -func dataSourceGoogleComputeDefaultServiceAccount() *data_source_google_compute_default_service_account_schema.Resource { - return &data_source_google_compute_default_service_account_schema.Resource{ - Read: dataSourceGoogleComputeDefaultServiceAccountRead, - Schema: map[string]*data_source_google_compute_default_service_account_schema.Schema{ - "project": { - Type: data_source_google_compute_default_service_account_schema.TypeString, - Optional: true, - Computed: true, - }, - "email": { - Type: data_source_google_compute_default_service_account_schema.TypeString, - Computed: true, - }, - "unique_id": { - Type: data_source_google_compute_default_service_account_schema.TypeString, - Computed: true, - }, - "name": { - Type: data_source_google_compute_default_service_account_schema.TypeString, - Computed: true, - }, - "display_name": { - Type: data_source_google_compute_default_service_account_schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleComputeDefaultServiceAccountRead(d *data_source_google_compute_default_service_account_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - projectCompResource, err := config.NewComputeClient(userAgent).Projects.Get(project).Do() - if err != nil { - return handleNotFoundError(err, d, "GCE default service account") - } - - serviceAccountName, err := serviceAccountFQN(projectCompResource.DefaultServiceAccount, d, config) - if err != nil { - return err - } - - sa, err := 
config.NewIamClient(userAgent).Projects.ServiceAccounts.Get(serviceAccountName).Do() - if err != nil { - return handleNotFoundError(err, d, data_source_google_compute_default_service_account_fmt.Sprintf("Service Account %q", serviceAccountName)) - } - - d.SetId(sa.Name) - if err := d.Set("email", sa.Email); err != nil { - return data_source_google_compute_default_service_account_fmt.Errorf("Error setting email: %s", err) - } - if err := d.Set("unique_id", sa.UniqueId); err != nil { - return data_source_google_compute_default_service_account_fmt.Errorf("Error setting unique_id: %s", err) - } - if err := d.Set("project", sa.ProjectId); err != nil { - return data_source_google_compute_default_service_account_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("name", sa.Name); err != nil { - return data_source_google_compute_default_service_account_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("display_name", sa.DisplayName); err != nil { - return data_source_google_compute_default_service_account_fmt.Errorf("Error setting display_name: %s", err) - } - - return nil -} - -func dataSourceGoogleComputeForwardingRule() *data_source_google_compute_forwarding_rule_schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(resourceComputeForwardingRule().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - addOptionalFieldsToSchema(dsSchema, "project") - addOptionalFieldsToSchema(dsSchema, "region") - - return &data_source_google_compute_forwarding_rule_schema.Resource{ - Read: dataSourceGoogleComputeForwardingRuleRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleComputeForwardingRuleRead(d *data_source_google_compute_forwarding_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - name := d.Get("name").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - 
d.SetId(data_source_google_compute_forwarding_rule_fmt.Sprintf("projects/%s/regions/%s/forwardingRules/%s", project, region, name)) - - return resourceComputeForwardingRuleRead(d, meta) -} - -func dataSourceGoogleComputeGlobalAddress() *data_source_google_compute_global_address_schema.Resource { - return &data_source_google_compute_global_address_schema.Resource{ - Read: dataSourceGoogleComputeGlobalAddressRead, - - Schema: map[string]*data_source_google_compute_global_address_schema.Schema{ - "name": { - Type: data_source_google_compute_global_address_schema.TypeString, - Required: true, - }, - - "address": { - Type: data_source_google_compute_global_address_schema.TypeString, - Computed: true, - }, - - "status": { - Type: data_source_google_compute_global_address_schema.TypeString, - Computed: true, - }, - - "self_link": { - Type: data_source_google_compute_global_address_schema.TypeString, - Computed: true, - }, - - "project": { - Type: data_source_google_compute_global_address_schema.TypeString, - Computed: true, - Optional: true, - }, - }, - } -} - -func dataSourceGoogleComputeGlobalAddressRead(d *data_source_google_compute_global_address_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - name := d.Get("name").(string) - address, err := config.NewComputeClient(userAgent).GlobalAddresses.Get(project, name).Do() - if err != nil { - return handleNotFoundError(err, d, data_source_google_compute_global_address_fmt.Sprintf("Global Address Not Found : %s", name)) - } - - if err := d.Set("address", address.Address); err != nil { - return data_source_google_compute_global_address_fmt.Errorf("Error setting address: %s", err) - } - if err := d.Set("status", address.Status); err != nil { - return data_source_google_compute_global_address_fmt.Errorf("Error setting status: 
%s", err) - } - if err := d.Set("self_link", address.SelfLink); err != nil { - return data_source_google_compute_global_address_fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("project", project); err != nil { - return data_source_google_compute_global_address_fmt.Errorf("Error setting project: %s", err) - } - d.SetId(data_source_google_compute_global_address_fmt.Sprintf("projects/%s/global/addresses/%s", project, name)) - return nil -} - -func dataSourceGoogleComputeHaVpnGateway() *data_source_google_compute_ha_vpn_gateway_schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(resourceComputeHaVpnGateway().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - addOptionalFieldsToSchema(dsSchema, "project") - addOptionalFieldsToSchema(dsSchema, "region") - - return &data_source_google_compute_ha_vpn_gateway_schema.Resource{ - Read: dataSourceGoogleComputeHaVpnGatewayRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleComputeHaVpnGatewayRead(d *data_source_google_compute_ha_vpn_gateway_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - name := d.Get("name").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - d.SetId(data_source_google_compute_ha_vpn_gateway_fmt.Sprintf("projects/%s/regions/%s/vpnGateways/%s", project, region, name)) - - return resourceComputeHaVpnGatewayRead(d, meta) -} - -func dataSourceGoogleComputeImage() *data_source_google_compute_image_schema.Resource { - return &data_source_google_compute_image_schema.Resource{ - Read: dataSourceGoogleComputeImageRead, - - Schema: map[string]*data_source_google_compute_image_schema.Schema{ - "name": { - Type: data_source_google_compute_image_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - ExactlyOneOf: []string{"name", "family", "filter"}, - }, - "family": { - Type: 
data_source_google_compute_image_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - ExactlyOneOf: []string{"name", "family", "filter"}, - }, - "filter": { - Type: data_source_google_compute_image_schema.TypeString, - Optional: true, - ForceNew: true, - ExactlyOneOf: []string{"name", "family", "filter"}, - }, - "archive_size_bytes": { - Type: data_source_google_compute_image_schema.TypeInt, - Computed: true, - }, - "creation_timestamp": { - Type: data_source_google_compute_image_schema.TypeString, - Computed: true, - }, - "description": { - Type: data_source_google_compute_image_schema.TypeString, - Computed: true, - }, - "disk_size_gb": { - Type: data_source_google_compute_image_schema.TypeInt, - Computed: true, - }, - "image_id": { - Type: data_source_google_compute_image_schema.TypeString, - Computed: true, - }, - "image_encryption_key_sha256": { - Type: data_source_google_compute_image_schema.TypeString, - Computed: true, - }, - "label_fingerprint": { - Type: data_source_google_compute_image_schema.TypeString, - Computed: true, - }, - "labels": { - Type: data_source_google_compute_image_schema.TypeMap, - Elem: &data_source_google_compute_image_schema.Schema{ - Type: data_source_google_compute_image_schema.TypeString, - }, - Computed: true, - }, - "licenses": { - Type: data_source_google_compute_image_schema.TypeList, - Elem: &data_source_google_compute_image_schema.Schema{ - Type: data_source_google_compute_image_schema.TypeString, - }, - Computed: true, - }, - "source_disk": { - Type: data_source_google_compute_image_schema.TypeString, - Computed: true, - }, - "source_disk_encryption_key_sha256": { - Type: data_source_google_compute_image_schema.TypeString, - Computed: true, - }, - "source_disk_id": { - Type: data_source_google_compute_image_schema.TypeString, - Computed: true, - }, - "source_image_id": { - Type: data_source_google_compute_image_schema.TypeString, - Computed: true, - }, - "status": { - Type: 
data_source_google_compute_image_schema.TypeString, - Computed: true, - }, - "self_link": { - Type: data_source_google_compute_image_schema.TypeString, - Computed: true, - }, - "project": { - Type: data_source_google_compute_image_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - }, - } -} - -func dataSourceGoogleComputeImageRead(d *data_source_google_compute_image_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - var image *data_source_google_compute_image_compute.Image - if v, ok := d.GetOk("name"); ok { - data_source_google_compute_image_log.Printf("[DEBUG] Fetching image %s", v.(string)) - image, err = config.NewComputeClient(userAgent).Images.Get(project, v.(string)).Do() - data_source_google_compute_image_log.Printf("[DEBUG] Fetched image %s", v.(string)) - } else if v, ok := d.GetOk("family"); ok { - data_source_google_compute_image_log.Printf("[DEBUG] Fetching latest non-deprecated image from family %s", v.(string)) - image, err = config.NewComputeClient(userAgent).Images.GetFromFamily(project, v.(string)).Do() - data_source_google_compute_image_log.Printf("[DEBUG] Fetched latest non-deprecated image from family %s", v.(string)) - } else if v, ok := d.GetOk("filter"); ok { - images, err := config.NewComputeClient(userAgent).Images.List(project).Filter(v.(string)).Do() - if err != nil { - return data_source_google_compute_image_fmt.Errorf("error retrieving list of images: %s", err) - } - - if len(images.Items) == 1 { - for _, im := range images.Items { - image = im - } - } else { - return data_source_google_compute_image_fmt.Errorf("your filter has returned more than one image or no image. 
Please refine your filter to return exactly one image") - } - } else { - return data_source_google_compute_image_fmt.Errorf("one of name, family or filters must be set") - } - - if err != nil { - return data_source_google_compute_image_fmt.Errorf("error retrieving image information: %s", err) - } - - var ieks256, sdeks256 string - - if image.SourceDiskEncryptionKey != nil { - sdeks256 = image.SourceDiskEncryptionKey.Sha256 - } - - if image.ImageEncryptionKey != nil { - ieks256 = image.ImageEncryptionKey.Sha256 - } - - if err := d.Set("project", project); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("name", image.Name); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("family", image.Family); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting family: %s", err) - } - if err := d.Set("archive_size_bytes", image.ArchiveSizeBytes); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting archive_size_bytes: %s", err) - } - if err := d.Set("creation_timestamp", image.CreationTimestamp); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting creation_timestamp: %s", err) - } - if err := d.Set("description", image.Description); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("disk_size_gb", image.DiskSizeGb); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting disk_size_gb: %s", err) - } - if err := d.Set("image_id", data_source_google_compute_image_strconv.FormatUint(image.Id, 10)); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting image_id: %s", err) - } - if err := d.Set("image_encryption_key_sha256", ieks256); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting 
image_encryption_key_sha256: %s", err) - } - if err := d.Set("label_fingerprint", image.LabelFingerprint); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting label_fingerprint: %s", err) - } - if err := d.Set("labels", image.Labels); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting labels: %s", err) - } - if err := d.Set("licenses", image.Licenses); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting licenses: %s", err) - } - if err := d.Set("self_link", image.SelfLink); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("source_disk", image.SourceDisk); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting source_disk: %s", err) - } - if err := d.Set("source_disk_encryption_key_sha256", sdeks256); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting source_disk_encryption_key_sha256: %s", err) - } - if err := d.Set("source_disk_id", image.SourceDiskId); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting source_disk_id: %s", err) - } - if err := d.Set("source_image_id", image.SourceImageId); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting source_image_id: %s", err) - } - if err := d.Set("status", image.Status); err != nil { - return data_source_google_compute_image_fmt.Errorf("Error setting status: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/images/{{name}}") - if err != nil { - return data_source_google_compute_image_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return nil -} - -func dataSourceGoogleComputeInstance() *data_source_google_compute_instance_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceComputeInstance().Schema) - - addOptionalFieldsToSchema(dsSchema, "name", "self_link", "project", 
"zone") - - return &data_source_google_compute_instance_schema.Resource{ - Read: dataSourceGoogleComputeInstanceRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleComputeInstanceRead(d *data_source_google_compute_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, zone, name, err := GetZonalResourcePropertiesFromSelfLinkOrSchema(d, config) - if err != nil { - return err - } - - instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, name).Do() - if err != nil { - return handleNotFoundError(err, d, data_source_google_compute_instance_fmt.Sprintf("Instance %s", name)) - } - - md := flattenMetadataBeta(instance.Metadata) - if err = d.Set("metadata", md); err != nil { - return data_source_google_compute_instance_fmt.Errorf("error setting metadata: %s", err) - } - - if err := d.Set("can_ip_forward", instance.CanIpForward); err != nil { - return data_source_google_compute_instance_fmt.Errorf("Error setting can_ip_forward: %s", err) - } - if err := d.Set("machine_type", GetResourceNameFromSelfLink(instance.MachineType)); err != nil { - return data_source_google_compute_instance_fmt.Errorf("Error setting machine_type: %s", err) - } - - networkInterfaces, _, internalIP, externalIP, err := flattenNetworkInterfaces(d, config, instance.NetworkInterfaces) - if err != nil { - return err - } - if err := d.Set("network_interface", networkInterfaces); err != nil { - return err - } - - sshIP := externalIP - if sshIP == "" { - sshIP = internalIP - } - - d.SetConnInfo(map[string]string{ - "type": "ssh", - "host": sshIP, - }) - - if instance.Metadata != nil { - if err := d.Set("metadata_fingerprint", instance.Metadata.Fingerprint); err != nil { - return data_source_google_compute_instance_fmt.Errorf("Error setting metadata_fingerprint: %s", err) - } - } - - if instance.Tags != nil { - if err := 
d.Set("tags_fingerprint", instance.Tags.Fingerprint); err != nil { - return data_source_google_compute_instance_fmt.Errorf("Error setting tags_fingerprint: %s", err) - } - if err := d.Set("tags", convertStringArrToInterface(instance.Tags.Items)); err != nil { - return data_source_google_compute_instance_fmt.Errorf("Error setting tags: %s", err) - } - } - - if err := d.Set("labels", instance.Labels); err != nil { - return err - } - - if instance.LabelFingerprint != "" { - if err := d.Set("label_fingerprint", instance.LabelFingerprint); err != nil { - return data_source_google_compute_instance_fmt.Errorf("Error setting label_fingerprint: %s", err) - } - } - - attachedDisks := []map[string]interface{}{} - scratchDisks := []map[string]interface{}{} - for _, disk := range instance.Disks { - if disk.Boot { - err = d.Set("boot_disk", flattenBootDisk(d, disk, config)) - if err != nil { - return err - } - } else if disk.Type == "SCRATCH" { - scratchDisks = append(scratchDisks, flattenScratchDisk(disk)) - } else { - di := map[string]interface{}{ - "source": ConvertSelfLinkToV1(disk.Source), - "device_name": disk.DeviceName, - "mode": disk.Mode, - } - if key := disk.DiskEncryptionKey; key != nil { - di["disk_encryption_key_sha256"] = key.Sha256 - di["kms_key_self_link"] = key.KmsKeyName - } - attachedDisks = append(attachedDisks, di) - } - } - - ads := []map[string]interface{}{} - for _, d := range attachedDisks { - if d != nil { - ads = append(ads, d) - } - } - - err = d.Set("service_account", flattenServiceAccounts(instance.ServiceAccounts)) - if err != nil { - return err - } - - err = d.Set("scheduling", flattenScheduling(instance.Scheduling)) - if err != nil { - return err - } - - err = d.Set("guest_accelerator", flattenGuestAccelerators(instance.GuestAccelerators)) - if err != nil { - return err - } - - err = d.Set("scratch_disk", scratchDisks) - if err != nil { - return err - } - - err = d.Set("shielded_instance_config", 
flattenShieldedVmConfig(instance.ShieldedInstanceConfig)) - if err != nil { - return err - } - - err = d.Set("enable_display", flattenEnableDisplay(instance.DisplayDevice)) - if err != nil { - return err - } - - if err := d.Set("attached_disk", ads); err != nil { - return data_source_google_compute_instance_fmt.Errorf("Error setting attached_disk: %s", err) - } - if err := d.Set("cpu_platform", instance.CpuPlatform); err != nil { - return data_source_google_compute_instance_fmt.Errorf("Error setting cpu_platform: %s", err) - } - if err := d.Set("min_cpu_platform", instance.MinCpuPlatform); err != nil { - return data_source_google_compute_instance_fmt.Errorf("Error setting min_cpu_platform: %s", err) - } - if err := d.Set("deletion_protection", instance.DeletionProtection); err != nil { - return data_source_google_compute_instance_fmt.Errorf("Error setting deletion_protection: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(instance.SelfLink)); err != nil { - return data_source_google_compute_instance_fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("instance_id", data_source_google_compute_instance_fmt.Sprintf("%d", instance.Id)); err != nil { - return data_source_google_compute_instance_fmt.Errorf("Error setting instance_id: %s", err) - } - if err := d.Set("project", project); err != nil { - return data_source_google_compute_instance_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("zone", GetResourceNameFromSelfLink(instance.Zone)); err != nil { - return data_source_google_compute_instance_fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("current_status", instance.Status); err != nil { - return data_source_google_compute_instance_fmt.Errorf("Error setting current_status: %s", err) - } - if err := d.Set("name", instance.Name); err != nil { - return data_source_google_compute_instance_fmt.Errorf("Error setting name: %s", err) - } - 
d.SetId(data_source_google_compute_instance_fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, instance.Zone, instance.Name)) - return nil -} - -func dataSourceGoogleComputeInstanceGroup() *data_source_google_compute_instance_group_schema.Resource { - return &data_source_google_compute_instance_group_schema.Resource{ - Read: dataSourceComputeInstanceGroupRead, - Schema: map[string]*data_source_google_compute_instance_group_schema.Schema{ - "name": { - Type: data_source_google_compute_instance_group_schema.TypeString, - Optional: true, - ConflictsWith: []string{"self_link"}, - }, - - "self_link": { - Type: data_source_google_compute_instance_group_schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"name", "zone"}, - }, - - "zone": { - Type: data_source_google_compute_instance_group_schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"self_link"}, - }, - - "project": { - Type: data_source_google_compute_instance_group_schema.TypeString, - Optional: true, - Computed: true, - }, - - "description": { - Type: data_source_google_compute_instance_group_schema.TypeString, - Computed: true, - }, - - "instances": { - Type: data_source_google_compute_instance_group_schema.TypeSet, - Computed: true, - Elem: &data_source_google_compute_instance_group_schema.Schema{Type: data_source_google_compute_instance_group_schema.TypeString}, - Set: data_source_google_compute_instance_group_schema.HashString, - }, - - "named_port": { - Type: data_source_google_compute_instance_group_schema.TypeList, - Computed: true, - Elem: &data_source_google_compute_instance_group_schema.Resource{ - Schema: map[string]*data_source_google_compute_instance_group_schema.Schema{ - "name": { - Type: data_source_google_compute_instance_group_schema.TypeString, - Computed: true, - }, - - "port": { - Type: data_source_google_compute_instance_group_schema.TypeInt, - Computed: true, - }, - }, - }, - }, - - "network": { - Type: 
data_source_google_compute_instance_group_schema.TypeString, - Computed: true, - }, - - "size": { - Type: data_source_google_compute_instance_group_schema.TypeInt, - Computed: true, - }, - }, - } -} - -func dataSourceComputeInstanceGroupRead(d *data_source_google_compute_instance_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - if name, ok := d.GetOk("name"); ok { - zone, err := getZone(d, config) - if err != nil { - return err - } - project, err := getProject(d, config) - if err != nil { - return err - } - d.SetId(data_source_google_compute_instance_group_fmt.Sprintf("projects/%s/zones/%s/instanceGroups/%s", project, zone, name.(string))) - } else if selfLink, ok := d.GetOk("self_link"); ok { - parsed, err := ParseInstanceGroupFieldValue(selfLink.(string), d, config) - if err != nil { - return err - } - if err := d.Set("name", parsed.Name); err != nil { - return data_source_google_compute_instance_group_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("zone", parsed.Zone); err != nil { - return data_source_google_compute_instance_group_fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("project", parsed.Project); err != nil { - return data_source_google_compute_instance_group_fmt.Errorf("Error setting project: %s", err) - } - d.SetId(data_source_google_compute_instance_group_fmt.Sprintf("projects/%s/zones/%s/instanceGroups/%s", parsed.Project, parsed.Zone, parsed.Name)) - } else { - return data_source_google_compute_instance_group_errors.New("Must provide either `self_link` or `zone/name`") - } - - return resourceComputeInstanceGroupRead(d, meta) -} - -func dataSourceGoogleComputeInstanceSerialPort() *data_source_google_compute_instance_serial_port_schema.Resource { - return &data_source_google_compute_instance_serial_port_schema.Resource{ - Read: computeInstanceSerialPortRead, - Schema: map[string]*data_source_google_compute_instance_serial_port_schema.Schema{ - "port": { - Type: 
data_source_google_compute_instance_serial_port_schema.TypeInt, - Required: true, - }, - "instance": { - Type: data_source_google_compute_instance_serial_port_schema.TypeString, - Required: true, - }, - "zone": { - Type: data_source_google_compute_instance_serial_port_schema.TypeString, - Optional: true, - Computed: true, - }, - "project": { - Type: data_source_google_compute_instance_serial_port_schema.TypeString, - Optional: true, - Computed: true, - }, - "contents": { - Type: data_source_google_compute_instance_serial_port_schema.TypeString, - Computed: true, - }, - }, - } -} - -func computeInstanceSerialPortRead(d *data_source_google_compute_instance_serial_port_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - if err := d.Set("project", project); err != nil { - return data_source_google_compute_instance_serial_port_fmt.Errorf("Error setting project: %s", err) - } - zone, err := getZone(d, config) - if err != nil { - return err - } - if err := d.Set("zone", zone); err != nil { - return data_source_google_compute_instance_serial_port_fmt.Errorf("Error setting zone: %s", err) - } - - port := int64(d.Get("port").(int)) - output, err := config.NewComputeClient(userAgent).Instances.GetSerialPortOutput(project, zone, d.Get("instance").(string)).Port(port).Do() - if err != nil { - return err - } - - if err := d.Set("contents", output.Contents); err != nil { - return data_source_google_compute_instance_serial_port_fmt.Errorf("Error setting contents: %s", err) - } - d.SetId(output.SelfLink) - return nil -} - -func dataSourceGoogleComputeInstanceTemplate() *data_source_google_compute_instance_template_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceComputeInstanceTemplate().Schema) - - dsSchema["filter"] = 
&data_source_google_compute_instance_template_schema.Schema{ - Type: data_source_google_compute_instance_template_schema.TypeString, - Optional: true, - } - dsSchema["most_recent"] = &data_source_google_compute_instance_template_schema.Schema{ - Type: data_source_google_compute_instance_template_schema.TypeBool, - Optional: true, - } - - addRequiredFieldsToSchema(dsSchema, "project") - - addOptionalFieldsToSchema(dsSchema, "name", "filter", "most_recent") - - dsSchema["name"].ExactlyOneOf = []string{"name", "filter"} - dsSchema["filter"].ExactlyOneOf = []string{"name", "filter"} - - return &data_source_google_compute_instance_template_schema.Resource{ - Read: datasourceComputeInstanceTemplateRead, - Schema: dsSchema, - } -} - -func datasourceComputeInstanceTemplateRead(d *data_source_google_compute_instance_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - if v, ok := d.GetOk("name"); ok { - return retrieveInstance(d, meta, project, v.(string)) - } - if v, ok := d.GetOk("filter"); ok { - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - templates, err := config.NewComputeClient(userAgent).InstanceTemplates.List(project).Filter(v.(string)).Do() - if err != nil { - return data_source_google_compute_instance_template_fmt.Errorf("error retrieving list of instance templates: %s", err) - } - - mostRecent := d.Get("most_recent").(bool) - if mostRecent { - data_source_google_compute_instance_template_sort.Sort(ByCreationTimestamp(templates.Items)) - } - - count := len(templates.Items) - if count == 1 || count > 1 && mostRecent { - return retrieveInstance(d, meta, project, templates.Items[0].Name) - } - - return data_source_google_compute_instance_template_fmt.Errorf("your filter has returned %d instance template(s). 
Please refine your filter or set most_recent to return exactly one instance template", len(templates.Items)) - } - - return data_source_google_compute_instance_template_fmt.Errorf("one of name or filters must be set") -} - -func retrieveInstance(d *data_source_google_compute_instance_template_schema.ResourceData, meta interface{}, project, name string) error { - d.SetId("projects/" + project + "/global/instanceTemplates/" + name) - - return resourceComputeInstanceTemplateRead(d, meta) -} - -type ByCreationTimestamp []*data_source_google_compute_instance_template_compute.InstanceTemplate - -func (a ByCreationTimestamp) Len() int { return len(a) } - -func (a ByCreationTimestamp) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func (a ByCreationTimestamp) Less(i, j int) bool { - return a[i].CreationTimestamp > a[j].CreationTimestamp -} - -func dataSourceGoogleComputeNetwork() *data_source_google_compute_network_schema.Resource { - return &data_source_google_compute_network_schema.Resource{ - Read: dataSourceGoogleComputeNetworkRead, - - Schema: map[string]*data_source_google_compute_network_schema.Schema{ - "name": { - Type: data_source_google_compute_network_schema.TypeString, - Required: true, - }, - - "description": { - Type: data_source_google_compute_network_schema.TypeString, - Computed: true, - }, - - "gateway_ipv4": { - Type: data_source_google_compute_network_schema.TypeString, - Computed: true, - }, - - "self_link": { - Type: data_source_google_compute_network_schema.TypeString, - Computed: true, - }, - - "project": { - Type: data_source_google_compute_network_schema.TypeString, - Optional: true, - }, - - "subnetworks_self_links": { - Type: data_source_google_compute_network_schema.TypeList, - Computed: true, - Elem: &data_source_google_compute_network_schema.Schema{Type: data_source_google_compute_network_schema.TypeString}, - }, - }, - } -} - -func dataSourceGoogleComputeNetworkRead(d *data_source_google_compute_network_schema.ResourceData, meta interface{}) 
error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - name := d.Get("name").(string) - network, err := config.NewComputeClient(userAgent).Networks.Get(project, name).Do() - if err != nil { - return handleNotFoundError(err, d, data_source_google_compute_network_fmt.Sprintf("Network Not Found : %s", name)) - } - if err := d.Set("gateway_ipv4", network.GatewayIPv4); err != nil { - return data_source_google_compute_network_fmt.Errorf("Error setting gateway_ipv4: %s", err) - } - if err := d.Set("self_link", network.SelfLink); err != nil { - return data_source_google_compute_network_fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("description", network.Description); err != nil { - return data_source_google_compute_network_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("subnetworks_self_links", network.Subnetworks); err != nil { - return data_source_google_compute_network_fmt.Errorf("Error setting subnetworks_self_links: %s", err) - } - d.SetId(data_source_google_compute_network_fmt.Sprintf("projects/%s/global/networks/%s", project, network.Name)) - return nil -} - -func dataSourceGoogleComputeNodeTypes() *data_source_google_compute_node_types_schema.Resource { - return &data_source_google_compute_node_types_schema.Resource{ - Read: dataSourceGoogleComputeNodeTypesRead, - Schema: map[string]*data_source_google_compute_node_types_schema.Schema{ - "project": { - Type: data_source_google_compute_node_types_schema.TypeString, - Optional: true, - Computed: true, - }, - "zone": { - Type: data_source_google_compute_node_types_schema.TypeString, - Optional: true, - Computed: true, - }, - "names": { - Type: data_source_google_compute_node_types_schema.TypeList, - Computed: true, - Elem: &data_source_google_compute_node_types_schema.Schema{Type: 
data_source_google_compute_node_types_schema.TypeString}, - }, - }, - } -} - -func dataSourceGoogleComputeNodeTypesRead(d *data_source_google_compute_node_types_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone, err := getZone(d, config) - if err != nil { - return data_source_google_compute_node_types_fmt.Errorf("Please specify zone to get appropriate node types for zone. Unable to get zone: %s", err) - } - - resp, err := config.NewComputeClient(userAgent).NodeTypes.List(project, zone).Do() - if err != nil { - return err - } - nodeTypes := flattenComputeNodeTypes(resp.Items) - data_source_google_compute_node_types_log.Printf("[DEBUG] Received Google Compute Regions: %q", nodeTypes) - - if err := d.Set("names", nodeTypes); err != nil { - return data_source_google_compute_node_types_fmt.Errorf("Error setting names: %s", err) - } - if err := d.Set("project", project); err != nil { - return data_source_google_compute_node_types_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("zone", zone); err != nil { - return data_source_google_compute_node_types_fmt.Errorf("Error setting zone: %s", err) - } - d.SetId(data_source_google_compute_node_types_fmt.Sprintf("projects/%s/zones/%s", project, zone)) - - return nil -} - -func flattenComputeNodeTypes(nodeTypes []*data_source_google_compute_node_types_compute.NodeType) []string { - result := make([]string, len(nodeTypes)) - for i, nodeType := range nodeTypes { - result[i] = nodeType.Name - } - data_source_google_compute_node_types_sort.Strings(result) - return result -} - -func dataSourceGoogleComputeRegionInstanceGroup() *data_source_google_compute_region_instance_group_schema.Resource { - return &data_source_google_compute_region_instance_group_schema.Resource{ - Read: 
dataSourceComputeRegionInstanceGroupRead, - Schema: map[string]*data_source_google_compute_region_instance_group_schema.Schema{ - "name": { - Type: data_source_google_compute_region_instance_group_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "instances": { - Type: data_source_google_compute_region_instance_group_schema.TypeList, - Computed: true, - Elem: &data_source_google_compute_region_instance_group_schema.Resource{ - Schema: map[string]*data_source_google_compute_region_instance_group_schema.Schema{ - "instance": { - Type: data_source_google_compute_region_instance_group_schema.TypeString, - Required: true, - }, - - "status": { - Type: data_source_google_compute_region_instance_group_schema.TypeString, - Required: true, - }, - - "named_ports": { - Type: data_source_google_compute_region_instance_group_schema.TypeList, - Computed: true, - Elem: &data_source_google_compute_region_instance_group_schema.Resource{ - Schema: map[string]*data_source_google_compute_region_instance_group_schema.Schema{ - "name": { - Type: data_source_google_compute_region_instance_group_schema.TypeString, - Required: true, - }, - "port": { - Type: data_source_google_compute_region_instance_group_schema.TypeInt, - Required: true, - }, - }, - }, - }, - }, - }, - }, - - "region": { - Type: data_source_google_compute_region_instance_group_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "project": { - Type: data_source_google_compute_region_instance_group_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "self_link": { - Type: data_source_google_compute_region_instance_group_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, - - "size": { - Type: data_source_google_compute_region_instance_group_schema.TypeInt, - Computed: true, - }, - }, - } -} - -func dataSourceComputeRegionInstanceGroupRead(d 
*data_source_google_compute_region_instance_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, region, name, err := GetRegionalResourcePropertiesFromSelfLinkOrSchema(d, config) - if err != nil { - return err - } - - instanceGroup, err := config.NewComputeClient(userAgent).RegionInstanceGroups.Get( - project, region, name).Do() - if err != nil { - return handleNotFoundError(err, d, data_source_google_compute_region_instance_group_fmt.Sprintf("Region Instance Group %q", name)) - } - - members, err := config.NewComputeClient(userAgent).RegionInstanceGroups.ListInstances( - project, region, name, &data_source_google_compute_region_instance_group_compute.RegionInstanceGroupsListInstancesRequest{ - InstanceState: "ALL", - }).Do() - if err != nil { - if gerr, ok := err.(*data_source_google_compute_region_instance_group_googleapi.Error); ok && gerr.Code == 404 { - - if err := d.Set("instances", nil); err != nil { - return data_source_google_compute_region_instance_group_fmt.Errorf("Error setting instances: %s", err) - } - } else { - return data_source_google_compute_region_instance_group_fmt.Errorf("Error reading RegionInstanceGroup Members: %s", err) - } - } else { - if err := d.Set("instances", flattenInstancesWithNamedPorts(members.Items)); err != nil { - return data_source_google_compute_region_instance_group_fmt.Errorf("Error setting instances: %s", err) - } - } - d.SetId(data_source_google_compute_region_instance_group_fmt.Sprintf("projects/%s/regions/%s/instanceGroups/%s", project, region, name)) - if err := d.Set("self_link", instanceGroup.SelfLink); err != nil { - return data_source_google_compute_region_instance_group_fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("name", name); err != nil { - return data_source_google_compute_region_instance_group_fmt.Errorf("Error setting name: %s", err) - } - if err 
:= d.Set("project", project); err != nil { - return data_source_google_compute_region_instance_group_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", region); err != nil { - return data_source_google_compute_region_instance_group_fmt.Errorf("Error setting region: %s", err) - } - return nil -} - -func flattenInstancesWithNamedPorts(insts []*data_source_google_compute_region_instance_group_compute.InstanceWithNamedPorts) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(insts)) - data_source_google_compute_region_instance_group_log.Printf("There were %d instances.\n", len(insts)) - for _, inst := range insts { - instMap := make(map[string]interface{}) - instMap["instance"] = inst.Instance - instMap["named_ports"] = flattenNamedPorts(inst.NamedPorts) - instMap["status"] = inst.Status - result = append(result, instMap) - } - return result -} - -func flattenNamedPorts(namedPorts []*data_source_google_compute_region_instance_group_compute.NamedPort) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(namedPorts)) - for _, namedPort := range namedPorts { - namedPortMap := make(map[string]interface{}) - namedPortMap["name"] = namedPort.Name - namedPortMap["port"] = namedPort.Port - result = append(result, namedPortMap) - } - return result -} - -func dataSourceGoogleRegionComputeSslCertificate() *data_source_google_compute_region_ssl_certificate_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceComputeRegionSslCertificate().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - addOptionalFieldsToSchema(dsSchema, "project") - addOptionalFieldsToSchema(dsSchema, "region") - - return &data_source_google_compute_region_ssl_certificate_schema.Resource{ - Read: dataSourceComputeRegionSslCertificateRead, - Schema: dsSchema, - } -} - -func dataSourceComputeRegionSslCertificateRead(d *data_source_google_compute_region_ssl_certificate_schema.ResourceData, meta interface{}) 
error { - config := meta.(*Config) - - project, region, name, err := GetRegionalResourcePropertiesFromSelfLinkOrSchema(d, config) - if err != nil { - return err - } - - d.SetId(data_source_google_compute_region_ssl_certificate_fmt.Sprintf("projects/%s/regions/%s/sslCertificates/%s", project, region, name)) - - return resourceComputeRegionSslCertificateRead(d, meta) -} - -func dataSourceGoogleComputeRegions() *data_source_google_compute_regions_schema.Resource { - return &data_source_google_compute_regions_schema.Resource{ - Read: dataSourceGoogleComputeRegionsRead, - Schema: map[string]*data_source_google_compute_regions_schema.Schema{ - "project": { - Type: data_source_google_compute_regions_schema.TypeString, - Optional: true, - Computed: true, - }, - "names": { - Type: data_source_google_compute_regions_schema.TypeList, - Computed: true, - Elem: &data_source_google_compute_regions_schema.Schema{Type: data_source_google_compute_regions_schema.TypeString}, - }, - "status": { - Type: data_source_google_compute_regions_schema.TypeString, - Optional: true, - ValidateFunc: data_source_google_compute_regions_validation.StringInSlice([]string{"UP", "DOWN"}, false), - }, - }, - } -} - -func dataSourceGoogleComputeRegionsRead(d *data_source_google_compute_regions_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - filter := "" - if s, ok := d.GetOk("status"); ok { - filter = data_source_google_compute_regions_fmt.Sprintf(" (status eq %s)", s) - } - - call := config.NewComputeClient(userAgent).Regions.List(project).Filter(filter) - - resp, err := call.Do() - if err != nil { - return err - } - - regions := flattenRegions(resp.Items) - data_source_google_compute_regions_log.Printf("[DEBUG] Received Google Compute Regions: %q", regions) - - if err := d.Set("names", regions); err 
!= nil { - return data_source_google_compute_regions_fmt.Errorf("Error setting names: %s", err) - } - if err := d.Set("project", project); err != nil { - return data_source_google_compute_regions_fmt.Errorf("Error setting project: %s", err) - } - d.SetId(data_source_google_compute_regions_fmt.Sprintf("projects/%s", project)) - - return nil -} - -func flattenRegions(regions []*data_source_google_compute_regions_compute.Region) []string { - result := make([]string, len(regions)) - for i, region := range regions { - result[i] = region.Name - } - data_source_google_compute_regions_sort.Strings(result) - return result -} - -func dataSourceGoogleComputeResourcePolicy() *data_source_google_compute_resource_policy_schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(resourceComputeResourcePolicy().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "region") - addOptionalFieldsToSchema(dsSchema, "project") - - return &data_source_google_compute_resource_policy_schema.Resource{ - Read: dataSourceGoogleComputeResourcePolicyRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleComputeResourcePolicyRead(d *data_source_google_compute_resource_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - name := d.Get("name").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - region, err := getRegion(d, config) - if err != nil { - return err - } - - d.SetId(data_source_google_compute_resource_policy_fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, name)) - - return resourceComputeResourcePolicyRead(d, meta) -} - -func dataSourceGoogleComputeRouter() *data_source_google_compute_router_schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(resourceComputeRouter().Schema) - addRequiredFieldsToSchema(dsSchema, "name") - addRequiredFieldsToSchema(dsSchema, "network") - addOptionalFieldsToSchema(dsSchema, "region") - 
addOptionalFieldsToSchema(dsSchema, "project") - - return &data_source_google_compute_router_schema.Resource{ - Read: dataSourceComputeRouterRead, - Schema: dsSchema, - } -} - -func dataSourceComputeRouterRead(d *data_source_google_compute_router_schema.ResourceData, meta interface{}) error { - routerName := d.Get("name").(string) - - d.SetId(routerName) - return resourceComputeRouterRead(d, meta) -} - -func dataSourceGoogleComputeRouterStatus() *data_source_google_compute_router_status_schema.Resource { - routeElemSchema := datasourceSchemaFromResourceSchema(resourceComputeRoute().Schema) - - return &data_source_google_compute_router_status_schema.Resource{ - Read: dataSourceComputeRouterStatusRead, - Schema: map[string]*data_source_google_compute_router_status_schema.Schema{ - "name": { - Type: data_source_google_compute_router_status_schema.TypeString, - Description: "Name of the router to query.", - Required: true, - Computed: false, - }, - "project": { - Type: data_source_google_compute_router_status_schema.TypeString, - Description: "Project ID of the target router.", - Optional: true, - Computed: false, - }, - "region": { - Type: data_source_google_compute_router_status_schema.TypeString, - Description: "Region of the target router.", - Optional: true, - Computed: true, - }, - "network": { - Type: data_source_google_compute_router_status_schema.TypeString, - Description: "URI of the network to which this router belongs.", - Computed: true, - }, - "best_routes": { - Type: data_source_google_compute_router_status_schema.TypeList, - Description: "Best routes for this router's network.", - Elem: &data_source_google_compute_router_status_schema.Resource{ - Schema: routeElemSchema, - }, - Computed: true, - }, - "best_routes_for_router": { - Type: data_source_google_compute_router_status_schema.TypeList, - Description: "Best routes learned by this router.", - Elem: &data_source_google_compute_router_status_schema.Resource{ - Schema: routeElemSchema, - }, - 
Computed: true, - }, - }, - } -} - -func dataSourceComputeRouterStatusRead(d *data_source_google_compute_router_status_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - var name string - if n, ok := d.GetOk("name"); ok { - name = n.(string) - } - - resp, err := config.NewComputeClient(userAgent).Routers.GetRouterStatus(project, region, name).Do() - if err != nil { - return err - } - - status := resp.Result - - if err := d.Set("network", status.Network); err != nil { - return data_source_google_compute_router_status_fmt.Errorf("Error setting network: %s", err) - } - - if err := d.Set("best_routes", flattenRoutes(status.BestRoutes)); err != nil { - return data_source_google_compute_router_status_fmt.Errorf("Error setting best_routes: %s", err) - } - - if err := d.Set("best_routes_for_router", flattenRoutes(status.BestRoutesForRouter)); err != nil { - return data_source_google_compute_router_status_fmt.Errorf("Error setting best_routes_for_router: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{name}}") - if err != nil { - return data_source_google_compute_router_status_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return nil -} - -func flattenRoutes(routes []*data_source_google_compute_router_status_compute.Route) []map[string]interface{} { - results := make([]map[string]interface{}, len(routes)) - - for i, route := range routes { - results[i] = map[string]interface{}{ - "dest_range": route.DestRange, - "name": route.Name, - "network": route.Network, - "description": route.Description, - "next_hop_gateway": route.NextHopGateway, - "next_hop_ilb": route.NextHopIlb, - "next_hop_ip": route.NextHopIp, - 
"next_hop_vpn_tunnel": route.NextHopVpnTunnel, - "priority": route.Priority, - "tags": route.Tags, - "next_hop_network": route.NextHopNetwork, - } - } - - return results -} - -func dataSourceGoogleComputeSslCertificate() *data_source_google_compute_ssl_certificate_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceComputeSslCertificate().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - addOptionalFieldsToSchema(dsSchema, "project") - - return &data_source_google_compute_ssl_certificate_schema.Resource{ - Read: dataSourceComputeSslCertificateRead, - Schema: dsSchema, - } -} - -func dataSourceComputeSslCertificateRead(d *data_source_google_compute_ssl_certificate_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - certificateName := d.Get("name").(string) - - d.SetId(data_source_google_compute_ssl_certificate_fmt.Sprintf("projects/%s/global/sslCertificates/%s", project, certificateName)) - - return resourceComputeSslCertificateRead(d, meta) -} - -func dataSourceGoogleComputeSslPolicy() *data_source_google_compute_ssl_policy_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceComputeSslPolicy().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - addOptionalFieldsToSchema(dsSchema, "project") - - return &data_source_google_compute_ssl_policy_schema.Resource{ - Read: datasourceComputeSslPolicyRead, - Schema: dsSchema, - } -} - -func datasourceComputeSslPolicyRead(d *data_source_google_compute_ssl_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - policyName := d.Get("name").(string) - - d.SetId(data_source_google_compute_ssl_policy_fmt.Sprintf("projects/%s/global/sslPolicies/%s", project, policyName)) - - return resourceComputeSslPolicyRead(d, meta) -} - -func dataSourceGoogleComputeSubnetwork() 
*data_source_google_compute_subnetwork_schema.Resource { - return &data_source_google_compute_subnetwork_schema.Resource{ - Read: dataSourceGoogleComputeSubnetworkRead, - - Schema: map[string]*data_source_google_compute_subnetwork_schema.Schema{ - "name": { - Type: data_source_google_compute_subnetwork_schema.TypeString, - Optional: true, - }, - "self_link": { - Type: data_source_google_compute_subnetwork_schema.TypeString, - Optional: true, - Computed: true, - }, - "description": { - Type: data_source_google_compute_subnetwork_schema.TypeString, - Computed: true, - }, - "ip_cidr_range": { - Type: data_source_google_compute_subnetwork_schema.TypeString, - Computed: true, - }, - "private_ip_google_access": { - Type: data_source_google_compute_subnetwork_schema.TypeBool, - Computed: true, - }, - "secondary_ip_range": { - Type: data_source_google_compute_subnetwork_schema.TypeList, - Computed: true, - Elem: &data_source_google_compute_subnetwork_schema.Resource{ - Schema: map[string]*data_source_google_compute_subnetwork_schema.Schema{ - "range_name": { - Type: data_source_google_compute_subnetwork_schema.TypeString, - Computed: true, - }, - "ip_cidr_range": { - Type: data_source_google_compute_subnetwork_schema.TypeString, - Computed: true, - }, - }, - }, - }, - "network": { - Type: data_source_google_compute_subnetwork_schema.TypeString, - Computed: true, - }, - "gateway_address": { - Type: data_source_google_compute_subnetwork_schema.TypeString, - Computed: true, - }, - "region": { - Type: data_source_google_compute_subnetwork_schema.TypeString, - Computed: true, - Optional: true, - }, - - "project": { - Type: data_source_google_compute_subnetwork_schema.TypeString, - Computed: true, - Optional: true, - }, - }, - } -} - -func dataSourceGoogleComputeSubnetworkRead(d *data_source_google_compute_subnetwork_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - 
return err - } - - project, region, name, err := GetRegionalResourcePropertiesFromSelfLinkOrSchema(d, config) - if err != nil { - return err - } - - subnetwork, err := config.NewComputeClient(userAgent).Subnetworks.Get(project, region, name).Do() - if err != nil { - return handleNotFoundError(err, d, data_source_google_compute_subnetwork_fmt.Sprintf("Subnetwork Not Found : %s", name)) - } - - if err := d.Set("ip_cidr_range", subnetwork.IpCidrRange); err != nil { - return data_source_google_compute_subnetwork_fmt.Errorf("Error setting ip_cidr_range: %s", err) - } - if err := d.Set("private_ip_google_access", subnetwork.PrivateIpGoogleAccess); err != nil { - return data_source_google_compute_subnetwork_fmt.Errorf("Error setting private_ip_google_access: %s", err) - } - if err := d.Set("self_link", subnetwork.SelfLink); err != nil { - return data_source_google_compute_subnetwork_fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("description", subnetwork.Description); err != nil { - return data_source_google_compute_subnetwork_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("gateway_address", subnetwork.GatewayAddress); err != nil { - return data_source_google_compute_subnetwork_fmt.Errorf("Error setting gateway_address: %s", err) - } - if err := d.Set("network", subnetwork.Network); err != nil { - return data_source_google_compute_subnetwork_fmt.Errorf("Error setting network: %s", err) - } - if err := d.Set("project", project); err != nil { - return data_source_google_compute_subnetwork_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", region); err != nil { - return data_source_google_compute_subnetwork_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("name", name); err != nil { - return data_source_google_compute_subnetwork_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("secondary_ip_range", flattenSecondaryRanges(subnetwork.SecondaryIpRanges)); err != nil { - 
return data_source_google_compute_subnetwork_fmt.Errorf("Error setting secondary_ip_range: %s", err) - } - - d.SetId(data_source_google_compute_subnetwork_fmt.Sprintf("projects/%s/regions/%s/subnetworks/%s", project, region, name)) - return nil -} - -func flattenSecondaryRanges(secondaryRanges []*data_source_google_compute_subnetwork_compute.SubnetworkSecondaryRange) []map[string]interface{} { - secondaryRangesSchema := make([]map[string]interface{}, 0, len(secondaryRanges)) - for _, secondaryRange := range secondaryRanges { - data := map[string]interface{}{ - "range_name": secondaryRange.RangeName, - "ip_cidr_range": secondaryRange.IpCidrRange, - } - - secondaryRangesSchema = append(secondaryRangesSchema, data) - } - return secondaryRangesSchema -} - -func dataSourceGoogleComputeVpnGateway() *data_source_google_compute_vpn_gateway_schema.Resource { - return &data_source_google_compute_vpn_gateway_schema.Resource{ - Read: dataSourceGoogleComputeVpnGatewayRead, - - Schema: map[string]*data_source_google_compute_vpn_gateway_schema.Schema{ - "name": { - Type: data_source_google_compute_vpn_gateway_schema.TypeString, - Required: true, - }, - - "region": { - Type: data_source_google_compute_vpn_gateway_schema.TypeString, - Optional: true, - Computed: true, - }, - - "project": { - Type: data_source_google_compute_vpn_gateway_schema.TypeString, - Optional: true, - Computed: true, - }, - - "description": { - Type: data_source_google_compute_vpn_gateway_schema.TypeString, - Computed: true, - }, - - "self_link": { - Type: data_source_google_compute_vpn_gateway_schema.TypeString, - Computed: true, - }, - - "network": { - Type: data_source_google_compute_vpn_gateway_schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleComputeVpnGatewayRead(d *data_source_google_compute_vpn_gateway_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } 
- - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - - vpnGatewaysService := data_source_google_compute_vpn_gateway_compute.NewTargetVpnGatewaysService(config.NewComputeClient(userAgent)) - - gateway, err := vpnGatewaysService.Get(project, region, name).Do() - if err != nil { - return handleNotFoundError(err, d, data_source_google_compute_vpn_gateway_fmt.Sprintf("VPN Gateway Not Found : %s", name)) - } - if err := d.Set("network", ConvertSelfLinkToV1(gateway.Network)); err != nil { - return data_source_google_compute_vpn_gateway_fmt.Errorf("Error setting network: %s", err) - } - if err := d.Set("region", gateway.Region); err != nil { - return data_source_google_compute_vpn_gateway_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("self_link", gateway.SelfLink); err != nil { - return data_source_google_compute_vpn_gateway_fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("description", gateway.Description); err != nil { - return data_source_google_compute_vpn_gateway_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("project", project); err != nil { - return data_source_google_compute_vpn_gateway_fmt.Errorf("Error setting project: %s", err) - } - d.SetId(data_source_google_compute_vpn_gateway_fmt.Sprintf("projects/%s/regions/%s/targetVpnGateways/%s", project, region, name)) - return nil -} - -func dataSourceGoogleComputeZones() *data_source_google_compute_zones_schema.Resource { - return &data_source_google_compute_zones_schema.Resource{ - Read: dataSourceGoogleComputeZonesRead, - Schema: map[string]*data_source_google_compute_zones_schema.Schema{ - "region": { - Type: data_source_google_compute_zones_schema.TypeString, - Optional: true, - }, - "project": { - Type: data_source_google_compute_zones_schema.TypeString, - Optional: true, - Computed: true, - }, - "names": { - Type: 
data_source_google_compute_zones_schema.TypeList, - Computed: true, - Elem: &data_source_google_compute_zones_schema.Schema{Type: data_source_google_compute_zones_schema.TypeString}, - }, - "status": { - Type: data_source_google_compute_zones_schema.TypeString, - Optional: true, - ValidateFunc: data_source_google_compute_zones_validation.StringInSlice([]string{"UP", "DOWN"}, false), - }, - }, - } -} - -func dataSourceGoogleComputeZonesRead(d *data_source_google_compute_zones_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - region := config.Region - if r, ok := d.GetOk("region"); ok { - region = r.(string) - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - filter := "" - if s, ok := d.GetOk("status"); ok { - filter += data_source_google_compute_zones_fmt.Sprintf(" (status eq %s)", s) - } - - zones := []string{} - err = config.NewComputeClient(userAgent).Zones.List(project).Filter(filter).Pages(config.context, func(zl *data_source_google_compute_zones_compute.ZoneList) error { - for _, zone := range zl.Items { - - if data_source_google_compute_zones_strings.HasSuffix(zone.Region, "/"+region) { - zones = append(zones, zone.Name) - } - } - return nil - }) - - if err != nil { - return err - } - - data_source_google_compute_zones_sort.Strings(zones) - data_source_google_compute_zones_log.Printf("[DEBUG] Received Google Compute Zones: %q", zones) - - if err := d.Set("names", zones); err != nil { - return data_source_google_compute_zones_fmt.Errorf("Error setting names: %s", err) - } - if err := d.Set("region", region); err != nil { - return data_source_google_compute_zones_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("project", project); err != nil { - return data_source_google_compute_zones_fmt.Errorf("Error setting project: %s", err) - } - 
d.SetId(data_source_google_compute_zones_fmt.Sprintf("projects/%s/regions/%s", project, region)) - - return nil -} - -func dataSourceGoogleContainerCluster() *data_source_google_container_cluster_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceContainerCluster().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - addOptionalFieldsToSchema(dsSchema, "project", "location") - - return &data_source_google_container_cluster_schema.Resource{ - Read: datasourceContainerClusterRead, - Schema: dsSchema, - } -} - -func datasourceContainerClusterRead(d *data_source_google_container_cluster_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - clusterName := d.Get("name").(string) - - location, err := getLocation(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - d.SetId(containerClusterFullName(project, location, clusterName)) - - return resourceContainerClusterRead(d, meta) -} - -func dataSourceGoogleContainerEngineVersions() *data_source_google_container_engine_versions_schema.Resource { - return &data_source_google_container_engine_versions_schema.Resource{ - Read: dataSourceGoogleContainerEngineVersionsRead, - Schema: map[string]*data_source_google_container_engine_versions_schema.Schema{ - "project": { - Type: data_source_google_container_engine_versions_schema.TypeString, - Optional: true, - }, - "version_prefix": { - Type: data_source_google_container_engine_versions_schema.TypeString, - Optional: true, - }, - "location": { - Type: data_source_google_container_engine_versions_schema.TypeString, - Optional: true, - }, - "default_cluster_version": { - Type: data_source_google_container_engine_versions_schema.TypeString, - Computed: true, - }, - "latest_master_version": { - Type: data_source_google_container_engine_versions_schema.TypeString, - Computed: true, - }, - "latest_node_version": { - Type: 
data_source_google_container_engine_versions_schema.TypeString, - Computed: true, - }, - "valid_master_versions": { - Type: data_source_google_container_engine_versions_schema.TypeList, - Computed: true, - Elem: &data_source_google_container_engine_versions_schema.Schema{Type: data_source_google_container_engine_versions_schema.TypeString}, - }, - "valid_node_versions": { - Type: data_source_google_container_engine_versions_schema.TypeList, - Computed: true, - Elem: &data_source_google_container_engine_versions_schema.Schema{Type: data_source_google_container_engine_versions_schema.TypeString}, - }, - "release_channel_default_version": { - Type: data_source_google_container_engine_versions_schema.TypeMap, - Computed: true, - Elem: &data_source_google_container_engine_versions_schema.Schema{Type: data_source_google_container_engine_versions_schema.TypeString}, - }, - }, - } -} - -func dataSourceGoogleContainerEngineVersionsRead(d *data_source_google_container_engine_versions_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - location, err := getLocation(d, config) - if err != nil { - return err - } - if len(location) == 0 { - return data_source_google_container_engine_versions_fmt.Errorf("Cannot determine location: set location in this data source or at provider-level") - } - - location = data_source_google_container_engine_versions_fmt.Sprintf("projects/%s/locations/%s", project, location) - resp, err := config.NewContainerClient(userAgent).Projects.Locations.GetServerConfig(location).Do() - if err != nil { - return data_source_google_container_engine_versions_fmt.Errorf("Error retrieving available container cluster versions: %s", err.Error()) - } - - validMasterVersions := make([]string, 0) - for _, v := range resp.ValidMasterVersions { - if 
data_source_google_container_engine_versions_strings.HasPrefix(v, d.Get("version_prefix").(string)) { - validMasterVersions = append(validMasterVersions, v) - } - } - - validNodeVersions := make([]string, 0) - for _, v := range resp.ValidNodeVersions { - if data_source_google_container_engine_versions_strings.HasPrefix(v, d.Get("version_prefix").(string)) { - validNodeVersions = append(validNodeVersions, v) - } - } - - if err := d.Set("valid_master_versions", validMasterVersions); err != nil { - return data_source_google_container_engine_versions_fmt.Errorf("Error setting valid_master_versions: %s", err) - } - if len(validMasterVersions) > 0 { - if err := d.Set("latest_master_version", validMasterVersions[0]); err != nil { - return data_source_google_container_engine_versions_fmt.Errorf("Error setting latest_master_version: %s", err) - } - } - - if err := d.Set("valid_node_versions", validNodeVersions); err != nil { - return data_source_google_container_engine_versions_fmt.Errorf("Error setting valid_node_versions: %s", err) - } - if len(validNodeVersions) > 0 { - if err := d.Set("latest_node_version", validNodeVersions[0]); err != nil { - return data_source_google_container_engine_versions_fmt.Errorf("Error setting latest_node_version: %s", err) - } - } - - if err := d.Set("default_cluster_version", resp.DefaultClusterVersion); err != nil { - return data_source_google_container_engine_versions_fmt.Errorf("Error setting default_cluster_version: %s", err) - } - - channels := map[string]string{} - for _, v := range resp.Channels { - channels[v.Channel] = v.DefaultVersion - } - if err := d.Set("release_channel_default_version", channels); err != nil { - return data_source_google_container_engine_versions_fmt.Errorf("Error setting release_channel_default_version: %s", err) - } - - d.SetId(data_source_google_container_engine_versions_time.Now().UTC().String()) - return nil -} - -func dataSourceGoogleFolder() *data_source_google_folder_schema.Resource { - return 
&data_source_google_folder_schema.Resource{ - Read: dataSourceFolderRead, - Schema: map[string]*data_source_google_folder_schema.Schema{ - "folder": { - Type: data_source_google_folder_schema.TypeString, - Required: true, - }, - "folder_id": { - Type: data_source_google_folder_schema.TypeString, - Computed: true, - }, - "name": { - Type: data_source_google_folder_schema.TypeString, - Computed: true, - }, - "parent": { - Type: data_source_google_folder_schema.TypeString, - Computed: true, - }, - "display_name": { - Type: data_source_google_folder_schema.TypeString, - Computed: true, - }, - "lifecycle_state": { - Type: data_source_google_folder_schema.TypeString, - Computed: true, - }, - "create_time": { - Type: data_source_google_folder_schema.TypeString, - Computed: true, - }, - "lookup_organization": { - Type: data_source_google_folder_schema.TypeBool, - Optional: true, - Default: false, - }, - "organization": { - Type: data_source_google_folder_schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceFolderRead(d *data_source_google_folder_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - d.SetId(canonicalFolderName(d.Get("folder").(string))) - if err := resourceGoogleFolderRead(d, meta); err != nil { - return err - } - - if d.Id() == "" { - return nil - } - - if v, ok := d.GetOk("lookup_organization"); ok && v.(bool) { - organization, err := lookupOrganizationName(d.Id(), userAgent, d, config) - if err != nil { - return err - } - - if err := d.Set("organization", organization); err != nil { - return data_source_google_folder_fmt.Errorf("Error setting organization: %s", err) - } - } - - return nil -} - -func canonicalFolderName(ba string) string { - if data_source_google_folder_strings.HasPrefix(ba, "folders/") { - return ba - } - - return "folders/" + ba -} - -func lookupOrganizationName(parent, userAgent string, d 
*data_source_google_folder_schema.ResourceData, config *Config) (string, error) { - if parent == "" || data_source_google_folder_strings.HasPrefix(parent, "organizations/") { - return parent, nil - } else if data_source_google_folder_strings.HasPrefix(parent, "folders/") { - parentFolder, err := getGoogleFolder(parent, userAgent, d, config) - if err != nil { - return "", data_source_google_folder_fmt.Errorf("Error getting parent folder '%s': %s", parent, err) - } - return lookupOrganizationName(parentFolder.Parent, userAgent, d, config) - } else { - return "", data_source_google_folder_fmt.Errorf("Unknown parent type '%s' on folder '%s'", parent, d.Id()) - } -} - -func dataSourceGoogleFolderOrganizationPolicy() *data_source_google_folder_organization_policy_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceGoogleFolderOrganizationPolicy().Schema) - - addRequiredFieldsToSchema(dsSchema, "folder") - addRequiredFieldsToSchema(dsSchema, "constraint") - - return &data_source_google_folder_organization_policy_schema.Resource{ - Read: datasourceGoogleFolderOrganizationPolicyRead, - Schema: dsSchema, - } -} - -func datasourceGoogleFolderOrganizationPolicyRead(d *data_source_google_folder_organization_policy_schema.ResourceData, meta interface{}) error { - - d.SetId(data_source_google_folder_organization_policy_fmt.Sprintf("%s/%s", d.Get("folder"), d.Get("constraint"))) - - return resourceGoogleFolderOrganizationPolicyRead(d, meta) -} - -func dataSourceGoogleComputeGlobalForwardingRule() *data_source_google_global_compute_forwarding_rule_schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(resourceComputeGlobalForwardingRule().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - addOptionalFieldsToSchema(dsSchema, "project") - - return &data_source_google_global_compute_forwarding_rule_schema.Resource{ - Read: dataSourceGoogleComputeGlobalForwardingRuleRead, - Schema: dsSchema, - } -} - -func 
dataSourceGoogleComputeGlobalForwardingRuleRead(d *data_source_google_global_compute_forwarding_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - name := d.Get("name").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - - d.SetId(data_source_google_global_compute_forwarding_rule_fmt.Sprintf("projects/%s/global/forwardingRules/%s", project, name)) - - return resourceComputeGlobalForwardingRuleRead(d, meta) -} - -func dataSourceGoogleIamPolicy() *data_source_google_iam_policy_schema.Resource { - return &data_source_google_iam_policy_schema.Resource{ - Read: dataSourceGoogleIamPolicyRead, - Schema: map[string]*data_source_google_iam_policy_schema.Schema{ - "binding": { - Type: data_source_google_iam_policy_schema.TypeSet, - - Optional: true, - Elem: &data_source_google_iam_policy_schema.Resource{ - Schema: map[string]*data_source_google_iam_policy_schema.Schema{ - "role": { - Type: data_source_google_iam_policy_schema.TypeString, - Required: true, - }, - "members": { - Type: data_source_google_iam_policy_schema.TypeSet, - Required: true, - Elem: &data_source_google_iam_policy_schema.Schema{ - Type: data_source_google_iam_policy_schema.TypeString, - ValidateFunc: data_source_google_iam_policy_validation.StringDoesNotMatch(data_source_google_iam_policy_regexp.MustCompile("^deleted:"), "Terraform does not support IAM policies for deleted principals"), - }, - Set: data_source_google_iam_policy_schema.HashString, - }, - "condition": { - Type: data_source_google_iam_policy_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &data_source_google_iam_policy_schema.Resource{ - Schema: map[string]*data_source_google_iam_policy_schema.Schema{ - "expression": { - Type: data_source_google_iam_policy_schema.TypeString, - Required: true, - }, - "title": { - Type: data_source_google_iam_policy_schema.TypeString, - Required: true, - }, - "description": { - Type: data_source_google_iam_policy_schema.TypeString, - 
Optional: true, - }, - }, - }, - }, - }, - }, - }, - "policy_data": { - Type: data_source_google_iam_policy_schema.TypeString, - Computed: true, - }, - "audit_config": { - Type: data_source_google_iam_policy_schema.TypeSet, - Optional: true, - Elem: &data_source_google_iam_policy_schema.Resource{ - Schema: map[string]*data_source_google_iam_policy_schema.Schema{ - "service": { - Type: data_source_google_iam_policy_schema.TypeString, - Required: true, - }, - "audit_log_configs": { - Type: data_source_google_iam_policy_schema.TypeSet, - Required: true, - Elem: &data_source_google_iam_policy_schema.Resource{ - Schema: map[string]*data_source_google_iam_policy_schema.Schema{ - "log_type": { - Type: data_source_google_iam_policy_schema.TypeString, - Required: true, - }, - "exempted_members": { - Type: data_source_google_iam_policy_schema.TypeSet, - Elem: &data_source_google_iam_policy_schema.Schema{Type: data_source_google_iam_policy_schema.TypeString}, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func dataSourceGoogleIamPolicyRead(d *data_source_google_iam_policy_schema.ResourceData, meta interface{}) error { - var policy data_source_google_iam_policy_cloudresourcemanager.Policy - var bindings []*data_source_google_iam_policy_cloudresourcemanager.Binding - - bset := d.Get("binding").(*data_source_google_iam_policy_schema.Set) - aset := d.Get("audit_config").(*data_source_google_iam_policy_schema.Set) - - bindings = make([]*data_source_google_iam_policy_cloudresourcemanager.Binding, bset.Len()) - policy.Bindings = bindings - - for i, v := range bset.List() { - binding := v.(map[string]interface{}) - members := convertStringSet(binding["members"].(*data_source_google_iam_policy_schema.Set)) - condition := expandIamCondition(binding["condition"]) - - data_source_google_iam_policy_sort.Strings(members) - - policy.Bindings[i] = &data_source_google_iam_policy_cloudresourcemanager.Binding{ - Role: binding["role"].(string), - Members: members, - 
Condition: condition, - } - } - - data_source_google_iam_policy_sort.Slice(bindings, func(i, j int) bool { - return bindings[i].Role < bindings[j].Role - }) - - policy.AuditConfigs = expandAuditConfig(aset) - - pjson, err := data_source_google_iam_policy_json.Marshal(&policy) - if err != nil { - - return err - } - pstring := string(pjson) - - if err := d.Set("policy_data", pstring); err != nil { - return data_source_google_iam_policy_fmt.Errorf("Error setting policy_data: %s", err) - } - d.SetId(data_source_google_iam_policy_strconv.Itoa(hashcode(pstring))) - - return nil -} - -func expandAuditConfig(set *data_source_google_iam_policy_schema.Set) []*data_source_google_iam_policy_cloudresourcemanager.AuditConfig { - auditConfigs := make([]*data_source_google_iam_policy_cloudresourcemanager.AuditConfig, 0, set.Len()) - for _, v := range set.List() { - config := v.(map[string]interface{}) - - auditLogConfigSet := config["audit_log_configs"].(*data_source_google_iam_policy_schema.Set) - - auditLogConfigs := make([]*data_source_google_iam_policy_cloudresourcemanager.AuditLogConfig, 0, auditLogConfigSet.Len()) - for _, y := range auditLogConfigSet.List() { - logConfig := y.(map[string]interface{}) - auditLogConfigs = append(auditLogConfigs, &data_source_google_iam_policy_cloudresourcemanager.AuditLogConfig{ - LogType: logConfig["log_type"].(string), - ExemptedMembers: convertStringArr(logConfig["exempted_members"].(*data_source_google_iam_policy_schema.Set).List()), - }) - } - auditConfigs = append(auditConfigs, &data_source_google_iam_policy_cloudresourcemanager.AuditConfig{ - Service: config["service"].(string), - AuditLogConfigs: auditLogConfigs, - }) - } - return auditConfigs -} - -func dataSourceGoogleIamRole() *data_source_google_iam_role_schema.Resource { - return &data_source_google_iam_role_schema.Resource{ - Read: dataSourceGoogleIamRoleRead, - Schema: map[string]*data_source_google_iam_role_schema.Schema{ - "name": { - Type: 
data_source_google_iam_role_schema.TypeString, - Required: true, - }, - "title": { - Type: data_source_google_iam_role_schema.TypeString, - Computed: true, - }, - "included_permissions": { - Type: data_source_google_iam_role_schema.TypeList, - Computed: true, - Elem: &data_source_google_iam_role_schema.Schema{Type: data_source_google_iam_role_schema.TypeString}, - }, - "stage": { - Type: data_source_google_iam_role_schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleIamRoleRead(d *data_source_google_iam_role_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - roleName := d.Get("name").(string) - role, err := config.NewIamClient(userAgent).Roles.Get(roleName).Do() - if err != nil { - return handleNotFoundError(err, d, data_source_google_iam_role_fmt.Sprintf("Error reading IAM Role %s: %s", roleName, err)) - } - - d.SetId(role.Name) - if err := d.Set("title", role.Title); err != nil { - return data_source_google_iam_role_fmt.Errorf("Error setting title: %s", err) - } - if err := d.Set("stage", role.Stage); err != nil { - return data_source_google_iam_role_fmt.Errorf("Error setting stage: %s", err) - } - if err := d.Set("included_permissions", role.IncludedPermissions); err != nil { - return data_source_google_iam_role_fmt.Errorf("Error setting included_permissions: %s", err) - } - - return nil -} - -func dataSourceGoogleIamTestablePermissions() *data_source_google_iam_testable_permissions_schema.Resource { - return &data_source_google_iam_testable_permissions_schema.Resource{ - Read: dataSourceGoogleIamTestablePermissionsRead, - Schema: map[string]*data_source_google_iam_testable_permissions_schema.Schema{ - "full_resource_name": { - Type: data_source_google_iam_testable_permissions_schema.TypeString, - Required: true, - }, - "stages": { - Type: data_source_google_iam_testable_permissions_schema.TypeList, - Optional: 
true, - Elem: &data_source_google_iam_testable_permissions_schema.Schema{ - Type: data_source_google_iam_testable_permissions_schema.TypeString, - ValidateFunc: data_source_google_iam_testable_permissions_validation.StringInSlice([]string{"ALPHA", "BETA", "GA", "DEPRECATED"}, true), - }, - }, - "custom_support_level": { - Type: data_source_google_iam_testable_permissions_schema.TypeString, - Optional: true, - Default: "SUPPORTED", - ValidateFunc: data_source_google_iam_testable_permissions_validation.StringInSlice([]string{"NOT_SUPPORTED", "SUPPORTED", "TESTING"}, true), - }, - "permissions": { - Type: data_source_google_iam_testable_permissions_schema.TypeList, - Computed: true, - Elem: &data_source_google_iam_testable_permissions_schema.Resource{ - Schema: map[string]*data_source_google_iam_testable_permissions_schema.Schema{ - "name": { - Type: data_source_google_iam_testable_permissions_schema.TypeString, - Computed: true, - }, - "title": { - Type: data_source_google_iam_testable_permissions_schema.TypeString, - Computed: true, - }, - "custom_support_level": { - Type: data_source_google_iam_testable_permissions_schema.TypeString, - Computed: true, - }, - "stage": { - Type: data_source_google_iam_testable_permissions_schema.TypeString, - Computed: true, - }, - "api_disabled": { - Type: data_source_google_iam_testable_permissions_schema.TypeBool, - Computed: true, - }, - }, - }, - }, - }, - } -} - -func dataSourceGoogleIamTestablePermissionsRead(d *data_source_google_iam_testable_permissions_schema.ResourceData, meta interface{}) (err error) { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - body := make(map[string]interface{}) - body["pageSize"] = 500 - permissions := make([]map[string]interface{}, 0) - - custom_support_level := data_source_google_iam_testable_permissions_strings.ToUpper(d.Get("custom_support_level").(string)) - stages := []string{} - for _, e := range 
d.Get("stages").([]interface{}) { - stages = append(stages, data_source_google_iam_testable_permissions_strings.ToUpper(e.(string))) - } - if len(stages) == 0 { - - stages = append(stages, "GA") - } - for { - url := "https://iam.googleapis.com/v1/permissions:queryTestablePermissions" - body["fullResourceName"] = d.Get("full_resource_name").(string) - res, err := sendRequest(config, "POST", "", url, userAgent, body) - if err != nil { - return data_source_google_iam_testable_permissions_fmt.Errorf("Error retrieving permissions: %s", err) - } - - pagePermissions := flattenTestablePermissionsList(res["permissions"], custom_support_level, stages) - permissions = append(permissions, pagePermissions...) - pToken, ok := res["nextPageToken"] - if ok && pToken != nil && pToken.(string) != "" { - body["pageToken"] = pToken.(string) - } else { - break - } - } - - if err = d.Set("permissions", permissions); err != nil { - return data_source_google_iam_testable_permissions_fmt.Errorf("Error retrieving permissions: %s", err) - } - - d.SetId(d.Get("full_resource_name").(string)) - return nil -} - -func flattenTestablePermissionsList(v interface{}, custom_support_level string, stages []string) []map[string]interface{} { - if v == nil { - return make([]map[string]interface{}, 0) - } - - ls := v.([]interface{}) - permissions := make([]map[string]interface{}, 0, len(ls)) - for _, raw := range ls { - p := raw.(map[string]interface{}) - - if _, ok := p["name"]; ok { - var csl bool - if custom_support_level == "SUPPORTED" { - csl = p["customRolesSupportLevel"] == nil || p["customRolesSupportLevel"] == "SUPPORTED" - } else { - csl = p["customRolesSupportLevel"] == custom_support_level - } - if csl && p["stage"] != nil && stringInSlice(stages, p["stage"].(string)) { - permissions = append(permissions, map[string]interface{}{ - "name": p["name"], - "title": p["title"], - "stage": p["stage"], - "api_disabled": p["apiDisabled"], - "custom_support_level": p["customRolesSupportLevel"], - }) - } 
- } - } - - return permissions -} - -func dataSourceGoogleKmsCryptoKey() *data_source_google_kms_crypto_key_schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(resourceKMSCryptoKey().Schema) - addRequiredFieldsToSchema(dsSchema, "name") - addRequiredFieldsToSchema(dsSchema, "key_ring") - - return &data_source_google_kms_crypto_key_schema.Resource{ - Read: dataSourceGoogleKmsCryptoKeyRead, - Schema: dsSchema, - } - -} - -func dataSourceGoogleKmsCryptoKeyRead(d *data_source_google_kms_crypto_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - keyRingId, err := parseKmsKeyRingId(d.Get("key_ring").(string), config) - if err != nil { - return err - } - - cryptoKeyId := kmsCryptoKeyId{ - KeyRingId: *keyRingId, - Name: d.Get("name").(string), - } - - d.SetId(cryptoKeyId.cryptoKeyId()) - - return resourceKMSCryptoKeyRead(d, meta) -} - -func dataSourceGoogleKmsCryptoKeyVersion() *data_source_google_kms_crypto_key_version_schema.Resource { - return &data_source_google_kms_crypto_key_version_schema.Resource{ - Read: dataSourceGoogleKmsCryptoKeyVersionRead, - Schema: map[string]*data_source_google_kms_crypto_key_version_schema.Schema{ - "crypto_key": { - Type: data_source_google_kms_crypto_key_version_schema.TypeString, - Required: true, - ForceNew: true, - }, - "name": { - Type: data_source_google_kms_crypto_key_version_schema.TypeString, - Computed: true, - }, - "version": { - Type: data_source_google_kms_crypto_key_version_schema.TypeInt, - Optional: true, - Default: 1, - }, - "algorithm": { - Type: data_source_google_kms_crypto_key_version_schema.TypeString, - Computed: true, - }, - "protection_level": { - Type: data_source_google_kms_crypto_key_version_schema.TypeString, - Computed: true, - }, - "state": { - Type: data_source_google_kms_crypto_key_version_schema.TypeString, - Computed: true, - }, - "public_key": { - Type: data_source_google_kms_crypto_key_version_schema.TypeList, - Computed: true, - Elem: 
&data_source_google_kms_crypto_key_version_schema.Resource{ - Schema: map[string]*data_source_google_kms_crypto_key_version_schema.Schema{ - "algorithm": { - Type: data_source_google_kms_crypto_key_version_schema.TypeString, - Computed: true, - }, - "pem": { - Type: data_source_google_kms_crypto_key_version_schema.TypeString, - Computed: true, - }, - }, - }, - }, - }, - } -} - -func dataSourceGoogleKmsCryptoKeyVersionRead(d *data_source_google_kms_crypto_key_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}/cryptoKeyVersions/{{version}}") - if err != nil { - return err - } - - data_source_google_kms_crypto_key_version_log.Printf("[DEBUG] Getting attributes for CryptoKeyVersion: %#v", url) - - cryptoKeyId, err := parseKmsCryptoKeyId(d.Get("crypto_key").(string), config) - if err != nil { - return err - } - res, err := sendRequest(config, "GET", cryptoKeyId.KeyRingId.Project, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, data_source_google_kms_crypto_key_version_fmt.Sprintf("KmsCryptoKeyVersion %q", d.Id())) - } - - if err := d.Set("version", flattenKmsCryptoKeyVersionVersion(res["name"], d)); err != nil { - return data_source_google_kms_crypto_key_version_fmt.Errorf("Error setting CryptoKeyVersion: %s", err) - } - if err := d.Set("name", flattenKmsCryptoKeyVersionName(res["name"], d)); err != nil { - return data_source_google_kms_crypto_key_version_fmt.Errorf("Error setting CryptoKeyVersion: %s", err) - } - if err := d.Set("state", flattenKmsCryptoKeyVersionState(res["state"], d)); err != nil { - return data_source_google_kms_crypto_key_version_fmt.Errorf("Error setting CryptoKeyVersion: %s", err) - } - if err := d.Set("protection_level", flattenKmsCryptoKeyVersionProtectionLevel(res["protectionLevel"], d)); err != nil { - return 
data_source_google_kms_crypto_key_version_fmt.Errorf("Error setting CryptoKeyVersion: %s", err) - } - if err := d.Set("algorithm", flattenKmsCryptoKeyVersionAlgorithm(res["algorithm"], d)); err != nil { - return data_source_google_kms_crypto_key_version_fmt.Errorf("Error setting CryptoKeyVersion: %s", err) - } - - url, err = replaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}") - if err != nil { - return err - } - - data_source_google_kms_crypto_key_version_log.Printf("[DEBUG] Getting purpose of CryptoKey: %#v", url) - res, err = sendRequest(config, "GET", cryptoKeyId.KeyRingId.Project, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, data_source_google_kms_crypto_key_version_fmt.Sprintf("KmsCryptoKey %q", d.Id())) - } - - if res["purpose"] == "ASYMMETRIC_SIGN" || res["purpose"] == "ASYMMETRIC_DECRYPT" { - url, err = replaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}/cryptoKeyVersions/{{version}}/publicKey") - if err != nil { - return err - } - data_source_google_kms_crypto_key_version_log.Printf("[DEBUG] Getting public key of CryptoKeyVersion: %#v", url) - - res, err = sendRequestWithTimeout(config, "GET", cryptoKeyId.KeyRingId.Project, url, userAgent, nil, d.Timeout(data_source_google_kms_crypto_key_version_schema.TimeoutRead), isCryptoKeyVersionsPendingGeneration) - - if err != nil { - data_source_google_kms_crypto_key_version_log.Printf("Error generating public key: %s", err) - return err - } - - if err := d.Set("public_key", flattenKmsCryptoKeyVersionPublicKey(res, d)); err != nil { - return data_source_google_kms_crypto_key_version_fmt.Errorf("Error setting CryptoKeyVersion public key: %s", err) - } - } - d.SetId(data_source_google_kms_crypto_key_version_fmt.Sprintf("//cloudkms.googleapis.com/v1/%s/cryptoKeyVersions/%d", d.Get("crypto_key"), d.Get("version"))) - - return nil -} - -func flattenKmsCryptoKeyVersionVersion(v interface{}, d *data_source_google_kms_crypto_key_version_schema.ResourceData) interface{} { - parts := 
data_source_google_kms_crypto_key_version_strings.Split(v.(string), "/") - version := parts[len(parts)-1] - - if intVal, err := data_source_google_kms_crypto_key_version_strconv.ParseInt(version, 10, 64); err == nil { - return intVal - } - return v -} - -func flattenKmsCryptoKeyVersionName(v interface{}, d *data_source_google_kms_crypto_key_version_schema.ResourceData) interface{} { - return v -} - -func flattenKmsCryptoKeyVersionState(v interface{}, d *data_source_google_kms_crypto_key_version_schema.ResourceData) interface{} { - return v -} - -func flattenKmsCryptoKeyVersionProtectionLevel(v interface{}, d *data_source_google_kms_crypto_key_version_schema.ResourceData) interface{} { - return v -} - -func flattenKmsCryptoKeyVersionAlgorithm(v interface{}, d *data_source_google_kms_crypto_key_version_schema.ResourceData) interface{} { - return v -} - -func flattenKmsCryptoKeyVersionPublicKey(v interface{}, d *data_source_google_kms_crypto_key_version_schema.ResourceData) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pem"] = - flattenKmsCryptoKeyVersionPublicKeyPem(original["pem"], d) - transformed["algorithm"] = - flattenKmsCryptoKeyVersionPublicKeyAlgorithm(original["algorithm"], d) - return []interface{}{transformed} -} - -func flattenKmsCryptoKeyVersionPublicKeyPem(v interface{}, d *data_source_google_kms_crypto_key_version_schema.ResourceData) interface{} { - return v -} - -func flattenKmsCryptoKeyVersionPublicKeyAlgorithm(v interface{}, d *data_source_google_kms_crypto_key_version_schema.ResourceData) interface{} { - return v -} - -func dataSourceGoogleKmsKeyRing() *data_source_google_kms_key_ring_schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(resourceKMSKeyRing().Schema) - addRequiredFieldsToSchema(dsSchema, "name") - addRequiredFieldsToSchema(dsSchema, "location") - 
addOptionalFieldsToSchema(dsSchema, "project") - - return &data_source_google_kms_key_ring_schema.Resource{ - Read: dataSourceGoogleKmsKeyRingRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleKmsKeyRingRead(d *data_source_google_kms_key_ring_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - keyRingId := kmsKeyRingId{ - Name: d.Get("name").(string), - Location: d.Get("location").(string), - Project: project, - } - d.SetId(keyRingId.keyRingId()) - - return resourceKMSKeyRingRead(d, meta) -} - -func dataSourceGoogleKmsSecret() *data_source_google_kms_secret_schema.Resource { - return &data_source_google_kms_secret_schema.Resource{ - Read: dataSourceGoogleKmsSecretRead, - Schema: map[string]*data_source_google_kms_secret_schema.Schema{ - "crypto_key": { - Type: data_source_google_kms_secret_schema.TypeString, - Required: true, - }, - "ciphertext": { - Type: data_source_google_kms_secret_schema.TypeString, - Required: true, - }, - "plaintext": { - Type: data_source_google_kms_secret_schema.TypeString, - Computed: true, - Sensitive: true, - }, - "additional_authenticated_data": { - Type: data_source_google_kms_secret_schema.TypeString, - Optional: true, - }, - }, - } -} - -func dataSourceGoogleKmsSecretRead(d *data_source_google_kms_secret_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - cryptoKeyId, err := parseKmsCryptoKeyId(d.Get("crypto_key").(string), config) - - if err != nil { - return err - } - - ciphertext := d.Get("ciphertext").(string) - - kmsDecryptRequest := &data_source_google_kms_secret_cloudkms.DecryptRequest{ - Ciphertext: ciphertext, - } - - if aad, ok := d.GetOk("additional_authenticated_data"); ok { - kmsDecryptRequest.AdditionalAuthenticatedData = aad.(string) - } - - decryptResponse, err := 
config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.Decrypt(cryptoKeyId.cryptoKeyId(), kmsDecryptRequest).Do() - - if err != nil { - return data_source_google_kms_secret_fmt.Errorf("Error decrypting ciphertext: %s", err) - } - - plaintext, err := data_source_google_kms_secret_base64.StdEncoding.DecodeString(decryptResponse.Plaintext) - - if err != nil { - return data_source_google_kms_secret_fmt.Errorf("Error decoding base64 response: %s", err) - } - - data_source_google_kms_secret_log.Printf("[INFO] Successfully decrypted ciphertext: %s", ciphertext) - - if err := d.Set("plaintext", string(plaintext[:])); err != nil { - return data_source_google_kms_secret_fmt.Errorf("Error setting plaintext: %s", err) - } - d.SetId(data_source_google_kms_secret_fmt.Sprintf("%s:%s", d.Get("crypto_key").(string), ciphertext)) - - return nil -} - -func dataSourceGoogleKmsSecretCiphertext() *data_source_google_kms_secret_ciphertext_schema.Resource { - return &data_source_google_kms_secret_ciphertext_schema.Resource{ - DeprecationMessage: "Use the google_kms_secret_ciphertext resource instead.", - Read: dataSourceGoogleKmsSecretCiphertextRead, - Schema: map[string]*data_source_google_kms_secret_ciphertext_schema.Schema{ - "crypto_key": { - Type: data_source_google_kms_secret_ciphertext_schema.TypeString, - Required: true, - }, - "ciphertext": { - Type: data_source_google_kms_secret_ciphertext_schema.TypeString, - Computed: true, - }, - "plaintext": { - Type: data_source_google_kms_secret_ciphertext_schema.TypeString, - Required: true, - Sensitive: true, - }, - }, - } -} - -func dataSourceGoogleKmsSecretCiphertextRead(d *data_source_google_kms_secret_ciphertext_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - cryptoKeyId, err := parseKmsCryptoKeyId(d.Get("crypto_key").(string), config) - - if err != nil { - return err - } - - plaintext := 
data_source_google_kms_secret_ciphertext_base64.StdEncoding.EncodeToString([]byte(d.Get("plaintext").(string))) - - kmsEncryptRequest := &data_source_google_kms_secret_ciphertext_cloudkms.EncryptRequest{ - Plaintext: plaintext, - } - - encryptCall := config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.Encrypt(cryptoKeyId.cryptoKeyId(), kmsEncryptRequest) - if config.UserProjectOverride { - encryptCall.Header().Set("X-Goog-User-Project", cryptoKeyId.KeyRingId.Project) - } - encryptResponse, err := encryptCall.Do() - - if err != nil { - return data_source_google_kms_secret_ciphertext_fmt.Errorf("Error encrypting plaintext: %s", err) - } - - data_source_google_kms_secret_ciphertext_log.Printf("[INFO] Successfully encrypted plaintext") - - if err := d.Set("ciphertext", encryptResponse.Ciphertext); err != nil { - return data_source_google_kms_secret_ciphertext_fmt.Errorf("Error setting ciphertext: %s", err) - } - d.SetId(d.Get("crypto_key").(string)) - - return nil -} - -func dataSourceGoogleMonitoringUptimeCheckIps() *data_source_google_monitoring_uptime_check_ips_schema.Resource { - return &data_source_google_monitoring_uptime_check_ips_schema.Resource{ - Read: dataSourceGoogleMonitoringUptimeCheckIpsRead, - - Schema: map[string]*data_source_google_monitoring_uptime_check_ips_schema.Schema{ - "uptime_check_ips": { - Type: data_source_google_monitoring_uptime_check_ips_schema.TypeList, - Computed: true, - Elem: &data_source_google_monitoring_uptime_check_ips_schema.Resource{ - Schema: map[string]*data_source_google_monitoring_uptime_check_ips_schema.Schema{ - "region": { - Type: data_source_google_monitoring_uptime_check_ips_schema.TypeString, - Computed: true, - }, - "location": { - Type: data_source_google_monitoring_uptime_check_ips_schema.TypeString, - Computed: true, - }, - "ip_address": { - Type: data_source_google_monitoring_uptime_check_ips_schema.TypeString, - Computed: true, - }, - }, - }, - }, - }, - } -} - -func 
dataSourceGoogleMonitoringUptimeCheckIpsRead(d *data_source_google_monitoring_uptime_check_ips_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url := "https://monitoring.googleapis.com/v3/uptimeCheckIps" - - uptimeCheckIps, err := paginatedListRequest("", url, userAgent, config, flattenUptimeCheckIpsList) - if err != nil { - return data_source_google_monitoring_uptime_check_ips_fmt.Errorf("Error retrieving monitoring uptime check ips: %s", err) - } - - if err := d.Set("uptime_check_ips", uptimeCheckIps); err != nil { - return data_source_google_monitoring_uptime_check_ips_fmt.Errorf("Error retrieving monitoring uptime check ips: %s", err) - } - d.SetId("uptime_check_ips_id") - return nil -} - -func flattenUptimeCheckIpsList(resp map[string]interface{}) []interface{} { - ipObjList := resp["uptimeCheckIps"].([]interface{}) - uptimeCheckIps := make([]interface{}, len(ipObjList)) - for i, u := range ipObjList { - ipObj := u.(map[string]interface{}) - uptimeCheckIps[i] = map[string]interface{}{ - "region": ipObj["region"], - "location": ipObj["location"], - "ip_address": ipObj["ipAddress"], - } - } - return uptimeCheckIps -} - -type googRanges struct { - SyncToken string `json:"syncToken"` - CreationTime string `json:"creationTime"` - Prefixes []prefixes `json:"prefixes"` -} - -type prefixes struct { - Ipv4Prefix string `json:"ipv4Prefix"` - Ipv6Prefix string `json:"ipv6Prefix"` -} - -func dataSourceGoogleNetblockIpRanges() *data_source_google_netblock_ip_ranges_schema.Resource { - return &data_source_google_netblock_ip_ranges_schema.Resource{ - Read: dataSourceGoogleNetblockIpRangesRead, - - Schema: map[string]*data_source_google_netblock_ip_ranges_schema.Schema{ - "range_type": { - Type: data_source_google_netblock_ip_ranges_schema.TypeString, - Optional: true, - Default: "cloud-netblocks", - }, - "cidr_blocks": { - Type: 
data_source_google_netblock_ip_ranges_schema.TypeList, - Elem: &data_source_google_netblock_ip_ranges_schema.Schema{Type: data_source_google_netblock_ip_ranges_schema.TypeString}, - Computed: true, - }, - "cidr_blocks_ipv4": { - Type: data_source_google_netblock_ip_ranges_schema.TypeList, - Elem: &data_source_google_netblock_ip_ranges_schema.Schema{Type: data_source_google_netblock_ip_ranges_schema.TypeString}, - Computed: true, - }, - "cidr_blocks_ipv6": { - Type: data_source_google_netblock_ip_ranges_schema.TypeList, - Elem: &data_source_google_netblock_ip_ranges_schema.Schema{Type: data_source_google_netblock_ip_ranges_schema.TypeString}, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleNetblockIpRangesRead(d *data_source_google_netblock_ip_ranges_schema.ResourceData, meta interface{}) error { - - rt := d.Get("range_type").(string) - CidrBlocks := make(map[string][]string) - - switch rt { - - case "cloud-netblocks": - - const CLOUD_NETBLOCK_URL = "https://www.gstatic.com/ipranges/cloud.json" - CidrBlocks, err := getCidrBlocksFromUrl(CLOUD_NETBLOCK_URL) - - if err != nil { - return err - } - if err := d.Set("cidr_blocks", CidrBlocks["cidr_blocks"]); err != nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks: %s", err) - } - if err := d.Set("cidr_blocks_ipv4", CidrBlocks["cidr_blocks_ipv4"]); err != nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks_ipv4: %s", err) - } - if err := d.Set("cidr_blocks_ipv6", CidrBlocks["cidr_blocks_ipv6"]); err != nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks_ipv6: %s", err) - } - case "google-netblocks": - - const GOOGLE_NETBLOCK_URL = "https://www.gstatic.com/ipranges/goog.json" - CidrBlocks, err := getCidrBlocksFromUrl(GOOGLE_NETBLOCK_URL) - - if err != nil { - return err - } - if err := d.Set("cidr_blocks", CidrBlocks["cidr_blocks"]); err != nil { - return 
data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks: %s", err) - } - if err := d.Set("cidr_blocks_ipv4", CidrBlocks["cidr_blocks_ipv4"]); err != nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks_ipv4: %s", err) - } - if err := d.Set("cidr_blocks_ipv6", CidrBlocks["cidr_blocks_ipv6"]); err != nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks_ipv6: %s", err) - } - - case "restricted-googleapis": - - CidrBlocks["cidr_blocks_ipv4"] = append(CidrBlocks["cidr_blocks_ipv4"], "199.36.153.4/30") - CidrBlocks["cidr_blocks"] = CidrBlocks["cidr_blocks_ipv4"] - if err := d.Set("cidr_blocks", CidrBlocks["cidr_blocks"]); err != nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks: %s", err) - } - if err := d.Set("cidr_blocks_ipv4", CidrBlocks["cidr_blocks_ipv4"]); err != nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks_ipv4: %s", err) - } - case "private-googleapis": - - CidrBlocks["cidr_blocks_ipv4"] = append(CidrBlocks["cidr_blocks_ipv4"], "199.36.153.8/30") - CidrBlocks["cidr_blocks"] = CidrBlocks["cidr_blocks_ipv4"] - if err := d.Set("cidr_blocks", CidrBlocks["cidr_blocks"]); err != nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks: %s", err) - } - if err := d.Set("cidr_blocks_ipv4", CidrBlocks["cidr_blocks_ipv4"]); err != nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks_ipv4: %s", err) - } - case "dns-forwarders": - - CidrBlocks["cidr_blocks_ipv4"] = append(CidrBlocks["cidr_blocks_ipv4"], "35.199.192.0/19") - CidrBlocks["cidr_blocks"] = CidrBlocks["cidr_blocks_ipv4"] - if err := d.Set("cidr_blocks", CidrBlocks["cidr_blocks"]); err != nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks: %s", err) - } - if err := d.Set("cidr_blocks_ipv4", CidrBlocks["cidr_blocks_ipv4"]); err 
!= nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks_ipv4: %s", err) - } - case "iap-forwarders": - - CidrBlocks["cidr_blocks_ipv4"] = append(CidrBlocks["cidr_blocks_ipv4"], "35.235.240.0/20") - CidrBlocks["cidr_blocks"] = CidrBlocks["cidr_blocks_ipv4"] - if err := d.Set("cidr_blocks", CidrBlocks["cidr_blocks"]); err != nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks: %s", err) - } - if err := d.Set("cidr_blocks_ipv4", CidrBlocks["cidr_blocks_ipv4"]); err != nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks_ipv4: %s", err) - } - case "health-checkers": - - CidrBlocks["cidr_blocks_ipv4"] = append(CidrBlocks["cidr_blocks_ipv4"], "35.191.0.0/16") - CidrBlocks["cidr_blocks_ipv4"] = append(CidrBlocks["cidr_blocks_ipv4"], "130.211.0.0/22") - CidrBlocks["cidr_blocks"] = CidrBlocks["cidr_blocks_ipv4"] - if err := d.Set("cidr_blocks", CidrBlocks["cidr_blocks"]); err != nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks: %s", err) - } - if err := d.Set("cidr_blocks_ipv4", CidrBlocks["cidr_blocks_ipv4"]); err != nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks_ipv4: %s", err) - } - case "legacy-health-checkers": - - CidrBlocks["cidr_blocks_ipv4"] = append(CidrBlocks["cidr_blocks_ipv4"], "35.191.0.0/16") - CidrBlocks["cidr_blocks_ipv4"] = append(CidrBlocks["cidr_blocks_ipv4"], "209.85.152.0/22") - CidrBlocks["cidr_blocks_ipv4"] = append(CidrBlocks["cidr_blocks_ipv4"], "209.85.204.0/22") - CidrBlocks["cidr_blocks"] = CidrBlocks["cidr_blocks_ipv4"] - if err := d.Set("cidr_blocks", CidrBlocks["cidr_blocks"]); err != nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting cidr_blocks: %s", err) - } - if err := d.Set("cidr_blocks_ipv4", CidrBlocks["cidr_blocks_ipv4"]); err != nil { - return data_source_google_netblock_ip_ranges_fmt.Errorf("Error setting 
cidr_blocks_ipv4: %s", err) - } - default: - return data_source_google_netblock_ip_ranges_fmt.Errorf("Unknown range_type: %s", rt) - } - - d.SetId("netblock-ip-ranges-" + rt) - - return nil -} - -func getCidrBlocksFromUrl(url string) (map[string][]string, error) { - cidrBlocks := make(map[string][]string) - - response, err := data_source_google_netblock_ip_ranges_http.Get(url) - - if err != nil { - return nil, data_source_google_netblock_ip_ranges_fmt.Errorf("Error: %s", err) - } - - defer response.Body.Close() - body, err := data_source_google_netblock_ip_ranges_ioutil.ReadAll(response.Body) - - if err != nil { - return nil, data_source_google_netblock_ip_ranges_fmt.Errorf("Error to retrieve the CIDR list: %s", err) - } - - ranges := googRanges{} - jsonErr := data_source_google_netblock_ip_ranges_json.Unmarshal(body, &ranges) - if jsonErr != nil { - return nil, data_source_google_netblock_ip_ranges_fmt.Errorf("Error reading JSON list: %s", jsonErr) - } - - for _, element := range ranges.Prefixes { - - if len(element.Ipv4Prefix) > 0 { - cidrBlocks["cidr_blocks_ipv4"] = append(cidrBlocks["cidr_blocks_ipv4"], element.Ipv4Prefix) - cidrBlocks["cidr_blocks"] = append(cidrBlocks["cidr_blocks"], element.Ipv4Prefix) - } else if len(element.Ipv6Prefix) > 0 { - cidrBlocks["cidr_blocks_ipv6"] = append(cidrBlocks["cidr_blocks_ipv6"], element.Ipv6Prefix) - cidrBlocks["cidr_blocks"] = append(cidrBlocks["cidr_blocks"], element.Ipv6Prefix) - } - - } - - return cidrBlocks, nil -} - -func dataSourceGoogleOrganization() *data_source_google_organization_schema.Resource { - return &data_source_google_organization_schema.Resource{ - Read: dataSourceOrganizationRead, - Schema: map[string]*data_source_google_organization_schema.Schema{ - "domain": { - Type: data_source_google_organization_schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"organization"}, - }, - "organization": { - Type: data_source_google_organization_schema.TypeString, - Optional: true, - 
ConflictsWith: []string{"domain"}, - }, - "org_id": { - Type: data_source_google_organization_schema.TypeString, - Computed: true, - }, - "name": { - Type: data_source_google_organization_schema.TypeString, - Computed: true, - }, - "directory_customer_id": { - Type: data_source_google_organization_schema.TypeString, - Computed: true, - }, - "create_time": { - Type: data_source_google_organization_schema.TypeString, - Computed: true, - }, - "lifecycle_state": { - Type: data_source_google_organization_schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceOrganizationRead(d *data_source_google_organization_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - var organization *data_source_google_organization_cloudresourcemanager.Organization - if v, ok := d.GetOk("domain"); ok { - filter := data_source_google_organization_fmt.Sprintf("domain=%s", v.(string)) - var resp *data_source_google_organization_cloudresourcemanager.SearchOrganizationsResponse - err := retryTimeDuration(func() (err error) { - resp, err = config.NewResourceManagerClient(userAgent).Organizations.Search(&data_source_google_organization_cloudresourcemanager.SearchOrganizationsRequest{ - Filter: filter, - }).Do() - return err - }, d.Timeout(data_source_google_organization_schema.TimeoutRead)) - if err != nil { - return data_source_google_organization_fmt.Errorf("Error reading organization: %s", err) - } - - if len(resp.Organizations) == 0 { - return data_source_google_organization_fmt.Errorf("Organization not found: %s", v) - } - - if len(resp.Organizations) > 1 { - - for _, org := range resp.Organizations { - if org.DisplayName == v.(string) { - organization = org - break - } - } - if organization == nil { - return data_source_google_organization_fmt.Errorf("Received multiple organizations in the response, but could not find an exact domain match.") - } - } 
else { - organization = resp.Organizations[0] - } - - } else if v, ok := d.GetOk("organization"); ok { - var resp *data_source_google_organization_cloudresourcemanager.Organization - err := retryTimeDuration(func() (err error) { - resp, err = config.NewResourceManagerClient(userAgent).Organizations.Get(canonicalOrganizationName(v.(string))).Do() - return err - }, d.Timeout(data_source_google_organization_schema.TimeoutRead)) - if err != nil { - return handleNotFoundError(err, d, data_source_google_organization_fmt.Sprintf("Organization Not Found : %s", v)) - } - - organization = resp - } else { - return data_source_google_organization_fmt.Errorf("one of domain or organization must be set") - } - - d.SetId(organization.Name) - if err := d.Set("name", organization.Name); err != nil { - return data_source_google_organization_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("org_id", GetResourceNameFromSelfLink(organization.Name)); err != nil { - return data_source_google_organization_fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("domain", organization.DisplayName); err != nil { - return data_source_google_organization_fmt.Errorf("Error setting domain: %s", err) - } - if err := d.Set("create_time", organization.CreationTime); err != nil { - return data_source_google_organization_fmt.Errorf("Error setting create_time: %s", err) - } - if err := d.Set("lifecycle_state", organization.LifecycleState); err != nil { - return data_source_google_organization_fmt.Errorf("Error setting lifecycle_state: %s", err) - } - if organization.Owner != nil { - if err := d.Set("directory_customer_id", organization.Owner.DirectoryCustomerId); err != nil { - return data_source_google_organization_fmt.Errorf("Error setting directory_customer_id: %s", err) - } - } - - return nil -} - -func canonicalOrganizationName(ba string) string { - if data_source_google_organization_strings.HasPrefix(ba, "organizations/") { - return ba - } - - return "organizations/" + ba 
-} - -func dataSourceGoogleProject() *data_source_google_project_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceGoogleProject().Schema) - - addOptionalFieldsToSchema(dsSchema, "project_id") - - return &data_source_google_project_schema.Resource{ - Read: datasourceGoogleProjectRead, - Schema: dsSchema, - } -} - -func datasourceGoogleProjectRead(d *data_source_google_project_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - if v, ok := d.GetOk("project_id"); ok { - project := v.(string) - d.SetId(data_source_google_project_fmt.Sprintf("projects/%s", project)) - } else { - project, err := getProject(d, config) - if err != nil { - return err - } - d.SetId(data_source_google_project_fmt.Sprintf("projects/%s", project)) - } - - return resourceGoogleProjectRead(d, meta) -} - -func dataSourceGoogleProjectOrganizationPolicy() *data_source_google_project_organization_policy_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceGoogleProjectOrganizationPolicy().Schema) - - addRequiredFieldsToSchema(dsSchema, "project") - addRequiredFieldsToSchema(dsSchema, "constraint") - - return &data_source_google_project_organization_policy_schema.Resource{ - Read: datasourceGoogleProjectOrganizationPolicyRead, - Schema: dsSchema, - } -} - -func datasourceGoogleProjectOrganizationPolicyRead(d *data_source_google_project_organization_policy_schema.ResourceData, meta interface{}) error { - - d.SetId(data_source_google_project_organization_policy_fmt.Sprintf("%s:%s", d.Get("project"), d.Get("constraint"))) - - return resourceGoogleProjectOrganizationPolicyRead(d, meta) -} - -func dataSourceGoogleProjects() *data_source_google_projects_schema.Resource { - return &data_source_google_projects_schema.Resource{ - Read: datasourceGoogleProjectsRead, - Schema: map[string]*data_source_google_projects_schema.Schema{ - "filter": { - Type: data_source_google_projects_schema.TypeString, - Required: true, - }, - "projects": 
{ - Type: data_source_google_projects_schema.TypeList, - Computed: true, - Elem: &data_source_google_projects_schema.Resource{ - Schema: map[string]*data_source_google_projects_schema.Schema{ - "project_id": { - Type: data_source_google_projects_schema.TypeString, - Computed: true, - }, - "create_time": { - Type: data_source_google_projects_schema.TypeString, - Computed: true, - }, - "labels": { - Type: data_source_google_projects_schema.TypeMap, - Computed: true, - Elem: &data_source_google_projects_schema.Schema{Type: data_source_google_projects_schema.TypeString}, - Description: `A set of key/value label pairs assigned on a project.`, - }, - "parent": { - Type: data_source_google_projects_schema.TypeMap, - Computed: true, - Elem: &data_source_google_projects_schema.Schema{Type: data_source_google_projects_schema.TypeString}, - Description: `An optional reference to a parent Resource.`, - }, - "number": { - Type: data_source_google_projects_schema.TypeString, - Computed: true, - Description: `The numeric identifier of the project.`, - }, - "lifecycle_state": { - Type: data_source_google_projects_schema.TypeString, - Computed: true, - Description: `The numeric identifier of the project.`, - }, - "name": { - Type: data_source_google_projects_schema.TypeString, - Computed: true, - Description: `The optional user-assigned display name of the Project.`, - }, - }, - }, - }, - }, - } -} - -func datasourceGoogleProjectsRead(d *data_source_google_projects_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - params := make(map[string]string) - projects := make([]map[string]interface{}, 0) - - for { - params["filter"] = d.Get("filter").(string) - url := "https://cloudresourcemanager.googleapis.com/v1/projects" - - url, err := addQueryParams(url, params) - if err != nil { - return err - } - - res, err := sendRequest(config, "GET", "", url, userAgent, 
nil) - if err != nil { - return data_source_google_projects_fmt.Errorf("Error retrieving projects: %s", err) - } - - pageProjects := flattenDatasourceGoogleProjectsList(res["projects"]) - projects = append(projects, pageProjects...) - - pToken, ok := res["nextPageToken"] - if ok && pToken != nil && pToken.(string) != "" { - params["pageToken"] = pToken.(string) - } else { - break - } - } - - if err := d.Set("projects", projects); err != nil { - return data_source_google_projects_fmt.Errorf("Error retrieving projects: %s", err) - } - - d.SetId(d.Get("filter").(string)) - - return nil -} - -func flattenDatasourceGoogleProjectsList(v interface{}) []map[string]interface{} { - if v == nil { - return make([]map[string]interface{}, 0) - } - - ls := v.([]interface{}) - projects := make([]map[string]interface{}, 0, len(ls)) - for _, raw := range ls { - p := raw.(map[string]interface{}) - - var mId, mNumber, mLabels, mLifecycleState, mName, mCreateTime, mParent interface{} - if pId, ok := p["projectId"]; ok { - mId = pId - } - if pNumber, ok := p["projectNumber"]; ok { - mNumber = pNumber - } - if pName, ok := p["name"]; ok { - mName = pName - } - if pLabels, ok := p["labels"]; ok { - mLabels = pLabels - } - if pLifecycleState, ok := p["lifecycleState"]; ok { - mLifecycleState = pLifecycleState - } - if pCreateTime, ok := p["createTime"]; ok { - mCreateTime = pCreateTime - } - if pParent, ok := p["parent"]; ok { - mParent = pParent - } - projects = append(projects, map[string]interface{}{ - "project_id": mId, - "number": mNumber, - "name": mName, - "labels": mLabels, - "lifecycle_state": mLifecycleState, - "create_time": mCreateTime, - "parent": mParent, - }) - } - - return projects -} - -func dataSourceGoogleServiceAccount() *data_source_google_service_account_schema.Resource { - return &data_source_google_service_account_schema.Resource{ - Read: dataSourceGoogleServiceAccountRead, - Schema: map[string]*data_source_google_service_account_schema.Schema{ - "account_id": { - 
Type: data_source_google_service_account_schema.TypeString, - Required: true, - }, - "project": { - Type: data_source_google_service_account_schema.TypeString, - Optional: true, - }, - "email": { - Type: data_source_google_service_account_schema.TypeString, - Computed: true, - }, - "unique_id": { - Type: data_source_google_service_account_schema.TypeString, - Computed: true, - }, - "name": { - Type: data_source_google_service_account_schema.TypeString, - Computed: true, - }, - "display_name": { - Type: data_source_google_service_account_schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleServiceAccountRead(d *data_source_google_service_account_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - serviceAccountName, err := serviceAccountFQN(d.Get("account_id").(string), d, config) - if err != nil { - return err - } - - sa, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Get(serviceAccountName).Do() - if err != nil { - return handleNotFoundError(err, d, data_source_google_service_account_fmt.Sprintf("Service Account %q", serviceAccountName)) - } - - d.SetId(sa.Name) - if err := d.Set("email", sa.Email); err != nil { - return data_source_google_service_account_fmt.Errorf("Error setting email: %s", err) - } - if err := d.Set("unique_id", sa.UniqueId); err != nil { - return data_source_google_service_account_fmt.Errorf("Error setting unique_id: %s", err) - } - if err := d.Set("project", sa.ProjectId); err != nil { - return data_source_google_service_account_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("account_id", data_source_google_service_account_strings.Split(sa.Email, "@")[0]); err != nil { - return data_source_google_service_account_fmt.Errorf("Error setting account_id: %s", err) - } - if err := d.Set("name", sa.Name); err != nil { - return 
data_source_google_service_account_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("display_name", sa.DisplayName); err != nil { - return data_source_google_service_account_fmt.Errorf("Error setting display_name: %s", err) - } - - return nil -} - -func dataSourceGoogleServiceAccountAccessToken() *data_source_google_service_account_access_token_schema.Resource { - - return &data_source_google_service_account_access_token_schema.Resource{ - Read: dataSourceGoogleServiceAccountAccessTokenRead, - Schema: map[string]*data_source_google_service_account_access_token_schema.Schema{ - "target_service_account": { - Type: data_source_google_service_account_access_token_schema.TypeString, - Required: true, - ValidateFunc: validateRegexp("(" + data_source_google_service_account_access_token_strings.Join(PossibleServiceAccountNames, "|") + ")"), - }, - "access_token": { - Type: data_source_google_service_account_access_token_schema.TypeString, - Sensitive: true, - Computed: true, - }, - "scopes": { - Type: data_source_google_service_account_access_token_schema.TypeSet, - Required: true, - Elem: &data_source_google_service_account_access_token_schema.Schema{ - Type: data_source_google_service_account_access_token_schema.TypeString, - StateFunc: func(v interface{}) string { - return canonicalizeServiceScope(v.(string)) - }, - }, - }, - "delegates": { - Type: data_source_google_service_account_access_token_schema.TypeSet, - Optional: true, - Elem: &data_source_google_service_account_access_token_schema.Schema{ - Type: data_source_google_service_account_access_token_schema.TypeString, - ValidateFunc: validateRegexp(ServiceAccountLinkRegex), - }, - }, - "lifetime": { - Type: data_source_google_service_account_access_token_schema.TypeString, - Optional: true, - ValidateFunc: validateDuration(), - Default: "3600s", - }, - }, - } -} - -func dataSourceGoogleServiceAccountAccessTokenRead(d *data_source_google_service_account_access_token_schema.ResourceData, meta 
interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - data_source_google_service_account_access_token_log.Printf("[INFO] Acquire Service Account AccessToken for %s", d.Get("target_service_account").(string)) - - service := config.NewIamCredentialsClient(userAgent) - - name := data_source_google_service_account_access_token_fmt.Sprintf("projects/-/serviceAccounts/%s", d.Get("target_service_account").(string)) - tokenRequest := &data_source_google_service_account_access_token_iamcredentialsiamcredentials.GenerateAccessTokenRequest{ - Lifetime: d.Get("lifetime").(string), - Delegates: convertStringSet(d.Get("delegates").(*data_source_google_service_account_access_token_schema.Set)), - Scope: canonicalizeServiceScopes(convertStringSet(d.Get("scopes").(*data_source_google_service_account_access_token_schema.Set))), - } - at, err := service.Projects.ServiceAccounts.GenerateAccessToken(name, tokenRequest).Do() - if err != nil { - return err - } - - d.SetId(name) - if err := d.Set("access_token", at.AccessToken); err != nil { - return data_source_google_service_account_access_token_fmt.Errorf("Error setting access_token: %s", err) - } - - return nil -} - -const ( - userInfoScope = "https://www.googleapis.com/auth/userinfo.email" -) - -func dataSourceGoogleServiceAccountIdToken() *data_source_google_service_account_id_token_schema.Resource { - - return &data_source_google_service_account_id_token_schema.Resource{ - Read: dataSourceGoogleServiceAccountIdTokenRead, - Schema: map[string]*data_source_google_service_account_id_token_schema.Schema{ - "target_audience": { - Type: data_source_google_service_account_id_token_schema.TypeString, - Required: true, - }, - "target_service_account": { - Type: data_source_google_service_account_id_token_schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp("(" + 
data_source_google_service_account_id_token_strings.Join(PossibleServiceAccountNames, "|") + ")"), - }, - "delegates": { - Type: data_source_google_service_account_id_token_schema.TypeSet, - Optional: true, - Elem: &data_source_google_service_account_id_token_schema.Schema{ - Type: data_source_google_service_account_id_token_schema.TypeString, - ValidateFunc: validateRegexp(ServiceAccountLinkRegex), - }, - }, - "include_email": { - Type: data_source_google_service_account_id_token_schema.TypeBool, - Optional: true, - Default: false, - }, - - "id_token": { - Type: data_source_google_service_account_id_token_schema.TypeString, - Sensitive: true, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleServiceAccountIdTokenRead(d *data_source_google_service_account_id_token_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - targetAudience := d.Get("target_audience").(string) - creds, err := config.GetCredentials([]string{userInfoScope}, false) - if err != nil { - return data_source_google_service_account_id_token_fmt.Errorf("error calling getCredentials(): %v", err) - } - - if creds.JSON == nil { - - service := config.NewIamCredentialsClient(userAgent) - name := data_source_google_service_account_id_token_fmt.Sprintf("projects/-/serviceAccounts/%s", d.Get("target_service_account").(string)) - tokenRequest := &data_source_google_service_account_id_token_iamcredentialsiamcredentials.GenerateIdTokenRequest{ - Audience: targetAudience, - IncludeEmail: d.Get("include_email").(bool), - Delegates: convertStringSet(d.Get("delegates").(*data_source_google_service_account_id_token_schema.Set)), - } - at, err := service.Projects.ServiceAccounts.GenerateIdToken(name, tokenRequest).Do() - if err != nil { - return data_source_google_service_account_id_token_fmt.Errorf("error calling iamcredentials.GenerateIdToken: %v", err) - } - - 
d.SetId(d.Get("target_service_account").(string)) - if err := d.Set("id_token", at.Token); err != nil { - return data_source_google_service_account_id_token_fmt.Errorf("Error setting id_token: %s", err) - } - - return nil - } - - ctx := data_source_google_service_account_id_token_context.Background() - co := []data_source_google_service_account_id_token_option.ClientOption{} - if creds.JSON != nil { - co = append(co, data_source_google_service_account_id_token_idtoken.WithCredentialsJSON(creds.JSON)) - } - - idTokenSource, err := data_source_google_service_account_id_token_idtoken.NewTokenSource(ctx, targetAudience, co...) - if err != nil { - return data_source_google_service_account_id_token_fmt.Errorf("unable to retrieve TokenSource: %v", err) - } - idToken, err := idTokenSource.Token() - if err != nil { - return data_source_google_service_account_id_token_fmt.Errorf("unable to retrieve Token: %v", err) - } - - d.SetId(targetAudience) - if err := d.Set("id_token", idToken.AccessToken); err != nil { - return data_source_google_service_account_id_token_fmt.Errorf("Error setting id_token: %s", err) - } - - return nil -} - -func dataSourceGoogleServiceAccountKey() *data_source_google_service_account_key_schema.Resource { - return &data_source_google_service_account_key_schema.Resource{ - Read: dataSourceGoogleServiceAccountKeyRead, - - Schema: map[string]*data_source_google_service_account_key_schema.Schema{ - "name": { - Type: data_source_google_service_account_key_schema.TypeString, - Required: true, - ValidateFunc: validateRegexp(ServiceAccountKeyNameRegex), - }, - "public_key_type": { - Type: data_source_google_service_account_key_schema.TypeString, - Default: "TYPE_X509_PEM_FILE", - Optional: true, - ValidateFunc: data_source_google_service_account_key_validation.StringInSlice([]string{"TYPE_NONE", "TYPE_X509_PEM_FILE", "TYPE_RAW_PUBLIC_KEY"}, false), - }, - "project": { - Type: data_source_google_service_account_key_schema.TypeString, - Optional: true, - }, - 
"key_algorithm": { - Type: data_source_google_service_account_key_schema.TypeString, - Computed: true, - }, - "public_key": { - Type: data_source_google_service_account_key_schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleServiceAccountKeyRead(d *data_source_google_service_account_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - keyName := d.Get("name").(string) - - r := data_source_google_service_account_key_regexp.MustCompile(ServiceAccountKeyNameRegex) - if !r.MatchString(keyName) { - return data_source_google_service_account_key_fmt.Errorf("invalid key name %q does not match regexp %q", keyName, ServiceAccountKeyNameRegex) - } - - publicKeyType := d.Get("public_key_type").(string) - - sak, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Keys.Get(keyName).PublicKeyType(publicKeyType).Do() - if err != nil { - return handleNotFoundError(err, d, data_source_google_service_account_key_fmt.Sprintf("Service Account Key %q", keyName)) - } - - d.SetId(sak.Name) - - if err := d.Set("name", sak.Name); err != nil { - return data_source_google_service_account_key_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("key_algorithm", sak.KeyAlgorithm); err != nil { - return data_source_google_service_account_key_fmt.Errorf("Error setting key_algorithm: %s", err) - } - if err := d.Set("public_key", sak.PublicKeyData); err != nil { - return data_source_google_service_account_key_fmt.Errorf("Error setting public_key: %s", err) - } - - return nil -} - -func dataSourceGoogleServiceNetworkingPeeredDNSDomain() *data_source_google_service_networking_peered_dns_domain_schema.Resource { - return &data_source_google_service_networking_peered_dns_domain_schema.Resource{ - Read: resourceGoogleServiceNetworkingPeeredDNSDomainRead, - Schema: 
map[string]*data_source_google_service_networking_peered_dns_domain_schema.Schema{ - "project": { - Type: data_source_google_service_networking_peered_dns_domain_schema.TypeString, - Required: true, - }, - "name": { - Type: data_source_google_service_networking_peered_dns_domain_schema.TypeString, - Required: true, - }, - "network": { - Type: data_source_google_service_networking_peered_dns_domain_schema.TypeString, - Required: true, - }, - "service": { - Type: data_source_google_service_networking_peered_dns_domain_schema.TypeString, - Required: true, - }, - "dns_suffix": { - Type: data_source_google_service_networking_peered_dns_domain_schema.TypeString, - Computed: true, - }, - "parent": { - Type: data_source_google_service_networking_peered_dns_domain_schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleSQLCaCerts() *data_source_google_sql_ca_certs_schema.Resource { - return &data_source_google_sql_ca_certs_schema.Resource{ - Read: dataSourceGoogleSQLCaCertsRead, - - Schema: map[string]*data_source_google_sql_ca_certs_schema.Schema{ - "instance": { - Type: data_source_google_sql_ca_certs_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - "project": { - Type: data_source_google_sql_ca_certs_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "active_version": { - Type: data_source_google_sql_ca_certs_schema.TypeString, - Computed: true, - }, - "certs": { - Type: data_source_google_sql_ca_certs_schema.TypeList, - Elem: &data_source_google_sql_ca_certs_schema.Resource{ - Schema: map[string]*data_source_google_sql_ca_certs_schema.Schema{ - "cert": { - Type: data_source_google_sql_ca_certs_schema.TypeString, - Computed: true, - }, - "common_name": { - Type: data_source_google_sql_ca_certs_schema.TypeString, - Computed: true, - }, - "create_time": { - Type: data_source_google_sql_ca_certs_schema.TypeString, - Computed: true, - }, - "expiration_time": { - Type: 
data_source_google_sql_ca_certs_schema.TypeString, - Computed: true, - }, - "sha1_fingerprint": { - Type: data_source_google_sql_ca_certs_schema.TypeString, - Computed: true, - }, - }, - }, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleSQLCaCertsRead(d *data_source_google_sql_ca_certs_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - fv, err := parseProjectFieldValue("instances", d.Get("instance").(string), "project", d, config, false) - if err != nil { - return err - } - project := fv.Project - instance := fv.Name - - data_source_google_sql_ca_certs_log.Printf("[DEBUG] Fetching CA certs from instance %s", instance) - - response, err := config.NewSqlAdminClient(userAgent).Instances.ListServerCas(project, instance).Do() - if err != nil { - return data_source_google_sql_ca_certs_fmt.Errorf("error retrieving CA certs: %s", err) - } - - data_source_google_sql_ca_certs_log.Printf("[DEBUG] Fetched CA certs from instance %s", instance) - - if err := d.Set("project", project); err != nil { - return data_source_google_sql_ca_certs_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("certs", flattenServerCaCerts(response.Certs)); err != nil { - return data_source_google_sql_ca_certs_fmt.Errorf("Error setting certs: %s", err) - } - if err := d.Set("active_version", response.ActiveVersion); err != nil { - return data_source_google_sql_ca_certs_fmt.Errorf("Error setting active_version: %s", err) - } - d.SetId(data_source_google_sql_ca_certs_fmt.Sprintf("projects/%s/instance/%s", project, instance)) - - return nil -} - -func dataSourceGoogleStorageBucket() *data_source_google_storage_bucket_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceStorageBucket().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - return &data_source_google_storage_bucket_schema.Resource{ - Read: 
dataSourceGoogleStorageBucketRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleStorageBucketRead(d *data_source_google_storage_bucket_schema.ResourceData, meta interface{}) error { - - bucket := d.Get("name").(string) - d.SetId(bucket) - - return resourceStorageBucketRead(d, meta) -} - -func dataSourceGoogleStorageBucketObject() *data_source_google_storage_bucket_object_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceStorageBucketObject().Schema) - - addOptionalFieldsToSchema(dsSchema, "bucket") - addOptionalFieldsToSchema(dsSchema, "name") - - return &data_source_google_storage_bucket_object_schema.Resource{ - Read: dataSourceGoogleStorageBucketObjectRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleStorageBucketObjectRead(d *data_source_google_storage_bucket_object_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - name := d.Get("name").(string) - - if data_source_google_storage_bucket_object_strings.Contains(name, "/") { - name = data_source_google_storage_bucket_object_url.QueryEscape(name) - } - - data_source_google_storage_bucket_object_url := data_source_google_storage_bucket_object_fmt.Sprintf("https://www.googleapis.com/storage/v1/b/%s/o/%s", bucket, name) - - res, err := sendRequest(config, "GET", "", data_source_google_storage_bucket_object_url, userAgent, nil) - if err != nil { - return data_source_google_storage_bucket_object_fmt.Errorf("Error retrieving storage bucket object: %s", err) - } - - if err := d.Set("cache_control", res["cacheControl"]); err != nil { - return data_source_google_storage_bucket_object_fmt.Errorf("Error setting cache_control: %s", err) - } - if err := d.Set("content_disposition", res["contentDisposition"]); err != nil { - return data_source_google_storage_bucket_object_fmt.Errorf("Error setting content_disposition: %s", err) 
- } - if err := d.Set("content_encoding", res["contentEncoding"]); err != nil { - return data_source_google_storage_bucket_object_fmt.Errorf("Error setting content_encoding: %s", err) - } - if err := d.Set("content_language", res["contentLanguage"]); err != nil { - return data_source_google_storage_bucket_object_fmt.Errorf("Error setting content_language: %s", err) - } - if err := d.Set("content_type", res["contentType"]); err != nil { - return data_source_google_storage_bucket_object_fmt.Errorf("Error setting content_type: %s", err) - } - if err := d.Set("crc32c", res["crc32c"]); err != nil { - return data_source_google_storage_bucket_object_fmt.Errorf("Error setting crc32c: %s", err) - } - if err := d.Set("self_link", res["selfLink"]); err != nil { - return data_source_google_storage_bucket_object_fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("storage_class", res["storageClass"]); err != nil { - return data_source_google_storage_bucket_object_fmt.Errorf("Error setting storage_class: %s", err) - } - if err := d.Set("md5hash", res["md5Hash"]); err != nil { - return data_source_google_storage_bucket_object_fmt.Errorf("Error setting md5hash: %s", err) - } - if err := d.Set("media_link", res["mediaLink"]); err != nil { - return data_source_google_storage_bucket_object_fmt.Errorf("Error setting media_link: %s", err) - } - if err := d.Set("metadata", res["metadata"]); err != nil { - return data_source_google_storage_bucket_object_fmt.Errorf("Error setting metadata: %s", err) - } - - d.SetId(bucket + "-" + name) - - return nil -} - -func dataSourceGoogleStorageProjectServiceAccount() *data_source_google_storage_project_service_account_schema.Resource { - return &data_source_google_storage_project_service_account_schema.Resource{ - Read: dataSourceGoogleStorageProjectServiceAccountRead, - Schema: map[string]*data_source_google_storage_project_service_account_schema.Schema{ - "project": { - Type: 
data_source_google_storage_project_service_account_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "user_project": { - Type: data_source_google_storage_project_service_account_schema.TypeString, - Optional: true, - ForceNew: true, - }, - "email_address": { - Type: data_source_google_storage_project_service_account_schema.TypeString, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleStorageProjectServiceAccountRead(d *data_source_google_storage_project_service_account_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - serviceAccountGetRequest := config.NewStorageClient(userAgent).Projects.ServiceAccount.Get(project) - - if v, ok := d.GetOk("user_project"); ok { - serviceAccountGetRequest = serviceAccountGetRequest.UserProject(v.(string)) - } - - serviceAccount, err := serviceAccountGetRequest.Do() - if err != nil { - return handleNotFoundError(err, d, "GCS service account not found") - } - - if err := d.Set("project", project); err != nil { - return data_source_google_storage_project_service_account_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("email_address", serviceAccount.EmailAddress); err != nil { - return data_source_google_storage_project_service_account_fmt.Errorf("Error setting email_address: %s", err) - } - - d.SetId(serviceAccount.EmailAddress) - - return nil -} - -func dataSourceGoogleStorageTransferProjectServiceAccount() *data_source_google_storage_transfer_project_service_account_schema.Resource { - return &data_source_google_storage_transfer_project_service_account_schema.Resource{ - Read: dataSourceGoogleStorageTransferProjectServiceAccountRead, - Schema: map[string]*data_source_google_storage_transfer_project_service_account_schema.Schema{ - "email": { - Type: 
data_source_google_storage_transfer_project_service_account_schema.TypeString, - Computed: true, - }, - "project": { - Type: data_source_google_storage_transfer_project_service_account_schema.TypeString, - Optional: true, - Computed: true, - }, - }, - } -} - -func dataSourceGoogleStorageTransferProjectServiceAccountRead(d *data_source_google_storage_transfer_project_service_account_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - serviceAccount, err := config.NewStorageTransferClient(userAgent).GoogleServiceAccounts.Get(project).Do() - if err != nil { - return handleNotFoundError(err, d, "Google Cloud Storage Transfer service account not found") - } - - d.SetId(serviceAccount.AccountEmail) - if err := d.Set("email", serviceAccount.AccountEmail); err != nil { - return data_source_google_storage_transfer_project_service_account_fmt.Errorf("Error setting email: %s", err) - } - if err := d.Set("project", project); err != nil { - return data_source_google_storage_transfer_project_service_account_fmt.Errorf("Error setting project: %s", err) - } - return nil -} - -func dataSourceGoogleIapClient() *data_source_iap_client_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceIapClient().Schema) - addRequiredFieldsToSchema(dsSchema, "brand", "client_id") - - return &data_source_iap_client_schema.Resource{ - Read: dataSourceGoogleIapClientRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleIapClientRead(d *data_source_iap_client_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, "{{brand}}/identityAwareProxyClients/{{client_id}}") - if err != nil { - return data_source_iap_client_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - return resourceIapClientRead(d, meta) -} - 
-func dataSourceMonitoringIstioCanonicalService() *data_source_monitoring_istio_canonical_service_schema.Resource { - csSchema := map[string]*data_source_monitoring_istio_canonical_service_schema.Schema{ - "mesh_uid": { - Type: data_source_monitoring_istio_canonical_service_schema.TypeString, - Required: true, - Description: `Identifier for the Istio mesh in which this canonical service is defined. - Corresponds to the meshUid metric label in Istio metrics.`, - }, - "canonical_service_namespace": { - Type: data_source_monitoring_istio_canonical_service_schema.TypeString, - Required: true, - Description: `The namespace of the canonical service underlying this service. - Corresponds to the destination_service_namespace metric label in Istio metrics.`, - }, - "canonical_service": { - Type: data_source_monitoring_istio_canonical_service_schema.TypeString, - Required: true, - Description: `The name of the canonical service underlying this service.. - Corresponds to the destination_service_name metric label in Istio metrics.`, - }, - } - t := `istio_canonical_service.mesh_uid="{{mesh_uid}}" AND - istio_canonical_service.canonical_service="{{canonical_service}}" AND - istio_canonical_service.canonical_service_namespace="{{canonical_service_namespace}}"` - return dataSourceMonitoringServiceType(csSchema, t, dataSourceMonitoringIstioCanonicalServiceRead) -} - -func dataSourceMonitoringIstioCanonicalServiceRead(res map[string]interface{}, d *data_source_monitoring_istio_canonical_service_schema.ResourceData, meta interface{}) error { - var istioCanonicalService map[string]interface{} - if v, ok := res["istio_canonical_service"]; ok { - istioCanonicalService = v.(map[string]interface{}) - } - if len(istioCanonicalService) == 0 { - return nil - } - if err := d.Set("canonical_service", istioCanonicalService["canonical_service"]); err != nil { - return err - } - if err := d.Set("canonical_service_namespace", istioCanonicalService["canonical_service_namespace"]); err != nil { - 
return err - } - if err := d.Set("mesh_name", istioCanonicalService["mesh_name"]); err != nil { - return err - } - return nil -} - -func dataSourceMonitoringNotificationChannel() *data_source_monitoring_notification_channel_schema.Resource { - dsSchema := datasourceSchemaFromResourceSchema(resourceMonitoringNotificationChannel().Schema) - - addOptionalFieldsToSchema(dsSchema, "display_name") - addOptionalFieldsToSchema(dsSchema, "project") - addOptionalFieldsToSchema(dsSchema, "type") - addOptionalFieldsToSchema(dsSchema, "labels") - addOptionalFieldsToSchema(dsSchema, "user_labels") - - return &data_source_monitoring_notification_channel_schema.Resource{ - Read: dataSourceMonitoringNotificationChannelRead, - Schema: dsSchema, - } -} - -func dataSourceMonitoringNotificationChannelRead(d *data_source_monitoring_notification_channel_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/notificationChannels") - if err != nil { - return err - } - - displayName := d.Get("display_name").(string) - channelType := d.Get("type").(string) - - if displayName == "" && channelType == "" { - return data_source_monitoring_notification_channel_fmt.Errorf("At least one of display_name or type must be provided") - } - - labels, err := expandMonitoringNotificationChannelLabels(d.Get("labels"), d, config) - if err != nil { - return err - } - - userLabels, err := expandMonitoringNotificationChannelLabels(d.Get("user_labels"), d, config) - if err != nil { - return err - } - - filters := make([]string, 0, len(labels)+2) - - if displayName != "" { - filters = append(filters, data_source_monitoring_notification_channel_fmt.Sprintf(`display_name="%s"`, displayName)) - } - - if channelType != "" { - filters = append(filters, 
data_source_monitoring_notification_channel_fmt.Sprintf(`type="%s"`, channelType)) - } - - for k, v := range labels { - filters = append(filters, data_source_monitoring_notification_channel_fmt.Sprintf(`labels.%s="%s"`, k, v)) - } - - for k, v := range userLabels { - filters = append(filters, data_source_monitoring_notification_channel_fmt.Sprintf(`user_labels.%s="%s"`, k, v)) - } - - filter := data_source_monitoring_notification_channel_strings.Join(filters, " AND ") - params := map[string]string{ - "filter": filter, - } - url, err = addQueryParams(url, params) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - response, err := sendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return data_source_monitoring_notification_channel_fmt.Errorf("Error retrieving NotificationChannels: %s", err) - } - - var channels []interface{} - if v, ok := response["notificationChannels"]; ok { - channels = v.([]interface{}) - } - if len(channels) == 0 { - return data_source_monitoring_notification_channel_fmt.Errorf("No NotificationChannel found using filter: %s", filter) - } - if len(channels) > 1 { - return data_source_monitoring_notification_channel_fmt.Errorf("Found more than one 1 NotificationChannel matching specified filter: %s", filter) - } - res := channels[0].(map[string]interface{}) - - name := flattenMonitoringNotificationChannelName(res["name"], d, config).(string) - if err := d.Set("name", name); err != nil { - return data_source_monitoring_notification_channel_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name) - - return resourceMonitoringNotificationChannelRead(d, meta) -} - -type monitoringServiceTypeStateSetter func(map[string]interface{}, *data_source_monitoring_service_schema.ResourceData, interface{}) error - -func dataSourceMonitoringServiceType( - typeSchema map[string]*data_source_monitoring_service_schema.Schema, - listFilter string, - typeStateSetter 
monitoringServiceTypeStateSetter) *data_source_monitoring_service_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceMonitoringService().Schema) - addOptionalFieldsToSchema(dsSchema, "project") - - dsSchema = mergeSchemas(typeSchema, dsSchema) - - return &data_source_monitoring_service_schema.Resource{ - Read: dataSourceMonitoringServiceTypeReadFromList(listFilter, typeStateSetter), - Schema: dsSchema, - } -} - -func dataSourceMonitoringServiceTypeReadFromList(listFilter string, typeStateSetter monitoringServiceTypeStateSetter) data_source_monitoring_service_schema.ReadFunc { - return func(d *data_source_monitoring_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - filters, err := replaceVars(d, config, listFilter) - if err != nil { - return err - } - - listUrlTmpl := "{{MonitoringBasePath}}v3/projects/{{project}}/services?filter=" + data_source_monitoring_service_urlneturl.QueryEscape(filters) - url, err := replaceVars(d, config, listUrlTmpl) - if err != nil { - return err - } - - resp, err := sendRequest(config, "GET", project, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return data_source_monitoring_service_fmt.Errorf("unable to list Monitoring Service for data source: %v", err) - } - - v, ok := resp["services"] - if !ok || v == nil { - return data_source_monitoring_service_fmt.Errorf("no Monitoring Services found for data source") - } - ls, ok := v.([]interface{}) - if !ok { - return data_source_monitoring_service_fmt.Errorf("no Monitoring Services found for data source") - } - if len(ls) == 0 { - return data_source_monitoring_service_fmt.Errorf("no Monitoring Services found for data source") - } - if len(ls) > 1 { - return data_source_monitoring_service_fmt.Errorf("more than one Monitoring 
Services with given identifier found") - } - res := ls[0].(map[string]interface{}) - - if err := d.Set("project", project); err != nil { - return data_source_monitoring_service_fmt.Errorf("Error setting Service: %s", err) - } - if err := d.Set("display_name", flattenMonitoringServiceDisplayName(res["displayName"], d, config)); err != nil { - return data_source_monitoring_service_fmt.Errorf("Error setting Service: %s", err) - } - if err := d.Set("telemetry", flattenMonitoringServiceTelemetry(res["telemetry"], d, config)); err != nil { - return data_source_monitoring_service_fmt.Errorf("Error setting Service: %s", err) - } - if err := d.Set("service_id", flattenMonitoringServiceServiceId(res["name"], d, config)); err != nil { - return data_source_monitoring_service_fmt.Errorf("Error setting Service: %s", err) - } - if err := typeStateSetter(res, d, config); err != nil { - return data_source_monitoring_service_fmt.Errorf("Error reading Service: %s", err) - } - - name := flattenMonitoringServiceName(res["name"], d, config).(string) - if err := d.Set("name", name); err != nil { - return data_source_monitoring_service_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name) - - return nil - } -} - -func dataSourceMonitoringServiceAppEngine() *data_source_monitoring_service_app_engine_schema.Resource { - aeSchema := map[string]*data_source_monitoring_service_app_engine_schema.Schema{ - "module_id": { - Type: data_source_monitoring_service_app_engine_schema.TypeString, - Required: true, - Description: `The ID of the App Engine module underlying this service. 
-Corresponds to the 'moduleId' resource label for a 'gae_app' -monitored resource(see https://cloud.google.com/monitoring/api/resources#tag_gae_app)`, - }, - } - filter := `app_engine.module_id="{{module_id}}"` - return dataSourceMonitoringServiceType(aeSchema, filter, dataSourceMonitoringServiceAppEngineRead) -} - -func dataSourceMonitoringServiceAppEngineRead(res map[string]interface{}, d *data_source_monitoring_service_app_engine_schema.ResourceData, meta interface{}) error { - var appEngine map[string]interface{} - if v, ok := res["app_engine"]; ok { - appEngine = v.(map[string]interface{}) - } - if len(appEngine) == 0 { - return nil - } - - if err := d.Set("module_id", appEngine["module_id"]); err != nil { - return err - } - return nil -} - -func dataSourceMonitoringServiceClusterIstio() *data_source_monitoring_service_cluster_istio_schema.Resource { - ciSchema := map[string]*data_source_monitoring_service_cluster_istio_schema.Schema{ - "location": { - Type: data_source_monitoring_service_cluster_istio_schema.TypeString, - Required: true, - Description: `The location of the Kubernetes cluster in which this Istio service is defined. - Corresponds to the location resource label in k8s_cluster resources.`, - }, - "cluster_name": { - Type: data_source_monitoring_service_cluster_istio_schema.TypeString, - Required: true, - Description: `The name of the Kubernetes cluster in which this Istio service is defined. - Corresponds to the clusterName resource label in k8s_cluster resources.`, - }, - "service_namespace": { - Type: data_source_monitoring_service_cluster_istio_schema.TypeString, - Required: true, - Description: `The namespace of the Istio service underlying this service. - Corresponds to the destination_service_namespace metric label in Istio metrics.`, - }, - "service_name": { - Type: data_source_monitoring_service_cluster_istio_schema.TypeString, - Required: true, - Description: `The name of the Istio service underlying this service. 
- Corresponds to the destination_service_name metric label in Istio metrics.`, - }, - } - filter := `cluster_istio.cluster_name="{{cluster_name}}" AND - cluster_istio.service_namespace="{{service_namespace}}" AND - cluster_istio.service_name="{{service_name}}" AND - cluster_istio.location="{{location}}"` - return dataSourceMonitoringServiceType(ciSchema, filter, dataSourceMonitoringServiceClusterIstioRead) -} - -func dataSourceMonitoringServiceClusterIstioRead(res map[string]interface{}, d *data_source_monitoring_service_cluster_istio_schema.ResourceData, meta interface{}) error { - var clusterIstio map[string]interface{} - if v, ok := res["cluster_istio"]; ok { - clusterIstio = v.(map[string]interface{}) - } - if len(clusterIstio) == 0 { - return nil - } - - if err := d.Set("location", clusterIstio["location"]); err != nil { - return err - } - if err := d.Set("service_name", clusterIstio["service_name"]); err != nil { - return err - } - if err := d.Set("service_namespace", clusterIstio["service_namespace"]); err != nil { - return err - } - if err := d.Set("cluster_name", clusterIstio["cluster_name"]); err != nil { - return err - } - return nil -} - -func dataSourceMonitoringServiceMeshIstio() *data_source_monitoring_service_mesh_istio_schema.Resource { - miSchema := map[string]*data_source_monitoring_service_mesh_istio_schema.Schema{ - "mesh_uid": { - Type: data_source_monitoring_service_mesh_istio_schema.TypeString, - Required: true, - Description: `Identifier for the mesh in which this Istio service is defined. - Corresponds to the meshUid metric label in Istio metrics.`, - }, - "service_namespace": { - Type: data_source_monitoring_service_mesh_istio_schema.TypeString, - Required: true, - Description: `The namespace of the Istio service underlying this service. 
- Corresponds to the destination_service_namespace metric label in Istio metrics.`, - }, - "service_name": { - Type: data_source_monitoring_service_mesh_istio_schema.TypeString, - Required: true, - Description: `The name of the Istio service underlying this service. - Corresponds to the destination_service_name metric label in Istio metrics.`, - }, - } - t := `mesh_istio.mesh_uid="{{mesh_uid}}" AND - mesh_istio.service_name="{{service_name}}" AND - mesh_istio.service_namespace="{{service_namespace}}"` - return dataSourceMonitoringServiceType(miSchema, t, dataSourceMonitoringServiceMeshIstioRead) -} - -func dataSourceMonitoringServiceMeshIstioRead(res map[string]interface{}, d *data_source_monitoring_service_mesh_istio_schema.ResourceData, meta interface{}) error { - var meshIstio map[string]interface{} - if v, ok := res["mesh_istio"]; ok { - meshIstio = v.(map[string]interface{}) - } - if len(meshIstio) == 0 { - return nil - } - if err := d.Set("service_name", meshIstio["service_name"]); err != nil { - return err - } - if err := d.Set("service_namespace", meshIstio["service_namespace"]); err != nil { - return err - } - if err := d.Set("mesh_name", meshIstio["mesh_name"]); err != nil { - return err - } - return nil -} - -func dataSourceGooglePubsubTopic() *data_source_pubsub_topic_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourcePubsubTopic().Schema) - addRequiredFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "project") - - return &data_source_pubsub_topic_schema.Resource{ - Read: dataSourceGooglePubsubTopicRead, - Schema: dsSchema, - } -} - -func dataSourceGooglePubsubTopicRead(d *data_source_pubsub_topic_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, "projects/{{project}}/topics/{{name}}") - if err != nil { - return data_source_pubsub_topic_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - return resourcePubsubTopicRead(d, meta) -} - 
-func dataSourceGoogleRedisInstance() *data_source_redis_instance_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceRedisInstance().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - - addOptionalFieldsToSchema(dsSchema, "project", "region") - - return &data_source_redis_instance_schema.Resource{ - Read: dataSourceGoogleRedisInstanceRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleRedisInstanceRead(d *data_source_redis_instance_schema.ResourceData, meta interface{}) error { - id, err := replaceVars(d, meta.(*Config), "projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return err - } - d.SetId(id) - - return resourceRedisInstanceRead(d, meta) -} - -func dataSourceSecretManagerSecret() *data_source_secret_manager_secret_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceSecretManagerSecret().Schema) - addRequiredFieldsToSchema(dsSchema, "secret_id") - addOptionalFieldsToSchema(dsSchema, "project") - - return &data_source_secret_manager_secret_schema.Resource{ - Read: dataSourceSecretManagerSecretRead, - Schema: dsSchema, - } -} - -func dataSourceSecretManagerSecretRead(d *data_source_secret_manager_secret_schema.ResourceData, meta interface{}) error { - id, err := replaceVars(d, meta.(*Config), "projects/{{project}}/secrets/{{secret_id}}") - if err != nil { - return data_source_secret_manager_secret_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - return resourceSecretManagerSecretRead(d, meta) -} - -func dataSourceSecretManagerSecretVersion() *data_source_secret_manager_secret_version_schema.Resource { - return &data_source_secret_manager_secret_version_schema.Resource{ - Read: dataSourceSecretManagerSecretVersionRead, - Schema: map[string]*data_source_secret_manager_secret_version_schema.Schema{ - "project": { - Type: data_source_secret_manager_secret_version_schema.TypeString, - Optional: true, - Computed: true, - }, - "secret": { - Type: 
data_source_secret_manager_secret_version_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - "version": { - Type: data_source_secret_manager_secret_version_schema.TypeString, - Optional: true, - Computed: true, - }, - "create_time": { - Type: data_source_secret_manager_secret_version_schema.TypeString, - Computed: true, - }, - "destroy_time": { - Type: data_source_secret_manager_secret_version_schema.TypeString, - Computed: true, - }, - "name": { - Type: data_source_secret_manager_secret_version_schema.TypeString, - Computed: true, - }, - "enabled": { - Type: data_source_secret_manager_secret_version_schema.TypeBool, - Computed: true, - }, - "secret_data": { - Type: data_source_secret_manager_secret_version_schema.TypeString, - Computed: true, - Sensitive: true, - }, - }, - } -} - -func dataSourceSecretManagerSecretVersionRead(d *data_source_secret_manager_secret_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - fv, err := parseProjectFieldValue("secrets", d.Get("secret").(string), "project", d, config, false) - if err != nil { - return err - } - if d.Get("project").(string) != "" && d.Get("project").(string) != fv.Project { - return data_source_secret_manager_secret_version_fmt.Errorf("The project set on this secret version (%s) is not equal to the project where this secret exists (%s).", d.Get("project").(string), fv.Project) - } - project := fv.Project - if err := d.Set("project", project); err != nil { - return data_source_secret_manager_secret_version_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("secret", fv.Name); err != nil { - return data_source_secret_manager_secret_version_fmt.Errorf("Error setting secret: %s", err) - } - - var url string - versionNum := d.Get("version") - - if versionNum != "" { - url, err = replaceVars(d, config, 
"{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret}}/versions/{{version}}") - if err != nil { - return err - } - } else { - url, err = replaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret}}/versions/latest") - if err != nil { - return err - } - } - - var version map[string]interface{} - version, err = sendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return data_source_secret_manager_secret_version_fmt.Errorf("Error retrieving available secret manager secret versions: %s", err.Error()) - } - - secretVersionRegex := data_source_secret_manager_secret_version_regexp.MustCompile("projects/(.+)/secrets/(.+)/versions/(.+)$") - - parts := secretVersionRegex.FindStringSubmatch(version["name"].(string)) - - if len(parts) != 4 { - panic(data_source_secret_manager_secret_version_fmt.Sprintf("secret name, %s, does not match format, projects/{{project}}/secrets/{{secret}}/versions/{{version}}", version["name"].(string))) - } - - data_source_secret_manager_secret_version_log.Printf("[DEBUG] Received Google SecretManager Version: %q", version) - - if err := d.Set("version", parts[3]); err != nil { - return data_source_secret_manager_secret_version_fmt.Errorf("Error setting version: %s", err) - } - - url = data_source_secret_manager_secret_version_fmt.Sprintf("%s:access", url) - resp, err := sendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return data_source_secret_manager_secret_version_fmt.Errorf("Error retrieving available secret manager secret version access: %s", err.Error()) - } - - if err := d.Set("create_time", version["createTime"].(string)); err != nil { - return data_source_secret_manager_secret_version_fmt.Errorf("Error setting create_time: %s", err) - } - if version["destroyTime"] != nil { - if err := d.Set("destroy_time", version["destroyTime"].(string)); err != nil { - return data_source_secret_manager_secret_version_fmt.Errorf("Error setting destroy_time: %s", err) 
- } - } - if err := d.Set("name", version["name"].(string)); err != nil { - return data_source_secret_manager_secret_version_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("enabled", true); err != nil { - return data_source_secret_manager_secret_version_fmt.Errorf("Error setting enabled: %s", err) - } - - data := resp["payload"].(map[string]interface{}) - secretData, err := data_source_secret_manager_secret_version_base64.StdEncoding.DecodeString(data["data"].(string)) - if err != nil { - return data_source_secret_manager_secret_version_fmt.Errorf("Error decoding secret manager secret version data: %s", err.Error()) - } - if err := d.Set("secret_data", string(secretData)); err != nil { - return data_source_secret_manager_secret_version_fmt.Errorf("Error setting secret_data: %s", err) - } - - d.SetId(version["name"].(string)) - return nil -} - -func dataSourceGoogleSourceRepoRepository() *data_source_sourcerepo_repository_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceSourceRepoRepository().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "project") - - return &data_source_sourcerepo_repository_schema.Resource{ - Read: dataSourceGoogleSourceRepoRepositoryRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleSourceRepoRepositoryRead(d *data_source_sourcerepo_repository_schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - - id, err := replaceVars(d, config, "projects/{{project}}/repos/{{name}}") - if err != nil { - return data_source_sourcerepo_repository_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return resourceSourceRepoRepositoryRead(d, meta) -} - -func dataSourceSpannerInstance() *data_source_spanner_instance_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceSpannerInstance().Schema) - - addRequiredFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "config") - 
addOptionalFieldsToSchema(dsSchema, "display_name") - addOptionalFieldsToSchema(dsSchema, "project") - - return &data_source_spanner_instance_schema.Resource{ - Read: dataSourceSpannerInstanceRead, - Schema: dsSchema, - } -} - -func dataSourceSpannerInstanceRead(d *data_source_spanner_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, "{{project}}/{{name}}") - if err != nil { - return data_source_spanner_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return resourceSpannerInstanceRead(d, meta) -} - -func dataSourceSqlBackupRun() *data_source_sql_backup_run_schema.Resource { - - return &data_source_sql_backup_run_schema.Resource{ - Read: dataSourceSqlBackupRunRead, - - Schema: map[string]*data_source_sql_backup_run_schema.Schema{ - "backup_id": { - Type: data_source_sql_backup_run_schema.TypeInt, - Optional: true, - Computed: true, - Description: `The identifier for this backup run. Unique only for a specific Cloud SQL instance. 
If left empty and multiple backups exist for the instance, most_recent must be set to true.`, - }, - "instance": { - Type: data_source_sql_backup_run_schema.TypeString, - Required: true, - Description: `Name of the database instance.`, - }, - "location": { - Type: data_source_sql_backup_run_schema.TypeString, - Computed: true, - Description: `Location of the backups.`, - }, - "start_time": { - Type: data_source_sql_backup_run_schema.TypeString, - Computed: true, - Description: `The time the backup operation actually started in UTC timezone in RFC 3339 format, for example 2012-11-15T16:19:00.094Z.`, - }, - "status": { - Type: data_source_sql_backup_run_schema.TypeString, - Computed: true, - Description: `The status of this run.`, - }, - "most_recent": { - Type: data_source_sql_backup_run_schema.TypeBool, - Optional: true, - Description: `Toggles use of the most recent backup run if multiple backups exist for a Cloud SQL instance.`, - }, - }, - } -} - -func dataSourceSqlBackupRunRead(d *data_source_sql_backup_run_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - project, err := getProject(d, config) - if err != nil { - return err - } - - instance := d.Get("instance").(string) - - var backup *data_source_sql_backup_run_sqladminsqladmin.BackupRun - if backupId, ok := d.GetOk("backup_id"); ok { - backup, err = config.NewSqlAdminClient(userAgent).BackupRuns.Get(project, instance, int64(backupId.(int))).Do() - if err != nil { - return err - } - } else { - res, err := config.NewSqlAdminClient(userAgent).BackupRuns.List(project, instance).Do() - if err != nil { - return err - } - backupsList := res.Items - if len(backupsList) == 0 { - return data_source_sql_backup_run_fmt.Errorf("No backups found for SQL Database Instance %s", instance) - } else if len(backupsList) > 1 { - mostRecent := d.Get("most_recent").(bool) - if !mostRecent { - return 
data_source_sql_backup_run_fmt.Errorf("Multiple SQL backup runs listed for Instance %s. Consider setting most_recent or specifying a backup_id", instance) - } - } - backup = backupsList[0] - } - - if err := d.Set("backup_id", backup.Id); err != nil { - return data_source_sql_backup_run_fmt.Errorf("Error setting backup_id: %s", err) - } - if err := d.Set("location", backup.Location); err != nil { - return data_source_sql_backup_run_fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("start_time", backup.StartTime); err != nil { - return data_source_sql_backup_run_fmt.Errorf("Error setting start_time: %s", err) - } - if err := d.Set("status", backup.Status); err != nil { - return data_source_sql_backup_run_fmt.Errorf("Error setting status: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance}}/backupRuns/{{backup_id}}") - if err != nil { - return data_source_sql_backup_run_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - return nil -} - -func dataSourceSqlDatabaseInstance() *data_source_sql_database_instance_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceSqlDatabaseInstance().Schema) - addRequiredFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "project") - - return &data_source_sql_database_instance_schema.Resource{ - Read: dataSourceSqlDatabaseInstanceRead, - Schema: dsSchema, - } -} - -func dataSourceSqlDatabaseInstanceRead(d *data_source_sql_database_instance_schema.ResourceData, meta interface{}) error { - - return resourceSqlDatabaseInstanceRead(d, meta) - -} - -func dataSourceGoogleStorageBucketObjectContent() *data_source_storage_bucket_object_content_schema.Resource { - - dsSchema := datasourceSchemaFromResourceSchema(resourceStorageBucketObject().Schema) - - addRequiredFieldsToSchema(dsSchema, "bucket") - addRequiredFieldsToSchema(dsSchema, "name") - addOptionalFieldsToSchema(dsSchema, "content") - - return 
&data_source_storage_bucket_object_content_schema.Resource{ - Read: dataSourceGoogleStorageBucketObjectContentRead, - Schema: dsSchema, - } -} - -func dataSourceGoogleStorageBucketObjectContentRead(d *data_source_storage_bucket_object_content_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - name := d.Get("name").(string) - - objectsService := data_source_storage_bucket_object_content_storage.NewObjectsService(config.NewStorageClient(userAgent)) - getCall := objectsService.Get(bucket, name) - - res, err := getCall.Download() - if err != nil { - return data_source_storage_bucket_object_content_fmt.Errorf("Error downloading storage bucket object: %s", err) - } - - defer res.Body.Close() - var bodyString string - - if res.StatusCode == data_source_storage_bucket_object_content_http.StatusOK { - bodyBytes, err := data_source_storage_bucket_object_content_ioutil.ReadAll(res.Body) - if err != nil { - return data_source_storage_bucket_object_content_fmt.Errorf("Error reading all from res.Body: %s", err) - } - bodyString = string(bodyBytes) - } - - if err := d.Set("content", bodyString); err != nil { - return data_source_storage_bucket_object_content_fmt.Errorf("Error setting content: %s", err) - } - - d.SetId(bucket + "-" + name) - return nil -} - -const gcsBaseUrl = "https://storage.googleapis.com" - -const googleCredentialsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS" - -func dataSourceGoogleSignedUrl() *data_source_storage_object_signed_url_schema.Resource { - return &data_source_storage_object_signed_url_schema.Resource{ - Read: dataSourceGoogleSignedUrlRead, - - Schema: map[string]*data_source_storage_object_signed_url_schema.Schema{ - "bucket": { - Type: data_source_storage_object_signed_url_schema.TypeString, - Required: true, - }, - "content_md5": { - Type: 
data_source_storage_object_signed_url_schema.TypeString, - Optional: true, - Default: "", - }, - "content_type": { - Type: data_source_storage_object_signed_url_schema.TypeString, - Optional: true, - Default: "", - }, - "credentials": { - Type: data_source_storage_object_signed_url_schema.TypeString, - Sensitive: true, - Optional: true, - }, - "duration": { - Type: data_source_storage_object_signed_url_schema.TypeString, - Optional: true, - Default: "1h", - }, - "extension_headers": { - Type: data_source_storage_object_signed_url_schema.TypeMap, - Optional: true, - Elem: &data_source_storage_object_signed_url_schema.Schema{Type: data_source_storage_object_signed_url_schema.TypeString}, - ValidateFunc: validateExtensionHeaders, - }, - "http_method": { - Type: data_source_storage_object_signed_url_schema.TypeString, - Optional: true, - Default: "GET", - ValidateFunc: data_source_storage_object_signed_url_validation.StringInSlice([]string{"GET", "HEAD", "PUT", "DELETE"}, true), - }, - "path": { - Type: data_source_storage_object_signed_url_schema.TypeString, - Required: true, - }, - "signed_url": { - Type: data_source_storage_object_signed_url_schema.TypeString, - Computed: true, - }, - }, - } -} - -func validateExtensionHeaders(v interface{}, k string) (ws []string, errors []error) { - hdrMap := v.(map[string]interface{}) - for k := range hdrMap { - if !data_source_storage_object_signed_url_strings.HasPrefix(data_source_storage_object_signed_url_strings.ToLower(k), "x-goog-") { - errors = append(errors, data_source_storage_object_signed_url_fmt.Errorf( - "extension_header (%s) not valid, header name must begin with 'x-goog-'", k)) - } - } - return -} - -func dataSourceGoogleSignedUrlRead(d *data_source_storage_object_signed_url_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - urlData := &UrlData{} - - if method, ok := d.GetOk("http_method"); ok { - urlData.HttpMethod = method.(string) - } - - durationString := "1h" - if v, ok := 
d.GetOk("duration"); ok { - durationString = v.(string) - } - duration, err := data_source_storage_object_signed_url_time.ParseDuration(durationString) - if err != nil { - return data_source_storage_object_signed_url_errwrap.Wrapf("could not parse duration: {{err}}", err) - } - expires := data_source_storage_object_signed_url_time.Now().Unix() + int64(duration.Seconds()) - urlData.Expires = int(expires) - - if v, ok := d.GetOk("content_md5"); ok { - urlData.ContentMd5 = v.(string) - } - - if v, ok := d.GetOk("content_type"); ok { - urlData.ContentType = v.(string) - } - - if v, ok := d.GetOk("extension_headers"); ok { - hdrMap := v.(map[string]interface{}) - - if len(hdrMap) > 0 { - urlData.HttpHeaders = make(map[string]string, len(hdrMap)) - for k, v := range hdrMap { - urlData.HttpHeaders[k] = v.(string) - } - } - } - - urlData.Path = data_source_storage_object_signed_url_fmt.Sprintf("/%s/%s", d.Get("bucket").(string), d.Get("path").(string)) - - jwtConfig, err := loadJwtConfig(d, config) - if err != nil { - return err - } - urlData.JwtConfig = jwtConfig - - signedUrl, err := urlData.SignedUrl() - if err != nil { - return err - } - - if err := d.Set("signed_url", signedUrl); err != nil { - return data_source_storage_object_signed_url_fmt.Errorf("Error setting signed_url: %s", err) - } - - encodedSig, err := urlData.EncodedSignature() - if err != nil { - return err - } - d.SetId(encodedSig) - - return nil -} - -func loadJwtConfig(d *data_source_storage_object_signed_url_schema.ResourceData, meta interface{}) (*data_source_storage_object_signed_url_jwt.Config, error) { - config := meta.(*Config) - - credentials := "" - if v, ok := d.GetOk("credentials"); ok { - data_source_storage_object_signed_url_log.Println("[DEBUG] using data source credentials to sign URL") - credentials = v.(string) - - } else if config.Credentials != "" { - data_source_storage_object_signed_url_log.Println("[DEBUG] using provider credentials to sign URL") - credentials = config.Credentials - 
- } else if filename := data_source_storage_object_signed_url_os.Getenv(googleCredentialsEnvVar); filename != "" { - data_source_storage_object_signed_url_log.Println("[DEBUG] using env GOOGLE_APPLICATION_CREDENTIALS credentials to sign URL") - credentials = filename - - } - - if data_source_storage_object_signed_url_strings.TrimSpace(credentials) != "" { - contents, _, err := pathOrContents(credentials) - if err != nil { - return nil, data_source_storage_object_signed_url_errwrap.Wrapf("Error loading credentials: {{err}}", err) - } - - cfg, err := data_source_storage_object_signed_url_google.JWTConfigFromJSON([]byte(contents), "") - if err != nil { - return nil, data_source_storage_object_signed_url_errwrap.Wrapf("Error parsing credentials: {{err}}", err) - } - return cfg, nil - } - - return nil, data_source_storage_object_signed_url_errors.New("Credentials not found in datasource, provider configuration or GOOGLE_APPLICATION_CREDENTIALS environment variable.") -} - -func parsePrivateKey(key []byte) (*data_source_storage_object_signed_url_rsa.PrivateKey, error) { - block, _ := data_source_storage_object_signed_url_pem.Decode(key) - if block != nil { - key = block.Bytes - } - parsedKey, err := data_source_storage_object_signed_url_x509.ParsePKCS8PrivateKey(key) - if err != nil { - parsedKey, err = data_source_storage_object_signed_url_x509.ParsePKCS1PrivateKey(key) - if err != nil { - return nil, data_source_storage_object_signed_url_errwrap.Wrapf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: {{err}}", err) - } - } - parsed, ok := parsedKey.(*data_source_storage_object_signed_url_rsa.PrivateKey) - if !ok { - return nil, data_source_storage_object_signed_url_errors.New("private key is invalid") - } - return parsed, nil -} - -type UrlData struct { - JwtConfig *data_source_storage_object_signed_url_jwt.Config - ContentMd5 string - ContentType string - HttpMethod string - Expires int - HttpHeaders map[string]string - Path string -} - -func (u 
*UrlData) SigningString() []byte { - var buf data_source_storage_object_signed_url_bytes.Buffer - - buf.WriteString(u.HttpMethod) - buf.WriteString("\n") - - buf.WriteString(u.ContentMd5) - buf.WriteString("\n") - - buf.WriteString(u.ContentType) - buf.WriteString("\n") - - buf.WriteString(data_source_storage_object_signed_url_strconv.Itoa(u.Expires)) - buf.WriteString("\n") - - var keys []string - for k := range u.HttpHeaders { - keys = append(keys, data_source_storage_object_signed_url_strings.ToLower(k)) - } - data_source_storage_object_signed_url_sort.Strings(keys) - - for _, k := range keys { - buf.WriteString(data_source_storage_object_signed_url_fmt.Sprintf("%s:%s\n", k, u.HttpHeaders[k])) - } - - buf.WriteString(u.Path) - - return buf.Bytes() -} - -func (u *UrlData) Signature() ([]byte, error) { - - signature, err := SignString(u.SigningString(), u.JwtConfig) - if err != nil { - return nil, err - - } - - return signature, nil -} - -func (u *UrlData) EncodedSignature() (string, error) { - signature, err := u.Signature() - if err != nil { - return "", err - } - - encoded := data_source_storage_object_signed_url_base64.StdEncoding.EncodeToString(signature) - - encoded = data_source_storage_object_signed_url_url.QueryEscape(encoded) - - return encoded, nil -} - -func (u *UrlData) SignedUrl() (string, error) { - - encodedSig, err := u.EncodedSignature() - if err != nil { - return "", err - } - - var urlBuffer data_source_storage_object_signed_url_bytes.Buffer - urlBuffer.WriteString(gcsBaseUrl) - urlBuffer.WriteString(u.Path) - urlBuffer.WriteString("?GoogleAccessId=") - urlBuffer.WriteString(u.JwtConfig.Email) - urlBuffer.WriteString("&Expires=") - urlBuffer.WriteString(data_source_storage_object_signed_url_strconv.Itoa(u.Expires)) - urlBuffer.WriteString("&Signature=") - urlBuffer.WriteString(encodedSig) - - return urlBuffer.String(), nil -} - -func SignString(toSign []byte, cfg *data_source_storage_object_signed_url_jwt.Config) ([]byte, error) { - - pk, err 
:= parsePrivateKey(cfg.PrivateKey) - if err != nil { - return nil, data_source_storage_object_signed_url_errwrap.Wrapf("failed to sign string, could not parse key: {{err}}", err) - } - - hasher := data_source_storage_object_signed_url_sha256.New() - if _, err := hasher.Write(toSign); err != nil { - return nil, data_source_storage_object_signed_url_errwrap.Wrapf("failed to calculate sha256: {{err}}", err) - } - - signed, err := data_source_storage_object_signed_url_rsa.SignPKCS1v15(data_source_storage_object_signed_url_rand.Reader, pk, data_source_storage_object_signed_url_crypto.SHA256, hasher.Sum(nil)) - if err != nil { - return nil, data_source_storage_object_signed_url_errwrap.Wrapf("failed to sign string, an error occurred: {{err}}", err) - } - - return signed, nil -} - -func dataSourceTpuTensorflowVersions() *data_source_tpu_tensorflow_versions_schema.Resource { - return &data_source_tpu_tensorflow_versions_schema.Resource{ - Read: dataSourceTpuTensorFlowVersionsRead, - Schema: map[string]*data_source_tpu_tensorflow_versions_schema.Schema{ - "project": { - Type: data_source_tpu_tensorflow_versions_schema.TypeString, - Optional: true, - Computed: true, - }, - "zone": { - Type: data_source_tpu_tensorflow_versions_schema.TypeString, - Optional: true, - Computed: true, - }, - "versions": { - Type: data_source_tpu_tensorflow_versions_schema.TypeList, - Computed: true, - Elem: &data_source_tpu_tensorflow_versions_schema.Schema{Type: data_source_tpu_tensorflow_versions_schema.TypeString}, - }, - }, - } -} - -func dataSourceTpuTensorFlowVersionsRead(d *data_source_tpu_tensorflow_versions_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone, err := getZone(d, config) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{TPUBasePath}}projects/{{project}}/locations/{{zone}}/tensorflowVersions") - if err != nil { - return err - } - - versionsRaw, err := paginatedListRequest(project, url, userAgent, config, flattenTpuTensorflowVersions) - if err != nil { - return data_source_tpu_tensorflow_versions_fmt.Errorf("Error listing TPU Tensorflow versions: %s", err) - } - - versions := make([]string, len(versionsRaw)) - for i, ver := range versionsRaw { - versions[i] = ver.(string) - } - data_source_tpu_tensorflow_versions_sort.Strings(versions) - - data_source_tpu_tensorflow_versions_log.Printf("[DEBUG] Received Google TPU Tensorflow Versions: %q", versions) - - if err := d.Set("versions", versions); err != nil { - return data_source_tpu_tensorflow_versions_fmt.Errorf("Error setting versions: %s", err) - } - if err := d.Set("zone", zone); err != nil { - return data_source_tpu_tensorflow_versions_fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("project", project); err != nil { - return data_source_tpu_tensorflow_versions_fmt.Errorf("Error setting project: %s", err) - } - d.SetId(data_source_tpu_tensorflow_versions_fmt.Sprintf("projects/%s/zones/%s", project, zone)) - - return nil -} - -func flattenTpuTensorflowVersions(resp map[string]interface{}) []interface{} { - verObjList := resp["tensorflowVersions"].([]interface{}) - versions := make([]interface{}, len(verObjList)) - for i, v := range verObjList { - verObj := v.(map[string]interface{}) - versions[i] = verObj["version"] - } - return versions -} - -type DataprocClusterOperationWaiter struct { - Service *dataproc_cluster_operation_dataproc.Service - CommonOperationWaiter -} - -func (w *DataprocClusterOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, dataproc_cluster_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - return w.Service.Projects.Regions.Operations.Get(w.Op.Name).Do() -} - -func dataprocClusterOperationWait(config *Config, op 
*dataproc_cluster_operation_dataproc.Operation, activity, userAgent string, timeout dataproc_cluster_operation_time.Duration) error { - w := &DataprocClusterOperationWaiter{ - Service: config.NewDataprocClient(userAgent), - } - if err := w.SetOp(op); err != nil { - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -type DataprocJobOperationWaiter struct { - Service *dataproc_job_operation_dataproc.Service - Region string - ProjectId string - JobId string - Status string -} - -func (w *DataprocJobOperationWaiter) State() string { - if w == nil { - return "" - } - return w.Status -} - -func (w *DataprocJobOperationWaiter) Error() error { - - return nil -} - -func (w *DataprocJobOperationWaiter) IsRetryable(error) bool { - return false -} - -func (w *DataprocJobOperationWaiter) SetOp(job interface{}) error { - - return nil -} - -func (w *DataprocJobOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, dataproc_job_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - job, err := w.Service.Projects.Regions.Jobs.Get(w.ProjectId, w.Region, w.JobId).Do() - if job != nil { - w.Status = job.Status.State - } - return job, err -} - -func (w *DataprocJobOperationWaiter) OpName() string { - if w == nil { - return "" - } - return w.JobId -} - -func (w *DataprocJobOperationWaiter) PendingStates() []string { - return []string{"PENDING", "CANCEL_PENDING", "CANCEL_STARTED", "SETUP_DONE"} -} - -func (w *DataprocJobOperationWaiter) TargetStates() []string { - return []string{"CANCELLED", "DONE", "ATTEMPT_FAILURE", "ERROR", "RUNNING"} -} - -func dataprocJobOperationWait(config *Config, region, projectId, jobId, activity, userAgent string, timeout dataproc_job_operation_time.Duration) error { - w := &DataprocJobOperationWaiter{ - Service: config.NewDataprocClient(userAgent), - Region: region, - ProjectId: projectId, - JobId: jobId, - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - 
-type DataprocDeleteJobOperationWaiter struct { - DataprocJobOperationWaiter -} - -func (w *DataprocDeleteJobOperationWaiter) PendingStates() []string { - return []string{"EXISTS", "ERROR"} -} - -func (w *DataprocDeleteJobOperationWaiter) TargetStates() []string { - return []string{"DELETED"} -} - -func (w *DataprocDeleteJobOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, dataproc_job_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - job, err := w.Service.Projects.Regions.Jobs.Get(w.ProjectId, w.Region, w.JobId).Do() - if err != nil { - if isGoogleApiErrorWithCode(err, dataproc_job_operation_http.StatusNotFound) { - w.Status = "DELETED" - return job, nil - } - w.Status = "ERROR" - } - w.Status = "EXISTS" - return job, err -} - -func dataprocDeleteOperationWait(config *Config, region, projectId, jobId, activity, userAgent string, timeout dataproc_job_operation_time.Duration) error { - w := &DataprocDeleteJobOperationWaiter{ - DataprocJobOperationWaiter{ - Service: config.NewDataprocClient(userAgent), - Region: region, - ProjectId: projectId, - JobId: jobId, - }, - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -func datasourceSchemaFromResourceSchema(rs map[string]*datasource_helpers_schema.Schema) map[string]*datasource_helpers_schema.Schema { - ds := make(map[string]*datasource_helpers_schema.Schema, len(rs)) - for k, v := range rs { - dv := &datasource_helpers_schema.Schema{ - Computed: true, - ForceNew: false, - Required: false, - Description: v.Description, - Type: v.Type, - } - - switch v.Type { - case datasource_helpers_schema.TypeSet: - dv.Set = v.Set - fallthrough - case datasource_helpers_schema.TypeList: - - if elem, ok := v.Elem.(*datasource_helpers_schema.Resource); ok { - - dv.Elem = &datasource_helpers_schema.Resource{ - Schema: datasourceSchemaFromResourceSchema(elem.Schema), - } - } else { - - dv.Elem = v.Elem - } - - default: - - dv.Elem = v.Elem - - } - ds[k] = dv - - 
} - return ds -} - -func fixDatasourceSchemaFlags(schema map[string]*datasource_helpers_schema.Schema, required bool, keys ...string) { - for _, v := range keys { - schema[v].Computed = false - schema[v].Optional = !required - schema[v].Required = required - } -} - -func addRequiredFieldsToSchema(schema map[string]*datasource_helpers_schema.Schema, keys ...string) { - fixDatasourceSchemaFlags(schema, true, keys...) -} - -func addOptionalFieldsToSchema(schema map[string]*datasource_helpers_schema.Schema, keys ...string) { - fixDatasourceSchemaFlags(schema, false, keys...) -} - -type DatastoreOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *DatastoreOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, datastore_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := datastore_operation_fmt.Sprintf("https://datastore.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil, datastoreIndex409Contention) -} - -func createDatastoreWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*DatastoreOperationWaiter, error) { - w := &DatastoreOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func datastoreOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout datastore_operation_time.Duration) error { - w, err := createDatastoreWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return datastore_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func 
datastoreOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout datastore_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createDatastoreWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -var ( - CreateDirective = []dcl_dcldcl.ApplyOption{ - dcl_dcldcl.WithLifecycleParam(dcl_dcldcl.BlockAcquire), - dcl_dcldcl.WithLifecycleParam(dcl_dcldcl.BlockDestruction), - dcl_dcldcl.WithLifecycleParam(dcl_dcldcl.BlockModification), - } - - UpdateDirective = []dcl_dcldcl.ApplyOption{ - dcl_dcldcl.WithLifecycleParam(dcl_dcldcl.BlockCreation), - dcl_dcldcl.WithLifecycleParam(dcl_dcldcl.BlockDestruction), - } -) - -type dclLogger struct{} - -func (l dclLogger) Fatal(args ...interface{}) { - dcl_logger_log.Fatal(args...) -} - -func (l dclLogger) Fatalf(format string, args ...interface{}) { - dcl_logger_log.Fatalf(dcl_logger_fmt.Sprintf("[DEBUG][DCL FATAL] %s", format), args...) -} - -func (l dclLogger) Info(args ...interface{}) { - dcl_logger_log.Print(args...) -} - -func (l dclLogger) Infof(format string, args ...interface{}) { - dcl_logger_log.Printf(dcl_logger_fmt.Sprintf("[DEBUG][DCL INFO] %s", format), args...) -} - -func (l dclLogger) Warningf(format string, args ...interface{}) { - dcl_logger_log.Printf(dcl_logger_fmt.Sprintf("[DEBUG][DCL WARNING] %s", format), args...) -} - -func (l dclLogger) Warning(args ...interface{}) { - dcl_logger_log.Print(args...) 
-} - -type DeploymentManagerOperationWaiter struct { - Config *Config - UserAgent string - Project string - OperationUrl string - ComputeOperationWaiter -} - -func (w *DeploymentManagerOperationWaiter) IsRetryable(error) bool { - return false -} - -func (w *DeploymentManagerOperationWaiter) QueryOp() (interface{}, error) { - if w == nil || w.Op == nil || w.Op.SelfLink == "" { - return nil, deployment_manager_operation_fmt.Errorf("cannot query unset/nil operation") - } - - resp, err := sendRequest(w.Config, "GET", w.Project, w.Op.SelfLink, w.UserAgent, nil) - if err != nil { - return nil, err - } - op := &deployment_manager_operation_compute.Operation{} - if err := Convert(resp, op); err != nil { - return nil, deployment_manager_operation_fmt.Errorf("could not convert response to operation: %v", err) - } - return op, nil -} - -func deploymentManagerOperationWaitTime(config *Config, resp interface{}, project, activity, userAgent string, timeout deployment_manager_operation_time.Duration) error { - op := &deployment_manager_operation_compute.Operation{} - err := Convert(resp, op) - if err != nil { - return err - } - - w := &DeploymentManagerOperationWaiter{ - Config: config, - UserAgent: userAgent, - OperationUrl: op.SelfLink, - ComputeOperationWaiter: ComputeOperationWaiter{ - Project: project, - }, - } - if err := w.SetOp(op); err != nil { - return err - } - - return OperationWait(w, activity, timeout, config.PollInterval) -} - -func (w *DeploymentManagerOperationWaiter) Error() error { - if w != nil && w.Op != nil && w.Op.Error != nil { - return DeploymentManagerOperationError{ - HTTPStatusCode: w.Op.HttpErrorStatusCode, - HTTPMessage: w.Op.HttpErrorMessage, - OperationError: *w.Op.Error, - } - } - return nil -} - -type DeploymentManagerOperationError struct { - HTTPStatusCode int64 - HTTPMessage string - deployment_manager_operation_compute.OperationError -} - -func (e DeploymentManagerOperationError) Error() string { - var buf 
deployment_manager_operation_bytes.Buffer - buf.WriteString("Deployment Manager returned errors for this operation, likely due to invalid configuration.") - buf.WriteString(deployment_manager_operation_fmt.Sprintf("Operation failed with HTTP error %d: %s.", e.HTTPStatusCode, e.HTTPMessage)) - buf.WriteString("Errors returned: \n") - for _, err := range e.Errors { - buf.WriteString(err.Message + "\n") - } - return buf.String() -} - -type DialogflowCXOperationWaiter struct { - Config *Config - UserAgent string - CommonOperationWaiter -} - -func (w *DialogflowCXOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, dialogflow_cx_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := dialogflow_cx_operation_fmt.Sprintf("https://dialogflow.googleapis.com/v3/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", "", url, w.UserAgent, nil) -} - -func createDialogflowCXWaiter(config *Config, op map[string]interface{}, activity, userAgent string) (*DialogflowCXOperationWaiter, error) { - w := &DialogflowCXOperationWaiter{ - Config: config, - UserAgent: userAgent, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func dialogflowCXOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, activity, userAgent string, timeout dialogflow_cx_operation_time.Duration) error { - w, err := createDialogflowCXWaiter(config, op, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return dialogflow_cx_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func dialogflowCXOperationWaitTime(config *Config, op map[string]interface{}, activity, userAgent string, timeout dialogflow_cx_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - 
} - w, err := createDialogflowCXWaiter(config, op, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -func readDiskType(c *Config, d TerraformResourceData, name string) (*ZonalFieldValue, error) { - return parseZonalFieldValue("diskTypes", name, "project", "zone", d, c, false) -} - -func readRegionDiskType(c *Config, d TerraformResourceData, name string) (*RegionalFieldValue, error) { - return parseRegionalFieldValue("diskTypes", name, "project", "region", "zone", d, c, false) -} - -type DnsChangeWaiter struct { - Service *dns_change_dns.Service - Change *dns_change_dns.Change - Project string - ManagedZone string -} - -func (w *DnsChangeWaiter) RefreshFunc() dns_change_resource.StateRefreshFunc { - return func() (interface{}, string, error) { - var chg *dns_change_dns.Change - var err error - - chg, err = w.Service.Changes.Get( - w.Project, w.ManagedZone, w.Change.Id).Do() - - if err != nil { - return nil, "", err - } - - return chg, chg.Status, nil - } -} - -func (w *DnsChangeWaiter) Conf() *dns_change_resource.StateChangeConf { - return &dns_change_resource.StateChangeConf{ - Pending: []string{"pending"}, - Target: []string{"done"}, - Refresh: w.RefreshFunc(), - Timeout: 10 * dns_change_time.Minute, - MinTimeout: 2 * dns_change_time.Second, - } -} - -type RetryErrorPredicateFunc func(error) (bool, string) - -var defaultErrorRetryPredicates = []RetryErrorPredicateFunc{ - - isNetworkTemporaryError, - isNetworkTimeoutError, - isIoEOFError, - isConnectionResetNetworkError, - - isCommonRetryableErrorCode, - - is409OperationInProgressError, - - isSubnetworkUnreadyError, -} - -func isNetworkTemporaryError(err error) (bool, string) { - if netErr, ok := err.(*error_retry_predicates_net.OpError); ok && netErr.Temporary() { - return true, "marked as timeout" - } - if urlerr, ok := err.(*error_retry_predicates_url.Error); ok && urlerr.Temporary() { - return true, "marked as timeout" - } - return 
false, "" -} - -func isNetworkTimeoutError(err error) (bool, string) { - if netErr, ok := err.(*error_retry_predicates_net.OpError); ok && netErr.Timeout() { - return true, "marked as timeout" - } - if urlerr, ok := err.(*error_retry_predicates_url.Error); ok && urlerr.Timeout() { - return true, "marked as timeout" - } - return false, "" -} - -func isIoEOFError(err error) (bool, string) { - if err == error_retry_predicates_io.ErrUnexpectedEOF { - return true, "Got unexpected EOF" - } - - if urlerr, urlok := err.(*error_retry_predicates_url.Error); urlok { - wrappedErr := urlerr.Unwrap() - if wrappedErr == error_retry_predicates_io.ErrUnexpectedEOF { - return true, "Got unexpected EOF" - } - } - return false, "" -} - -const connectionResetByPeerErr = ": connection reset by peer" - -func isConnectionResetNetworkError(err error) (bool, string) { - if error_retry_predicates_strings.HasSuffix(err.Error(), connectionResetByPeerErr) { - return true, error_retry_predicates_fmt.Sprintf("reset connection error: %v", err) - } - return false, "" -} - -func is409OperationInProgressError(err error) (bool, string) { - gerr, ok := err.(*error_retry_predicates_googleapi.Error) - if !ok { - return false, "" - } - - if gerr.Code == 409 && error_retry_predicates_strings.Contains(gerr.Body, "operationInProgress") { - error_retry_predicates_log.Printf("[DEBUG] Dismissed an error as retryable based on error code 409 and error reason 'operationInProgress': %s", err) - return true, "Operation still in progress" - } - return false, "" -} - -func isSubnetworkUnreadyError(err error) (bool, string) { - gerr, ok := err.(*error_retry_predicates_googleapi.Error) - if !ok { - return false, "" - } - - if gerr.Code == 400 && error_retry_predicates_strings.Contains(gerr.Body, "resourceNotReady") && error_retry_predicates_strings.Contains(gerr.Body, "subnetworks") { - error_retry_predicates_log.Printf("[DEBUG] Dismissed an error as retryable based on error code 400 and error reason 'resourceNotReady' 
w/ `subnetwork`: %s", err) - return true, "Subnetwork not ready" - } - return false, "" -} - -func isCommonRetryableErrorCode(err error) (bool, string) { - gerr, ok := err.(*error_retry_predicates_googleapi.Error) - if !ok { - return false, "" - } - - if gerr.Code == 429 || gerr.Code == 500 || gerr.Code == 502 || gerr.Code == 503 { - error_retry_predicates_log.Printf("[DEBUG] Dismissed an error as retryable based on error code: %s", err) - return true, error_retry_predicates_fmt.Sprintf("Retryable error code %d", gerr.Code) - } - return false, "" -} - -var FINGERPRINT_FAIL_ERRORS = []string{"Invalid fingerprint.", "Supplied fingerprint does not match current metadata fingerprint."} - -func isFingerprintError(err error) (bool, string) { - gerr, ok := err.(*error_retry_predicates_googleapi.Error) - if !ok { - return false, "" - } - - if gerr.Code != 412 { - return false, "" - } - - for _, msg := range FINGERPRINT_FAIL_ERRORS { - if error_retry_predicates_strings.Contains(err.Error(), msg) { - return true, "fingerprint mismatch" - } - } - - return false, "" -} - -func iamMemberMissing(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok { - if gerr.Code == 400 && error_retry_predicates_strings.Contains(gerr.Body, "permission") { - return true, "Waiting for IAM member permissions to propagate." 
- } - } - return false, "" -} - -func pubsubTopicProjectNotReady(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok { - if gerr.Code == 400 && error_retry_predicates_strings.Contains(gerr.Body, "retry this operation") { - error_retry_predicates_log.Printf("[DEBUG] Dismissed error as a retryable operation: %s", err) - return true, "Waiting for Pubsub topic's project to properly initialize with organiation policy" - } - } - return false, "" -} - -func isSqlInternalError(err error) (bool, string) { - if gerr, ok := err.(*SqlAdminOperationError); ok { - - var ierr interface{} - ierr = gerr - if serr, ok := ierr.(*error_retry_predicates_sqladminsqladmin.OperationErrors); ok && serr.Errors[0].Code == "INTERNAL_ERROR" { - return true, "Received an internal error, which is sometimes retryable for some SQL resources. Optimistically retrying." - } - - } - return false, "" -} - -func isSqlOperationInProgressError(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok && gerr.Code == 409 { - if error_retry_predicates_strings.Contains(gerr.Body, "instanceAlreadyExists") { - return false, "" - } - - return true, "Waiting for other concurrent Cloud SQL operations to finish" - } - return false, "" -} - -func serviceUsageServiceBeingActivated(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok && gerr.Code == 400 { - if error_retry_predicates_strings.Contains(gerr.Body, "Another activation or deactivation is in progress") { - return true, "Waiting for same service activation/deactivation to finish" - } - - return false, "" - } - return false, "" -} - -func isBigqueryIAMQuotaError(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok { - if gerr.Code == 403 && error_retry_predicates_strings.Contains(error_retry_predicates_strings.ToLower(gerr.Body), "exceeded rate limits") { - return true, "Waiting for Bigquery edit 
quota to refresh" - } - } - return false, "" -} - -func isOperationReadQuotaError(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok { - if gerr.Code == 403 && error_retry_predicates_strings.Contains(gerr.Body, "Quota exceeded for quota group") { - return true, "Waiting for quota to refresh" - } - } - return false, "" -} - -func isMonitoringConcurrentEditError(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok { - if gerr.Code == 409 && (error_retry_predicates_strings.Contains(error_retry_predicates_strings.ToLower(gerr.Body), "too many concurrent edits") || - error_retry_predicates_strings.Contains(error_retry_predicates_strings.ToLower(gerr.Body), "could not fulfill the request")) { - return true, "Waiting for other Monitoring changes to finish" - } - } - return false, "" -} - -func isNotFilestoreQuotaError(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok { - if gerr.Code == 429 { - return false, "" - } - } - return isCommonRetryableErrorCode(err) -} - -func isAppEngineRetryableError(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok { - if gerr.Code == 409 && error_retry_predicates_strings.Contains(error_retry_predicates_strings.ToLower(gerr.Body), "operation is already in progress") { - return true, "Waiting for other concurrent App Engine changes to finish" - } - if gerr.Code == 404 && error_retry_predicates_strings.Contains(error_retry_predicates_strings.ToLower(gerr.Body), "unable to retrieve p4sa") { - return true, "Waiting for P4SA propagation to GAIA" - } - } - return false, "" -} - -func isCryptoKeyVersionsPendingGeneration(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok && gerr.Code == 400 { - if error_retry_predicates_strings.Contains(gerr.Body, "PENDING_GENERATION") { - return true, "Waiting for pending key generation" - } - } - 
return false, "" -} - -func isNotFoundRetryableError(opType string) RetryErrorPredicateFunc { - return func(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok && gerr.Code == 404 { - return true, error_retry_predicates_fmt.Sprintf("Retry 404s for %s", opType) - } - return false, "" - } -} - -func isDataflowJobUpdateRetryableError(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok { - if gerr.Code == 404 && error_retry_predicates_strings.Contains(gerr.Body, "in RUNNING OR DRAINING state") { - return true, "Waiting for job to be in a valid state" - } - } - return false, "" -} - -func isPeeringOperationInProgress(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok { - if gerr.Code == 400 && error_retry_predicates_strings.Contains(gerr.Body, "There is a peering operation in progress") { - return true, "Waiting peering operation to complete" - } - } - return false, "" -} - -func isCloudFunctionsSourceCodeError(err error) (bool, string) { - if operr, ok := err.(*CommonOpError); ok { - if operr.Code == 3 && operr.Message == "Failed to retrieve function source code" { - return true, error_retry_predicates_fmt.Sprintf("Retry on Function failing to pull code from GCS") - } - } - return false, "" -} - -func datastoreIndex409Contention(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok { - if gerr.Code == 409 && error_retry_predicates_strings.Contains(gerr.Body, "too much contention") { - return true, "too much contention - waiting for less activity" - } - } - return false, "" -} - -func iapClient409Operation(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok { - if gerr.Code == 409 && error_retry_predicates_strings.Contains(error_retry_predicates_strings.ToLower(gerr.Body), "operation was aborted") { - return true, "operation was aborted possibly due to 
concurrency issue - retrying" - } - } - return false, "" -} - -func healthcareDatasetNotInitialized(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok { - if gerr.Code == 404 && error_retry_predicates_strings.Contains(error_retry_predicates_strings.ToLower(gerr.Body), "dataset not initialized") { - return true, "dataset not initialized - retrying" - } - } - return false, "" -} - -func isCloudRunCreationConflict(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok { - if gerr.Code == 409 { - return true, "saw a 409 - waiting until background deletion completes" - } - } - - return false, "" -} - -func iamServiceAccountNotFound(err error) (bool, string) { - if gerr, ok := err.(*error_retry_predicates_googleapi.Error); ok { - if gerr.Code == 400 && error_retry_predicates_strings.Contains(gerr.Body, "Service account") && error_retry_predicates_strings.Contains(gerr.Body, "does not exist") { - return true, "service account not found in IAM" - } - } - - return false, "" -} - -func isBigTableRetryableError(err error) (bool, string) { - statusCode := error_retry_predicates_status.Code(err) - if statusCode.String() == "FailedPrecondition" { - return true, "Waiting for table to be in a valid state" - } - - return false, "" -} - -func expandStringArray(v interface{}) []string { - arr, ok := v.([]string) - - if ok { - return arr - } - - if arr, ok := v.(*expanders_schema.Set); ok { - return convertStringSet(arr) - } - - arr = convertStringArr(v.([]interface{})) - if arr == nil { - - return make([]string, 0) - } - return arr -} - -func expandIntegerArray(v interface{}) []int64 { - arr, ok := v.([]int64) - - if ok { - return arr - } - - if arr, ok := v.(*expanders_schema.Set); ok { - return convertIntegerSet(arr) - } - - return convertIntegerArr(v.([]interface{})) -} - -func convertIntegerSet(v *expanders_schema.Set) []int64 { - return convertIntegerArr(v.List()) -} - -func convertIntegerArr(v 
[]interface{}) []int64 { - var vi []int64 - for _, vs := range v { - vi = append(vi, int64(vs.(int))) - } - return vi -} - -func expandEnumBool(v interface{}) *bool { - s, ok := v.(string) - if !ok { - return nil - } - switch s { - case "TRUE": - b := true - return &b - case "FALSE": - b := false - return &b - } - return nil -} - -const ( - globalLinkTemplate = "projects/%s/global/%s/%s" - globalLinkBasePattern = "projects/(.+)/global/%s/(.+)" - zonalLinkTemplate = "projects/%s/zones/%s/%s/%s" - zonalLinkBasePattern = "projects/(.+)/zones/(.+)/%s/(.+)" - zonalPartialLinkBasePattern = "zones/(.+)/%s/(.+)" - regionalLinkTemplate = "projects/%s/regions/%s/%s/%s" - regionalLinkBasePattern = "projects/(.+)/regions/(.+)/%s/(.+)" - regionalPartialLinkBasePattern = "regions/(.+)/%s/(.+)" - projectLinkTemplate = "projects/%s/%s/%s" - projectBasePattern = "projects/(.+)/%s/(.+)" - organizationLinkTemplate = "organizations/%s/%s/%s" - organizationBasePattern = "organizations/(.+)/%s/(.+)" -) - -func ParseNetworkFieldValue(network string, d TerraformResourceData, config *Config) (*GlobalFieldValue, error) { - return parseGlobalFieldValue("networks", network, "project", d, config, true) -} - -func ParseSubnetworkFieldValue(subnetwork string, d TerraformResourceData, config *Config) (*RegionalFieldValue, error) { - return parseRegionalFieldValue("subnetworks", subnetwork, "project", "region", "zone", d, config, true) -} - -func ParseSubnetworkFieldValueWithProjectField(subnetwork, projectField string, d TerraformResourceData, config *Config) (*RegionalFieldValue, error) { - return parseRegionalFieldValue("subnetworks", subnetwork, projectField, "region", "zone", d, config, true) -} - -func ParseSslCertificateFieldValue(sslCertificate string, d TerraformResourceData, config *Config) (*GlobalFieldValue, error) { - return parseGlobalFieldValue("sslCertificates", sslCertificate, "project", d, config, false) -} - -func ParseHttpHealthCheckFieldValue(healthCheck string, d 
TerraformResourceData, config *Config) (*GlobalFieldValue, error) { - return parseGlobalFieldValue("httpHealthChecks", healthCheck, "project", d, config, false) -} - -func ParseDiskFieldValue(disk string, d TerraformResourceData, config *Config) (*ZonalFieldValue, error) { - return parseZonalFieldValue("disks", disk, "project", "zone", d, config, false) -} - -func ParseRegionDiskFieldValue(disk string, d TerraformResourceData, config *Config) (*RegionalFieldValue, error) { - return parseRegionalFieldValue("disks", disk, "project", "region", "zone", d, config, false) -} - -func ParseOrganizationCustomRoleName(role string) (*OrganizationFieldValue, error) { - return parseOrganizationFieldValue("roles", role, false) -} - -func ParseAcceleratorFieldValue(accelerator string, d TerraformResourceData, config *Config) (*ZonalFieldValue, error) { - return parseZonalFieldValue("acceleratorTypes", accelerator, "project", "zone", d, config, false) -} - -func ParseMachineTypesFieldValue(machineType string, d TerraformResourceData, config *Config) (*ZonalFieldValue, error) { - return parseZonalFieldValue("machineTypes", machineType, "project", "zone", d, config, false) -} - -func ParseInstanceFieldValue(instance string, d TerraformResourceData, config *Config) (*ZonalFieldValue, error) { - return parseZonalFieldValue("instances", instance, "project", "zone", d, config, false) -} - -func ParseInstanceGroupFieldValue(instanceGroup string, d TerraformResourceData, config *Config) (*ZonalFieldValue, error) { - return parseZonalFieldValue("instanceGroups", instanceGroup, "project", "zone", d, config, false) -} - -func ParseInstanceTemplateFieldValue(instanceTemplate string, d TerraformResourceData, config *Config) (*GlobalFieldValue, error) { - return parseGlobalFieldValue("instanceTemplates", instanceTemplate, "project", d, config, false) -} - -func ParseMachineImageFieldValue(machineImage string, d TerraformResourceData, config *Config) (*GlobalFieldValue, error) { - return 
parseGlobalFieldValue("machineImages", machineImage, "project", d, config, false) -} - -func ParseSecurityPolicyFieldValue(securityPolicy string, d TerraformResourceData, config *Config) (*GlobalFieldValue, error) { - return parseGlobalFieldValue("securityPolicies", securityPolicy, "project", d, config, true) -} - -func ParseNetworkEndpointGroupFieldValue(networkEndpointGroup string, d TerraformResourceData, config *Config) (*ZonalFieldValue, error) { - return parseZonalFieldValue("networkEndpointGroups", networkEndpointGroup, "project", "zone", d, config, false) -} - -type GlobalFieldValue struct { - Project string - Name string - - resourceType string -} - -func (f GlobalFieldValue) RelativeLink() string { - if len(f.Name) == 0 { - return "" - } - - return field_helpers_fmt.Sprintf(globalLinkTemplate, f.Project, f.resourceType, f.Name) -} - -func parseGlobalFieldValue(resourceType, fieldValue, projectSchemaField string, d TerraformResourceData, config *Config, isEmptyValid bool) (*GlobalFieldValue, error) { - if len(fieldValue) == 0 { - if isEmptyValid { - return &GlobalFieldValue{resourceType: resourceType}, nil - } - return nil, field_helpers_fmt.Errorf("The global field for resource %s cannot be empty", resourceType) - } - - r := field_helpers_regexp.MustCompile(field_helpers_fmt.Sprintf(globalLinkBasePattern, resourceType)) - if parts := r.FindStringSubmatch(fieldValue); parts != nil { - return &GlobalFieldValue{ - Project: parts[1], - Name: parts[2], - - resourceType: resourceType, - }, nil - } - - project, err := getProjectFromSchema(projectSchemaField, d, config) - if err != nil { - return nil, err - } - - return &GlobalFieldValue{ - Project: project, - Name: GetResourceNameFromSelfLink(fieldValue), - - resourceType: resourceType, - }, nil -} - -type ZonalFieldValue struct { - Project string - Zone string - Name string - - resourceType string -} - -func (f ZonalFieldValue) RelativeLink() string { - if len(f.Name) == 0 { - return "" - } - - return 
field_helpers_fmt.Sprintf(zonalLinkTemplate, f.Project, f.Zone, f.resourceType, f.Name) -} - -func parseZonalFieldValue(resourceType, fieldValue, projectSchemaField, zoneSchemaField string, d TerraformResourceData, config *Config, isEmptyValid bool) (*ZonalFieldValue, error) { - if len(fieldValue) == 0 { - if isEmptyValid { - return &ZonalFieldValue{resourceType: resourceType}, nil - } - return nil, field_helpers_fmt.Errorf("The zonal field for resource %s cannot be empty.", resourceType) - } - - r := field_helpers_regexp.MustCompile(field_helpers_fmt.Sprintf(zonalLinkBasePattern, resourceType)) - if parts := r.FindStringSubmatch(fieldValue); parts != nil { - return &ZonalFieldValue{ - Project: parts[1], - Zone: parts[2], - Name: parts[3], - resourceType: resourceType, - }, nil - } - - project, err := getProjectFromSchema(projectSchemaField, d, config) - if err != nil { - return nil, err - } - - r = field_helpers_regexp.MustCompile(field_helpers_fmt.Sprintf(zonalPartialLinkBasePattern, resourceType)) - if parts := r.FindStringSubmatch(fieldValue); parts != nil { - return &ZonalFieldValue{ - Project: project, - Zone: parts[1], - Name: parts[2], - resourceType: resourceType, - }, nil - } - - if len(zoneSchemaField) == 0 { - return nil, field_helpers_fmt.Errorf("Invalid field format. 
Got '%s', expected format '%s'", fieldValue, field_helpers_fmt.Sprintf(globalLinkTemplate, "{project}", resourceType, "{name}")) - } - - zone, ok := d.GetOk(zoneSchemaField) - if !ok { - zone = config.Zone - if zone == "" { - return nil, field_helpers_fmt.Errorf("A zone must be specified") - } - } - - return &ZonalFieldValue{ - Project: project, - Zone: zone.(string), - Name: GetResourceNameFromSelfLink(fieldValue), - resourceType: resourceType, - }, nil -} - -func getProjectFromSchema(projectSchemaField string, d TerraformResourceData, config *Config) (string, error) { - res, ok := d.GetOk(projectSchemaField) - if ok && projectSchemaField != "" { - return res.(string), nil - } - if config.Project != "" { - return config.Project, nil - } - return "", field_helpers_fmt.Errorf("%s: required field is not set", projectSchemaField) -} - -func getBillingProjectFromSchema(billingProjectSchemaField string, d TerraformResourceData, config *Config) (string, error) { - res, ok := d.GetOk(billingProjectSchemaField) - if ok && billingProjectSchemaField != "" { - return res.(string), nil - } - if config.BillingProject != "" { - return config.BillingProject, nil - } - return "", field_helpers_fmt.Errorf("%s: required field is not set", billingProjectSchemaField) -} - -type OrganizationFieldValue struct { - OrgId string - Name string - - resourceType string -} - -func (f OrganizationFieldValue) RelativeLink() string { - if len(f.Name) == 0 { - return "" - } - - return field_helpers_fmt.Sprintf(organizationLinkTemplate, f.OrgId, f.resourceType, f.Name) -} - -func parseOrganizationFieldValue(resourceType, fieldValue string, isEmptyValid bool) (*OrganizationFieldValue, error) { - if len(fieldValue) == 0 { - if isEmptyValid { - return &OrganizationFieldValue{resourceType: resourceType}, nil - } - return nil, field_helpers_fmt.Errorf("The organization field for resource %s cannot be empty", resourceType) - } - - r := 
field_helpers_regexp.MustCompile(field_helpers_fmt.Sprintf(organizationBasePattern, resourceType)) - if parts := r.FindStringSubmatch(fieldValue); parts != nil { - return &OrganizationFieldValue{ - OrgId: parts[1], - Name: parts[2], - - resourceType: resourceType, - }, nil - } - - return nil, field_helpers_fmt.Errorf("Invalid field format. Got '%s', expected format '%s'", fieldValue, field_helpers_fmt.Sprintf(organizationLinkTemplate, "{org_id}", resourceType, "{name}")) -} - -type RegionalFieldValue struct { - Project string - Region string - Name string - - resourceType string -} - -func (f RegionalFieldValue) RelativeLink() string { - if len(f.Name) == 0 { - return "" - } - - return field_helpers_fmt.Sprintf(regionalLinkTemplate, f.Project, f.Region, f.resourceType, f.Name) -} - -func parseRegionalFieldValue(resourceType, fieldValue, projectSchemaField, regionSchemaField, zoneSchemaField string, d TerraformResourceData, config *Config, isEmptyValid bool) (*RegionalFieldValue, error) { - if len(fieldValue) == 0 { - if isEmptyValid { - return &RegionalFieldValue{resourceType: resourceType}, nil - } - return nil, field_helpers_fmt.Errorf("The regional field for resource %s cannot be empty.", resourceType) - } - - r := field_helpers_regexp.MustCompile(field_helpers_fmt.Sprintf(regionalLinkBasePattern, resourceType)) - if parts := r.FindStringSubmatch(fieldValue); parts != nil { - return &RegionalFieldValue{ - Project: parts[1], - Region: parts[2], - Name: parts[3], - resourceType: resourceType, - }, nil - } - - project, err := getProjectFromSchema(projectSchemaField, d, config) - if err != nil { - return nil, err - } - - r = field_helpers_regexp.MustCompile(field_helpers_fmt.Sprintf(regionalPartialLinkBasePattern, resourceType)) - if parts := r.FindStringSubmatch(fieldValue); parts != nil { - return &RegionalFieldValue{ - Project: project, - Region: parts[1], - Name: parts[2], - resourceType: resourceType, - }, nil - } - - region, err := 
getRegionFromSchema(regionSchemaField, zoneSchemaField, d, config) - if err != nil { - return nil, err - } - - return &RegionalFieldValue{ - Project: project, - Region: region, - Name: GetResourceNameFromSelfLink(fieldValue), - resourceType: resourceType, - }, nil -} - -func getRegionFromSchema(regionSchemaField, zoneSchemaField string, d TerraformResourceData, config *Config) (string, error) { - - if regionSchemaField == zoneSchemaField { - if v, ok := d.GetOk(regionSchemaField); ok { - if isZone(v.(string)) { - return getRegionFromZone(v.(string)), nil - } - - return v.(string), nil - } - } - - if v, ok := d.GetOk(regionSchemaField); ok && regionSchemaField != "" { - return GetResourceNameFromSelfLink(v.(string)), nil - } - if v, ok := d.GetOk(zoneSchemaField); ok && zoneSchemaField != "" { - return getRegionFromZone(v.(string)), nil - } - if config.Region != "" { - return config.Region, nil - } - if config.Zone != "" { - return getRegionFromZone(config.Zone), nil - } - - return "", field_helpers_fmt.Errorf("Cannot determine region: set in this resource, or set provider-level 'region' or 'zone'.") -} - -type ProjectFieldValue struct { - Project string - Name string - - resourceType string -} - -func (f ProjectFieldValue) RelativeLink() string { - if len(f.Name) == 0 { - return "" - } - - return field_helpers_fmt.Sprintf(projectLinkTemplate, f.Project, f.resourceType, f.Name) -} - -func parseProjectFieldValue(resourceType, fieldValue, projectSchemaField string, d TerraformResourceData, config *Config, isEmptyValid bool) (*ProjectFieldValue, error) { - if len(fieldValue) == 0 { - if isEmptyValid { - return &ProjectFieldValue{resourceType: resourceType}, nil - } - return nil, field_helpers_fmt.Errorf("The project field for resource %s cannot be empty", resourceType) - } - - r := field_helpers_regexp.MustCompile(field_helpers_fmt.Sprintf(projectBasePattern, resourceType)) - if parts := r.FindStringSubmatch(fieldValue); parts != nil { - return &ProjectFieldValue{ - 
Project: parts[1], - Name: parts[2], - - resourceType: resourceType, - }, nil - } - - project, err := getProjectFromSchema(projectSchemaField, d, config) - if err != nil { - return nil, err - } - - return &ProjectFieldValue{ - Project: project, - Name: GetResourceNameFromSelfLink(fieldValue), - - resourceType: resourceType, - }, nil -} - -type FilestoreOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *FilestoreOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, filestore_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := filestore_operation_fmt.Sprintf("https://file.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil, isNotFilestoreQuotaError) -} - -func createFilestoreWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*FilestoreOperationWaiter, error) { - w := &FilestoreOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func filestoreOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout filestore_operation_time.Duration) error { - w, err := createFilestoreWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return filestore_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func filestoreOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout filestore_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := 
createFilestoreWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -type FirestoreOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *FirestoreOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, firestore_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := firestore_operation_fmt.Sprintf("https://firestore.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createFirestoreWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*FirestoreOperationWaiter, error) { - w := &FirestoreOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func firestoreOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout firestore_operation_time.Duration) error { - w, err := createFirestoreWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return firestore_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func firestoreOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout firestore_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createFirestoreWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -func flattenEnumBool(v interface{}) 
string { - b, ok := v.(*bool) - if !ok || b == nil { - return "" - } - if *b { - return "TRUE" - } - return "FALSE" -} - -type GameServicesOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *GameServicesOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, game_services_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := game_services_operation_fmt.Sprintf("https://gameservices.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createGameServicesWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*GameServicesOperationWaiter, error) { - w := &GameServicesOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func gameServicesOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout game_services_operation_time.Duration) error { - w, err := createGameServicesWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return game_services_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func gameServicesOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout game_services_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createGameServicesWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -type GKEHubOperationWaiter struct { - 
Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *GKEHubOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, gke_hub_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := gke_hub_operation_fmt.Sprintf("https://gkehub.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createGKEHubWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*GKEHubOperationWaiter, error) { - w := &GKEHubOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func gKEHubOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout gke_hub_operation_time.Duration) error { - w, err := createGKEHubWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return gke_hub_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func gKEHubOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout gke_hub_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createGKEHubWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -func hashcode(s string) int { - v := int(hashcode_crc32.ChecksumIEEE([]byte(s))) - if v >= 0 { - return v - } - if -v >= 0 { - return -v - } - - return 0 -} - -type headerTransportLayer struct { - header_transport_http.Header - baseTransit header_transport_http.RoundTripper -} - -func 
newTransportWithHeaders(baseTransit header_transport_http.RoundTripper) headerTransportLayer { - if baseTransit == nil { - baseTransit = header_transport_http.DefaultTransport - } - - headers := make(header_transport_http.Header) - - return headerTransportLayer{Header: headers, baseTransit: baseTransit} -} - -func (h headerTransportLayer) RoundTrip(req *header_transport_http.Request) (*header_transport_http.Response, error) { - for key, value := range h.Header { - - if _, ok := req.Header[key]; !ok { - req.Header[key] = value - } - } - return h.baseTransit.RoundTrip(req) -} - -type healthcareDatasetId struct { - Project string - Location string - Name string -} - -func (s *healthcareDatasetId) datasetId() string { - return healthcare_utils_fmt.Sprintf("projects/%s/locations/%s/datasets/%s", s.Project, s.Location, s.Name) -} - -func (s *healthcareDatasetId) terraformId() string { - return healthcare_utils_fmt.Sprintf("%s/%s/%s", s.Project, s.Location, s.Name) -} - -func parseHealthcareDatasetId(id string, config *Config) (*healthcareDatasetId, error) { - parts := healthcare_utils_strings.Split(id, "/") - - datasetIdRegex := healthcare_utils_regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})$") - datasetIdWithoutProjectRegex := healthcare_utils_regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})$") - datasetRelativeLinkRegex := healthcare_utils_regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-]+)/datasets/([a-zA-Z0-9_-]{1,256})$") - - if datasetIdRegex.MatchString(id) { - return &healthcareDatasetId{ - Project: parts[0], - Location: parts[1], - Name: parts[2], - }, nil - } - - if datasetIdWithoutProjectRegex.MatchString(id) { - if config.Project == "" { - return nil, healthcare_utils_fmt.Errorf("The default project for the provider must be set when using the `{location}/{datasetName}` id format.") - } - - return &healthcareDatasetId{ - Project: config.Project, - Location: parts[0], - Name: parts[1], - }, nil - 
} - - if parts := datasetRelativeLinkRegex.FindStringSubmatch(id); parts != nil { - return &healthcareDatasetId{ - Project: parts[1], - Location: parts[2], - Name: parts[3], - }, nil - } - return nil, healthcare_utils_fmt.Errorf("Invalid Dataset id format, expecting `{projectId}/{locationId}/{datasetName}` or `{locationId}/{datasetName}.`") -} - -type healthcareFhirStoreId struct { - DatasetId healthcareDatasetId - Name string -} - -func (s *healthcareFhirStoreId) fhirStoreId() string { - return healthcare_utils_fmt.Sprintf("%s/fhirStores/%s", s.DatasetId.datasetId(), s.Name) -} - -func (s *healthcareFhirStoreId) terraformId() string { - return healthcare_utils_fmt.Sprintf("%s/%s", s.DatasetId.terraformId(), s.Name) -} - -func parseHealthcareFhirStoreId(id string, config *Config) (*healthcareFhirStoreId, error) { - parts := healthcare_utils_strings.Split(id, "/") - - fhirStoreIdRegex := healthcare_utils_regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") - fhirStoreIdWithoutProjectRegex := healthcare_utils_regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") - fhirStoreRelativeLinkRegex := healthcare_utils_regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-]+)/datasets/([a-zA-Z0-9_-]{1,256})/fhirStores/([a-zA-Z0-9_-]{1,256})$") - - if fhirStoreIdRegex.MatchString(id) { - return &healthcareFhirStoreId{ - DatasetId: healthcareDatasetId{ - Project: parts[0], - Location: parts[1], - Name: parts[2], - }, - Name: parts[3], - }, nil - } - - if fhirStoreIdWithoutProjectRegex.MatchString(id) { - if config.Project == "" { - return nil, healthcare_utils_fmt.Errorf("The default project for the provider must be set when using the `{location}/{datasetName}/{fhirStoreName}` id format.") - } - - return &healthcareFhirStoreId{ - DatasetId: healthcareDatasetId{ - Project: config.Project, - Location: parts[0], - Name: parts[1], - }, - Name: parts[2], - }, nil - } - - if parts := 
fhirStoreRelativeLinkRegex.FindStringSubmatch(id); parts != nil { - return &healthcareFhirStoreId{ - DatasetId: healthcareDatasetId{ - Project: parts[1], - Location: parts[2], - Name: parts[3], - }, - Name: parts[4], - }, nil - } - return nil, healthcare_utils_fmt.Errorf("Invalid FhirStore id format, expecting `{projectId}/{locationId}/{datasetName}/{fhirStoreName}` or `{locationId}/{datasetName}/{fhirStoreName}.`") -} - -type healthcareHl7V2StoreId struct { - DatasetId healthcareDatasetId - Name string -} - -func (s *healthcareHl7V2StoreId) hl7V2StoreId() string { - return healthcare_utils_fmt.Sprintf("%s/hl7V2Stores/%s", s.DatasetId.datasetId(), s.Name) -} - -func (s *healthcareHl7V2StoreId) terraformId() string { - return healthcare_utils_fmt.Sprintf("%s/%s", s.DatasetId.terraformId(), s.Name) -} - -func parseHealthcareHl7V2StoreId(id string, config *Config) (*healthcareHl7V2StoreId, error) { - parts := healthcare_utils_strings.Split(id, "/") - - hl7V2StoreIdRegex := healthcare_utils_regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") - hl7V2StoreIdWithoutProjectRegex := healthcare_utils_regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") - hl7V2StoreRelativeLinkRegex := healthcare_utils_regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-]+)/datasets/([a-zA-Z0-9_-]{1,256})/hl7V2Stores/([a-zA-Z0-9_-]{1,256})$") - - if hl7V2StoreIdRegex.MatchString(id) { - return &healthcareHl7V2StoreId{ - DatasetId: healthcareDatasetId{ - Project: parts[0], - Location: parts[1], - Name: parts[2], - }, - Name: parts[3], - }, nil - } - - if hl7V2StoreIdWithoutProjectRegex.MatchString(id) { - if config.Project == "" { - return nil, healthcare_utils_fmt.Errorf("The default project for the provider must be set when using the `{location}/{datasetName}/{hl7V2StoreName}` id format.") - } - - return &healthcareHl7V2StoreId{ - DatasetId: healthcareDatasetId{ - Project: config.Project, 
- Location: parts[0], - Name: parts[1], - }, - Name: parts[2], - }, nil - } - - if parts := hl7V2StoreRelativeLinkRegex.FindStringSubmatch(id); parts != nil { - return &healthcareHl7V2StoreId{ - DatasetId: healthcareDatasetId{ - Project: parts[1], - Location: parts[2], - Name: parts[3], - }, - Name: parts[4], - }, nil - } - return nil, healthcare_utils_fmt.Errorf("Invalid Hl7V2Store id format, expecting `{projectId}/{locationId}/{datasetName}/{hl7V2StoreName}` or `{locationId}/{datasetName}/{hl7V2StoreName}.`") -} - -type healthcareDicomStoreId struct { - DatasetId healthcareDatasetId - Name string -} - -func (s *healthcareDicomStoreId) dicomStoreId() string { - return healthcare_utils_fmt.Sprintf("%s/dicomStores/%s", s.DatasetId.datasetId(), s.Name) -} - -func (s *healthcareDicomStoreId) terraformId() string { - return healthcare_utils_fmt.Sprintf("%s/%s", s.DatasetId.terraformId(), s.Name) -} - -func parseHealthcareDicomStoreId(id string, config *Config) (*healthcareDicomStoreId, error) { - parts := healthcare_utils_strings.Split(id, "/") - - dicomStoreIdRegex := healthcare_utils_regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") - dicomStoreIdWithoutProjectRegex := healthcare_utils_regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,256})/([a-zA-Z0-9_-]{1,256})$") - dicomStoreRelativeLinkRegex := healthcare_utils_regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-]+)/datasets/([a-zA-Z0-9_-]{1,256})/dicomStores/([a-zA-Z0-9_-]{1,256})$") - - if dicomStoreIdRegex.MatchString(id) { - return &healthcareDicomStoreId{ - DatasetId: healthcareDatasetId{ - Project: parts[0], - Location: parts[1], - Name: parts[2], - }, - Name: parts[3], - }, nil - } - - if dicomStoreIdWithoutProjectRegex.MatchString(id) { - if config.Project == "" { - return nil, healthcare_utils_fmt.Errorf("The default project for the provider must be set when using the `{location}/{datasetName}/{dicomStoreName}` id format.") - } 
- - return &healthcareDicomStoreId{ - DatasetId: healthcareDatasetId{ - Project: config.Project, - Location: parts[0], - Name: parts[1], - }, - Name: parts[2], - }, nil - } - - if parts := dicomStoreRelativeLinkRegex.FindStringSubmatch(id); parts != nil { - return &healthcareDicomStoreId{ - DatasetId: healthcareDatasetId{ - Project: parts[1], - Location: parts[2], - Name: parts[3], - }, - Name: parts[4], - }, nil - } - return nil, healthcare_utils_fmt.Errorf("Invalid DicomStore id format, expecting `{projectId}/{locationId}/{datasetName}/{dicomStoreName}` or `{locationId}/{datasetName}/{dicomStoreName}.`") -} - -const maxBackoffSeconds = 30 - -const iamPolicyVersion = 3 - -type ( - ResourceIamUpdater interface { - GetResourceIamPolicy() (*iam_cloudresourcemanager.Policy, error) - - SetResourceIamPolicy(policy *iam_cloudresourcemanager.Policy) error - - GetMutexKey() string - - GetResourceId() string - - DescribeResource() string - } - - newResourceIamUpdaterFunc func(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) - - iamPolicyModifyFunc func(p *iam_cloudresourcemanager.Policy) error - - resourceIdParserFunc func(d *iam_schema.ResourceData, config *Config) error -) - -func iamPolicyReadWithRetry(updater ResourceIamUpdater) (*iam_cloudresourcemanager.Policy, error) { - mutexKey := updater.GetMutexKey() - mutexKV.Lock(mutexKey) - defer mutexKV.Unlock(mutexKey) - - iam_log.Printf("[DEBUG] Retrieving policy for %s\n", updater.DescribeResource()) - var policy *iam_cloudresourcemanager.Policy - err := retryTime(func() (perr error) { - policy, perr = updater.GetResourceIamPolicy() - return perr - }, 10) - if err != nil { - return nil, err - } - iam_log.Print(iam_spew.Sprintf("[DEBUG] Retrieved policy for %s: %#v\n", updater.DescribeResource(), policy)) - return policy, nil -} - -func iamPolicyReadModifyWrite(updater ResourceIamUpdater, modify iamPolicyModifyFunc) error { - mutexKey := updater.GetMutexKey() - mutexKV.Lock(mutexKey) - defer 
mutexKV.Unlock(mutexKey) - - backoff := iam_time.Second - for { - iam_log.Printf("[DEBUG]: Retrieving policy for %s\n", updater.DescribeResource()) - p, err := updater.GetResourceIamPolicy() - if isGoogleApiErrorWithCode(err, 429) { - iam_log.Printf("[DEBUG] 429 while attempting to read policy for %s, waiting %v before attempting again", updater.DescribeResource(), backoff) - iam_time.Sleep(backoff) - continue - } else if err != nil { - return err - } - iam_log.Printf("[DEBUG]: Retrieved policy for %s: %+v\n", updater.DescribeResource(), p) - - err = modify(p) - if err != nil { - return err - } - - iam_log.Printf("[DEBUG]: Setting policy for %s to %+v\n", updater.DescribeResource(), p) - err = updater.SetResourceIamPolicy(p) - if err == nil { - fetchBackoff := 1 * iam_time.Second - for successfulFetches := 0; successfulFetches < 3; { - if fetchBackoff > maxBackoffSeconds*iam_time.Second { - return iam_fmt.Errorf("Error applying IAM policy to %s: Waited too long for propagation.\n", updater.DescribeResource()) - } - iam_time.Sleep(fetchBackoff) - iam_log.Printf("[DEBUG]: Retrieving policy for %s\n", updater.DescribeResource()) - new_p, err := updater.GetResourceIamPolicy() - if err != nil { - - if isGoogleApiErrorWithCode(err, 429) { - fetchBackoff = fetchBackoff * 2 - } else { - return err - } - } - iam_log.Printf("[DEBUG]: Retrieved policy for %s: %+v\n", updater.DescribeResource(), p) - if new_p == nil { - - fetchBackoff = fetchBackoff * 2 - continue - } - modified_p := new_p - - err = modify(modified_p) - if err != nil { - return err - } - if modified_p == new_p { - successfulFetches += 1 - } else { - fetchBackoff = fetchBackoff * 2 - } - } - break - } - if isConflictError(err) { - iam_log.Printf("[DEBUG]: Concurrent policy changes, restarting read-modify-write after %s\n", backoff) - iam_time.Sleep(backoff) - backoff = backoff * 2 - if backoff > 30*iam_time.Second { - return iam_errwrap.Wrapf(iam_fmt.Sprintf("Error applying IAM policy to %s: Too many conflicts. 
Latest error: {{err}}", updater.DescribeResource()), err) - } - continue - } - - if isServiceAccountNotFoundError, _ := iamServiceAccountNotFound(err); isServiceAccountNotFoundError { - - currentPolicy, rerr := iamPolicyReadWithRetry(updater) - if rerr != nil { - if p.Etag != currentPolicy.Etag { - - iam_log.Printf("current and old etag did not match for %s, retrying", updater.DescribeResource()) - iam_time.Sleep(backoff) - backoff = backoff * 2 - continue - } - - iam_log.Printf("current and old etag matched for %s, not retrying", updater.DescribeResource()) - } else { - - iam_log.Printf("[DEBUG]: error checking etag for policy %s. error: %v", updater.DescribeResource(), rerr) - } - } - - iam_log.Printf("[DEBUG]: not retrying IAM policy for %s. error: %v", updater.DescribeResource(), err) - return iam_errwrap.Wrapf(iam_fmt.Sprintf("Error applying IAM policy for %s: {{err}}", updater.DescribeResource()), err) - } - iam_log.Printf("[DEBUG]: Set policy for %s", updater.DescribeResource()) - return nil -} - -func mergeBindings(bindings []*iam_cloudresourcemanager.Binding) []*iam_cloudresourcemanager.Binding { - bm := createIamBindingsMap(bindings) - return listFromIamBindingMap(bm) -} - -type conditionKey struct { - Description string - Expression string - Title string -} - -func conditionKeyFromCondition(condition *iam_cloudresourcemanager.Expr) conditionKey { - if condition == nil { - return conditionKey{} - } - return conditionKey{condition.Description, condition.Expression, condition.Title} -} - -func (k conditionKey) Empty() bool { - return k == conditionKey{} -} - -func (k conditionKey) String() string { - return iam_fmt.Sprintf("%s/%s/%s", k.Title, k.Description, k.Expression) -} - -type iamBindingKey struct { - Role string - Condition conditionKey -} - -func filterBindingsWithRoleAndCondition(b []*iam_cloudresourcemanager.Binding, role string, condition *iam_cloudresourcemanager.Expr) []*iam_cloudresourcemanager.Binding { - bMap := createIamBindingsMap(b) - key 
:= iamBindingKey{role, conditionKeyFromCondition(condition)} - delete(bMap, key) - return listFromIamBindingMap(bMap) -} - -func subtractFromBindings(bindings []*iam_cloudresourcemanager.Binding, toRemove ...*iam_cloudresourcemanager.Binding) []*iam_cloudresourcemanager.Binding { - currMap := createIamBindingsMap(bindings) - toRemoveMap := createIamBindingsMap(toRemove) - - for key, removeSet := range toRemoveMap { - members, ok := currMap[key] - if !ok { - continue - } - - for m := range removeSet { - delete(members, m) - } - - if len(members) == 0 { - delete(currMap, key) - } - } - - return listFromIamBindingMap(currMap) -} - -func iamMemberIsCaseSensitive(member string) bool { - return iam_strings.HasPrefix(member, "principalSet:") || iam_strings.HasPrefix(member, "principal:") || iam_strings.HasPrefix(member, "principalHierarchy:") -} - -func normalizeIamMemberCasing(member string) string { - var pieces []string - if iam_strings.HasPrefix(member, "deleted:") { - pieces = iam_strings.SplitN(member, ":", 3) - if len(pieces) > 2 && !iamMemberIsCaseSensitive(iam_strings.TrimPrefix(member, "deleted:")) { - pieces[2] = iam_strings.ToLower(pieces[2]) - } - } else if !iamMemberIsCaseSensitive(member) { - pieces = iam_strings.SplitN(member, ":", 2) - if len(pieces) > 1 { - pieces[1] = iam_strings.ToLower(pieces[1]) - } - } - - if len(pieces) > 0 { - member = iam_strings.Join(pieces, ":") - } - return member -} - -func createIamBindingsMap(bindings []*iam_cloudresourcemanager.Binding) map[iamBindingKey]map[string]struct{} { - bm := make(map[iamBindingKey]map[string]struct{}) - - for _, b := range bindings { - members := make(map[string]struct{}) - key := iamBindingKey{b.Role, conditionKeyFromCondition(b.Condition)} - - if _, ok := bm[key]; ok { - members = bm[key] - } - - for _, m := range b.Members { - m = normalizeIamMemberCasing(m) - - members[m] = struct{}{} - } - if len(members) > 0 { - bm[key] = members - } else { - delete(bm, key) - } - } - return bm -} - -func 
listFromIamBindingMap(bm map[iamBindingKey]map[string]struct{}) []*iam_cloudresourcemanager.Binding { - rb := make([]*iam_cloudresourcemanager.Binding, 0, len(bm)) - var keys []iamBindingKey - for k := range bm { - keys = append(keys, k) - } - iam_sort.Slice(keys, func(i, j int) bool { - keyI := keys[i] - keyJ := keys[j] - return iam_fmt.Sprintf("%s%s", keyI.Role, keyI.Condition.String()) < iam_fmt.Sprintf("%s%s", keyJ.Role, keyJ.Condition.String()) - }) - for _, key := range keys { - members := bm[key] - if len(members) == 0 { - continue - } - b := &iam_cloudresourcemanager.Binding{ - Role: key.Role, - Members: stringSliceFromGolangSet(members), - } - if !key.Condition.Empty() { - b.Condition = &iam_cloudresourcemanager.Expr{ - Description: key.Condition.Description, - Expression: key.Condition.Expression, - Title: key.Condition.Title, - } - } - rb = append(rb, b) - } - return rb -} - -func removeAllAuditConfigsWithService(ac []*iam_cloudresourcemanager.AuditConfig, service string) []*iam_cloudresourcemanager.AuditConfig { - acMap := createIamAuditConfigsMap(ac) - delete(acMap, service) - return listFromIamAuditConfigMap(acMap) -} - -func createIamAuditConfigsMap(auditConfigs []*iam_cloudresourcemanager.AuditConfig) map[string]map[string]map[string]struct{} { - acMap := make(map[string]map[string]map[string]struct{}) - - for _, ac := range auditConfigs { - if _, ok := acMap[ac.Service]; !ok { - acMap[ac.Service] = make(map[string]map[string]struct{}) - } - alcMap := acMap[ac.Service] - for _, alc := range ac.AuditLogConfigs { - if _, ok := alcMap[alc.LogType]; !ok { - alcMap[alc.LogType] = make(map[string]struct{}) - } - memberMap := alcMap[alc.LogType] - - for _, m := range alc.ExemptedMembers { - memberMap[m] = struct{}{} - } - } - } - - return acMap -} - -func listFromIamAuditConfigMap(acMap map[string]map[string]map[string]struct{}) []*iam_cloudresourcemanager.AuditConfig { - ac := make([]*iam_cloudresourcemanager.AuditConfig, 0, len(acMap)) - - for service, 
logConfigMap := range acMap { - if len(logConfigMap) == 0 { - continue - } - - logConfigs := make([]*iam_cloudresourcemanager.AuditLogConfig, 0, len(logConfigMap)) - for logType, memberSet := range logConfigMap { - alc := &iam_cloudresourcemanager.AuditLogConfig{ - LogType: logType, - ForceSendFields: []string{"exemptedMembers"}, - } - if len(memberSet) > 0 { - alc.ExemptedMembers = stringSliceFromGolangSet(memberSet) - } - logConfigs = append(logConfigs, alc) - } - - ac = append(ac, &iam_cloudresourcemanager.AuditConfig{ - Service: service, - AuditLogConfigs: logConfigs, - }) - } - return ac -} - -func jsonPolicyDiffSuppress(k, old, new string, d *iam_schema.ResourceData) bool { - if old == "" && new == "" { - return true - } - - var oldPolicy, newPolicy iam_cloudresourcemanager.Policy - if old != "" && new != "" { - if err := iam_json.Unmarshal([]byte(old), &oldPolicy); err != nil { - iam_log.Printf("[ERROR] Could not unmarshal old policy %s: %v", old, err) - return false - } - if err := iam_json.Unmarshal([]byte(new), &newPolicy); err != nil { - iam_log.Printf("[ERROR] Could not unmarshal new policy %s: %v", new, err) - return false - } - - return compareIamPolicies(&newPolicy, &oldPolicy) - } - - return false -} - -func compareIamPolicies(a, b *iam_cloudresourcemanager.Policy) bool { - if a.Etag != b.Etag { - iam_log.Printf("[DEBUG] policies etag differ: %q vs %q", a.Etag, b.Etag) - return false - } - if a.Version != b.Version { - iam_log.Printf("[DEBUG] policies version differ: %q vs %q", a.Version, b.Version) - return false - } - if !compareBindings(a.Bindings, b.Bindings) { - iam_log.Printf("[DEBUG] policies bindings differ: %#v vs %#v", a.Bindings, b.Bindings) - return false - } - if !compareAuditConfigs(a.AuditConfigs, b.AuditConfigs) { - iam_log.Printf("[DEBUG] policies audit configs differ: %#v vs %#v", a.AuditConfigs, b.AuditConfigs) - return false - } - return true -} - -func compareBindings(a, b []*iam_cloudresourcemanager.Binding) bool { - aMap := 
createIamBindingsMap(a) - bMap := createIamBindingsMap(b) - return iam_reflect.DeepEqual(aMap, bMap) -} - -func compareAuditConfigs(a, b []*iam_cloudresourcemanager.AuditConfig) bool { - aMap := createIamAuditConfigsMap(a) - bMap := createIamAuditConfigsMap(b) - return iam_reflect.DeepEqual(aMap, bMap) -} - -type IamSettings struct { - DeprecationMessage string -} - -func IamWithDeprecationMessage(message string) func(s *IamSettings) { - return func(s *IamSettings) { - s.DeprecationMessage = message - } -} - -func IamWithGAResourceDeprecation() func(s *IamSettings) { - return IamWithDeprecationMessage("This resource has been deprecated in the google (GA) provider, and will only be available in the google-beta provider in a future release.") -} - -const ( - batchKeyTmplModifyIamPolicy = "%s modifyIamPolicy" - - IamBatchingEnabled = true - IamBatchingDisabled = false -) - -func BatchRequestModifyIamPolicy(updater ResourceIamUpdater, modify iamPolicyModifyFunc, config *Config, reqDesc string) error { - batchKey := iam_batching_fmt.Sprintf(batchKeyTmplModifyIamPolicy, updater.GetMutexKey()) - - request := &BatchRequest{ - ResourceName: updater.GetResourceId(), - Body: []iamPolicyModifyFunc{modify}, - CombineF: combineBatchIamPolicyModifiers, - SendF: sendBatchModifyIamPolicy(updater), - DebugId: reqDesc, - } - - _, err := config.requestBatcherIam.SendRequestWithTimeout(batchKey, request, iam_batching_time.Minute*30) - return err -} - -func combineBatchIamPolicyModifiers(currV interface{}, toAddV interface{}) (interface{}, error) { - currModifiers, ok := currV.([]iamPolicyModifyFunc) - if !ok { - return nil, iam_batching_fmt.Errorf("provider error in batch combiner: expected data to be type []iamPolicyModifyFunc, got %v with type %T", currV, currV) - } - - newModifiers, ok := toAddV.([]iamPolicyModifyFunc) - if !ok { - return nil, iam_batching_fmt.Errorf("provider error in batch combiner: expected data to be type []iamPolicyModifyFunc, got %v with type %T", currV, 
currV) - } - - return append(currModifiers, newModifiers...), nil -} - -func sendBatchModifyIamPolicy(updater ResourceIamUpdater) BatcherSendFunc { - return func(resourceName string, body interface{}) (interface{}, error) { - modifiers, ok := body.([]iamPolicyModifyFunc) - if !ok { - return nil, iam_batching_fmt.Errorf("provider error: expected data to be type []iamPolicyModifyFunc, got %v with type %T", body, body) - } - return nil, iamPolicyReadModifyWrite(updater, func(policy *iam_batching_cloudresourcemanager.Policy) error { - for _, modifyF := range modifiers { - if err := modifyF(policy); err != nil { - return err - } - } - return nil - }) - } -} - -var IamBigqueryDatasetSchema = map[string]*iam_bigquery_dataset_schema.Schema{ - "dataset_id": { - Type: iam_bigquery_dataset_schema.TypeString, - Required: true, - ForceNew: true, - }, - "project": { - Type: iam_bigquery_dataset_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, -} - -var bigqueryAccessPrimitiveToRoleMap = map[string]string{ - "OWNER": "roles/bigquery.dataOwner", - "WRITER": "roles/bigquery.dataEditor", - "READER": "roles/bigquery.dataViewer", -} - -type BigqueryDatasetIamUpdater struct { - project string - datasetId string - d TerraformResourceData - Config *Config -} - -func NewBigqueryDatasetIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - if err := d.Set("project", project); err != nil { - return nil, iam_bigquery_dataset_fmt.Errorf("Error setting project: %s", err) - } - - return &BigqueryDatasetIamUpdater{ - project: project, - datasetId: d.Get("dataset_id").(string), - d: d, - Config: config, - }, nil -} - -func BigqueryDatasetIdParseFunc(d *iam_bigquery_dataset_schema.ResourceData, config *Config) error { - fv, err := parseProjectFieldValue("datasets", d.Id(), "project", d, config, false) - if err != nil { - return err - } - - if err := 
d.Set("project", fv.Project); err != nil { - return iam_bigquery_dataset_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("dataset_id", fv.Name); err != nil { - return iam_bigquery_dataset_fmt.Errorf("Error setting dataset_id: %s", err) - } - - d.SetId(fv.RelativeLink()) - return nil -} - -func (u *BigqueryDatasetIamUpdater) GetResourceIamPolicy() (*iam_bigquery_dataset_cloudresourcemanager.Policy, error) { - url := iam_bigquery_dataset_fmt.Sprintf("%s%s", u.Config.BigQueryBasePath, u.GetResourceId()) - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(u.Config, "GET", u.project, url, userAgent, nil) - if err != nil { - return nil, iam_bigquery_dataset_errwrap.Wrapf(iam_bigquery_dataset_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - policy, err := accessToPolicy(res["access"]) - if err != nil { - return nil, err - } - return policy, nil -} - -func (u *BigqueryDatasetIamUpdater) SetResourceIamPolicy(policy *iam_bigquery_dataset_cloudresourcemanager.Policy) error { - url := iam_bigquery_dataset_fmt.Sprintf("%s%s", u.Config.BigQueryBasePath, u.GetResourceId()) - - access, err := policyToAccess(policy) - if err != nil { - return err - } - obj := map[string]interface{}{ - "access": access, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequest(u.Config, "PATCH", u.project, url, userAgent, obj) - if err != nil { - return iam_bigquery_dataset_fmt.Errorf("Error creating DatasetAccess: %s", err) - } - - return nil -} - -func accessToPolicy(access interface{}) (*iam_bigquery_dataset_cloudresourcemanager.Policy, error) { - if access == nil { - return nil, nil - } - roleToBinding := make(map[string]*iam_bigquery_dataset_cloudresourcemanager.Binding) - - accessArr := access.([]interface{}) - for _, v := range accessArr { - memberRole := 
v.(map[string]interface{}) - rawRole, ok := memberRole["role"] - if !ok { - - continue - } - role := rawRole.(string) - if iamRole, ok := bigqueryAccessPrimitiveToRoleMap[role]; ok { - - role = iamRole - } - member, err := accessToIamMember(memberRole) - if err != nil { - return nil, err - } - - binding, ok := roleToBinding[role] - if !ok { - binding = &iam_bigquery_dataset_cloudresourcemanager.Binding{Role: role, Members: []string{}} - } - binding.Members = append(binding.Members, member) - - roleToBinding[role] = binding - } - bindings := make([]*iam_bigquery_dataset_cloudresourcemanager.Binding, 0) - for _, v := range roleToBinding { - bindings = append(bindings, v) - } - - return &iam_bigquery_dataset_cloudresourcemanager.Policy{Bindings: bindings}, nil -} - -func policyToAccess(policy *iam_bigquery_dataset_cloudresourcemanager.Policy) ([]map[string]interface{}, error) { - res := make([]map[string]interface{}, 0) - if len(policy.AuditConfigs) != 0 { - return nil, iam_bigquery_dataset_errors.New("Access policies not allowed on BigQuery Dataset IAM policies") - } - for _, binding := range policy.Bindings { - if binding.Condition != nil { - return nil, iam_bigquery_dataset_errors.New("IAM conditions not allowed on BigQuery Dataset IAM") - } - if fullRole, ok := bigqueryAccessPrimitiveToRoleMap[binding.Role]; ok { - return nil, iam_bigquery_dataset_fmt.Errorf("BigQuery Dataset legacy role %s is not allowed when using google_bigquery_dataset_iam resources. 
Please use the full form: %s", binding.Role, fullRole) - } - for _, member := range binding.Members { - - if iam_bigquery_dataset_strings.HasPrefix(member, "deleted:") { - continue - } - access := map[string]interface{}{ - "role": binding.Role, - } - memberType, member, err := iamMemberToAccess(member) - if err != nil { - return nil, err - } - access[memberType] = member - res = append(res, access) - } - } - - return res, nil -} - -func iamMemberToAccess(member string) (string, string, error) { - if iam_bigquery_dataset_strings.HasPrefix(member, "deleted:") { - return "", "", iam_bigquery_dataset_fmt.Errorf("BigQuery Dataset IAM member is deleted: %s", member) - } - - pieces := iam_bigquery_dataset_strings.SplitN(member, ":", 2) - if len(pieces) > 1 { - switch pieces[0] { - case "group": - return "groupByEmail", pieces[1], nil - case "domain": - return "domain", pieces[1], nil - case "user": - return "userByEmail", pieces[1], nil - case "serviceAccount": - return "userByEmail", pieces[1], nil - default: - return "", "", iam_bigquery_dataset_fmt.Errorf("Failed to parse BigQuery Dataset IAM member type: %s", member) - } - } - if member == "projectOwners" || member == "projectReaders" || member == "projectWriters" || member == "allAuthenticatedUsers" { - - return "specialGroup", member, nil - } - return "iamMember", member, nil -} - -func accessToIamMember(access map[string]interface{}) (string, error) { - - if member, ok := access["groupByEmail"]; ok { - return iam_bigquery_dataset_fmt.Sprintf("group:%s", member.(string)), nil - } - if member, ok := access["domain"]; ok { - return iam_bigquery_dataset_fmt.Sprintf("domain:%s", member.(string)), nil - } - if member, ok := access["specialGroup"]; ok { - return member.(string), nil - } - if member, ok := access["iamMember"]; ok { - return member.(string), nil - } - if _, ok := access["view"]; ok { - - return "", iam_bigquery_dataset_fmt.Errorf("Failed to convert BigQuery Dataset access to IAM member. 
To use views with a dataset, please use dataset_access") - } - if member, ok := access["userByEmail"]; ok { - - if iam_bigquery_dataset_strings.Contains(member.(string), "gserviceaccount") { - return iam_bigquery_dataset_fmt.Sprintf("serviceAccount:%s", member.(string)), nil - } - return iam_bigquery_dataset_fmt.Sprintf("user:%s", member.(string)), nil - } - return "", iam_bigquery_dataset_fmt.Errorf("Failed to identify IAM member from BigQuery Dataset access: %v", access) -} - -func (u *BigqueryDatasetIamUpdater) GetResourceId() string { - return iam_bigquery_dataset_fmt.Sprintf("projects/%s/datasets/%s", u.project, u.datasetId) -} - -func (u *BigqueryDatasetIamUpdater) GetMutexKey() string { - return iam_bigquery_dataset_fmt.Sprintf("%s", u.datasetId) -} - -func (u *BigqueryDatasetIamUpdater) DescribeResource() string { - return iam_bigquery_dataset_fmt.Sprintf("Bigquery Dataset %s/%s", u.project, u.datasetId) -} - -var BigQueryTableIamSchema = map[string]*iam_bigquery_table_schema.Schema{ - "project": { - Type: iam_bigquery_table_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "dataset_id": { - Type: iam_bigquery_table_schema.TypeString, - Required: true, - ForceNew: true, - }, - "table_id": { - Type: iam_bigquery_table_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type BigQueryTableIamUpdater struct { - project string - datasetId string - tableId string - d TerraformResourceData - Config *Config -} - -func BigQueryTableIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_bigquery_table_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("dataset_id"); ok { - values["dataset_id"] = v.(string) - } - - if v, ok 
:= d.GetOk("table_id"); ok { - values["table_id"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/datasets/(?P[^/]+)/tables/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("table_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &BigQueryTableIamUpdater{ - project: values["project"], - datasetId: values["dataset_id"], - tableId: values["table_id"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_bigquery_table_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("dataset_id", u.datasetId); err != nil { - return nil, iam_bigquery_table_fmt.Errorf("Error setting dataset_id: %s", err) - } - if err := d.Set("table_id", u.GetResourceId()); err != nil { - return nil, iam_bigquery_table_fmt.Errorf("Error setting table_id: %s", err) - } - - return u, nil -} - -func BigQueryTableIdParseFunc(d *iam_bigquery_table_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/datasets/(?P[^/]+)/tables/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &BigQueryTableIamUpdater{ - project: values["project"], - datasetId: values["dataset_id"], - tableId: values["table_id"], - d: d, - Config: config, - } - if err := d.Set("table_id", u.GetResourceId()); err != nil { - return iam_bigquery_table_fmt.Errorf("Error setting table_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *BigQueryTableIamUpdater) GetResourceIamPolicy() (*iam_bigquery_table_cloudresourcemanager.Policy, error) { - url, err := u.qualifyTableUrl("getIamPolicy") - if 
err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - "requestedPolicyVersion": 1, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, iam_bigquery_table_errwrap.Wrapf(iam_bigquery_table_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_bigquery_table_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_bigquery_table_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *BigQueryTableIamUpdater) SetResourceIamPolicy(policy *iam_bigquery_table_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - json["version"] = 1 - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyTableUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_bigquery_table_schema.TimeoutCreate)) - if err != nil { - return iam_bigquery_table_errwrap.Wrapf(iam_bigquery_table_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BigQueryTableIamUpdater) qualifyTableUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_bigquery_table_fmt.Sprintf("{{BigQueryBasePath}}%s:%s", iam_bigquery_table_fmt.Sprintf("projects/%s/datasets/%s/tables/%s", 
u.project, u.datasetId, u.tableId), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *BigQueryTableIamUpdater) GetResourceId() string { - return iam_bigquery_table_fmt.Sprintf("projects/%s/datasets/%s/tables/%s", u.project, u.datasetId, u.tableId) -} - -func (u *BigQueryTableIamUpdater) GetMutexKey() string { - return iam_bigquery_table_fmt.Sprintf("iam-bigquery-table-%s", u.GetResourceId()) -} - -func (u *BigQueryTableIamUpdater) DescribeResource() string { - return iam_bigquery_table_fmt.Sprintf("bigquery table %q", u.GetResourceId()) -} - -var IamBigtableInstanceSchema = map[string]*iam_bigtable_instance_schema.Schema{ - "instance": { - Type: iam_bigtable_instance_schema.TypeString, - Required: true, - ForceNew: true, - }, - "project": { - Type: iam_bigtable_instance_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, -} - -type BigtableInstanceIamUpdater struct { - project string - instance string - d TerraformResourceData - Config *Config -} - -func NewBigtableInstanceUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - if err := d.Set("project", project); err != nil { - return nil, iam_bigtable_instance_fmt.Errorf("Error setting project: %s", err) - } - - return &BigtableInstanceIamUpdater{ - project: project, - instance: d.Get("instance").(string), - d: d, - Config: config, - }, nil -} - -func BigtableInstanceIdParseFunc(d *iam_bigtable_instance_schema.ResourceData, config *Config) error { - fv, err := parseProjectFieldValue("instances", d.Id(), "project", d, config, false) - if err != nil { - return err - } - - if err := d.Set("project", fv.Project); err != nil { - return iam_bigtable_instance_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("instance", fv.Name); err != nil { - return 
iam_bigtable_instance_fmt.Errorf("Error setting instance: %s", err) - } - - d.SetId(fv.RelativeLink()) - return nil -} - -func (u *BigtableInstanceIamUpdater) GetResourceIamPolicy() (*iam_bigtable_instance_cloudresourcemanager.Policy, error) { - req := &iam_bigtable_instance_bigtableadmin.GetIamPolicyRequest{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewBigTableProjectsInstancesClient(userAgent).GetIamPolicy(u.GetResourceId(), req).Do() - if err != nil { - return nil, iam_bigtable_instance_errwrap.Wrapf(iam_bigtable_instance_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := bigtableToResourceManagerPolicy(p) - if err != nil { - return nil, iam_bigtable_instance_errwrap.Wrapf(iam_bigtable_instance_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *BigtableInstanceIamUpdater) SetResourceIamPolicy(policy *iam_bigtable_instance_cloudresourcemanager.Policy) error { - bigtablePolicy, err := resourceManagerToBigtablePolicy(policy) - if err != nil { - return iam_bigtable_instance_errwrap.Wrapf(iam_bigtable_instance_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - req := &iam_bigtable_instance_bigtableadmin.SetIamPolicyRequest{Policy: bigtablePolicy} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = u.Config.NewBigTableProjectsInstancesClient(userAgent).SetIamPolicy(u.GetResourceId(), req).Do() - if err != nil { - return iam_bigtable_instance_errwrap.Wrapf(iam_bigtable_instance_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BigtableInstanceIamUpdater) GetResourceId() string { - return iam_bigtable_instance_fmt.Sprintf("projects/%s/instances/%s", u.project, 
u.instance) -} - -func (u *BigtableInstanceIamUpdater) GetMutexKey() string { - return iam_bigtable_instance_fmt.Sprintf("iam-bigtable-instance-%s-%s", u.project, u.instance) -} - -func (u *BigtableInstanceIamUpdater) DescribeResource() string { - return iam_bigtable_instance_fmt.Sprintf("Bigtable Instance %s/%s", u.project, u.instance) -} - -func resourceManagerToBigtablePolicy(p *iam_bigtable_instance_cloudresourcemanager.Policy) (*iam_bigtable_instance_bigtableadmin.Policy, error) { - out := &iam_bigtable_instance_bigtableadmin.Policy{} - err := Convert(p, out) - if err != nil { - return nil, iam_bigtable_instance_errwrap.Wrapf("Cannot convert a bigtable policy to a cloudresourcemanager policy: {{err}}", err) - } - return out, nil -} - -func bigtableToResourceManagerPolicy(p *iam_bigtable_instance_bigtableadmin.Policy) (*iam_bigtable_instance_cloudresourcemanager.Policy, error) { - out := &iam_bigtable_instance_cloudresourcemanager.Policy{} - err := Convert(p, out) - if err != nil { - return nil, iam_bigtable_instance_errwrap.Wrapf("Cannot convert a cloudresourcemanager policy to a bigtable policy: {{err}}", err) - } - return out, nil -} - -var IamBigtableTableSchema = map[string]*iam_bigtable_table_schema.Schema{ - "instance": { - Type: iam_bigtable_table_schema.TypeString, - Required: true, - ForceNew: true, - }, - "project": { - Type: iam_bigtable_table_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "table": { - Type: iam_bigtable_table_schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type BigtableTableIamUpdater struct { - project string - instance string - table string - d TerraformResourceData - Config *Config -} - -func NewBigtableTableUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - if err := d.Set("project", project); err != nil { - return nil, iam_bigtable_table_fmt.Errorf("Error setting 
project: %s", err) - } - - return &BigtableTableIamUpdater{ - project: project, - instance: d.Get("instance").(string), - table: d.Get("table").(string), - d: d, - Config: config, - }, nil -} - -func BigtableTableIdParseFunc(d *iam_bigtable_table_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/instances/(?P[^/]+)/tables/(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - project, _ := getProject(d, config) - - for k, v := range m { - values[k] = v - } - - if err := d.Set("project", project); err != nil { - return iam_bigtable_table_fmt.Errorf("Error setting project: %s", err) - } - - if err := d.Set("instance", values["instance"]); err != nil { - return iam_bigtable_table_fmt.Errorf("Error setting instance: %s", err) - } - - if err := d.Set("table", values["table"]); err != nil { - return iam_bigtable_table_fmt.Errorf("Error setting table: %s", err) - } - - d.SetId(iam_bigtable_table_fmt.Sprintf("projects/%s/instances/%s/tables/%s", project, values["instance"], values["table"])) - return nil -} - -func (u *BigtableTableIamUpdater) GetResourceIamPolicy() (*iam_bigtable_table_cloudresourcemanager.Policy, error) { - req := &iam_bigtable_table_bigtableadmin.GetIamPolicyRequest{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewBigTableProjectsInstancesTablesClient(userAgent).GetIamPolicy(u.GetResourceId(), req).Do() - if err != nil { - return nil, iam_bigtable_table_errwrap.Wrapf(iam_bigtable_table_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := bigtableToResourceManagerPolicy(p) - if err != nil { - return nil, iam_bigtable_table_errwrap.Wrapf(iam_bigtable_table_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u 
*BigtableTableIamUpdater) SetResourceIamPolicy(policy *iam_bigtable_table_cloudresourcemanager.Policy) error { - bigtablePolicy, err := resourceManagerToBigtablePolicy(policy) - if err != nil { - return iam_bigtable_table_errwrap.Wrapf(iam_bigtable_table_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - req := &iam_bigtable_table_bigtableadmin.SetIamPolicyRequest{Policy: bigtablePolicy} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = u.Config.NewBigTableProjectsInstancesTablesClient(userAgent).SetIamPolicy(u.GetResourceId(), req).Do() - if err != nil { - return iam_bigtable_table_errwrap.Wrapf(iam_bigtable_table_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BigtableTableIamUpdater) GetResourceId() string { - return iam_bigtable_table_fmt.Sprintf("projects/%s/instances/%s/tables/%s", u.project, u.instance, u.table) -} - -func (u *BigtableTableIamUpdater) GetMutexKey() string { - return iam_bigtable_table_fmt.Sprintf("iam-bigtable-instance-%s-%s-%s", u.project, u.instance, u.table) -} - -func (u *BigtableTableIamUpdater) DescribeResource() string { - return iam_bigtable_table_fmt.Sprintf("Bigtable Table %s/%s-%s", u.project, u.instance, u.table) -} - -var IamBillingAccountSchema = map[string]*iam_billing_account_schema.Schema{ - "billing_account_id": { - Type: iam_billing_account_schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type BillingAccountIamUpdater struct { - billingAccountId string - d TerraformResourceData - Config *Config -} - -func NewBillingAccountIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - return &BillingAccountIamUpdater{ - billingAccountId: canonicalBillingAccountId(d.Get("billing_account_id").(string)), - d: d, - Config: config, - }, nil -} - -func BillingAccountIdParseFunc(d *iam_billing_account_schema.ResourceData, 
_ *Config) error { - if err := d.Set("billing_account_id", d.Id()); err != nil { - return iam_billing_account_fmt.Errorf("Error setting billing_account_id: %s", err) - } - return nil -} - -func (u *BillingAccountIamUpdater) GetResourceIamPolicy() (*iam_billing_account_cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - return getBillingAccountIamPolicyByBillingAccountName(u.billingAccountId, u.Config, userAgent) -} - -func (u *BillingAccountIamUpdater) SetResourceIamPolicy(policy *iam_billing_account_cloudresourcemanager.Policy) error { - billingPolicy, err := resourceManagerToBillingPolicy(policy) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = u.Config.NewBillingClient(userAgent).BillingAccounts.SetIamPolicy("billingAccounts/"+u.billingAccountId, &iam_billing_account_cloudbilling.SetIamPolicyRequest{ - Policy: billingPolicy, - }).Do() - - if err != nil { - return iam_billing_account_errwrap.Wrapf(iam_billing_account_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BillingAccountIamUpdater) GetResourceId() string { - return u.billingAccountId -} - -func (u *BillingAccountIamUpdater) GetMutexKey() string { - return iam_billing_account_fmt.Sprintf("iam-billing-account-%s", u.billingAccountId) -} - -func (u *BillingAccountIamUpdater) DescribeResource() string { - return iam_billing_account_fmt.Sprintf("billingAccount %q", u.billingAccountId) -} - -func canonicalBillingAccountId(resource string) string { - return resource -} - -func resourceManagerToBillingPolicy(p *iam_billing_account_cloudresourcemanager.Policy) (*iam_billing_account_cloudbilling.Policy, error) { - out := &iam_billing_account_cloudbilling.Policy{} - err := Convert(p, out) - if err != nil { - return nil, 
iam_billing_account_errwrap.Wrapf("Cannot convert a v1 policy to a billing policy: {{err}}", err) - } - return out, nil -} - -func billingToResourceManagerPolicy(p *iam_billing_account_cloudbilling.Policy) (*iam_billing_account_cloudresourcemanager.Policy, error) { - out := &iam_billing_account_cloudresourcemanager.Policy{} - err := Convert(p, out) - if err != nil { - return nil, iam_billing_account_errwrap.Wrapf("Cannot convert a billing policy to a v1 policy: {{err}}", err) - } - return out, nil -} - -func getBillingAccountIamPolicyByBillingAccountName(resource string, config *Config, userAgent string) (*iam_billing_account_cloudresourcemanager.Policy, error) { - p, err := config.NewBillingClient(userAgent).BillingAccounts.GetIamPolicy("billingAccounts/" + resource).Do() - - if err != nil { - return nil, iam_billing_account_errwrap.Wrapf(iam_billing_account_fmt.Sprintf("Error retrieving IAM policy for billing account %q: {{err}}", resource), err) - } - - v1Policy, err := billingToResourceManagerPolicy(p) - if err != nil { - return nil, err - } - - return v1Policy, nil -} - -var BinaryAuthorizationAttestorIamSchema = map[string]*iam_binary_authorization_attestor_schema.Schema{ - "project": { - Type: iam_binary_authorization_attestor_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "attestor": { - Type: iam_binary_authorization_attestor_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type BinaryAuthorizationAttestorIamUpdater struct { - project string - attestor string - d TerraformResourceData - Config *Config -} - -func BinaryAuthorizationAttestorIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_binary_authorization_attestor_fmt.Errorf("Error setting project: 
%s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("attestor"); ok { - values["attestor"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/attestors/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("attestor").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &BinaryAuthorizationAttestorIamUpdater{ - project: values["project"], - attestor: values["attestor"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_binary_authorization_attestor_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("attestor", u.GetResourceId()); err != nil { - return nil, iam_binary_authorization_attestor_fmt.Errorf("Error setting attestor: %s", err) - } - - return u, nil -} - -func BinaryAuthorizationAttestorIdParseFunc(d *iam_binary_authorization_attestor_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/attestors/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &BinaryAuthorizationAttestorIamUpdater{ - project: values["project"], - attestor: values["attestor"], - d: d, - Config: config, - } - if err := d.Set("attestor", u.GetResourceId()); err != nil { - return iam_binary_authorization_attestor_fmt.Errorf("Error setting attestor: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *BinaryAuthorizationAttestorIamUpdater) GetResourceIamPolicy() (*iam_binary_authorization_attestor_cloudresourcemanager.Policy, error) { - url, err := u.qualifyAttestorUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - 
} - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, iam_binary_authorization_attestor_errwrap.Wrapf(iam_binary_authorization_attestor_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_binary_authorization_attestor_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_binary_authorization_attestor_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *BinaryAuthorizationAttestorIamUpdater) SetResourceIamPolicy(policy *iam_binary_authorization_attestor_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyAttestorUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_binary_authorization_attestor_schema.TimeoutCreate)) - if err != nil { - return iam_binary_authorization_attestor_errwrap.Wrapf(iam_binary_authorization_attestor_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BinaryAuthorizationAttestorIamUpdater) qualifyAttestorUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_binary_authorization_attestor_fmt.Sprintf("{{BinaryAuthorizationBasePath}}%s:%s", iam_binary_authorization_attestor_fmt.Sprintf("projects/%s/attestors/%s", u.project, u.attestor), methodIdentifier) - url, err := replaceVars(u.d, 
u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *BinaryAuthorizationAttestorIamUpdater) GetResourceId() string { - return iam_binary_authorization_attestor_fmt.Sprintf("projects/%s/attestors/%s", u.project, u.attestor) -} - -func (u *BinaryAuthorizationAttestorIamUpdater) GetMutexKey() string { - return iam_binary_authorization_attestor_fmt.Sprintf("iam-binaryauthorization-attestor-%s", u.GetResourceId()) -} - -func (u *BinaryAuthorizationAttestorIamUpdater) DescribeResource() string { - return iam_binary_authorization_attestor_fmt.Sprintf("binaryauthorization attestor %q", u.GetResourceId()) -} - -var CloudRunServiceIamSchema = map[string]*iam_cloud_run_service_schema.Schema{ - "project": { - Type: iam_cloud_run_service_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: iam_cloud_run_service_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "service": { - Type: iam_cloud_run_service_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type CloudRunServiceIamUpdater struct { - project string - location string - service string - d TerraformResourceData - Config *Config -} - -func CloudRunServiceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_cloud_run_service_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, iam_cloud_run_service_fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("service"); ok { - values["service"] = v.(string) - } - - m, err := 
getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("service").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &CloudRunServiceIamUpdater{ - project: values["project"], - location: values["location"], - service: values["service"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_cloud_run_service_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, iam_cloud_run_service_fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("service", u.GetResourceId()); err != nil { - return nil, iam_cloud_run_service_fmt.Errorf("Error setting service: %s", err) - } - - return u, nil -} - -func CloudRunServiceIdParseFunc(d *iam_cloud_run_service_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &CloudRunServiceIamUpdater{ - project: values["project"], - location: values["location"], - service: values["service"], - d: d, - Config: config, - } - if err := d.Set("service", u.GetResourceId()); err != nil { - return iam_cloud_run_service_fmt.Errorf("Error setting service: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *CloudRunServiceIamUpdater) GetResourceIamPolicy() (*iam_cloud_run_service_cloudresourcemanager.Policy, error) { - url, err := 
u.qualifyServiceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj, isCloudRunCreationConflict) - if err != nil { - return nil, iam_cloud_run_service_errwrap.Wrapf(iam_cloud_run_service_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_cloud_run_service_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_cloud_run_service_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *CloudRunServiceIamUpdater) SetResourceIamPolicy(policy *iam_cloud_run_service_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyServiceUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_cloud_run_service_schema.TimeoutCreate), isCloudRunCreationConflict) - if err != nil { - return iam_cloud_run_service_errwrap.Wrapf(iam_cloud_run_service_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *CloudRunServiceIamUpdater) qualifyServiceUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_cloud_run_service_fmt.Sprintf("{{CloudRunBasePath}}%s:%s", 
iam_cloud_run_service_fmt.Sprintf("v1/projects/%s/locations/%s/services/%s", u.project, u.location, u.service), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *CloudRunServiceIamUpdater) GetResourceId() string { - return iam_cloud_run_service_fmt.Sprintf("v1/projects/%s/locations/%s/services/%s", u.project, u.location, u.service) -} - -func (u *CloudRunServiceIamUpdater) GetMutexKey() string { - return iam_cloud_run_service_fmt.Sprintf("iam-cloudrun-service-%s", u.GetResourceId()) -} - -func (u *CloudRunServiceIamUpdater) DescribeResource() string { - return iam_cloud_run_service_fmt.Sprintf("cloudrun service %q", u.GetResourceId()) -} - -var CloudFunctionsCloudFunctionIamSchema = map[string]*iam_cloudfunctions_function_schema.Schema{ - "project": { - Type: iam_cloudfunctions_function_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "region": { - Type: iam_cloudfunctions_function_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "cloud_function": { - Type: iam_cloudfunctions_function_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type CloudFunctionsCloudFunctionIamUpdater struct { - project string - region string - cloudFunction string - d TerraformResourceData - Config *Config -} - -func CloudFunctionsCloudFunctionIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_cloudfunctions_function_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - region, _ := getRegion(d, config) - if region != "" { - if err := d.Set("region", region); err != nil { - return nil, iam_cloudfunctions_function_fmt.Errorf("Error setting 
region: %s", err) - } - } - values["region"] = region - if v, ok := d.GetOk("cloud_function"); ok { - values["cloud_function"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/functions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("cloud_function").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &CloudFunctionsCloudFunctionIamUpdater{ - project: values["project"], - region: values["region"], - cloudFunction: values["cloud_function"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_cloudfunctions_function_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", u.region); err != nil { - return nil, iam_cloudfunctions_function_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("cloud_function", u.GetResourceId()); err != nil { - return nil, iam_cloudfunctions_function_fmt.Errorf("Error setting cloud_function: %s", err) - } - - return u, nil -} - -func CloudFunctionsCloudFunctionIdParseFunc(d *iam_cloudfunctions_function_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - region, _ := getRegion(d, config) - if region != "" { - values["region"] = region - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/functions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &CloudFunctionsCloudFunctionIamUpdater{ - project: values["project"], - region: values["region"], - cloudFunction: values["cloud_function"], - d: d, - Config: config, - } - if err := d.Set("cloud_function", u.GetResourceId()); err != nil { - return 
iam_cloudfunctions_function_fmt.Errorf("Error setting cloud_function: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *CloudFunctionsCloudFunctionIamUpdater) GetResourceIamPolicy() (*iam_cloudfunctions_function_cloudresourcemanager.Policy, error) { - url, err := u.qualifyCloudFunctionUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, iam_cloudfunctions_function_errwrap.Wrapf(iam_cloudfunctions_function_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_cloudfunctions_function_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_cloudfunctions_function_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *CloudFunctionsCloudFunctionIamUpdater) SetResourceIamPolicy(policy *iam_cloudfunctions_function_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyCloudFunctionUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_cloudfunctions_function_schema.TimeoutCreate)) - if err != nil { - return iam_cloudfunctions_function_errwrap.Wrapf(iam_cloudfunctions_function_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", 
u.DescribeResource()), err) - } - - return nil -} - -func (u *CloudFunctionsCloudFunctionIamUpdater) qualifyCloudFunctionUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_cloudfunctions_function_fmt.Sprintf("{{CloudFunctionsBasePath}}%s:%s", iam_cloudfunctions_function_fmt.Sprintf("projects/%s/locations/%s/functions/%s", u.project, u.region, u.cloudFunction), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *CloudFunctionsCloudFunctionIamUpdater) GetResourceId() string { - return iam_cloudfunctions_function_fmt.Sprintf("projects/%s/locations/%s/functions/%s", u.project, u.region, u.cloudFunction) -} - -func (u *CloudFunctionsCloudFunctionIamUpdater) GetMutexKey() string { - return iam_cloudfunctions_function_fmt.Sprintf("iam-cloudfunctions-cloudfunction-%s", u.GetResourceId()) -} - -func (u *CloudFunctionsCloudFunctionIamUpdater) DescribeResource() string { - return iam_cloudfunctions_function_fmt.Sprintf("cloudfunctions cloudfunction %q", u.GetResourceId()) -} - -var ComputeDiskIamSchema = map[string]*iam_compute_disk_schema.Schema{ - "project": { - Type: iam_compute_disk_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "zone": { - Type: iam_compute_disk_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "name": { - Type: iam_compute_disk_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type ComputeDiskIamUpdater struct { - project string - zone string - name string - d TerraformResourceData - Config *Config -} - -func ComputeDiskIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_compute_disk_fmt.Errorf("Error setting project: 
%s", err) - } - } - values["project"] = project - zone, _ := getZone(d, config) - if zone != "" { - if err := d.Set("zone", zone); err != nil { - return nil, iam_compute_disk_fmt.Errorf("Error setting zone: %s", err) - } - } - values["zone"] = zone - if v, ok := d.GetOk("name"); ok { - values["name"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/disks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeDiskIamUpdater{ - project: values["project"], - zone: values["zone"], - name: values["name"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_compute_disk_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("zone", u.zone); err != nil { - return nil, iam_compute_disk_fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return nil, iam_compute_disk_fmt.Errorf("Error setting name: %s", err) - } - - return u, nil -} - -func ComputeDiskIdParseFunc(d *iam_compute_disk_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - zone, _ := getZone(d, config) - if zone != "" { - values["zone"] = zone - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/disks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeDiskIamUpdater{ - project: values["project"], - zone: values["zone"], - name: values["name"], - d: d, - Config: config, - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return iam_compute_disk_fmt.Errorf("Error setting 
name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *ComputeDiskIamUpdater) GetResourceIamPolicy() (*iam_compute_disk_cloudresourcemanager.Policy, error) { - url, err := u.qualifyDiskUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, iam_compute_disk_errwrap.Wrapf(iam_compute_disk_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_compute_disk_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_compute_disk_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *ComputeDiskIamUpdater) SetResourceIamPolicy(policy *iam_compute_disk_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyDiskUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_compute_disk_schema.TimeoutCreate)) - if err != nil { - return iam_compute_disk_errwrap.Wrapf(iam_compute_disk_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ComputeDiskIamUpdater) qualifyDiskUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_compute_disk_fmt.Sprintf("{{ComputeBasePath}}%s/%s", 
iam_compute_disk_fmt.Sprintf("projects/%s/zones/%s/disks/%s", u.project, u.zone, u.name), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *ComputeDiskIamUpdater) GetResourceId() string { - return iam_compute_disk_fmt.Sprintf("projects/%s/zones/%s/disks/%s", u.project, u.zone, u.name) -} - -func (u *ComputeDiskIamUpdater) GetMutexKey() string { - return iam_compute_disk_fmt.Sprintf("iam-compute-disk-%s", u.GetResourceId()) -} - -func (u *ComputeDiskIamUpdater) DescribeResource() string { - return iam_compute_disk_fmt.Sprintf("compute disk %q", u.GetResourceId()) -} - -var ComputeImageIamSchema = map[string]*iam_compute_image_schema.Schema{ - "project": { - Type: iam_compute_image_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "image": { - Type: iam_compute_image_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type ComputeImageIamUpdater struct { - project string - image string - d TerraformResourceData - Config *Config -} - -func ComputeImageIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_compute_image_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("image"); ok { - values["image"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/global/images/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("image").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeImageIamUpdater{ - project: values["project"], - image: values["image"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != 
nil { - return nil, iam_compute_image_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("image", u.GetResourceId()); err != nil { - return nil, iam_compute_image_fmt.Errorf("Error setting image: %s", err) - } - - return u, nil -} - -func ComputeImageIdParseFunc(d *iam_compute_image_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/global/images/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeImageIamUpdater{ - project: values["project"], - image: values["image"], - d: d, - Config: config, - } - if err := d.Set("image", u.GetResourceId()); err != nil { - return iam_compute_image_fmt.Errorf("Error setting image: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *ComputeImageIamUpdater) GetResourceIamPolicy() (*iam_compute_image_cloudresourcemanager.Policy, error) { - url, err := u.qualifyImageUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - url, err = addQueryParams(url, map[string]string{"optionsRequestedPolicyVersion": iam_compute_image_fmt.Sprintf("%d", iamPolicyVersion)}) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, iam_compute_image_errwrap.Wrapf(iam_compute_image_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_compute_image_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, 
iam_compute_image_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *ComputeImageIamUpdater) SetResourceIamPolicy(policy *iam_compute_image_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyImageUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_compute_image_schema.TimeoutCreate)) - if err != nil { - return iam_compute_image_errwrap.Wrapf(iam_compute_image_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ComputeImageIamUpdater) qualifyImageUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_compute_image_fmt.Sprintf("{{ComputeBasePath}}%s/%s", iam_compute_image_fmt.Sprintf("projects/%s/global/images/%s", u.project, u.image), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *ComputeImageIamUpdater) GetResourceId() string { - return iam_compute_image_fmt.Sprintf("projects/%s/global/images/%s", u.project, u.image) -} - -func (u *ComputeImageIamUpdater) GetMutexKey() string { - return iam_compute_image_fmt.Sprintf("iam-compute-image-%s", u.GetResourceId()) -} - -func (u *ComputeImageIamUpdater) DescribeResource() string { - return iam_compute_image_fmt.Sprintf("compute image %q", u.GetResourceId()) -} - -var ComputeInstanceIamSchema = map[string]*iam_compute_instance_schema.Schema{ - "project": { - Type: iam_compute_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: 
true, - }, - "zone": { - Type: iam_compute_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "instance_name": { - Type: iam_compute_instance_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type ComputeInstanceIamUpdater struct { - project string - zone string - instanceName string - d TerraformResourceData - Config *Config -} - -func ComputeInstanceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_compute_instance_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - zone, _ := getZone(d, config) - if zone != "" { - if err := d.Set("zone", zone); err != nil { - return nil, iam_compute_instance_fmt.Errorf("Error setting zone: %s", err) - } - } - values["zone"] = zone - if v, ok := d.GetOk("instance_name"); ok { - values["instance_name"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("instance_name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeInstanceIamUpdater{ - project: values["project"], - zone: values["zone"], - instanceName: values["instance_name"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_compute_instance_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("zone", u.zone); err != nil { - return nil, iam_compute_instance_fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("instance_name", u.GetResourceId()); err != nil { - return nil, iam_compute_instance_fmt.Errorf("Error setting instance_name: %s", err) - 
} - - return u, nil -} - -func ComputeInstanceIdParseFunc(d *iam_compute_instance_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - zone, _ := getZone(d, config) - if zone != "" { - values["zone"] = zone - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeInstanceIamUpdater{ - project: values["project"], - zone: values["zone"], - instanceName: values["instance_name"], - d: d, - Config: config, - } - if err := d.Set("instance_name", u.GetResourceId()); err != nil { - return iam_compute_instance_fmt.Errorf("Error setting instance_name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *ComputeInstanceIamUpdater) GetResourceIamPolicy() (*iam_compute_instance_cloudresourcemanager.Policy, error) { - url, err := u.qualifyInstanceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - url, err = addQueryParams(url, map[string]string{"optionsRequestedPolicyVersion": iam_compute_instance_fmt.Sprintf("%d", iamPolicyVersion)}) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, iam_compute_instance_errwrap.Wrapf(iam_compute_instance_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_compute_instance_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, 
iam_compute_instance_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *ComputeInstanceIamUpdater) SetResourceIamPolicy(policy *iam_compute_instance_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyInstanceUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_compute_instance_schema.TimeoutCreate)) - if err != nil { - return iam_compute_instance_errwrap.Wrapf(iam_compute_instance_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ComputeInstanceIamUpdater) qualifyInstanceUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_compute_instance_fmt.Sprintf("{{ComputeBasePath}}%s/%s", iam_compute_instance_fmt.Sprintf("projects/%s/zones/%s/instances/%s", u.project, u.zone, u.instanceName), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *ComputeInstanceIamUpdater) GetResourceId() string { - return iam_compute_instance_fmt.Sprintf("projects/%s/zones/%s/instances/%s", u.project, u.zone, u.instanceName) -} - -func (u *ComputeInstanceIamUpdater) GetMutexKey() string { - return iam_compute_instance_fmt.Sprintf("iam-compute-instance-%s", u.GetResourceId()) -} - -func (u *ComputeInstanceIamUpdater) DescribeResource() string { - return iam_compute_instance_fmt.Sprintf("compute instance %q", u.GetResourceId()) -} - -var ComputeRegionDiskIamSchema = map[string]*iam_compute_region_disk_schema.Schema{ - "project": 
{ - Type: iam_compute_region_disk_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "region": { - Type: iam_compute_region_disk_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "name": { - Type: iam_compute_region_disk_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type ComputeRegionDiskIamUpdater struct { - project string - region string - name string - d TerraformResourceData - Config *Config -} - -func ComputeRegionDiskIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_compute_region_disk_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - region, _ := getRegion(d, config) - if region != "" { - if err := d.Set("region", region); err != nil { - return nil, iam_compute_region_disk_fmt.Errorf("Error setting region: %s", err) - } - } - values["region"] = region - if v, ok := d.GetOk("name"); ok { - values["name"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/disks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeRegionDiskIamUpdater{ - project: values["project"], - region: values["region"], - name: values["name"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_compute_region_disk_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", u.region); err != nil { - return nil, iam_compute_region_disk_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("name", u.GetResourceId()); err != nil { - 
return nil, iam_compute_region_disk_fmt.Errorf("Error setting name: %s", err) - } - - return u, nil -} - -func ComputeRegionDiskIdParseFunc(d *iam_compute_region_disk_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - region, _ := getRegion(d, config) - if region != "" { - values["region"] = region - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/disks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeRegionDiskIamUpdater{ - project: values["project"], - region: values["region"], - name: values["name"], - d: d, - Config: config, - } - if err := d.Set("name", u.GetResourceId()); err != nil { - return iam_compute_region_disk_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *ComputeRegionDiskIamUpdater) GetResourceIamPolicy() (*iam_compute_region_disk_cloudresourcemanager.Policy, error) { - url, err := u.qualifyRegionDiskUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, iam_compute_region_disk_errwrap.Wrapf(iam_compute_region_disk_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_compute_region_disk_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_compute_region_disk_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - 
return out, nil -} - -func (u *ComputeRegionDiskIamUpdater) SetResourceIamPolicy(policy *iam_compute_region_disk_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyRegionDiskUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_compute_region_disk_schema.TimeoutCreate)) - if err != nil { - return iam_compute_region_disk_errwrap.Wrapf(iam_compute_region_disk_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ComputeRegionDiskIamUpdater) qualifyRegionDiskUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_compute_region_disk_fmt.Sprintf("{{ComputeBasePath}}%s/%s", iam_compute_region_disk_fmt.Sprintf("projects/%s/regions/%s/disks/%s", u.project, u.region, u.name), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *ComputeRegionDiskIamUpdater) GetResourceId() string { - return iam_compute_region_disk_fmt.Sprintf("projects/%s/regions/%s/disks/%s", u.project, u.region, u.name) -} - -func (u *ComputeRegionDiskIamUpdater) GetMutexKey() string { - return iam_compute_region_disk_fmt.Sprintf("iam-compute-regiondisk-%s", u.GetResourceId()) -} - -func (u *ComputeRegionDiskIamUpdater) DescribeResource() string { - return iam_compute_region_disk_fmt.Sprintf("compute regiondisk %q", u.GetResourceId()) -} - -var ComputeSubnetworkIamSchema = map[string]*iam_compute_subnetwork_schema.Schema{ - "project": { - Type: iam_compute_subnetwork_schema.TypeString, - Computed: true, - Optional: 
true, - ForceNew: true, - }, - "region": { - Type: iam_compute_subnetwork_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "subnetwork": { - Type: iam_compute_subnetwork_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type ComputeSubnetworkIamUpdater struct { - project string - region string - subnetwork string - d TerraformResourceData - Config *Config -} - -func ComputeSubnetworkIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_compute_subnetwork_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - region, _ := getRegion(d, config) - if region != "" { - if err := d.Set("region", region); err != nil { - return nil, iam_compute_subnetwork_fmt.Errorf("Error setting region: %s", err) - } - } - values["region"] = region - if v, ok := d.GetOk("subnetwork"); ok { - values["subnetwork"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/subnetworks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("subnetwork").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeSubnetworkIamUpdater{ - project: values["project"], - region: values["region"], - subnetwork: values["subnetwork"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_compute_subnetwork_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", u.region); err != nil { - return nil, iam_compute_subnetwork_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("subnetwork", u.GetResourceId()); err != nil { - return nil, 
iam_compute_subnetwork_fmt.Errorf("Error setting subnetwork: %s", err) - } - - return u, nil -} - -func ComputeSubnetworkIdParseFunc(d *iam_compute_subnetwork_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - region, _ := getRegion(d, config) - if region != "" { - values["region"] = region - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/subnetworks/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &ComputeSubnetworkIamUpdater{ - project: values["project"], - region: values["region"], - subnetwork: values["subnetwork"], - d: d, - Config: config, - } - if err := d.Set("subnetwork", u.GetResourceId()); err != nil { - return iam_compute_subnetwork_fmt.Errorf("Error setting subnetwork: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *ComputeSubnetworkIamUpdater) GetResourceIamPolicy() (*iam_compute_subnetwork_cloudresourcemanager.Policy, error) { - url, err := u.qualifySubnetworkUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - url, err = addQueryParams(url, map[string]string{"optionsRequestedPolicyVersion": iam_compute_subnetwork_fmt.Sprintf("%d", iamPolicyVersion)}) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, iam_compute_subnetwork_errwrap.Wrapf(iam_compute_subnetwork_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := 
&iam_compute_subnetwork_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_compute_subnetwork_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *ComputeSubnetworkIamUpdater) SetResourceIamPolicy(policy *iam_compute_subnetwork_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifySubnetworkUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_compute_subnetwork_schema.TimeoutCreate)) - if err != nil { - return iam_compute_subnetwork_errwrap.Wrapf(iam_compute_subnetwork_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ComputeSubnetworkIamUpdater) qualifySubnetworkUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_compute_subnetwork_fmt.Sprintf("{{ComputeBasePath}}%s/%s", iam_compute_subnetwork_fmt.Sprintf("projects/%s/regions/%s/subnetworks/%s", u.project, u.region, u.subnetwork), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *ComputeSubnetworkIamUpdater) GetResourceId() string { - return iam_compute_subnetwork_fmt.Sprintf("projects/%s/regions/%s/subnetworks/%s", u.project, u.region, u.subnetwork) -} - -func (u *ComputeSubnetworkIamUpdater) GetMutexKey() string { - return iam_compute_subnetwork_fmt.Sprintf("iam-compute-subnetwork-%s", u.GetResourceId()) -} - -func (u *ComputeSubnetworkIamUpdater) DescribeResource() string { - return 
iam_compute_subnetwork_fmt.Sprintf("compute subnetwork %q", u.GetResourceId()) -} - -var DataCatalogEntryGroupIamSchema = map[string]*iam_data_catalog_entry_group_schema.Schema{ - "project": { - Type: iam_data_catalog_entry_group_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "region": { - Type: iam_data_catalog_entry_group_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "entry_group": { - Type: iam_data_catalog_entry_group_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type DataCatalogEntryGroupIamUpdater struct { - project string - region string - entryGroup string - d TerraformResourceData - Config *Config -} - -func DataCatalogEntryGroupIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_data_catalog_entry_group_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - region, _ := getRegion(d, config) - if region != "" { - if err := d.Set("region", region); err != nil { - return nil, iam_data_catalog_entry_group_fmt.Errorf("Error setting region: %s", err) - } - } - values["region"] = region - if v, ok := d.GetOk("entry_group"); ok { - values["entry_group"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/entryGroups/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("entry_group").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &DataCatalogEntryGroupIamUpdater{ - project: values["project"], - region: values["region"], - entryGroup: values["entry_group"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - 
return nil, iam_data_catalog_entry_group_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", u.region); err != nil { - return nil, iam_data_catalog_entry_group_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("entry_group", u.GetResourceId()); err != nil { - return nil, iam_data_catalog_entry_group_fmt.Errorf("Error setting entry_group: %s", err) - } - - return u, nil -} - -func DataCatalogEntryGroupIdParseFunc(d *iam_data_catalog_entry_group_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - region, _ := getRegion(d, config) - if region != "" { - values["region"] = region - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/entryGroups/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &DataCatalogEntryGroupIamUpdater{ - project: values["project"], - region: values["region"], - entryGroup: values["entry_group"], - d: d, - Config: config, - } - if err := d.Set("entry_group", u.GetResourceId()); err != nil { - return iam_data_catalog_entry_group_fmt.Errorf("Error setting entry_group: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *DataCatalogEntryGroupIamUpdater) GetResourceIamPolicy() (*iam_data_catalog_entry_group_cloudresourcemanager.Policy, error) { - url, err := u.qualifyEntryGroupUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, 
iam_data_catalog_entry_group_errwrap.Wrapf(iam_data_catalog_entry_group_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_data_catalog_entry_group_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_data_catalog_entry_group_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *DataCatalogEntryGroupIamUpdater) SetResourceIamPolicy(policy *iam_data_catalog_entry_group_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyEntryGroupUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_data_catalog_entry_group_schema.TimeoutCreate)) - if err != nil { - return iam_data_catalog_entry_group_errwrap.Wrapf(iam_data_catalog_entry_group_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *DataCatalogEntryGroupIamUpdater) qualifyEntryGroupUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_data_catalog_entry_group_fmt.Sprintf("{{DataCatalogBasePath}}%s:%s", iam_data_catalog_entry_group_fmt.Sprintf("projects/%s/locations/%s/entryGroups/%s", u.project, u.region, u.entryGroup), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *DataCatalogEntryGroupIamUpdater) GetResourceId() string { - return iam_data_catalog_entry_group_fmt.Sprintf("projects/%s/locations/%s/entryGroups/%s", u.project, u.region, u.entryGroup) -} - -func (u 
*DataCatalogEntryGroupIamUpdater) GetMutexKey() string { - return iam_data_catalog_entry_group_fmt.Sprintf("iam-datacatalog-entrygroup-%s", u.GetResourceId()) -} - -func (u *DataCatalogEntryGroupIamUpdater) DescribeResource() string { - return iam_data_catalog_entry_group_fmt.Sprintf("datacatalog entrygroup %q", u.GetResourceId()) -} - -var DataCatalogTagTemplateIamSchema = map[string]*iam_data_catalog_tag_template_schema.Schema{ - "project": { - Type: iam_data_catalog_tag_template_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "region": { - Type: iam_data_catalog_tag_template_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "tag_template": { - Type: iam_data_catalog_tag_template_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type DataCatalogTagTemplateIamUpdater struct { - project string - region string - tagTemplate string - d TerraformResourceData - Config *Config -} - -func DataCatalogTagTemplateIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_data_catalog_tag_template_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - region, _ := getRegion(d, config) - if region != "" { - if err := d.Set("region", region); err != nil { - return nil, iam_data_catalog_tag_template_fmt.Errorf("Error setting region: %s", err) - } - } - values["region"] = region - if v, ok := d.GetOk("tag_template"); ok { - values["tag_template"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/tagTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("tag_template").(string)) - if err != nil { - return nil, err - } - - for 
k, v := range m { - values[k] = v - } - - u := &DataCatalogTagTemplateIamUpdater{ - project: values["project"], - region: values["region"], - tagTemplate: values["tag_template"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_data_catalog_tag_template_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", u.region); err != nil { - return nil, iam_data_catalog_tag_template_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("tag_template", u.GetResourceId()); err != nil { - return nil, iam_data_catalog_tag_template_fmt.Errorf("Error setting tag_template: %s", err) - } - - return u, nil -} - -func DataCatalogTagTemplateIdParseFunc(d *iam_data_catalog_tag_template_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - region, _ := getRegion(d, config) - if region != "" { - values["region"] = region - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/tagTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &DataCatalogTagTemplateIamUpdater{ - project: values["project"], - region: values["region"], - tagTemplate: values["tag_template"], - d: d, - Config: config, - } - if err := d.Set("tag_template", u.GetResourceId()); err != nil { - return iam_data_catalog_tag_template_fmt.Errorf("Error setting tag_template: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *DataCatalogTagTemplateIamUpdater) GetResourceIamPolicy() (*iam_data_catalog_tag_template_cloudresourcemanager.Policy, error) { - url, err := u.qualifyTagTemplateUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err 
- } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, iam_data_catalog_tag_template_errwrap.Wrapf(iam_data_catalog_tag_template_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_data_catalog_tag_template_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_data_catalog_tag_template_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *DataCatalogTagTemplateIamUpdater) SetResourceIamPolicy(policy *iam_data_catalog_tag_template_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyTagTemplateUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_data_catalog_tag_template_schema.TimeoutCreate)) - if err != nil { - return iam_data_catalog_tag_template_errwrap.Wrapf(iam_data_catalog_tag_template_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *DataCatalogTagTemplateIamUpdater) qualifyTagTemplateUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_data_catalog_tag_template_fmt.Sprintf("{{DataCatalogBasePath}}%s:%s", iam_data_catalog_tag_template_fmt.Sprintf("projects/%s/locations/%s/tagTemplates/%s", u.project, u.region, u.tagTemplate), methodIdentifier) - url, err := replaceVars(u.d, u.Config, 
urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *DataCatalogTagTemplateIamUpdater) GetResourceId() string { - return iam_data_catalog_tag_template_fmt.Sprintf("projects/%s/locations/%s/tagTemplates/%s", u.project, u.region, u.tagTemplate) -} - -func (u *DataCatalogTagTemplateIamUpdater) GetMutexKey() string { - return iam_data_catalog_tag_template_fmt.Sprintf("iam-datacatalog-tagtemplate-%s", u.GetResourceId()) -} - -func (u *DataCatalogTagTemplateIamUpdater) DescribeResource() string { - return iam_data_catalog_tag_template_fmt.Sprintf("datacatalog tagtemplate %q", u.GetResourceId()) -} - -var IamDataprocClusterSchema = map[string]*iam_dataproc_cluster_schema.Schema{ - "cluster": { - Type: iam_dataproc_cluster_schema.TypeString, - Required: true, - ForceNew: true, - }, - "project": { - Type: iam_dataproc_cluster_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "region": { - Type: iam_dataproc_cluster_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, -} - -type DataprocClusterIamUpdater struct { - project string - region string - cluster string - d TerraformResourceData - Config *Config -} - -func NewDataprocClusterUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - region, err := getRegion(d, config) - if err != nil { - return nil, err - } - - if err := d.Set("project", project); err != nil { - return nil, iam_dataproc_cluster_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", region); err != nil { - return nil, iam_dataproc_cluster_fmt.Errorf("Error setting region: %s", err) - } - - return &DataprocClusterIamUpdater{ - project: project, - region: region, - cluster: d.Get("cluster").(string), - d: d, - Config: config, - }, nil -} - -func DataprocClusterIdParseFunc(d *iam_dataproc_cluster_schema.ResourceData, config *Config) error { - 
fv, err := parseRegionalFieldValue("clusters", d.Id(), "project", "region", "zone", d, config, true) - if err != nil { - return err - } - - if err := d.Set("project", fv.Project); err != nil { - return iam_dataproc_cluster_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", fv.Region); err != nil { - return iam_dataproc_cluster_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("cluster", fv.Name); err != nil { - return iam_dataproc_cluster_fmt.Errorf("Error setting cluster: %s", err) - } - - d.SetId(fv.RelativeLink()) - return nil -} - -func (u *DataprocClusterIamUpdater) GetResourceIamPolicy() (*iam_dataproc_cluster_cloudresourcemanager.Policy, error) { - req := &iam_dataproc_cluster_dataproc.GetIamPolicyRequest{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewDataprocClient(userAgent).Projects.Regions.Clusters.GetIamPolicy(u.GetResourceId(), req).Do() - if err != nil { - return nil, iam_dataproc_cluster_errwrap.Wrapf(iam_dataproc_cluster_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := dataprocToResourceManagerPolicy(p) - if err != nil { - return nil, iam_dataproc_cluster_errwrap.Wrapf(iam_dataproc_cluster_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *DataprocClusterIamUpdater) SetResourceIamPolicy(policy *iam_dataproc_cluster_cloudresourcemanager.Policy) error { - dataprocPolicy, err := resourceManagerToDataprocPolicy(policy) - if err != nil { - return iam_dataproc_cluster_errwrap.Wrapf(iam_dataproc_cluster_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - req := &iam_dataproc_cluster_dataproc.SetIamPolicyRequest{Policy: dataprocPolicy} 
- _, err = u.Config.NewDataprocClient(userAgent).Projects.Regions.Clusters.SetIamPolicy(u.GetResourceId(), req).Do() - if err != nil { - return iam_dataproc_cluster_errwrap.Wrapf(iam_dataproc_cluster_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *DataprocClusterIamUpdater) GetResourceId() string { - return iam_dataproc_cluster_fmt.Sprintf("projects/%s/regions/%s/clusters/%s", u.project, u.region, u.cluster) -} - -func (u *DataprocClusterIamUpdater) GetMutexKey() string { - return iam_dataproc_cluster_fmt.Sprintf("iam-dataproc-cluster-%s-%s-%s", u.project, u.region, u.cluster) -} - -func (u *DataprocClusterIamUpdater) DescribeResource() string { - return iam_dataproc_cluster_fmt.Sprintf("Dataproc Cluster %s/%s/%s", u.project, u.region, u.cluster) -} - -var IamDataprocJobSchema = map[string]*iam_dataproc_job_schema.Schema{ - "job_id": { - Type: iam_dataproc_job_schema.TypeString, - Required: true, - ForceNew: true, - }, - "project": { - Type: iam_dataproc_job_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "region": { - Type: iam_dataproc_job_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, -} - -type DataprocJobIamUpdater struct { - project string - region string - jobId string - d TerraformResourceData - Config *Config -} - -func NewDataprocJobUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - region, err := getRegion(d, config) - if err != nil { - return nil, err - } - - if err := d.Set("project", project); err != nil { - return nil, iam_dataproc_job_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", region); err != nil { - return nil, iam_dataproc_job_fmt.Errorf("Error setting region: %s", err) - } - - return &DataprocJobIamUpdater{ - project: project, - region: region, - jobId: 
d.Get("job_id").(string), - d: d, - Config: config, - }, nil -} - -func DataprocJobIdParseFunc(d *iam_dataproc_job_schema.ResourceData, config *Config) error { - fv, err := parseRegionalFieldValue("jobs", d.Id(), "project", "region", "zone", d, config, true) - if err != nil { - return err - } - - if err := d.Set("job_id", fv.Name); err != nil { - return iam_dataproc_job_fmt.Errorf("Error setting job_id: %s", err) - } - if err := d.Set("project", fv.Project); err != nil { - return iam_dataproc_job_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", fv.Region); err != nil { - return iam_dataproc_job_fmt.Errorf("Error setting region: %s", err) - } - - d.SetId(fv.RelativeLink()) - return nil -} - -func (u *DataprocJobIamUpdater) GetResourceIamPolicy() (*iam_dataproc_job_cloudresourcemanager.Policy, error) { - req := &iam_dataproc_job_dataproc.GetIamPolicyRequest{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewDataprocClient(userAgent).Projects.Regions.Jobs.GetIamPolicy(u.GetResourceId(), req).Do() - if err != nil { - return nil, iam_dataproc_job_errwrap.Wrapf(iam_dataproc_job_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := dataprocToResourceManagerPolicy(p) - if err != nil { - return nil, iam_dataproc_job_errwrap.Wrapf(iam_dataproc_job_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *DataprocJobIamUpdater) SetResourceIamPolicy(policy *iam_dataproc_job_cloudresourcemanager.Policy) error { - dataprocPolicy, err := resourceManagerToDataprocPolicy(policy) - if err != nil { - return iam_dataproc_job_errwrap.Wrapf(iam_dataproc_job_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - 
return err - } - - req := &iam_dataproc_job_dataproc.SetIamPolicyRequest{Policy: dataprocPolicy} - _, err = u.Config.NewDataprocClient(userAgent).Projects.Regions.Jobs.SetIamPolicy(u.GetResourceId(), req).Do() - if err != nil { - return iam_dataproc_job_errwrap.Wrapf(iam_dataproc_job_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *DataprocJobIamUpdater) GetResourceId() string { - return iam_dataproc_job_fmt.Sprintf("projects/%s/regions/%s/jobs/%s", u.project, u.region, u.jobId) -} - -func (u *DataprocJobIamUpdater) GetMutexKey() string { - return iam_dataproc_job_fmt.Sprintf("iam-dataproc-job-%s-%s-%s", u.project, u.region, u.jobId) -} - -func (u *DataprocJobIamUpdater) DescribeResource() string { - return iam_dataproc_job_fmt.Sprintf("Dataproc Job %s/%s/%s", u.project, u.region, u.jobId) -} - -func resourceManagerToDataprocPolicy(p *iam_dataproc_job_cloudresourcemanager.Policy) (*iam_dataproc_job_dataproc.Policy, error) { - out := &iam_dataproc_job_dataproc.Policy{} - err := Convert(p, out) - if err != nil { - return nil, iam_dataproc_job_errwrap.Wrapf("Cannot convert a dataproc policy to a cloudresourcemanager policy: {{err}}", err) - } - return out, nil -} - -func dataprocToResourceManagerPolicy(p *iam_dataproc_job_dataproc.Policy) (*iam_dataproc_job_cloudresourcemanager.Policy, error) { - out := &iam_dataproc_job_cloudresourcemanager.Policy{} - err := Convert(p, out) - if err != nil { - return nil, iam_dataproc_job_errwrap.Wrapf("Cannot convert a cloudresourcemanager policy to a dataproc policy: {{err}}", err) - } - return out, nil -} - -var ServiceManagementServiceIamSchema = map[string]*iam_endpoints_service_schema.Schema{ - "service_name": { - Type: iam_endpoints_service_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type ServiceManagementServiceIamUpdater struct { - serviceName string - d TerraformResourceData - Config 
*Config -} - -func ServiceManagementServiceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - if v, ok := d.GetOk("service_name"); ok { - values["serviceName"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"services/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("service_name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &ServiceManagementServiceIamUpdater{ - serviceName: values["serviceName"], - d: d, - Config: config, - } - - if err := d.Set("service_name", u.GetResourceId()); err != nil { - return nil, iam_endpoints_service_fmt.Errorf("Error setting service_name: %s", err) - } - - return u, nil -} - -func ServiceManagementServiceIdParseFunc(d *iam_endpoints_service_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - m, err := getImportIdQualifiers([]string{"services/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &ServiceManagementServiceIamUpdater{ - serviceName: values["serviceName"], - d: d, - Config: config, - } - if err := d.Set("service_name", u.GetResourceId()); err != nil { - return iam_endpoints_service_fmt.Errorf("Error setting service_name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *ServiceManagementServiceIamUpdater) GetResourceIamPolicy() (*iam_endpoints_service_cloudresourcemanager.Policy, error) { - url, err := u.qualifyServiceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "POST", "", url, userAgent, obj) - if err != nil { - return nil, iam_endpoints_service_errwrap.Wrapf(iam_endpoints_service_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", 
u.DescribeResource()), err) - } - - out := &iam_endpoints_service_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_endpoints_service_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *ServiceManagementServiceIamUpdater) SetResourceIamPolicy(policy *iam_endpoints_service_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyServiceUrl("setIamPolicy") - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", "", url, userAgent, obj, u.d.Timeout(iam_endpoints_service_schema.TimeoutCreate)) - if err != nil { - return iam_endpoints_service_errwrap.Wrapf(iam_endpoints_service_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ServiceManagementServiceIamUpdater) qualifyServiceUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_endpoints_service_fmt.Sprintf("{{ServiceManagementBasePath}}%s:%s", iam_endpoints_service_fmt.Sprintf("services/%s", u.serviceName), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *ServiceManagementServiceIamUpdater) GetResourceId() string { - return iam_endpoints_service_fmt.Sprintf("services/%s", u.serviceName) -} - -func (u *ServiceManagementServiceIamUpdater) GetMutexKey() string { - return iam_endpoints_service_fmt.Sprintf("iam-servicemanagement-service-%s", u.GetResourceId()) -} - -func (u *ServiceManagementServiceIamUpdater) DescribeResource() string { - return iam_endpoints_service_fmt.Sprintf("servicemanagement service %q", u.GetResourceId()) -} - -var IamFolderSchema = 
map[string]*iam_folder_schema.Schema{ - "folder": { - Type: iam_folder_schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type FolderIamUpdater struct { - folderId string - d TerraformResourceData - Config *Config -} - -func NewFolderIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - return &FolderIamUpdater{ - folderId: canonicalFolderId(d.Get("folder").(string)), - d: d, - Config: config, - }, nil -} - -func FolderIdParseFunc(d *iam_folder_schema.ResourceData, _ *Config) error { - if !iam_folder_strings.HasPrefix(d.Id(), "folders/") { - d.SetId(iam_folder_fmt.Sprintf("folders/%s", d.Id())) - } - if err := d.Set("folder", d.Id()); err != nil { - return iam_folder_fmt.Errorf("Error setting folder: %s", err) - } - return nil -} - -func (u *FolderIamUpdater) GetResourceIamPolicy() (*iam_folder_cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - return getFolderIamPolicyByFolderName(u.folderId, userAgent, u.Config) -} - -func (u *FolderIamUpdater) SetResourceIamPolicy(policy *iam_folder_cloudresourcemanager.Policy) error { - v2Policy, err := v1PolicyToV2(policy) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = u.Config.NewResourceManagerV2Client(userAgent).Folders.SetIamPolicy(u.folderId, &iam_folder_cloudresourcemanagerresourceManagerV2.SetIamPolicyRequest{ - Policy: v2Policy, - UpdateMask: "bindings,etag,auditConfigs", - }).Do() - - if err != nil { - return iam_folder_errwrap.Wrapf(iam_folder_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *FolderIamUpdater) GetResourceId() string { - return u.folderId -} - -func (u *FolderIamUpdater) GetMutexKey() string { - return iam_folder_fmt.Sprintf("iam-folder-%s", u.folderId) -} - -func (u *FolderIamUpdater) 
DescribeResource() string { - return iam_folder_fmt.Sprintf("folder %q", u.folderId) -} - -func canonicalFolderId(folder string) string { - if iam_folder_strings.HasPrefix(folder, "folders/") { - return folder - } - - return "folders/" + folder -} - -func v1PolicyToV2(in *iam_folder_cloudresourcemanager.Policy) (*iam_folder_cloudresourcemanagerresourceManagerV2.Policy, error) { - out := &iam_folder_cloudresourcemanagerresourceManagerV2.Policy{} - err := Convert(in, out) - if err != nil { - return nil, iam_folder_errwrap.Wrapf("Cannot convert a v1 policy to a v2 policy: {{err}}", err) - } - return out, nil -} - -func v2PolicyToV1(in *iam_folder_cloudresourcemanagerresourceManagerV2.Policy) (*iam_folder_cloudresourcemanager.Policy, error) { - out := &iam_folder_cloudresourcemanager.Policy{} - err := Convert(in, out) - if err != nil { - return nil, iam_folder_errwrap.Wrapf("Cannot convert a v2 policy to a v1 policy: {{err}}", err) - } - return out, nil -} - -func getFolderIamPolicyByFolderName(folderName, userAgent string, config *Config) (*iam_folder_cloudresourcemanager.Policy, error) { - p, err := config.NewResourceManagerV2Client(userAgent).Folders.GetIamPolicy(folderName, - &iam_folder_cloudresourcemanagerresourceManagerV2.GetIamPolicyRequest{ - Options: &iam_folder_cloudresourcemanagerresourceManagerV2.GetPolicyOptions{ - RequestedPolicyVersion: iamPolicyVersion, - }, - }).Do() - if err != nil { - return nil, iam_folder_errwrap.Wrapf(iam_folder_fmt.Sprintf("Error retrieving IAM policy for folder %q: {{err}}", folderName), err) - } - - v1Policy, err := v2PolicyToV1(p) - if err != nil { - return nil, err - } - - return v1Policy, nil -} - -var HealthcareConsentStoreIamSchema = map[string]*iam_healthcare_consent_store_schema.Schema{ - "dataset": { - Type: iam_healthcare_consent_store_schema.TypeString, - Required: true, - ForceNew: true, - }, - "consent_store_id": { - Type: iam_healthcare_consent_store_schema.TypeString, - Required: true, - ForceNew: true, - 
DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type HealthcareConsentStoreIamUpdater struct { - dataset string - consentStoreId string - d TerraformResourceData - Config *Config -} - -func HealthcareConsentStoreIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - if v, ok := d.GetOk("dataset"); ok { - values["dataset"] = v.(string) - } - - if v, ok := d.GetOk("consent_store_id"); ok { - values["consent_store_id"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"(?P.+)/consentStores/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("consent_store_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &HealthcareConsentStoreIamUpdater{ - dataset: values["dataset"], - consentStoreId: values["consent_store_id"], - d: d, - Config: config, - } - - if err := d.Set("dataset", u.dataset); err != nil { - return nil, iam_healthcare_consent_store_fmt.Errorf("Error setting dataset: %s", err) - } - if err := d.Set("consent_store_id", u.GetResourceId()); err != nil { - return nil, iam_healthcare_consent_store_fmt.Errorf("Error setting consent_store_id: %s", err) - } - - return u, nil -} - -func HealthcareConsentStoreIdParseFunc(d *iam_healthcare_consent_store_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - m, err := getImportIdQualifiers([]string{"(?P.+)/consentStores/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &HealthcareConsentStoreIamUpdater{ - dataset: values["dataset"], - consentStoreId: values["consent_store_id"], - d: d, - Config: config, - } - if err := d.Set("consent_store_id", u.GetResourceId()); err != nil { - return iam_healthcare_consent_store_fmt.Errorf("Error setting consent_store_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *HealthcareConsentStoreIamUpdater) 
GetResourceIamPolicy() (*iam_healthcare_consent_store_cloudresourcemanager.Policy, error) { - url, err := u.qualifyConsentStoreUrl("getIamPolicy") - if err != nil { - return nil, err - } - - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "GET", "", url, userAgent, obj) - if err != nil { - return nil, iam_healthcare_consent_store_errwrap.Wrapf(iam_healthcare_consent_store_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_healthcare_consent_store_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_healthcare_consent_store_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *HealthcareConsentStoreIamUpdater) SetResourceIamPolicy(policy *iam_healthcare_consent_store_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyConsentStoreUrl("setIamPolicy") - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", "", url, userAgent, obj, u.d.Timeout(iam_healthcare_consent_store_schema.TimeoutCreate)) - if err != nil { - return iam_healthcare_consent_store_errwrap.Wrapf(iam_healthcare_consent_store_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *HealthcareConsentStoreIamUpdater) qualifyConsentStoreUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_healthcare_consent_store_fmt.Sprintf("{{HealthcareBasePath}}%s:%s", iam_healthcare_consent_store_fmt.Sprintf("%s/consentStores/%s", u.dataset, u.consentStoreId), 
methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *HealthcareConsentStoreIamUpdater) GetResourceId() string { - return iam_healthcare_consent_store_fmt.Sprintf("%s/consentStores/%s", u.dataset, u.consentStoreId) -} - -func (u *HealthcareConsentStoreIamUpdater) GetMutexKey() string { - return iam_healthcare_consent_store_fmt.Sprintf("iam-healthcare-consentstore-%s", u.GetResourceId()) -} - -func (u *HealthcareConsentStoreIamUpdater) DescribeResource() string { - return iam_healthcare_consent_store_fmt.Sprintf("healthcare consentstore %q", u.GetResourceId()) -} - -var IamHealthcareDatasetSchema = map[string]*iam_healthcare_dataset_schema.Schema{ - "dataset_id": { - Type: iam_healthcare_dataset_schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type HealthcareDatasetIamUpdater struct { - resourceId string - d TerraformResourceData - Config *Config -} - -func NewHealthcareDatasetIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - dataset := d.Get("dataset_id").(string) - datasetId, err := parseHealthcareDatasetId(dataset, config) - - if err != nil { - return nil, iam_healthcare_dataset_errwrap.Wrapf(iam_healthcare_dataset_fmt.Sprintf("Error parsing resource ID for %s: {{err}}", dataset), err) - } - - return &HealthcareDatasetIamUpdater{ - resourceId: datasetId.datasetId(), - d: d, - Config: config, - }, nil -} - -func DatasetIdParseFunc(d *iam_healthcare_dataset_schema.ResourceData, config *Config) error { - datasetId, err := parseHealthcareDatasetId(d.Id(), config) - if err != nil { - return err - } - - if err := d.Set("dataset_id", datasetId.datasetId()); err != nil { - return iam_healthcare_dataset_fmt.Errorf("Error setting dataset_id: %s", err) - } - d.SetId(datasetId.datasetId()) - return nil -} - -func (u *HealthcareDatasetIamUpdater) GetResourceIamPolicy() (*iam_healthcare_dataset_cloudresourcemanager.Policy, error) { - 
userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.GetIamPolicy(u.resourceId).Do() - - if err != nil { - return nil, iam_healthcare_dataset_errwrap.Wrapf(iam_healthcare_dataset_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := healthcareToResourceManagerPolicy(p) - - if err != nil { - return nil, iam_healthcare_dataset_errwrap.Wrapf(iam_healthcare_dataset_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *HealthcareDatasetIamUpdater) SetResourceIamPolicy(policy *iam_healthcare_dataset_cloudresourcemanager.Policy) error { - healthcarePolicy, err := resourceManagerToHealthcarePolicy(policy) - - if err != nil { - return iam_healthcare_dataset_errwrap.Wrapf(iam_healthcare_dataset_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.SetIamPolicy(u.resourceId, &iam_healthcare_dataset_healthcarehealthcare.SetIamPolicyRequest{ - Policy: healthcarePolicy, - }).Do() - - if err != nil { - return iam_healthcare_dataset_errwrap.Wrapf(iam_healthcare_dataset_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *HealthcareDatasetIamUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *HealthcareDatasetIamUpdater) GetMutexKey() string { - return u.resourceId -} - -func (u *HealthcareDatasetIamUpdater) DescribeResource() string { - return iam_healthcare_dataset_fmt.Sprintf("Healthcare Dataset %q", u.resourceId) -} - -func resourceManagerToHealthcarePolicy(p 
*iam_healthcare_dataset_cloudresourcemanager.Policy) (*iam_healthcare_dataset_healthcarehealthcare.Policy, error) { - out := &iam_healthcare_dataset_healthcarehealthcare.Policy{} - err := Convert(p, out) - if err != nil { - return nil, iam_healthcare_dataset_errwrap.Wrapf("Cannot convert a v1 policy to a healthcare policy: {{err}}", err) - } - return out, nil -} - -func healthcareToResourceManagerPolicy(p *iam_healthcare_dataset_healthcarehealthcare.Policy) (*iam_healthcare_dataset_cloudresourcemanager.Policy, error) { - out := &iam_healthcare_dataset_cloudresourcemanager.Policy{} - err := Convert(p, out) - if err != nil { - return nil, iam_healthcare_dataset_errwrap.Wrapf("Cannot convert a healthcare policy to a v1 policy: {{err}}", err) - } - return out, nil -} - -var IamHealthcareDicomStoreSchema = map[string]*iam_healthcare_dicom_store_schema.Schema{ - "dicom_store_id": { - Type: iam_healthcare_dicom_store_schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type HealthcareDicomStoreIamUpdater struct { - resourceId string - d TerraformResourceData - Config *Config -} - -func NewHealthcareDicomStoreIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - dicomStore := d.Get("dicom_store_id").(string) - dicomStoreId, err := parseHealthcareDicomStoreId(dicomStore, config) - - if err != nil { - return nil, iam_healthcare_dicom_store_errwrap.Wrapf(iam_healthcare_dicom_store_fmt.Sprintf("Error parsing resource ID for %s: {{err}}", dicomStore), err) - } - - return &HealthcareDicomStoreIamUpdater{ - resourceId: dicomStoreId.dicomStoreId(), - d: d, - Config: config, - }, nil -} - -func DicomStoreIdParseFunc(d *iam_healthcare_dicom_store_schema.ResourceData, config *Config) error { - dicomStoreId, err := parseHealthcareDicomStoreId(d.Id(), config) - if err != nil { - return err - } - if err := d.Set("dicom_store_id", dicomStoreId.dicomStoreId()); err != nil { - return iam_healthcare_dicom_store_fmt.Errorf("Error setting 
dicom_store_id: %s", err) - } - d.SetId(dicomStoreId.dicomStoreId()) - return nil -} - -func (u *HealthcareDicomStoreIamUpdater) GetResourceIamPolicy() (*iam_healthcare_dicom_store_cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.DicomStores.GetIamPolicy(u.resourceId).Do() - - if err != nil { - return nil, iam_healthcare_dicom_store_errwrap.Wrapf(iam_healthcare_dicom_store_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := healthcareToResourceManagerPolicy(p) - - if err != nil { - return nil, iam_healthcare_dicom_store_errwrap.Wrapf(iam_healthcare_dicom_store_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *HealthcareDicomStoreIamUpdater) SetResourceIamPolicy(policy *iam_healthcare_dicom_store_cloudresourcemanager.Policy) error { - healthcarePolicy, err := resourceManagerToHealthcarePolicy(policy) - - if err != nil { - return iam_healthcare_dicom_store_errwrap.Wrapf(iam_healthcare_dicom_store_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.DicomStores.SetIamPolicy(u.resourceId, &iam_healthcare_dicom_store_healthcarehealthcare.SetIamPolicyRequest{ - Policy: healthcarePolicy, - }).Do() - - if err != nil { - return iam_healthcare_dicom_store_errwrap.Wrapf(iam_healthcare_dicom_store_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *HealthcareDicomStoreIamUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *HealthcareDicomStoreIamUpdater) 
GetMutexKey() string { - return u.resourceId -} - -func (u *HealthcareDicomStoreIamUpdater) DescribeResource() string { - return iam_healthcare_dicom_store_fmt.Sprintf("Healthcare DicomStore %q", u.resourceId) -} - -var IamHealthcareFhirStoreSchema = map[string]*iam_healthcare_fhir_store_schema.Schema{ - "fhir_store_id": { - Type: iam_healthcare_fhir_store_schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type HealthcareFhirStoreIamUpdater struct { - resourceId string - d TerraformResourceData - Config *Config -} - -func NewHealthcareFhirStoreIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - fhirStore := d.Get("fhir_store_id").(string) - fhirStoreId, err := parseHealthcareFhirStoreId(fhirStore, config) - - if err != nil { - return nil, iam_healthcare_fhir_store_errwrap.Wrapf(iam_healthcare_fhir_store_fmt.Sprintf("Error parsing resource ID for %s: {{err}}", fhirStore), err) - } - - return &HealthcareFhirStoreIamUpdater{ - resourceId: fhirStoreId.fhirStoreId(), - d: d, - Config: config, - }, nil -} - -func FhirStoreIdParseFunc(d *iam_healthcare_fhir_store_schema.ResourceData, config *Config) error { - fhirStoreId, err := parseHealthcareFhirStoreId(d.Id(), config) - if err != nil { - return err - } - if err := d.Set("fhir_store_id", fhirStoreId.fhirStoreId()); err != nil { - return iam_healthcare_fhir_store_fmt.Errorf("Error setting fhir_store_id: %s", err) - } - d.SetId(fhirStoreId.fhirStoreId()) - return nil -} - -func (u *HealthcareFhirStoreIamUpdater) GetResourceIamPolicy() (*iam_healthcare_fhir_store_cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.FhirStores.GetIamPolicy(u.resourceId).Do() - - if err != nil { - return nil, iam_healthcare_fhir_store_errwrap.Wrapf(iam_healthcare_fhir_store_fmt.Sprintf("Error retrieving IAM policy for %s: 
{{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := healthcareToResourceManagerPolicy(p) - - if err != nil { - return nil, iam_healthcare_fhir_store_errwrap.Wrapf(iam_healthcare_fhir_store_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *HealthcareFhirStoreIamUpdater) SetResourceIamPolicy(policy *iam_healthcare_fhir_store_cloudresourcemanager.Policy) error { - healthcarePolicy, err := resourceManagerToHealthcarePolicy(policy) - - if err != nil { - return iam_healthcare_fhir_store_errwrap.Wrapf(iam_healthcare_fhir_store_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.FhirStores.SetIamPolicy(u.resourceId, &iam_healthcare_fhir_store_healthcarehealthcare.SetIamPolicyRequest{ - Policy: healthcarePolicy, - }).Do() - - if err != nil { - return iam_healthcare_fhir_store_errwrap.Wrapf(iam_healthcare_fhir_store_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *HealthcareFhirStoreIamUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *HealthcareFhirStoreIamUpdater) GetMutexKey() string { - return u.resourceId -} - -func (u *HealthcareFhirStoreIamUpdater) DescribeResource() string { - return iam_healthcare_fhir_store_fmt.Sprintf("Healthcare FhirStore %q", u.resourceId) -} - -var IamHealthcareHl7V2StoreSchema = map[string]*iam_healthcare_hl7_v2_store_schema.Schema{ - "hl7_v2_store_id": { - Type: iam_healthcare_hl7_v2_store_schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type HealthcareHl7V2StoreIamUpdater struct { - resourceId string - d TerraformResourceData - Config *Config -} - -func NewHealthcareHl7V2StoreIamUpdater(d TerraformResourceData, config 
*Config) (ResourceIamUpdater, error) { - hl7V2Store := d.Get("hl7_v2_store_id").(string) - hl7V2StoreId, err := parseHealthcareHl7V2StoreId(hl7V2Store, config) - - if err != nil { - return nil, iam_healthcare_hl7_v2_store_errwrap.Wrapf(iam_healthcare_hl7_v2_store_fmt.Sprintf("Error parsing resource ID for %s: {{err}}", hl7V2Store), err) - } - - return &HealthcareHl7V2StoreIamUpdater{ - resourceId: hl7V2StoreId.hl7V2StoreId(), - d: d, - Config: config, - }, nil -} - -func Hl7V2StoreIdParseFunc(d *iam_healthcare_hl7_v2_store_schema.ResourceData, config *Config) error { - hl7V2StoreId, err := parseHealthcareHl7V2StoreId(d.Id(), config) - if err != nil { - return err - } - if err := d.Set("hl7_v2_store_id", hl7V2StoreId.hl7V2StoreId()); err != nil { - return iam_healthcare_hl7_v2_store_fmt.Errorf("Error setting hl7_v2_store_id: %s", err) - } - d.SetId(hl7V2StoreId.hl7V2StoreId()) - return nil -} - -func (u *HealthcareHl7V2StoreIamUpdater) GetResourceIamPolicy() (*iam_healthcare_hl7_v2_store_cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.Hl7V2Stores.GetIamPolicy(u.resourceId).Do() - - if err != nil { - return nil, iam_healthcare_hl7_v2_store_errwrap.Wrapf(iam_healthcare_hl7_v2_store_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := healthcareToResourceManagerPolicy(p) - - if err != nil { - return nil, iam_healthcare_hl7_v2_store_errwrap.Wrapf(iam_healthcare_hl7_v2_store_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *HealthcareHl7V2StoreIamUpdater) SetResourceIamPolicy(policy *iam_healthcare_hl7_v2_store_cloudresourcemanager.Policy) error { - healthcarePolicy, err := resourceManagerToHealthcarePolicy(policy) - - if err != nil { - return 
iam_healthcare_hl7_v2_store_errwrap.Wrapf(iam_healthcare_hl7_v2_store_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = u.Config.NewHealthcareClient(userAgent).Projects.Locations.Datasets.Hl7V2Stores.SetIamPolicy(u.resourceId, &iam_healthcare_hl7_v2_store_healthcarehealthcare.SetIamPolicyRequest{ - Policy: healthcarePolicy, - }).Do() - - if err != nil { - return iam_healthcare_hl7_v2_store_errwrap.Wrapf(iam_healthcare_hl7_v2_store_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *HealthcareHl7V2StoreIamUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *HealthcareHl7V2StoreIamUpdater) GetMutexKey() string { - return u.resourceId -} - -func (u *HealthcareHl7V2StoreIamUpdater) DescribeResource() string { - return iam_healthcare_hl7_v2_store_fmt.Sprintf("Healthcare Hl7V2Store %q", u.resourceId) -} - -var IapAppEngineServiceIamSchema = map[string]*iam_iap_app_engine_service_schema.Schema{ - "project": { - Type: iam_iap_app_engine_service_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "app_id": { - Type: iam_iap_app_engine_service_schema.TypeString, - Required: true, - ForceNew: true, - }, - "service": { - Type: iam_iap_app_engine_service_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type IapAppEngineServiceIamUpdater struct { - project string - appId string - service string - d TerraformResourceData - Config *Config -} - -func IapAppEngineServiceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, 
iam_iap_app_engine_service_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("app_id"); ok { - values["appId"] = v.(string) - } - - if v, ok := d.GetOk("service"); ok { - values["service"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("service").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &IapAppEngineServiceIamUpdater{ - project: values["project"], - appId: values["appId"], - service: values["service"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_iap_app_engine_service_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("app_id", u.appId); err != nil { - return nil, iam_iap_app_engine_service_fmt.Errorf("Error setting app_id: %s", err) - } - if err := d.Set("service", u.GetResourceId()); err != nil { - return nil, iam_iap_app_engine_service_fmt.Errorf("Error setting service: %s", err) - } - - return u, nil -} - -func IapAppEngineServiceIdParseFunc(d *iam_iap_app_engine_service_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &IapAppEngineServiceIamUpdater{ - project: values["project"], - appId: values["appId"], - service: values["service"], - d: d, - Config: config, - } - if err := d.Set("service", u.GetResourceId()); err != nil { - return iam_iap_app_engine_service_fmt.Errorf("Error setting 
service: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *IapAppEngineServiceIamUpdater) GetResourceIamPolicy() (*iam_iap_app_engine_service_cloudresourcemanager.Policy, error) { - url, err := u.qualifyAppEngineServiceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - "requestedPolicyVersion": iamPolicyVersion, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, iam_iap_app_engine_service_errwrap.Wrapf(iam_iap_app_engine_service_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_iap_app_engine_service_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_iap_app_engine_service_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *IapAppEngineServiceIamUpdater) SetResourceIamPolicy(policy *iam_iap_app_engine_service_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyAppEngineServiceUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_iap_app_engine_service_schema.TimeoutCreate)) - if err != nil { - return 
iam_iap_app_engine_service_errwrap.Wrapf(iam_iap_app_engine_service_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *IapAppEngineServiceIamUpdater) qualifyAppEngineServiceUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_iap_app_engine_service_fmt.Sprintf("{{IapBasePath}}%s:%s", iam_iap_app_engine_service_fmt.Sprintf("projects/%s/iap_web/appengine-%s/services/%s", u.project, u.appId, u.service), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *IapAppEngineServiceIamUpdater) GetResourceId() string { - return iam_iap_app_engine_service_fmt.Sprintf("projects/%s/iap_web/appengine-%s/services/%s", u.project, u.appId, u.service) -} - -func (u *IapAppEngineServiceIamUpdater) GetMutexKey() string { - return iam_iap_app_engine_service_fmt.Sprintf("iam-iap-appengineservice-%s", u.GetResourceId()) -} - -func (u *IapAppEngineServiceIamUpdater) DescribeResource() string { - return iam_iap_app_engine_service_fmt.Sprintf("iap appengineservice %q", u.GetResourceId()) -} - -var IapAppEngineVersionIamSchema = map[string]*iam_iap_app_engine_version_schema.Schema{ - "project": { - Type: iam_iap_app_engine_version_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "app_id": { - Type: iam_iap_app_engine_version_schema.TypeString, - Required: true, - ForceNew: true, - }, - "service": { - Type: iam_iap_app_engine_version_schema.TypeString, - Required: true, - ForceNew: true, - }, - "version_id": { - Type: iam_iap_app_engine_version_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type IapAppEngineVersionIamUpdater struct { - project string - appId string - service string - versionId string - d TerraformResourceData - Config *Config -} - -func IapAppEngineVersionIamUpdaterProducer(d TerraformResourceData, config 
*Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_iap_app_engine_version_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("app_id"); ok { - values["appId"] = v.(string) - } - - if v, ok := d.GetOk("service"); ok { - values["service"] = v.(string) - } - - if v, ok := d.GetOk("version_id"); ok { - values["versionId"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)/services/(?P[^/]+)/versions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("version_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &IapAppEngineVersionIamUpdater{ - project: values["project"], - appId: values["appId"], - service: values["service"], - versionId: values["versionId"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_iap_app_engine_version_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("app_id", u.appId); err != nil { - return nil, iam_iap_app_engine_version_fmt.Errorf("Error setting app_id: %s", err) - } - if err := d.Set("service", u.service); err != nil { - return nil, iam_iap_app_engine_version_fmt.Errorf("Error setting service: %s", err) - } - if err := d.Set("version_id", u.GetResourceId()); err != nil { - return nil, iam_iap_app_engine_version_fmt.Errorf("Error setting version_id: %s", err) - } - - return u, nil -} - -func IapAppEngineVersionIdParseFunc(d *iam_iap_app_engine_version_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := 
getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)/services/(?P[^/]+)/versions/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &IapAppEngineVersionIamUpdater{ - project: values["project"], - appId: values["appId"], - service: values["service"], - versionId: values["versionId"], - d: d, - Config: config, - } - if err := d.Set("version_id", u.GetResourceId()); err != nil { - return iam_iap_app_engine_version_fmt.Errorf("Error setting version_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *IapAppEngineVersionIamUpdater) GetResourceIamPolicy() (*iam_iap_app_engine_version_cloudresourcemanager.Policy, error) { - url, err := u.qualifyAppEngineVersionUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - "requestedPolicyVersion": iamPolicyVersion, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, iam_iap_app_engine_version_errwrap.Wrapf(iam_iap_app_engine_version_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_iap_app_engine_version_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_iap_app_engine_version_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *IapAppEngineVersionIamUpdater) SetResourceIamPolicy(policy *iam_iap_app_engine_version_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if 
err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyAppEngineVersionUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_iap_app_engine_version_schema.TimeoutCreate)) - if err != nil { - return iam_iap_app_engine_version_errwrap.Wrapf(iam_iap_app_engine_version_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *IapAppEngineVersionIamUpdater) qualifyAppEngineVersionUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_iap_app_engine_version_fmt.Sprintf("{{IapBasePath}}%s:%s", iam_iap_app_engine_version_fmt.Sprintf("projects/%s/iap_web/appengine-%s/services/%s/versions/%s", u.project, u.appId, u.service, u.versionId), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *IapAppEngineVersionIamUpdater) GetResourceId() string { - return iam_iap_app_engine_version_fmt.Sprintf("projects/%s/iap_web/appengine-%s/services/%s/versions/%s", u.project, u.appId, u.service, u.versionId) -} - -func (u *IapAppEngineVersionIamUpdater) GetMutexKey() string { - return iam_iap_app_engine_version_fmt.Sprintf("iam-iap-appengineversion-%s", u.GetResourceId()) -} - -func (u *IapAppEngineVersionIamUpdater) DescribeResource() string { - return iam_iap_app_engine_version_fmt.Sprintf("iap appengineversion %q", u.GetResourceId()) -} - -var IapTunnelIamSchema = map[string]*iam_iap_tunnel_schema.Schema{ - "project": { - Type: iam_iap_tunnel_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type 
IapTunnelIamUpdater struct { - project string - d TerraformResourceData - Config *Config -} - -func IapTunnelIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_iap_tunnel_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_tunnel", "(?P[^/]+)"}, d, config, d.Get("project").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &IapTunnelIamUpdater{ - project: values["project"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_iap_tunnel_fmt.Errorf("Error setting project: %s", err) - } - - return u, nil -} - -func IapTunnelIdParseFunc(d *iam_iap_tunnel_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_tunnel", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &IapTunnelIamUpdater{ - project: values["project"], - d: d, - Config: config, - } - if err := d.Set("project", u.project); err != nil { - return iam_iap_tunnel_fmt.Errorf("Error setting project: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *IapTunnelIamUpdater) GetResourceIamPolicy() (*iam_iap_tunnel_cloudresourcemanager.Policy, error) { - url, err := u.qualifyTunnelUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - 
"requestedPolicyVersion": iamPolicyVersion, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, iam_iap_tunnel_errwrap.Wrapf(iam_iap_tunnel_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_iap_tunnel_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_iap_tunnel_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *IapTunnelIamUpdater) SetResourceIamPolicy(policy *iam_iap_tunnel_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyTunnelUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_iap_tunnel_schema.TimeoutCreate)) - if err != nil { - return iam_iap_tunnel_errwrap.Wrapf(iam_iap_tunnel_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *IapTunnelIamUpdater) qualifyTunnelUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_iap_tunnel_fmt.Sprintf("{{IapBasePath}}%s:%s", iam_iap_tunnel_fmt.Sprintf("projects/%s/iap_tunnel", u.project), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *IapTunnelIamUpdater) GetResourceId() string { - return iam_iap_tunnel_fmt.Sprintf("projects/%s/iap_tunnel", u.project) -} - -func (u 
*IapTunnelIamUpdater) GetMutexKey() string { - return iam_iap_tunnel_fmt.Sprintf("iam-iap-tunnel-%s", u.GetResourceId()) -} - -func (u *IapTunnelIamUpdater) DescribeResource() string { - return iam_iap_tunnel_fmt.Sprintf("iap tunnel %q", u.GetResourceId()) -} - -var IapTunnelInstanceIamSchema = map[string]*iam_iap_tunnel_instance_schema.Schema{ - "project": { - Type: iam_iap_tunnel_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "zone": { - Type: iam_iap_tunnel_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "instance": { - Type: iam_iap_tunnel_instance_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type IapTunnelInstanceIamUpdater struct { - project string - zone string - instance string - d TerraformResourceData - Config *Config -} - -func IapTunnelInstanceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_iap_tunnel_instance_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - zone, _ := getZone(d, config) - if zone != "" { - if err := d.Set("zone", zone); err != nil { - return nil, iam_iap_tunnel_instance_fmt.Errorf("Error setting zone: %s", err) - } - } - values["zone"] = zone - if v, ok := d.GetOk("instance"); ok { - values["instance"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_tunnel/zones/(?P[^/]+)/instances/(?P[^/]+)", "projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("instance").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &IapTunnelInstanceIamUpdater{ - project: 
values["project"], - zone: values["zone"], - instance: values["instance"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_iap_tunnel_instance_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("zone", u.zone); err != nil { - return nil, iam_iap_tunnel_instance_fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("instance", u.GetResourceId()); err != nil { - return nil, iam_iap_tunnel_instance_fmt.Errorf("Error setting instance: %s", err) - } - - return u, nil -} - -func IapTunnelInstanceIdParseFunc(d *iam_iap_tunnel_instance_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - zone, _ := getZone(d, config) - if zone != "" { - values["zone"] = zone - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_tunnel/zones/(?P[^/]+)/instances/(?P[^/]+)", "projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &IapTunnelInstanceIamUpdater{ - project: values["project"], - zone: values["zone"], - instance: values["instance"], - d: d, - Config: config, - } - if err := d.Set("instance", u.GetResourceId()); err != nil { - return iam_iap_tunnel_instance_fmt.Errorf("Error setting instance: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *IapTunnelInstanceIamUpdater) GetResourceIamPolicy() (*iam_iap_tunnel_instance_cloudresourcemanager.Policy, error) { - url, err := u.qualifyTunnelInstanceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - 
"requestedPolicyVersion": iamPolicyVersion, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, iam_iap_tunnel_instance_errwrap.Wrapf(iam_iap_tunnel_instance_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_iap_tunnel_instance_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_iap_tunnel_instance_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *IapTunnelInstanceIamUpdater) SetResourceIamPolicy(policy *iam_iap_tunnel_instance_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyTunnelInstanceUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_iap_tunnel_instance_schema.TimeoutCreate)) - if err != nil { - return iam_iap_tunnel_instance_errwrap.Wrapf(iam_iap_tunnel_instance_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *IapTunnelInstanceIamUpdater) qualifyTunnelInstanceUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_iap_tunnel_instance_fmt.Sprintf("{{IapBasePath}}%s:%s", iam_iap_tunnel_instance_fmt.Sprintf("projects/%s/iap_tunnel/zones/%s/instances/%s", u.project, u.zone, u.instance), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, 
nil -} - -func (u *IapTunnelInstanceIamUpdater) GetResourceId() string { - return iam_iap_tunnel_instance_fmt.Sprintf("projects/%s/iap_tunnel/zones/%s/instances/%s", u.project, u.zone, u.instance) -} - -func (u *IapTunnelInstanceIamUpdater) GetMutexKey() string { - return iam_iap_tunnel_instance_fmt.Sprintf("iam-iap-tunnelinstance-%s", u.GetResourceId()) -} - -func (u *IapTunnelInstanceIamUpdater) DescribeResource() string { - return iam_iap_tunnel_instance_fmt.Sprintf("iap tunnelinstance %q", u.GetResourceId()) -} - -var IapWebIamSchema = map[string]*iam_iap_web_schema.Schema{ - "project": { - Type: iam_iap_web_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type IapWebIamUpdater struct { - project string - d TerraformResourceData - Config *Config -} - -func IapWebIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_iap_web_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web", "(?P[^/]+)"}, d, config, d.Get("project").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &IapWebIamUpdater{ - project: values["project"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_iap_web_fmt.Errorf("Error setting project: %s", err) - } - - return u, nil -} - -func IapWebIdParseFunc(d *iam_iap_web_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web", "(?P[^/]+)"}, d, config, d.Id()) - if err 
!= nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &IapWebIamUpdater{ - project: values["project"], - d: d, - Config: config, - } - if err := d.Set("project", u.project); err != nil { - return iam_iap_web_fmt.Errorf("Error setting project: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *IapWebIamUpdater) GetResourceIamPolicy() (*iam_iap_web_cloudresourcemanager.Policy, error) { - url, err := u.qualifyWebUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - "requestedPolicyVersion": iamPolicyVersion, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, iam_iap_web_errwrap.Wrapf(iam_iap_web_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_iap_web_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_iap_web_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *IapWebIamUpdater) SetResourceIamPolicy(policy *iam_iap_web_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyWebUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_iap_web_schema.TimeoutCreate)) - if 
err != nil { - return iam_iap_web_errwrap.Wrapf(iam_iap_web_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *IapWebIamUpdater) qualifyWebUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_iap_web_fmt.Sprintf("{{IapBasePath}}%s:%s", iam_iap_web_fmt.Sprintf("projects/%s/iap_web", u.project), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *IapWebIamUpdater) GetResourceId() string { - return iam_iap_web_fmt.Sprintf("projects/%s/iap_web", u.project) -} - -func (u *IapWebIamUpdater) GetMutexKey() string { - return iam_iap_web_fmt.Sprintf("iam-iap-web-%s", u.GetResourceId()) -} - -func (u *IapWebIamUpdater) DescribeResource() string { - return iam_iap_web_fmt.Sprintf("iap web %q", u.GetResourceId()) -} - -var IapWebBackendServiceIamSchema = map[string]*iam_iap_web_backend_service_schema.Schema{ - "project": { - Type: iam_iap_web_backend_service_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "web_backend_service": { - Type: iam_iap_web_backend_service_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type IapWebBackendServiceIamUpdater struct { - project string - webBackendService string - d TerraformResourceData - Config *Config -} - -func IapWebBackendServiceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_iap_web_backend_service_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("web_backend_service"); ok { - values["web_backend_service"] = v.(string) - } - - m, err := 
getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/compute/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("web_backend_service").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &IapWebBackendServiceIamUpdater{ - project: values["project"], - webBackendService: values["web_backend_service"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_iap_web_backend_service_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("web_backend_service", u.GetResourceId()); err != nil { - return nil, iam_iap_web_backend_service_fmt.Errorf("Error setting web_backend_service: %s", err) - } - - return u, nil -} - -func IapWebBackendServiceIdParseFunc(d *iam_iap_web_backend_service_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/compute/services/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &IapWebBackendServiceIamUpdater{ - project: values["project"], - webBackendService: values["web_backend_service"], - d: d, - Config: config, - } - if err := d.Set("web_backend_service", u.GetResourceId()); err != nil { - return iam_iap_web_backend_service_fmt.Errorf("Error setting web_backend_service: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *IapWebBackendServiceIamUpdater) GetResourceIamPolicy() (*iam_iap_web_backend_service_cloudresourcemanager.Policy, error) { - url, err := u.qualifyWebBackendServiceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = 
map[string]interface{}{ - "options": map[string]interface{}{ - "requestedPolicyVersion": iamPolicyVersion, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, iam_iap_web_backend_service_errwrap.Wrapf(iam_iap_web_backend_service_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_iap_web_backend_service_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_iap_web_backend_service_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *IapWebBackendServiceIamUpdater) SetResourceIamPolicy(policy *iam_iap_web_backend_service_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyWebBackendServiceUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_iap_web_backend_service_schema.TimeoutCreate)) - if err != nil { - return iam_iap_web_backend_service_errwrap.Wrapf(iam_iap_web_backend_service_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *IapWebBackendServiceIamUpdater) qualifyWebBackendServiceUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_iap_web_backend_service_fmt.Sprintf("{{IapBasePath}}%s:%s", iam_iap_web_backend_service_fmt.Sprintf("projects/%s/iap_web/compute/services/%s", u.project, u.webBackendService), 
methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *IapWebBackendServiceIamUpdater) GetResourceId() string { - return iam_iap_web_backend_service_fmt.Sprintf("projects/%s/iap_web/compute/services/%s", u.project, u.webBackendService) -} - -func (u *IapWebBackendServiceIamUpdater) GetMutexKey() string { - return iam_iap_web_backend_service_fmt.Sprintf("iam-iap-webbackendservice-%s", u.GetResourceId()) -} - -func (u *IapWebBackendServiceIamUpdater) DescribeResource() string { - return iam_iap_web_backend_service_fmt.Sprintf("iap webbackendservice %q", u.GetResourceId()) -} - -var IapWebTypeAppEngineIamSchema = map[string]*iam_iap_web_type_app_engine_schema.Schema{ - "project": { - Type: iam_iap_web_type_app_engine_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "app_id": { - Type: iam_iap_web_type_app_engine_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: IapWebTypeAppEngineDiffSuppress, - }, -} - -func IapWebTypeAppEngineDiffSuppress(_, old, new string, _ *iam_iap_web_type_app_engine_schema.ResourceData) bool { - newParts := iam_iap_web_type_app_engine_strings.Split(new, "appengine-") - - if len(newParts) == 1 { - - if iam_iap_web_type_app_engine_strings.HasSuffix(old, iam_iap_web_type_app_engine_fmt.Sprintf("appengine-%s", new)) { - return true - } - } - return old == new -} - -type IapWebTypeAppEngineIamUpdater struct { - project string - appId string - d TerraformResourceData - Config *Config -} - -func IapWebTypeAppEngineIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_iap_web_type_app_engine_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("app_id"); ok { 
- values["appId"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("app_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &IapWebTypeAppEngineIamUpdater{ - project: values["project"], - appId: values["appId"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_iap_web_type_app_engine_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("app_id", u.GetResourceId()); err != nil { - return nil, iam_iap_web_type_app_engine_fmt.Errorf("Error setting app_id: %s", err) - } - - return u, nil -} - -func IapWebTypeAppEngineIdParseFunc(d *iam_iap_web_type_app_engine_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/appengine-(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &IapWebTypeAppEngineIamUpdater{ - project: values["project"], - appId: values["appId"], - d: d, - Config: config, - } - if err := d.Set("app_id", u.GetResourceId()); err != nil { - return iam_iap_web_type_app_engine_fmt.Errorf("Error setting app_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *IapWebTypeAppEngineIamUpdater) GetResourceIamPolicy() (*iam_iap_web_type_app_engine_cloudresourcemanager.Policy, error) { - url, err := u.qualifyWebTypeAppEngineUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - "requestedPolicyVersion": iamPolicyVersion, 
- }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, iam_iap_web_type_app_engine_errwrap.Wrapf(iam_iap_web_type_app_engine_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_iap_web_type_app_engine_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_iap_web_type_app_engine_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *IapWebTypeAppEngineIamUpdater) SetResourceIamPolicy(policy *iam_iap_web_type_app_engine_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyWebTypeAppEngineUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_iap_web_type_app_engine_schema.TimeoutCreate)) - if err != nil { - return iam_iap_web_type_app_engine_errwrap.Wrapf(iam_iap_web_type_app_engine_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *IapWebTypeAppEngineIamUpdater) qualifyWebTypeAppEngineUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_iap_web_type_app_engine_fmt.Sprintf("{{IapBasePath}}%s:%s", iam_iap_web_type_app_engine_fmt.Sprintf("projects/%s/iap_web/appengine-%s", u.project, u.appId), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u 
*IapWebTypeAppEngineIamUpdater) GetResourceId() string { - return iam_iap_web_type_app_engine_fmt.Sprintf("projects/%s/iap_web/appengine-%s", u.project, u.appId) -} - -func (u *IapWebTypeAppEngineIamUpdater) GetMutexKey() string { - return iam_iap_web_type_app_engine_fmt.Sprintf("iam-iap-webtypeappengine-%s", u.GetResourceId()) -} - -func (u *IapWebTypeAppEngineIamUpdater) DescribeResource() string { - return iam_iap_web_type_app_engine_fmt.Sprintf("iap webtypeappengine %q", u.GetResourceId()) -} - -var IapWebTypeComputeIamSchema = map[string]*iam_iap_web_type_compute_schema.Schema{ - "project": { - Type: iam_iap_web_type_compute_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type IapWebTypeComputeIamUpdater struct { - project string - d TerraformResourceData - Config *Config -} - -func IapWebTypeComputeIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_iap_web_type_compute_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/compute", "(?P[^/]+)"}, d, config, d.Get("project").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &IapWebTypeComputeIamUpdater{ - project: values["project"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_iap_web_type_compute_fmt.Errorf("Error setting project: %s", err) - } - - return u, nil -} - -func IapWebTypeComputeIdParseFunc(d *iam_iap_web_type_compute_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, 
err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/iap_web/compute", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &IapWebTypeComputeIamUpdater{ - project: values["project"], - d: d, - Config: config, - } - if err := d.Set("project", u.project); err != nil { - return iam_iap_web_type_compute_fmt.Errorf("Error setting project: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *IapWebTypeComputeIamUpdater) GetResourceIamPolicy() (*iam_iap_web_type_compute_cloudresourcemanager.Policy, error) { - url, err := u.qualifyWebTypeComputeUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - obj = map[string]interface{}{ - "options": map[string]interface{}{ - "requestedPolicyVersion": iamPolicyVersion, - }, - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "POST", project, url, userAgent, obj) - if err != nil { - return nil, iam_iap_web_type_compute_errwrap.Wrapf(iam_iap_web_type_compute_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_iap_web_type_compute_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_iap_web_type_compute_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *IapWebTypeComputeIamUpdater) SetResourceIamPolicy(policy *iam_iap_web_type_compute_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyWebTypeComputeUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != 
nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_iap_web_type_compute_schema.TimeoutCreate)) - if err != nil { - return iam_iap_web_type_compute_errwrap.Wrapf(iam_iap_web_type_compute_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *IapWebTypeComputeIamUpdater) qualifyWebTypeComputeUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_iap_web_type_compute_fmt.Sprintf("{{IapBasePath}}%s:%s", iam_iap_web_type_compute_fmt.Sprintf("projects/%s/iap_web/compute", u.project), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *IapWebTypeComputeIamUpdater) GetResourceId() string { - return iam_iap_web_type_compute_fmt.Sprintf("projects/%s/iap_web/compute", u.project) -} - -func (u *IapWebTypeComputeIamUpdater) GetMutexKey() string { - return iam_iap_web_type_compute_fmt.Sprintf("iam-iap-webtypecompute-%s", u.GetResourceId()) -} - -func (u *IapWebTypeComputeIamUpdater) DescribeResource() string { - return iam_iap_web_type_compute_fmt.Sprintf("iap webtypecompute %q", u.GetResourceId()) -} - -var IamKmsCryptoKeySchema = map[string]*iam_kms_crypto_key_schema.Schema{ - "crypto_key_id": { - Type: iam_kms_crypto_key_schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type KmsCryptoKeyIamUpdater struct { - resourceId string - d TerraformResourceData - Config *Config -} - -func NewKmsCryptoKeyIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - cryptoKey := d.Get("crypto_key_id").(string) - cryptoKeyId, err := parseKmsCryptoKeyId(cryptoKey, config) - - if err != nil { - return nil, iam_kms_crypto_key_errwrap.Wrapf(iam_kms_crypto_key_fmt.Sprintf("Error parsing resource ID for %s: {{err}}", 
cryptoKey), err) - } - - return &KmsCryptoKeyIamUpdater{ - resourceId: cryptoKeyId.cryptoKeyId(), - d: d, - Config: config, - }, nil -} - -func CryptoIdParseFunc(d *iam_kms_crypto_key_schema.ResourceData, config *Config) error { - cryptoKeyId, err := parseKmsCryptoKeyId(d.Id(), config) - if err != nil { - return err - } - if err := d.Set("crypto_key_id", cryptoKeyId.cryptoKeyId()); err != nil { - return iam_kms_crypto_key_fmt.Errorf("Error setting crypto_key_id: %s", err) - } - d.SetId(cryptoKeyId.cryptoKeyId()) - return nil -} - -func (u *KmsCryptoKeyIamUpdater) GetResourceIamPolicy() (*iam_kms_crypto_key_cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.GetIamPolicy(u.resourceId).OptionsRequestedPolicyVersion(iamPolicyVersion).Do() - - if err != nil { - return nil, iam_kms_crypto_key_errwrap.Wrapf(iam_kms_crypto_key_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := kmsToResourceManagerPolicy(p) - - if err != nil { - return nil, iam_kms_crypto_key_errwrap.Wrapf(iam_kms_crypto_key_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *KmsCryptoKeyIamUpdater) SetResourceIamPolicy(policy *iam_kms_crypto_key_cloudresourcemanager.Policy) error { - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - kmsPolicy, err := resourceManagerToKmsPolicy(policy) - - if err != nil { - return iam_kms_crypto_key_errwrap.Wrapf(iam_kms_crypto_key_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - _, err = u.Config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.SetIamPolicy(u.resourceId, &iam_kms_crypto_key_cloudkms.SetIamPolicyRequest{ - Policy: 
kmsPolicy, - }).Do() - - if err != nil { - return iam_kms_crypto_key_errwrap.Wrapf(iam_kms_crypto_key_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *KmsCryptoKeyIamUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *KmsCryptoKeyIamUpdater) GetMutexKey() string { - return iam_kms_crypto_key_fmt.Sprintf("iam-kms-crypto-key-%s", u.resourceId) -} - -func (u *KmsCryptoKeyIamUpdater) DescribeResource() string { - return iam_kms_crypto_key_fmt.Sprintf("KMS CryptoKey %q", u.resourceId) -} - -var IamKmsKeyRingSchema = map[string]*iam_kms_key_ring_schema.Schema{ - "key_ring_id": { - Type: iam_kms_key_ring_schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type KmsKeyRingIamUpdater struct { - resourceId string - d TerraformResourceData - Config *Config -} - -func NewKmsKeyRingIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - keyRing := d.Get("key_ring_id").(string) - keyRingId, err := parseKmsKeyRingId(keyRing, config) - - if err != nil { - return nil, iam_kms_key_ring_errwrap.Wrapf(iam_kms_key_ring_fmt.Sprintf("Error parsing resource ID for %s: {{err}}", keyRing), err) - } - - return &KmsKeyRingIamUpdater{ - resourceId: keyRingId.keyRingId(), - d: d, - Config: config, - }, nil -} - -func KeyRingIdParseFunc(d *iam_kms_key_ring_schema.ResourceData, config *Config) error { - keyRingId, err := parseKmsKeyRingId(d.Id(), config) - if err != nil { - return err - } - - if err := d.Set("key_ring_id", keyRingId.keyRingId()); err != nil { - return iam_kms_key_ring_fmt.Errorf("Error setting key_ring_id: %s", err) - } - d.SetId(keyRingId.keyRingId()) - return nil -} - -func (u *KmsKeyRingIamUpdater) GetResourceIamPolicy() (*iam_kms_key_ring_cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - p, err := 
u.Config.NewKmsClient(userAgent).Projects.Locations.KeyRings.GetIamPolicy(u.resourceId).OptionsRequestedPolicyVersion(iamPolicyVersion).Do() - - if err != nil { - return nil, iam_kms_key_ring_errwrap.Wrapf(iam_kms_key_ring_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := kmsToResourceManagerPolicy(p) - - if err != nil { - return nil, iam_kms_key_ring_errwrap.Wrapf(iam_kms_key_ring_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *KmsKeyRingIamUpdater) SetResourceIamPolicy(policy *iam_kms_key_ring_cloudresourcemanager.Policy) error { - kmsPolicy, err := resourceManagerToKmsPolicy(policy) - - if err != nil { - return iam_kms_key_ring_errwrap.Wrapf(iam_kms_key_ring_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = u.Config.NewKmsClient(userAgent).Projects.Locations.KeyRings.SetIamPolicy(u.resourceId, &iam_kms_key_ring_cloudkms.SetIamPolicyRequest{ - Policy: kmsPolicy, - }).Do() - - if err != nil { - return iam_kms_key_ring_errwrap.Wrapf(iam_kms_key_ring_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *KmsKeyRingIamUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *KmsKeyRingIamUpdater) GetMutexKey() string { - return iam_kms_key_ring_fmt.Sprintf("iam-kms-key-ring-%s", u.resourceId) -} - -func (u *KmsKeyRingIamUpdater) DescribeResource() string { - return iam_kms_key_ring_fmt.Sprintf("KMS KeyRing %q", u.resourceId) -} - -func resourceManagerToKmsPolicy(p *iam_kms_key_ring_cloudresourcemanager.Policy) (*iam_kms_key_ring_cloudkms.Policy, error) { - out := &iam_kms_key_ring_cloudkms.Policy{} - err := Convert(p, out) - if err != nil { - return nil, 
iam_kms_key_ring_errwrap.Wrapf("Cannot convert a v1 policy to a kms policy: {{err}}", err) - } - return out, nil -} - -func kmsToResourceManagerPolicy(p *iam_kms_key_ring_cloudkms.Policy) (*iam_kms_key_ring_cloudresourcemanager.Policy, error) { - out := &iam_kms_key_ring_cloudresourcemanager.Policy{} - err := Convert(p, out) - if err != nil { - return nil, iam_kms_key_ring_errwrap.Wrapf("Cannot convert a kms policy to a v1 policy: {{err}}", err) - } - return out, nil -} - -var NotebooksInstanceIamSchema = map[string]*iam_notebooks_instance_schema.Schema{ - "project": { - Type: iam_notebooks_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: iam_notebooks_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "instance_name": { - Type: iam_notebooks_instance_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type NotebooksInstanceIamUpdater struct { - project string - location string - instanceName string - d TerraformResourceData - Config *Config -} - -func NotebooksInstanceIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_notebooks_instance_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, iam_notebooks_instance_fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("instance_name"); ok { - values["instance_name"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", 
"(?P[^/]+)"}, d, config, d.Get("instance_name").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &NotebooksInstanceIamUpdater{ - project: values["project"], - location: values["location"], - instanceName: values["instance_name"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_notebooks_instance_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, iam_notebooks_instance_fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("instance_name", u.GetResourceId()); err != nil { - return nil, iam_notebooks_instance_fmt.Errorf("Error setting instance_name: %s", err) - } - - return u, nil -} - -func NotebooksInstanceIdParseFunc(d *iam_notebooks_instance_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &NotebooksInstanceIamUpdater{ - project: values["project"], - location: values["location"], - instanceName: values["instance_name"], - d: d, - Config: config, - } - if err := d.Set("instance_name", u.GetResourceId()); err != nil { - return iam_notebooks_instance_fmt.Errorf("Error setting instance_name: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *NotebooksInstanceIamUpdater) GetResourceIamPolicy() (*iam_notebooks_instance_cloudresourcemanager.Policy, error) { - url, err := u.qualifyInstanceUrl("getIamPolicy") - if err != nil { - return nil, err - } - - 
project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, iam_notebooks_instance_errwrap.Wrapf(iam_notebooks_instance_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_notebooks_instance_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_notebooks_instance_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *NotebooksInstanceIamUpdater) SetResourceIamPolicy(policy *iam_notebooks_instance_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyInstanceUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_notebooks_instance_schema.TimeoutCreate)) - if err != nil { - return iam_notebooks_instance_errwrap.Wrapf(iam_notebooks_instance_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *NotebooksInstanceIamUpdater) qualifyInstanceUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_notebooks_instance_fmt.Sprintf("{{NotebooksBasePath}}%s:%s", iam_notebooks_instance_fmt.Sprintf("projects/%s/locations/%s/instances/%s", u.project, u.location, u.instanceName), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if 
err != nil { - return "", err - } - return url, nil -} - -func (u *NotebooksInstanceIamUpdater) GetResourceId() string { - return iam_notebooks_instance_fmt.Sprintf("projects/%s/locations/%s/instances/%s", u.project, u.location, u.instanceName) -} - -func (u *NotebooksInstanceIamUpdater) GetMutexKey() string { - return iam_notebooks_instance_fmt.Sprintf("iam-notebooks-instance-%s", u.GetResourceId()) -} - -func (u *NotebooksInstanceIamUpdater) DescribeResource() string { - return iam_notebooks_instance_fmt.Sprintf("notebooks instance %q", u.GetResourceId()) -} - -var IamOrganizationSchema = map[string]*iam_organization_schema.Schema{ - "org_id": { - Type: iam_organization_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The numeric ID of the organization in which you want to manage the audit logging config.`, - }, -} - -type OrganizationIamUpdater struct { - resourceId string - d TerraformResourceData - Config *Config -} - -func NewOrganizationIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - return &OrganizationIamUpdater{ - resourceId: d.Get("org_id").(string), - d: d, - Config: config, - }, nil -} - -func OrgIdParseFunc(d *iam_organization_schema.ResourceData, _ *Config) error { - if err := d.Set("org_id", d.Id()); err != nil { - return iam_organization_fmt.Errorf("Error setting org_id: %s", err) - } - return nil -} - -func (u *OrganizationIamUpdater) GetResourceIamPolicy() (*iam_organization_cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewResourceManagerClient(userAgent).Organizations.GetIamPolicy( - "organizations/"+u.resourceId, - &iam_organization_cloudresourcemanager.GetIamPolicyRequest{ - Options: &iam_organization_cloudresourcemanager.GetPolicyOptions{ - RequestedPolicyVersion: iamPolicyVersion, - }, - }, - ).Do() - if err != nil { - return nil, 
iam_organization_errwrap.Wrapf(iam_organization_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return p, nil -} - -func (u *OrganizationIamUpdater) SetResourceIamPolicy(policy *iam_organization_cloudresourcemanager.Policy) error { - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = u.Config.NewResourceManagerClient(userAgent).Organizations.SetIamPolicy("organizations/"+u.resourceId, &iam_organization_cloudresourcemanager.SetIamPolicyRequest{ - Policy: policy, - UpdateMask: "bindings,etag,auditConfigs", - }).Do() - - if err != nil { - return iam_organization_errwrap.Wrapf(iam_organization_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *OrganizationIamUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *OrganizationIamUpdater) GetMutexKey() string { - return iam_organization_fmt.Sprintf("iam-organization-%s", u.resourceId) -} - -func (u *OrganizationIamUpdater) DescribeResource() string { - return iam_organization_fmt.Sprintf("organization %q", u.resourceId) -} - -var PrivatecaCaPoolIamSchema = map[string]*iam_privateca_ca_pool_schema.Schema{ - "project": { - Type: iam_privateca_ca_pool_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "location": { - Type: iam_privateca_ca_pool_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "ca_pool": { - Type: iam_privateca_ca_pool_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type PrivatecaCaPoolIamUpdater struct { - project string - location string - caPool string - d TerraformResourceData - Config *Config -} - -func PrivatecaCaPoolIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if 
project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_privateca_ca_pool_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - location, _ := getLocation(d, config) - if location != "" { - if err := d.Set("location", location); err != nil { - return nil, iam_privateca_ca_pool_fmt.Errorf("Error setting location: %s", err) - } - } - values["location"] = location - if v, ok := d.GetOk("ca_pool"); ok { - values["ca_pool"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/caPools/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config, d.Get("ca_pool").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &PrivatecaCaPoolIamUpdater{ - project: values["project"], - location: values["location"], - caPool: values["ca_pool"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_privateca_ca_pool_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("location", u.location); err != nil { - return nil, iam_privateca_ca_pool_fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("ca_pool", u.GetResourceId()); err != nil { - return nil, iam_privateca_ca_pool_fmt.Errorf("Error setting ca_pool: %s", err) - } - - return u, nil -} - -func PrivatecaCaPoolIdParseFunc(d *iam_privateca_ca_pool_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - location, _ := getLocation(d, config) - if location != "" { - values["location"] = location - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/caPools/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u 
:= &PrivatecaCaPoolIamUpdater{ - project: values["project"], - location: values["location"], - caPool: values["ca_pool"], - d: d, - Config: config, - } - if err := d.Set("ca_pool", u.GetResourceId()); err != nil { - return iam_privateca_ca_pool_fmt.Errorf("Error setting ca_pool: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *PrivatecaCaPoolIamUpdater) GetResourceIamPolicy() (*iam_privateca_ca_pool_cloudresourcemanager.Policy, error) { - url, err := u.qualifyCaPoolUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, iam_privateca_ca_pool_errwrap.Wrapf(iam_privateca_ca_pool_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_privateca_ca_pool_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_privateca_ca_pool_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *PrivatecaCaPoolIamUpdater) SetResourceIamPolicy(policy *iam_privateca_ca_pool_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyCaPoolUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_privateca_ca_pool_schema.TimeoutCreate)) - if err != nil { - return 
iam_privateca_ca_pool_errwrap.Wrapf(iam_privateca_ca_pool_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *PrivatecaCaPoolIamUpdater) qualifyCaPoolUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_privateca_ca_pool_fmt.Sprintf("{{PrivatecaBasePath}}%s:%s", iam_privateca_ca_pool_fmt.Sprintf("projects/%s/locations/%s/caPools/%s", u.project, u.location, u.caPool), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *PrivatecaCaPoolIamUpdater) GetResourceId() string { - return iam_privateca_ca_pool_fmt.Sprintf("projects/%s/locations/%s/caPools/%s", u.project, u.location, u.caPool) -} - -func (u *PrivatecaCaPoolIamUpdater) GetMutexKey() string { - return iam_privateca_ca_pool_fmt.Sprintf("iam-privateca-capool-%s", u.GetResourceId()) -} - -func (u *PrivatecaCaPoolIamUpdater) DescribeResource() string { - return iam_privateca_ca_pool_fmt.Sprintf("privateca capool %q", u.GetResourceId()) -} - -var IamProjectSchema = map[string]*iam_project_schema.Schema{ - "project": { - Type: iam_project_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareProjectName, - }, -} - -type ProjectIamUpdater struct { - resourceId string - d TerraformResourceData - Config *Config -} - -func NewProjectIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - return &ProjectIamUpdater{ - resourceId: d.Get("project").(string), - d: d, - Config: config, - }, nil -} - -func ProjectIdParseFunc(d *iam_project_schema.ResourceData, _ *Config) error { - if err := d.Set("project", d.Id()); err != nil { - return iam_project_fmt.Errorf("Error setting project: %s", err) - } - return nil -} - -func (u *ProjectIamUpdater) GetResourceIamPolicy() (*iam_project_cloudresourcemanager.Policy, error) { - projectId := GetResourceNameFromSelfLink(u.resourceId) - - userAgent, err := 
generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewResourceManagerClient(userAgent).Projects.GetIamPolicy(projectId, - &iam_project_cloudresourcemanager.GetIamPolicyRequest{ - Options: &iam_project_cloudresourcemanager.GetPolicyOptions{ - RequestedPolicyVersion: iamPolicyVersion, - }, - }).Do() - - if err != nil { - return nil, iam_project_errwrap.Wrapf(iam_project_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return p, nil -} - -func (u *ProjectIamUpdater) SetResourceIamPolicy(policy *iam_project_cloudresourcemanager.Policy) error { - projectId := GetResourceNameFromSelfLink(u.resourceId) - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = u.Config.NewResourceManagerClient(userAgent).Projects.SetIamPolicy(projectId, - &iam_project_cloudresourcemanager.SetIamPolicyRequest{ - Policy: policy, - UpdateMask: "bindings,etag,auditConfigs", - }).Do() - - if err != nil { - return iam_project_errwrap.Wrapf(iam_project_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ProjectIamUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *ProjectIamUpdater) GetMutexKey() string { - return getProjectIamPolicyMutexKey(u.resourceId) -} - -func (u *ProjectIamUpdater) DescribeResource() string { - return iam_project_fmt.Sprintf("project %q", u.resourceId) -} - -func compareProjectName(_, old, new string, _ *iam_project_schema.ResourceData) bool { - - return GetResourceNameFromSelfLink(old) == GetResourceNameFromSelfLink(new) -} - -func getProjectIamPolicyMutexKey(pid string) string { - return iam_project_fmt.Sprintf("iam-project-%s", pid) -} - -var IamPubsubSubscriptionSchema = map[string]*iam_pubsub_subscription_schema.Schema{ - "subscription": { - Type: iam_pubsub_subscription_schema.TypeString, - Required: true, - ForceNew: 
true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - "project": { - Type: iam_pubsub_subscription_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, -} - -type PubsubSubscriptionIamUpdater struct { - subscription string - d TerraformResourceData - Config *Config -} - -func NewPubsubSubscriptionIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - subscription := getComputedSubscriptionName(project, d.Get("subscription").(string)) - - return &PubsubSubscriptionIamUpdater{ - subscription: subscription, - d: d, - Config: config, - }, nil -} - -func PubsubSubscriptionIdParseFunc(d *iam_pubsub_subscription_schema.ResourceData, _ *Config) error { - if err := d.Set("subscription", d.Id()); err != nil { - return iam_pubsub_subscription_fmt.Errorf("Error setting subscription: %s", err) - } - return nil -} - -func (u *PubsubSubscriptionIamUpdater) GetResourceIamPolicy() (*iam_pubsub_subscription_cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewPubsubClient(userAgent).Projects.Subscriptions.GetIamPolicy(u.subscription).Do() - - if err != nil { - return nil, iam_pubsub_subscription_errwrap.Wrapf(iam_pubsub_subscription_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - v1Policy, err := pubsubToResourceManagerPolicy(p) - if err != nil { - return nil, err - } - - return v1Policy, nil -} - -func (u *PubsubSubscriptionIamUpdater) SetResourceIamPolicy(policy *iam_pubsub_subscription_cloudresourcemanager.Policy) error { - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - pubsubPolicy, err := resourceManagerToPubsubPolicy(policy) - if err != nil { - return err - } - - _, err = 
u.Config.NewPubsubClient(userAgent).Projects.Subscriptions.SetIamPolicy(u.subscription, &iam_pubsub_subscription_pubsub.SetIamPolicyRequest{ - Policy: pubsubPolicy, - }).Do() - - if err != nil { - return iam_pubsub_subscription_errwrap.Wrapf(iam_pubsub_subscription_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *PubsubSubscriptionIamUpdater) GetResourceId() string { - return u.subscription -} - -func (u *PubsubSubscriptionIamUpdater) GetMutexKey() string { - return iam_pubsub_subscription_fmt.Sprintf("iam-pubsub-subscription-%s", u.subscription) -} - -func (u *PubsubSubscriptionIamUpdater) DescribeResource() string { - return iam_pubsub_subscription_fmt.Sprintf("pubsub subscription %q", u.subscription) -} - -func resourceManagerToPubsubPolicy(in *iam_pubsub_subscription_cloudresourcemanager.Policy) (*iam_pubsub_subscription_pubsub.Policy, error) { - out := &iam_pubsub_subscription_pubsub.Policy{} - err := Convert(in, out) - if err != nil { - return nil, iam_pubsub_subscription_errwrap.Wrapf("Cannot convert a v1 policy to a pubsub policy: {{err}}", err) - } - return out, nil -} - -func pubsubToResourceManagerPolicy(in *iam_pubsub_subscription_pubsub.Policy) (*iam_pubsub_subscription_cloudresourcemanager.Policy, error) { - out := &iam_pubsub_subscription_cloudresourcemanager.Policy{} - err := Convert(in, out) - if err != nil { - return nil, iam_pubsub_subscription_errwrap.Wrapf("Cannot convert a pubsub policy to a v1 policy: {{err}}", err) - } - return out, nil -} - -var PubsubTopicIamSchema = map[string]*iam_pubsub_topic_schema.Schema{ - "project": { - Type: iam_pubsub_topic_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "topic": { - Type: iam_pubsub_topic_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type PubsubTopicIamUpdater struct { - project string - topic string - d TerraformResourceData - 
Config *Config -} - -func PubsubTopicIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_pubsub_topic_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("topic"); ok { - values["topic"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/topics/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("topic").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &PubsubTopicIamUpdater{ - project: values["project"], - topic: values["topic"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_pubsub_topic_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("topic", u.GetResourceId()); err != nil { - return nil, iam_pubsub_topic_fmt.Errorf("Error setting topic: %s", err) - } - - return u, nil -} - -func PubsubTopicIdParseFunc(d *iam_pubsub_topic_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/topics/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &PubsubTopicIamUpdater{ - project: values["project"], - topic: values["topic"], - d: d, - Config: config, - } - if err := d.Set("topic", u.GetResourceId()); err != nil { - return iam_pubsub_topic_fmt.Errorf("Error setting topic: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *PubsubTopicIamUpdater) GetResourceIamPolicy() (*iam_pubsub_topic_cloudresourcemanager.Policy, error) { - url, err := 
u.qualifyTopicUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj, pubsubTopicProjectNotReady) - if err != nil { - return nil, iam_pubsub_topic_errwrap.Wrapf(iam_pubsub_topic_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_pubsub_topic_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_pubsub_topic_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *PubsubTopicIamUpdater) SetResourceIamPolicy(policy *iam_pubsub_topic_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyTopicUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_pubsub_topic_schema.TimeoutCreate), pubsubTopicProjectNotReady) - if err != nil { - return iam_pubsub_topic_errwrap.Wrapf(iam_pubsub_topic_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *PubsubTopicIamUpdater) qualifyTopicUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_pubsub_topic_fmt.Sprintf("{{PubsubBasePath}}%s:%s", iam_pubsub_topic_fmt.Sprintf("projects/%s/topics/%s", u.project, u.topic), methodIdentifier) - url, err := replaceVars(u.d, u.Config, 
urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *PubsubTopicIamUpdater) GetResourceId() string { - return iam_pubsub_topic_fmt.Sprintf("projects/%s/topics/%s", u.project, u.topic) -} - -func (u *PubsubTopicIamUpdater) GetMutexKey() string { - return iam_pubsub_topic_fmt.Sprintf("iam-pubsub-topic-%s", u.GetResourceId()) -} - -func (u *PubsubTopicIamUpdater) DescribeResource() string { - return iam_pubsub_topic_fmt.Sprintf("pubsub topic %q", u.GetResourceId()) -} - -var SecretManagerSecretIamSchema = map[string]*iam_secret_manager_secret_schema.Schema{ - "project": { - Type: iam_secret_manager_secret_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "secret_id": { - Type: iam_secret_manager_secret_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type SecretManagerSecretIamUpdater struct { - project string - secretId string - d TerraformResourceData - Config *Config -} - -func SecretManagerSecretIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_secret_manager_secret_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("secret_id"); ok { - values["secret_id"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/secrets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("secret_id").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &SecretManagerSecretIamUpdater{ - project: values["project"], - secretId: values["secret_id"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_secret_manager_secret_fmt.Errorf("Error setting project: %s", 
err) - } - if err := d.Set("secret_id", u.GetResourceId()); err != nil { - return nil, iam_secret_manager_secret_fmt.Errorf("Error setting secret_id: %s", err) - } - - return u, nil -} - -func SecretManagerSecretIdParseFunc(d *iam_secret_manager_secret_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/secrets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &SecretManagerSecretIamUpdater{ - project: values["project"], - secretId: values["secret_id"], - d: d, - Config: config, - } - if err := d.Set("secret_id", u.GetResourceId()); err != nil { - return iam_secret_manager_secret_fmt.Errorf("Error setting secret_id: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *SecretManagerSecretIamUpdater) GetResourceIamPolicy() (*iam_secret_manager_secret_cloudresourcemanager.Policy, error) { - url, err := u.qualifySecretUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, iam_secret_manager_secret_errwrap.Wrapf(iam_secret_manager_secret_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_secret_manager_secret_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_secret_manager_secret_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u 
*SecretManagerSecretIamUpdater) SetResourceIamPolicy(policy *iam_secret_manager_secret_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifySecretUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_secret_manager_secret_schema.TimeoutCreate)) - if err != nil { - return iam_secret_manager_secret_errwrap.Wrapf(iam_secret_manager_secret_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *SecretManagerSecretIamUpdater) qualifySecretUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_secret_manager_secret_fmt.Sprintf("{{SecretManagerBasePath}}%s:%s", iam_secret_manager_secret_fmt.Sprintf("projects/%s/secrets/%s", u.project, u.secretId), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *SecretManagerSecretIamUpdater) GetResourceId() string { - return iam_secret_manager_secret_fmt.Sprintf("projects/%s/secrets/%s", u.project, u.secretId) -} - -func (u *SecretManagerSecretIamUpdater) GetMutexKey() string { - return iam_secret_manager_secret_fmt.Sprintf("iam-secretmanager-secret-%s", u.GetResourceId()) -} - -func (u *SecretManagerSecretIamUpdater) DescribeResource() string { - return iam_secret_manager_secret_fmt.Sprintf("secretmanager secret %q", u.GetResourceId()) -} - -var IamServiceAccountSchema = map[string]*iam_service_account_schema.Schema{ - "service_account_id": { - Type: iam_service_account_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: 
validateRegexp(ServiceAccountLinkRegex), - }, -} - -type ServiceAccountIamUpdater struct { - serviceAccountId string - d TerraformResourceData - Config *Config -} - -func NewServiceAccountIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - return &ServiceAccountIamUpdater{ - serviceAccountId: d.Get("service_account_id").(string), - d: d, - Config: config, - }, nil -} - -func ServiceAccountIdParseFunc(d *iam_service_account_schema.ResourceData, _ *Config) error { - if err := d.Set("service_account_id", d.Id()); err != nil { - return iam_service_account_fmt.Errorf("Error setting service_account_id: %s", err) - } - return nil -} - -func (u *ServiceAccountIamUpdater) GetResourceIamPolicy() (*iam_service_account_cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewIamClient(userAgent).Projects.ServiceAccounts.GetIamPolicy(u.serviceAccountId).OptionsRequestedPolicyVersion(iamPolicyVersion).Do() - - if err != nil { - return nil, iam_service_account_errwrap.Wrapf(iam_service_account_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := iamToResourceManagerPolicy(p) - if err != nil { - return nil, err - } - - return cloudResourcePolicy, nil -} - -func (u *ServiceAccountIamUpdater) SetResourceIamPolicy(policy *iam_service_account_cloudresourcemanager.Policy) error { - iamPolicy, err := resourceManagerToIamPolicy(policy) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = u.Config.NewIamClient(userAgent).Projects.ServiceAccounts.SetIamPolicy(u.GetResourceId(), &iam_service_account_iam.SetIamPolicyRequest{ - Policy: iamPolicy, - }).Do() - - if err != nil { - return iam_service_account_errwrap.Wrapf(iam_service_account_fmt.Sprintf("Error setting IAM policy for 
%s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ServiceAccountIamUpdater) GetResourceId() string { - return u.serviceAccountId -} - -func (u *ServiceAccountIamUpdater) GetMutexKey() string { - return iam_service_account_fmt.Sprintf("iam-service-account-%s", u.serviceAccountId) -} - -func (u *ServiceAccountIamUpdater) DescribeResource() string { - return iam_service_account_fmt.Sprintf("service account '%s'", u.serviceAccountId) -} - -func resourceManagerToIamPolicy(p *iam_service_account_cloudresourcemanager.Policy) (*iam_service_account_iam.Policy, error) { - out := &iam_service_account_iam.Policy{} - err := Convert(p, out) - if err != nil { - return nil, iam_service_account_errwrap.Wrapf("Cannot convert a v1 policy to a iam policy: {{err}}", err) - } - return out, nil -} - -func iamToResourceManagerPolicy(p *iam_service_account_iam.Policy) (*iam_service_account_cloudresourcemanager.Policy, error) { - out := &iam_service_account_cloudresourcemanager.Policy{} - err := Convert(p, out) - if err != nil { - return nil, iam_service_account_errwrap.Wrapf("Cannot convert a iam policy to a v1 policy: {{err}}", err) - } - return out, nil -} - -var SourceRepoRepositoryIamSchema = map[string]*iam_sourcerepo_repository_schema.Schema{ - "project": { - Type: iam_sourcerepo_repository_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - }, - "repository": { - Type: iam_sourcerepo_repository_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: SourceRepoRepositoryDiffSuppress, - }, -} - -func SourceRepoRepositoryDiffSuppress(_, old, new string, _ *iam_sourcerepo_repository_schema.ResourceData) bool { - oldParts := iam_sourcerepo_repository_regexp.MustCompile("projects/[^/]+/repos/").Split(old, -1) - if len(oldParts) == 2 { - return oldParts[1] == new - } - return new == old -} - -type SourceRepoRepositoryIamUpdater struct { - project string - repository string - d TerraformResourceData - Config *Config -} - 
-func SourceRepoRepositoryIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - if err := d.Set("project", project); err != nil { - return nil, iam_sourcerepo_repository_fmt.Errorf("Error setting project: %s", err) - } - } - values["project"] = project - if v, ok := d.GetOk("repository"); ok { - values["repository"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/repos/(?P.+)", "(?P.+)"}, d, config, d.Get("repository").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &SourceRepoRepositoryIamUpdater{ - project: values["project"], - repository: values["repository"], - d: d, - Config: config, - } - - if err := d.Set("project", u.project); err != nil { - return nil, iam_sourcerepo_repository_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("repository", u.GetResourceId()); err != nil { - return nil, iam_sourcerepo_repository_fmt.Errorf("Error setting repository: %s", err) - } - - return u, nil -} - -func SourceRepoRepositoryIdParseFunc(d *iam_sourcerepo_repository_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - project, _ := getProject(d, config) - if project != "" { - values["project"] = project - } - - m, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/repos/(?P.+)", "(?P.+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &SourceRepoRepositoryIamUpdater{ - project: values["project"], - repository: values["repository"], - d: d, - Config: config, - } - if err := d.Set("repository", u.GetResourceId()); err != nil { - return iam_sourcerepo_repository_fmt.Errorf("Error setting repository: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *SourceRepoRepositoryIamUpdater) GetResourceIamPolicy() 
(*iam_sourcerepo_repository_cloudresourcemanager.Policy, error) { - url, err := u.qualifyRepositoryUrl("getIamPolicy") - if err != nil { - return nil, err - } - - project, err := getProject(u.d, u.Config) - if err != nil { - return nil, err - } - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "GET", project, url, userAgent, obj) - if err != nil { - return nil, iam_sourcerepo_repository_errwrap.Wrapf(iam_sourcerepo_repository_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_sourcerepo_repository_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_sourcerepo_repository_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *SourceRepoRepositoryIamUpdater) SetResourceIamPolicy(policy *iam_sourcerepo_repository_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyRepositoryUrl("setIamPolicy") - if err != nil { - return err - } - project, err := getProject(u.d, u.Config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", project, url, userAgent, obj, u.d.Timeout(iam_sourcerepo_repository_schema.TimeoutCreate)) - if err != nil { - return iam_sourcerepo_repository_errwrap.Wrapf(iam_sourcerepo_repository_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *SourceRepoRepositoryIamUpdater) qualifyRepositoryUrl(methodIdentifier string) (string, error) { - urlTemplate := 
iam_sourcerepo_repository_fmt.Sprintf("{{SourceRepoBasePath}}%s:%s", iam_sourcerepo_repository_fmt.Sprintf("projects/%s/repos/%s", u.project, u.repository), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *SourceRepoRepositoryIamUpdater) GetResourceId() string { - return iam_sourcerepo_repository_fmt.Sprintf("projects/%s/repos/%s", u.project, u.repository) -} - -func (u *SourceRepoRepositoryIamUpdater) GetMutexKey() string { - return iam_sourcerepo_repository_fmt.Sprintf("iam-sourcerepo-repository-%s", u.GetResourceId()) -} - -func (u *SourceRepoRepositoryIamUpdater) DescribeResource() string { - return iam_sourcerepo_repository_fmt.Sprintf("sourcerepo repository %q", u.GetResourceId()) -} - -var IamSpannerDatabaseSchema = map[string]*iam_spanner_database_schema.Schema{ - "instance": { - Type: iam_spanner_database_schema.TypeString, - Required: true, - ForceNew: true, - }, - "database": { - Type: iam_spanner_database_schema.TypeString, - Required: true, - ForceNew: true, - }, - - "project": { - Type: iam_spanner_database_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, -} - -type SpannerDatabaseIamUpdater struct { - project string - instance string - database string - d TerraformResourceData - Config *Config -} - -func NewSpannerDatabaseIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - return &SpannerDatabaseIamUpdater{ - project: project, - instance: d.Get("instance").(string), - database: d.Get("database").(string), - d: d, - Config: config, - }, nil -} - -func SpannerDatabaseIdParseFunc(d *iam_spanner_database_schema.ResourceData, config *Config) error { - return parseImportId([]string{"(?P[^/]+)/(?P[^/]+)/(?P[^/]+)"}, d, config) -} - -func (u *SpannerDatabaseIamUpdater) GetResourceIamPolicy() 
(*iam_spanner_database_cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewSpannerClient(userAgent).Projects.Instances.Databases.GetIamPolicy(spannerDatabaseId{ - Project: u.project, - Database: u.database, - Instance: u.instance, - }.databaseUri(), &iam_spanner_database_spanner.GetIamPolicyRequest{}).Do() - - if err != nil { - return nil, iam_spanner_database_errwrap.Wrapf(iam_spanner_database_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := spannerToResourceManagerPolicy(p) - - if err != nil { - return nil, iam_spanner_database_errwrap.Wrapf(iam_spanner_database_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return cloudResourcePolicy, nil -} - -func (u *SpannerDatabaseIamUpdater) SetResourceIamPolicy(policy *iam_spanner_database_cloudresourcemanager.Policy) error { - spannerPolicy, err := resourceManagerToSpannerPolicy(policy) - - if err != nil { - return iam_spanner_database_errwrap.Wrapf(iam_spanner_database_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = u.Config.NewSpannerClient(userAgent).Projects.Instances.Databases.SetIamPolicy(spannerDatabaseId{ - Project: u.project, - Database: u.database, - Instance: u.instance, - }.databaseUri(), &iam_spanner_database_spanner.SetIamPolicyRequest{ - Policy: spannerPolicy, - }).Do() - - if err != nil { - return iam_spanner_database_errwrap.Wrapf(iam_spanner_database_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *SpannerDatabaseIamUpdater) GetResourceId() string { - return spannerDatabaseId{ - Project: u.project, - Instance: u.instance, - Database: u.database, - 
}.terraformId() -} - -func (u *SpannerDatabaseIamUpdater) GetMutexKey() string { - return iam_spanner_database_fmt.Sprintf("iam-spanner-database-%s-%s-%s", u.project, u.instance, u.database) -} - -func (u *SpannerDatabaseIamUpdater) DescribeResource() string { - return iam_spanner_database_fmt.Sprintf("Spanner Database: %s/%s/%s", u.project, u.instance, u.database) -} - -func resourceManagerToSpannerPolicy(p *iam_spanner_database_cloudresourcemanager.Policy) (*iam_spanner_database_spanner.Policy, error) { - out := &iam_spanner_database_spanner.Policy{} - err := Convert(p, out) - if err != nil { - return nil, iam_spanner_database_errwrap.Wrapf("Cannot convert a resourcemanager policy to a spanner policy: {{err}}", err) - } - return out, nil -} - -func spannerToResourceManagerPolicy(p *iam_spanner_database_spanner.Policy) (*iam_spanner_database_cloudresourcemanager.Policy, error) { - out := &iam_spanner_database_cloudresourcemanager.Policy{} - err := Convert(p, out) - if err != nil { - return nil, iam_spanner_database_errwrap.Wrapf("Cannot convert a spanner policy to a resourcemanager policy: {{err}}", err) - } - return out, nil -} - -type spannerDatabaseId struct { - Project string - Instance string - Database string -} - -func (s spannerDatabaseId) terraformId() string { - return iam_spanner_database_fmt.Sprintf("%s/%s/%s", s.Project, s.Instance, s.Database) -} - -func (s spannerDatabaseId) parentProjectUri() string { - return iam_spanner_database_fmt.Sprintf("projects/%s", s.Project) -} - -func (s spannerDatabaseId) parentInstanceUri() string { - return iam_spanner_database_fmt.Sprintf("%s/instances/%s", s.parentProjectUri(), s.Instance) -} - -func (s spannerDatabaseId) databaseUri() string { - return iam_spanner_database_fmt.Sprintf("%s/databases/%s", s.parentInstanceUri(), s.Database) -} - -var IamSpannerInstanceSchema = map[string]*iam_spanner_instance_schema.Schema{ - "instance": { - Type: iam_spanner_instance_schema.TypeString, - Required: true, - ForceNew: 
true, - }, - - "project": { - Type: iam_spanner_instance_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, -} - -type SpannerInstanceIamUpdater struct { - project string - instance string - d TerraformResourceData - Config *Config -} - -func NewSpannerInstanceIamUpdater(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - return &SpannerInstanceIamUpdater{ - project: project, - instance: d.Get("instance").(string), - d: d, - Config: config, - }, nil -} - -func SpannerInstanceIdParseFunc(d *iam_spanner_instance_schema.ResourceData, config *Config) error { - id, err := extractSpannerInstanceId(d.Id()) - if err != nil { - return err - } - if err := d.Set("instance", id.Instance); err != nil { - return iam_spanner_instance_fmt.Errorf("Error setting instance: %s", err) - } - if err := d.Set("project", id.Project); err != nil { - return iam_spanner_instance_fmt.Errorf("Error setting project: %s", err) - } - - d.SetId(id.terraformId()) - return nil -} - -func (u *SpannerInstanceIamUpdater) GetResourceIamPolicy() (*iam_spanner_instance_cloudresourcemanager.Policy, error) { - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - p, err := u.Config.NewSpannerClient(userAgent).Projects.Instances.GetIamPolicy(spannerInstanceId{ - Project: u.project, - Instance: u.instance, - }.instanceUri(), &iam_spanner_instance_spannerspanner.GetIamPolicyRequest{}).Do() - - if err != nil { - return nil, iam_spanner_instance_errwrap.Wrapf(iam_spanner_instance_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - cloudResourcePolicy, err := spannerToResourceManagerPolicy(p) - - if err != nil { - return nil, iam_spanner_instance_errwrap.Wrapf(iam_spanner_instance_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return 
cloudResourcePolicy, nil -} - -func (u *SpannerInstanceIamUpdater) SetResourceIamPolicy(policy *iam_spanner_instance_cloudresourcemanager.Policy) error { - spannerPolicy, err := resourceManagerToSpannerPolicy(policy) - - if err != nil { - return iam_spanner_instance_errwrap.Wrapf(iam_spanner_instance_fmt.Sprintf("Invalid IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = u.Config.NewSpannerClient(userAgent).Projects.Instances.SetIamPolicy(spannerInstanceId{ - Project: u.project, - Instance: u.instance, - }.instanceUri(), &iam_spanner_instance_spannerspanner.SetIamPolicyRequest{ - Policy: spannerPolicy, - }).Do() - - if err != nil { - return iam_spanner_instance_errwrap.Wrapf(iam_spanner_instance_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *SpannerInstanceIamUpdater) GetResourceId() string { - return spannerInstanceId{ - Project: u.project, - Instance: u.instance, - }.terraformId() -} - -func (u *SpannerInstanceIamUpdater) GetMutexKey() string { - return iam_spanner_instance_fmt.Sprintf("iam-spanner-instance-%s-%s", u.project, u.instance) -} - -func (u *SpannerInstanceIamUpdater) DescribeResource() string { - return iam_spanner_instance_fmt.Sprintf("Spanner Instance: %s/%s", u.project, u.instance) -} - -type spannerInstanceId struct { - Project string - Instance string -} - -func (s spannerInstanceId) terraformId() string { - return iam_spanner_instance_fmt.Sprintf("%s/%s", s.Project, s.Instance) -} - -func (s spannerInstanceId) parentProjectUri() string { - return iam_spanner_instance_fmt.Sprintf("projects/%s", s.Project) -} - -func (s spannerInstanceId) instanceUri() string { - return iam_spanner_instance_fmt.Sprintf("%s/instances/%s", s.parentProjectUri(), s.Instance) -} - -func (s spannerInstanceId) instanceConfigUri(c string) string { - return 
iam_spanner_instance_fmt.Sprintf("%s/instanceConfigs/%s", s.parentProjectUri(), c) -} - -func extractSpannerInstanceId(id string) (*spannerInstanceId, error) { - if !iam_spanner_instance_regexp.MustCompile("^" + ProjectRegex + "/[a-z0-9-]+$").Match([]byte(id)) { - return nil, iam_spanner_instance_fmt.Errorf("Invalid spanner id format, expecting {projectId}/{instanceId}") - } - parts := iam_spanner_instance_strings.Split(id, "/") - return &spannerInstanceId{ - Project: parts[0], - Instance: parts[1], - }, nil -} - -var StorageBucketIamSchema = map[string]*iam_storage_bucket_schema.Schema{ - "bucket": { - Type: iam_storage_bucket_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: StorageBucketDiffSuppress, - }, -} - -func StorageBucketDiffSuppress(_, old, new string, _ *iam_storage_bucket_schema.ResourceData) bool { - return compareResourceNames("", old, new, nil) -} - -type StorageBucketIamUpdater struct { - bucket string - d TerraformResourceData - Config *Config -} - -func StorageBucketIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - if v, ok := d.GetOk("bucket"); ok { - values["bucket"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"b/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("bucket").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &StorageBucketIamUpdater{ - bucket: values["bucket"], - d: d, - Config: config, - } - - if err := d.Set("bucket", u.GetResourceId()); err != nil { - return nil, iam_storage_bucket_fmt.Errorf("Error setting bucket: %s", err) - } - - return u, nil -} - -func StorageBucketIdParseFunc(d *iam_storage_bucket_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - m, err := getImportIdQualifiers([]string{"b/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u 
:= &StorageBucketIamUpdater{ - bucket: values["bucket"], - d: d, - Config: config, - } - if err := d.Set("bucket", u.GetResourceId()); err != nil { - return iam_storage_bucket_fmt.Errorf("Error setting bucket: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *StorageBucketIamUpdater) GetResourceIamPolicy() (*iam_storage_bucket_cloudresourcemanager.Policy, error) { - url, err := u.qualifyBucketUrl("iam") - if err != nil { - return nil, err - } - - var obj map[string]interface{} - url, err = addQueryParams(url, map[string]string{"optionsRequestedPolicyVersion": iam_storage_bucket_fmt.Sprintf("%d", iamPolicyVersion)}) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "GET", "", url, userAgent, obj) - if err != nil { - return nil, iam_storage_bucket_errwrap.Wrapf(iam_storage_bucket_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_storage_bucket_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_storage_bucket_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *StorageBucketIamUpdater) SetResourceIamPolicy(policy *iam_storage_bucket_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := json - - url, err := u.qualifyBucketUrl("iam") - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "PUT", "", url, userAgent, obj, u.d.Timeout(iam_storage_bucket_schema.TimeoutCreate)) - if err != nil { - return iam_storage_bucket_errwrap.Wrapf(iam_storage_bucket_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return 
nil -} - -func (u *StorageBucketIamUpdater) qualifyBucketUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_storage_bucket_fmt.Sprintf("{{StorageBasePath}}%s/%s", iam_storage_bucket_fmt.Sprintf("b/%s", u.bucket), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *StorageBucketIamUpdater) GetResourceId() string { - return iam_storage_bucket_fmt.Sprintf("b/%s", u.bucket) -} - -func (u *StorageBucketIamUpdater) GetMutexKey() string { - return iam_storage_bucket_fmt.Sprintf("iam-storage-bucket-%s", u.GetResourceId()) -} - -func (u *StorageBucketIamUpdater) DescribeResource() string { - return iam_storage_bucket_fmt.Sprintf("storage bucket %q", u.GetResourceId()) -} - -var TagsTagKeyIamSchema = map[string]*iam_tags_tag_key_schema.Schema{ - "tag_key": { - Type: iam_tags_tag_key_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type TagsTagKeyIamUpdater struct { - tagKey string - d TerraformResourceData - Config *Config -} - -func TagsTagKeyIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - if v, ok := d.GetOk("tag_key"); ok { - values["tag_key"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"tagKeys/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("tag_key").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &TagsTagKeyIamUpdater{ - tagKey: values["tag_key"], - d: d, - Config: config, - } - - if err := d.Set("tag_key", u.GetResourceId()); err != nil { - return nil, iam_tags_tag_key_fmt.Errorf("Error setting tag_key: %s", err) - } - - return u, nil -} - -func TagsTagKeyIdParseFunc(d *iam_tags_tag_key_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - m, err := getImportIdQualifiers([]string{"tagKeys/(?P[^/]+)", "(?P[^/]+)"}, 
d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &TagsTagKeyIamUpdater{ - tagKey: values["tag_key"], - d: d, - Config: config, - } - if err := d.Set("tag_key", u.GetResourceId()); err != nil { - return iam_tags_tag_key_fmt.Errorf("Error setting tag_key: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *TagsTagKeyIamUpdater) GetResourceIamPolicy() (*iam_tags_tag_key_cloudresourcemanager.Policy, error) { - url, err := u.qualifyTagKeyUrl("getIamPolicy") - if err != nil { - return nil, err - } - - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "POST", "", url, userAgent, obj) - if err != nil { - return nil, iam_tags_tag_key_errwrap.Wrapf(iam_tags_tag_key_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_tags_tag_key_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_tags_tag_key_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *TagsTagKeyIamUpdater) SetResourceIamPolicy(policy *iam_tags_tag_key_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyTagKeyUrl("setIamPolicy") - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", "", url, userAgent, obj, u.d.Timeout(iam_tags_tag_key_schema.TimeoutCreate)) - if err != nil { - return iam_tags_tag_key_errwrap.Wrapf(iam_tags_tag_key_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u 
*TagsTagKeyIamUpdater) qualifyTagKeyUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_tags_tag_key_fmt.Sprintf("{{TagsBasePath}}%s:%s", iam_tags_tag_key_fmt.Sprintf("tagKeys/%s", u.tagKey), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *TagsTagKeyIamUpdater) GetResourceId() string { - return iam_tags_tag_key_fmt.Sprintf("tagKeys/%s", u.tagKey) -} - -func (u *TagsTagKeyIamUpdater) GetMutexKey() string { - return iam_tags_tag_key_fmt.Sprintf("iam-tags-tagkey-%s", u.GetResourceId()) -} - -func (u *TagsTagKeyIamUpdater) DescribeResource() string { - return iam_tags_tag_key_fmt.Sprintf("tags tagkey %q", u.GetResourceId()) -} - -var TagsTagValueIamSchema = map[string]*iam_tags_tag_value_schema.Schema{ - "tag_value": { - Type: iam_tags_tag_value_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, -} - -type TagsTagValueIamUpdater struct { - tagValue string - d TerraformResourceData - Config *Config -} - -func TagsTagValueIamUpdaterProducer(d TerraformResourceData, config *Config) (ResourceIamUpdater, error) { - values := make(map[string]string) - - if v, ok := d.GetOk("tag_value"); ok { - values["tag_value"] = v.(string) - } - - m, err := getImportIdQualifiers([]string{"tagValues/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Get("tag_value").(string)) - if err != nil { - return nil, err - } - - for k, v := range m { - values[k] = v - } - - u := &TagsTagValueIamUpdater{ - tagValue: values["tag_value"], - d: d, - Config: config, - } - - if err := d.Set("tag_value", u.GetResourceId()); err != nil { - return nil, iam_tags_tag_value_fmt.Errorf("Error setting tag_value: %s", err) - } - - return u, nil -} - -func TagsTagValueIdParseFunc(d *iam_tags_tag_value_schema.ResourceData, config *Config) error { - values := make(map[string]string) - - m, err := getImportIdQualifiers([]string{"tagValues/(?P[^/]+)", 
"(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return err - } - - for k, v := range m { - values[k] = v - } - - u := &TagsTagValueIamUpdater{ - tagValue: values["tag_value"], - d: d, - Config: config, - } - if err := d.Set("tag_value", u.GetResourceId()); err != nil { - return iam_tags_tag_value_fmt.Errorf("Error setting tag_value: %s", err) - } - d.SetId(u.GetResourceId()) - return nil -} - -func (u *TagsTagValueIamUpdater) GetResourceIamPolicy() (*iam_tags_tag_value_cloudresourcemanager.Policy, error) { - url, err := u.qualifyTagValueUrl("getIamPolicy") - if err != nil { - return nil, err - } - - var obj map[string]interface{} - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return nil, err - } - - policy, err := sendRequest(u.Config, "POST", "", url, userAgent, obj) - if err != nil { - return nil, iam_tags_tag_value_errwrap.Wrapf(iam_tags_tag_value_fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) - } - - out := &iam_tags_tag_value_cloudresourcemanager.Policy{} - err = Convert(policy, out) - if err != nil { - return nil, iam_tags_tag_value_errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) - } - - return out, nil -} - -func (u *TagsTagValueIamUpdater) SetResourceIamPolicy(policy *iam_tags_tag_value_cloudresourcemanager.Policy) error { - json, err := ConvertToMap(policy) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["policy"] = json - - url, err := u.qualifyTagValueUrl("setIamPolicy") - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(u.d, u.Config.userAgent) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(u.Config, "POST", "", url, userAgent, obj, u.d.Timeout(iam_tags_tag_value_schema.TimeoutCreate)) - if err != nil { - return iam_tags_tag_value_errwrap.Wrapf(iam_tags_tag_value_fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) 
- } - - return nil -} - -func (u *TagsTagValueIamUpdater) qualifyTagValueUrl(methodIdentifier string) (string, error) { - urlTemplate := iam_tags_tag_value_fmt.Sprintf("{{TagsBasePath}}%s:%s", iam_tags_tag_value_fmt.Sprintf("tagValues/%s", u.tagValue), methodIdentifier) - url, err := replaceVars(u.d, u.Config, urlTemplate) - if err != nil { - return "", err - } - return url, nil -} - -func (u *TagsTagValueIamUpdater) GetResourceId() string { - return iam_tags_tag_value_fmt.Sprintf("tagValues/%s", u.tagValue) -} - -func (u *TagsTagValueIamUpdater) GetMutexKey() string { - return iam_tags_tag_value_fmt.Sprintf("iam-tags-tagvalue-%s", u.GetResourceId()) -} - -func (u *TagsTagValueIamUpdater) DescribeResource() string { - return iam_tags_tag_value_fmt.Sprintf("tags tagvalue %q", u.GetResourceId()) -} - -const ( - resolveImageFamilyRegex = "[-_a-zA-Z0-9]*" - resolveImageImageRegex = "[-_a-zA-Z0-9]*" -) - -var ( - resolveImageProjectImage = image_regexp.MustCompile(image_fmt.Sprintf("projects/(%s)/global/images/(%s)$", ProjectRegex, resolveImageImageRegex)) - resolveImageProjectFamily = image_regexp.MustCompile(image_fmt.Sprintf("projects/(%s)/global/images/family/(%s)$", ProjectRegex, resolveImageFamilyRegex)) - resolveImageGlobalImage = image_regexp.MustCompile(image_fmt.Sprintf("^global/images/(%s)$", resolveImageImageRegex)) - resolveImageGlobalFamily = image_regexp.MustCompile(image_fmt.Sprintf("^global/images/family/(%s)$", resolveImageFamilyRegex)) - resolveImageFamilyFamily = image_regexp.MustCompile(image_fmt.Sprintf("^family/(%s)$", resolveImageFamilyRegex)) - resolveImageProjectImageShorthand = image_regexp.MustCompile(image_fmt.Sprintf("^(%s)/(%s)$", ProjectRegex, resolveImageImageRegex)) - resolveImageProjectFamilyShorthand = image_regexp.MustCompile(image_fmt.Sprintf("^(%s)/(%s)$", ProjectRegex, resolveImageFamilyRegex)) - resolveImageFamily = image_regexp.MustCompile(image_fmt.Sprintf("^(%s)$", resolveImageFamilyRegex)) - resolveImageImage = 
image_regexp.MustCompile(image_fmt.Sprintf("^(%s)$", resolveImageImageRegex)) - resolveImageLink = image_regexp.MustCompile(image_fmt.Sprintf("^https://www.googleapis.com/compute/[a-z0-9]+/projects/(%s)/global/images/(%s)", ProjectRegex, resolveImageImageRegex)) - - windowsSqlImage = image_regexp.MustCompile("^sql-(?:server-)?([0-9]{4})-([a-z]+)-windows-(?:server-)?([0-9]{4})(?:-r([0-9]+))?-dc-v[0-9]+$") - canonicalUbuntuLtsImage = image_regexp.MustCompile("^ubuntu-(minimal-)?([0-9]+)-") - cosLtsImage = image_regexp.MustCompile("^cos-([0-9]+)-") -) - -var imageMap = map[string]string{ - "centos": "centos-cloud", - "coreos": "coreos-cloud", - "debian": "debian-cloud", - "opensuse": "opensuse-cloud", - "rhel": "rhel-cloud", - "sles": "suse-cloud", - "ubuntu": "ubuntu-os-cloud", - "windows": "windows-cloud", - "windows-sql": "windows-sql-cloud", -} - -func resolveImageImageExists(c *Config, project, name, userAgent string) (bool, error) { - if _, err := c.NewComputeClient(userAgent).Images.Get(project, name).Do(); err == nil { - return true, nil - } else if gerr, ok := err.(*image_googleapi.Error); ok && gerr.Code == 404 { - return false, nil - } else { - return false, image_fmt.Errorf("Error checking if image %s exists: %s", name, err) - } -} - -func resolveImageFamilyExists(c *Config, project, name, userAgent string) (bool, error) { - if _, err := c.NewComputeClient(userAgent).Images.GetFromFamily(project, name).Do(); err == nil { - return true, nil - } else if gerr, ok := err.(*image_googleapi.Error); ok && gerr.Code == 404 { - return false, nil - } else { - return false, image_fmt.Errorf("Error checking if family %s exists: %s", name, err) - } -} - -func sanityTestRegexMatches(expected int, got []string, regexType, name string) error { - if len(got)-1 != expected { - return image_fmt.Errorf("Expected %d %s regex matches, got %d for %s", expected, regexType, len(got)-1, name) - } - return nil -} - -func resolveImage(c *Config, project, name, userAgent string) 
(string, error) { - var builtInProject string - for k, v := range imageMap { - if image_strings.Contains(name, k) { - builtInProject = v - break - } - } - switch { - case resolveImageLink.MatchString(name): - return name, nil - case resolveImageProjectImage.MatchString(name): - res := resolveImageProjectImage.FindStringSubmatch(name) - if err := sanityTestRegexMatches(2, res, "project image", name); err != nil { - return "", err - } - return image_fmt.Sprintf("projects/%s/global/images/%s", res[1], res[2]), nil - case resolveImageProjectFamily.MatchString(name): - res := resolveImageProjectFamily.FindStringSubmatch(name) - if err := sanityTestRegexMatches(2, res, "project family", name); err != nil { - return "", err - } - return image_fmt.Sprintf("projects/%s/global/images/family/%s", res[1], res[2]), nil - case resolveImageGlobalImage.MatchString(name): - res := resolveImageGlobalImage.FindStringSubmatch(name) - if err := sanityTestRegexMatches(1, res, "global image", name); err != nil { - return "", err - } - return image_fmt.Sprintf("global/images/%s", res[1]), nil - case resolveImageGlobalFamily.MatchString(name): - res := resolveImageGlobalFamily.FindStringSubmatch(name) - if err := sanityTestRegexMatches(1, res, "global family", name); err != nil { - return "", err - } - return image_fmt.Sprintf("global/images/family/%s", res[1]), nil - case resolveImageFamilyFamily.MatchString(name): - res := resolveImageFamilyFamily.FindStringSubmatch(name) - if err := sanityTestRegexMatches(1, res, "family family", name); err != nil { - return "", err - } - if ok, err := resolveImageFamilyExists(c, project, res[1], userAgent); err != nil { - return "", err - } else if ok { - return image_fmt.Sprintf("global/images/family/%s", res[1]), nil - } - if builtInProject != "" { - if ok, err := resolveImageFamilyExists(c, builtInProject, res[1], userAgent); err != nil { - return "", err - } else if ok { - return image_fmt.Sprintf("projects/%s/global/images/family/%s", 
builtInProject, res[1]), nil - } - } - case resolveImageProjectImageShorthand.MatchString(name): - res := resolveImageProjectImageShorthand.FindStringSubmatch(name) - if err := sanityTestRegexMatches(2, res, "project image shorthand", name); err != nil { - return "", err - } - if ok, err := resolveImageImageExists(c, res[1], res[2], userAgent); err != nil { - return "", err - } else if ok { - return image_fmt.Sprintf("projects/%s/global/images/%s", res[1], res[2]), nil - } - fallthrough - case resolveImageProjectFamilyShorthand.MatchString(name): - res := resolveImageProjectFamilyShorthand.FindStringSubmatch(name) - if err := sanityTestRegexMatches(2, res, "project family shorthand", name); err != nil { - return "", err - } - if ok, err := resolveImageFamilyExists(c, res[1], res[2], userAgent); err != nil { - return "", err - } else if ok { - return image_fmt.Sprintf("projects/%s/global/images/family/%s", res[1], res[2]), nil - } - case resolveImageImage.MatchString(name): - res := resolveImageImage.FindStringSubmatch(name) - if err := sanityTestRegexMatches(1, res, "image", name); err != nil { - return "", err - } - if ok, err := resolveImageImageExists(c, project, res[1], userAgent); err != nil { - return "", err - } else if ok { - return image_fmt.Sprintf("global/images/%s", res[1]), nil - } - if builtInProject != "" { - - if ok, err := resolveImageImageExists(c, builtInProject, res[1], userAgent); err != nil { - return "", err - } else if ok { - return image_fmt.Sprintf("projects/%s/global/images/%s", builtInProject, res[1]), nil - } - } - fallthrough - case resolveImageFamily.MatchString(name): - res := resolveImageFamily.FindStringSubmatch(name) - if err := sanityTestRegexMatches(1, res, "family", name); err != nil { - return "", err - } - if ok, err := resolveImageFamilyExists(c, c.Project, res[1], userAgent); err != nil { - return "", err - } else if ok { - return image_fmt.Sprintf("global/images/family/%s", res[1]), nil - } - if builtInProject != "" { - - 
if ok, err := resolveImageFamilyExists(c, builtInProject, res[1], userAgent); err != nil { - return "", err - } else if ok { - return image_fmt.Sprintf("projects/%s/global/images/family/%s", builtInProject, res[1]), nil - } - } - } - return "", image_fmt.Errorf("Could not find image or family %s", name) -} - -func resolveImageRefToRelativeURI(providerProject, name string) (string, error) { - switch { - case resolveImageLink.MatchString(name): - namePath, err := getRelativePath(name) - if err != nil { - return "", err - } - - return namePath, nil - case resolveImageProjectImage.MatchString(name): - return name, nil - case resolveImageProjectFamily.MatchString(name): - res := resolveImageProjectFamily.FindStringSubmatch(name) - if err := sanityTestRegexMatches(2, res, "project family", name); err != nil { - return "", err - } - return image_fmt.Sprintf("projects/%s/global/images/family/%s", res[1], res[2]), nil - case resolveImageGlobalImage.MatchString(name): - res := resolveImageGlobalImage.FindStringSubmatch(name) - if err := sanityTestRegexMatches(1, res, "global image", name); err != nil { - return "", err - } - return image_fmt.Sprintf("projects/%s/global/images/%s", providerProject, res[1]), nil - case resolveImageGlobalFamily.MatchString(name): - res := resolveImageGlobalFamily.FindStringSubmatch(name) - if err := sanityTestRegexMatches(1, res, "global family", name); err != nil { - return "", err - } - return image_fmt.Sprintf("projects/%s/global/images/family/%s", providerProject, res[1]), nil - } - return "", image_fmt.Errorf("Could not expand image or family %q into a relative URI", name) - -} - -func parseImportId(idRegexes []string, d TerraformResourceData, config *Config) error { - for _, idFormat := range idRegexes { - re, err := import_regexp.Compile(idFormat) - - if err != nil { - import_log.Printf("[DEBUG] Could not compile %s.", idFormat) - return import_fmt.Errorf("Import is not supported. 
Invalid regex formats.") - } - - if fieldValues := re.FindStringSubmatch(d.Id()); fieldValues != nil { - import_log.Printf("[DEBUG] matching ID %s to regex %s.", d.Id(), idFormat) - - for i := 1; i < len(fieldValues); i++ { - fieldName := re.SubexpNames()[i] - fieldValue := fieldValues[i] - import_log.Printf("[DEBUG] importing %s = %s", fieldName, fieldValue) - - val, _ := d.GetOk(fieldName) - if _, ok := val.(string); val == nil || ok { - if err = d.Set(fieldName, fieldValue); err != nil { - return err - } - } else if _, ok := val.(int); ok { - if intVal, atoiErr := import_strconv.Atoi(fieldValue); atoiErr == nil { - - if err = d.Set(fieldName, intVal); err != nil { - return err - } - } else { - return import_fmt.Errorf("%s appears to be an integer, but %v cannot be parsed as an int", fieldName, fieldValue) - } - } else { - return import_fmt.Errorf( - "cannot handle %s, which currently has value %v, and should be set to %#v, during import", fieldName, val, fieldValue) - } - } - - err := setDefaultValues(idRegexes[0], d, config) - if err != nil { - return err - } - - return nil - } - } - return import_fmt.Errorf("Import id %q doesn't match any of the accepted formats: %v", d.Id(), idRegexes) -} - -func setDefaultValues(idRegex string, d TerraformResourceData, config *Config) error { - if _, ok := d.GetOk("project"); !ok && import_strings.Contains(idRegex, "?P") { - project, err := getProject(d, config) - if err != nil { - return err - } - if err := d.Set("project", project); err != nil { - return import_fmt.Errorf("Error setting project: %s", err) - } - } - if _, ok := d.GetOk("region"); !ok && import_strings.Contains(idRegex, "?P") { - region, err := getRegion(d, config) - if err != nil { - return err - } - if err := d.Set("region", region); err != nil { - return import_fmt.Errorf("Error setting region: %s", err) - } - } - if _, ok := d.GetOk("zone"); !ok && import_strings.Contains(idRegex, "?P") { - zone, err := getZone(d, config) - if err != nil { - return err - 
} - if err := d.Set("zone", zone); err != nil { - return import_fmt.Errorf("Error setting zone: %s", err) - } - } - return nil -} - -func getImportIdQualifiers(idRegexes []string, d TerraformResourceData, config *Config, id string) (map[string]string, error) { - for _, idFormat := range idRegexes { - re, err := import_regexp.Compile(idFormat) - - if err != nil { - import_log.Printf("[DEBUG] Could not compile %s.", idFormat) - return nil, import_fmt.Errorf("Import is not supported. Invalid regex formats.") - } - - if fieldValues := re.FindStringSubmatch(id); fieldValues != nil { - result := make(map[string]string) - import_log.Printf("[DEBUG] matching ID %s to regex %s.", id, idFormat) - - for i := 1; i < len(fieldValues); i++ { - fieldName := re.SubexpNames()[i] - fieldValue := fieldValues[i] - result[fieldName] = fieldValue - } - - defaults, err := getDefaultValues(idRegexes[0], d, config) - if err != nil { - return nil, err - } - - for k, v := range defaults { - if _, ok := result[k]; !ok { - if v == "" { - - return nil, import_fmt.Errorf("No value was found for %s during import", k) - } - - result[k] = v - } - } - - return result, nil - } - } - return nil, import_fmt.Errorf("Import id %q doesn't match any of the accepted formats: %v", id, idRegexes) -} - -func getDefaultValues(idRegex string, d TerraformResourceData, config *Config) (map[string]string, error) { - result := make(map[string]string) - if _, ok := d.GetOk("project"); !ok && import_strings.Contains(idRegex, "?P") { - project, _ := getProject(d, config) - result["project"] = project - } - if _, ok := d.GetOk("region"); !ok && import_strings.Contains(idRegex, "?P") { - region, _ := getRegion(d, config) - result["region"] = region - } - if _, ok := d.GetOk("zone"); !ok && import_strings.Contains(idRegex, "?P") { - zone, _ := getZone(d, config) - result["zone"] = zone - } - return result, nil -} - -type kmsKeyRingId struct { - Project string - Location string - Name string -} - -func (s *kmsKeyRingId) 
keyRingId() string { - return kms_utils_fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", s.Project, s.Location, s.Name) -} - -func (s *kmsKeyRingId) terraformId() string { - return kms_utils_fmt.Sprintf("%s/%s/%s", s.Project, s.Location, s.Name) -} - -func parseKmsKeyRingId(id string, config *Config) (*kmsKeyRingId, error) { - parts := kms_utils_strings.Split(id, "/") - - keyRingIdRegex := kms_utils_regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})$") - keyRingIdWithoutProjectRegex := kms_utils_regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})$") - keyRingRelativeLinkRegex := kms_utils_regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-]+)/keyRings/([a-zA-Z0-9_-]{1,63})$") - - if keyRingIdRegex.MatchString(id) { - return &kmsKeyRingId{ - Project: parts[0], - Location: parts[1], - Name: parts[2], - }, nil - } - - if keyRingIdWithoutProjectRegex.MatchString(id) { - if config.Project == "" { - return nil, kms_utils_fmt.Errorf("The default project for the provider must be set when using the `{location}/{keyRingName}` id format.") - } - - return &kmsKeyRingId{ - Project: config.Project, - Location: parts[0], - Name: parts[1], - }, nil - } - - if parts := keyRingRelativeLinkRegex.FindStringSubmatch(id); parts != nil { - return &kmsKeyRingId{ - Project: parts[1], - Location: parts[2], - Name: parts[3], - }, nil - } - return nil, kms_utils_fmt.Errorf("Invalid KeyRing id format, expecting `{projectId}/{locationId}/{keyRingName}` or `{locationId}/{keyRingName}.`") -} - -func kmsCryptoKeyRingsEquivalent(k, old, new string, d *kms_utils_schema.ResourceData) bool { - keyRingIdWithSpecifiersRegex := kms_utils_regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-])+/keyRings/([a-zA-Z0-9_-]{1,63})$") - normalizedKeyRingIdRegex := kms_utils_regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})$") - if matches := keyRingIdWithSpecifiersRegex.FindStringSubmatch(new); matches != nil 
{ - normMatches := normalizedKeyRingIdRegex.FindStringSubmatch(old) - return normMatches != nil && normMatches[1] == matches[1] && normMatches[2] == matches[2] && normMatches[3] == matches[3] - } - return false -} - -type kmsCryptoKeyId struct { - KeyRingId kmsKeyRingId - Name string -} - -func (s *kmsCryptoKeyId) cryptoKeyId() string { - return kms_utils_fmt.Sprintf("%s/cryptoKeys/%s", s.KeyRingId.keyRingId(), s.Name) -} - -func (s *kmsCryptoKeyId) terraformId() string { - return kms_utils_fmt.Sprintf("%s/%s", s.KeyRingId.terraformId(), s.Name) -} - -func validateKmsCryptoKeyRotationPeriod(value interface{}, _ string) (ws []string, errors []error) { - period := value.(string) - pattern := kms_utils_regexp.MustCompile(`^([0-9.]*\d)s$`) - match := pattern.FindStringSubmatch(period) - - if len(match) == 0 { - errors = append(errors, kms_utils_fmt.Errorf("Invalid rotation period format: %s", period)) - - return - } - - number := match[1] - seconds, err := kms_utils_strconv.ParseFloat(number, 64) - - if err != nil { - errors = append(errors, err) - } else { - if seconds < 86400.0 { - errors = append(errors, kms_utils_fmt.Errorf("Rotation period must be greater than one day")) - } - - parts := kms_utils_strings.Split(number, ".") - - if len(parts) > 1 && len(parts[1]) > 9 { - errors = append(errors, kms_utils_fmt.Errorf("Rotation period cannot have more than 9 fractional digits")) - } - } - - return -} - -func kmsCryptoKeyNextRotation(now kms_utils_time.Time, period string) (result string, err error) { - var duration kms_utils_time.Duration - - duration, err = kms_utils_time.ParseDuration(period) - - if err == nil { - result = now.UTC().Add(duration).Format(kms_utils_time.RFC3339Nano) - } - - return -} - -func parseKmsCryptoKeyId(id string, config *Config) (*kmsCryptoKeyId, error) { - parts := kms_utils_strings.Split(id, "/") - - cryptoKeyIdRegex := kms_utils_regexp.MustCompile("^(" + ProjectRegex + ")/([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})/([a-zA-Z0-9_-]{1,63})$") - 
cryptoKeyIdWithoutProjectRegex := kms_utils_regexp.MustCompile("^([a-z0-9-])+/([a-zA-Z0-9_-]{1,63})/([a-zA-Z0-9_-]{1,63})$") - cryptoKeyRelativeLinkRegex := kms_utils_regexp.MustCompile("^projects/(" + ProjectRegex + ")/locations/([a-z0-9-]+)/keyRings/([a-zA-Z0-9_-]{1,63})/cryptoKeys/([a-zA-Z0-9_-]{1,63})$") - - if cryptoKeyIdRegex.MatchString(id) { - return &kmsCryptoKeyId{ - KeyRingId: kmsKeyRingId{ - Project: parts[0], - Location: parts[1], - Name: parts[2], - }, - Name: parts[3], - }, nil - } - - if cryptoKeyIdWithoutProjectRegex.MatchString(id) { - if config.Project == "" { - return nil, kms_utils_fmt.Errorf("The default project for the provider must be set when using the `{location}/{keyRingName}/{cryptoKeyName}` id format.") - } - - return &kmsCryptoKeyId{ - KeyRingId: kmsKeyRingId{ - Project: config.Project, - Location: parts[0], - Name: parts[1], - }, - Name: parts[2], - }, nil - } - - if parts := cryptoKeyRelativeLinkRegex.FindStringSubmatch(id); parts != nil { - return &kmsCryptoKeyId{ - KeyRingId: kmsKeyRingId{ - Project: parts[1], - Location: parts[2], - Name: parts[3], - }, - Name: parts[4], - }, nil - } - return nil, kms_utils_fmt.Errorf("Invalid CryptoKey id format, expecting `{projectId}/{locationId}/{KeyringName}/{cryptoKeyName}` or `{locationId}/{keyRingName}/{cryptoKeyName}, got id: %s`", id) -} - -func clearCryptoKeyVersions(cryptoKeyId *kmsCryptoKeyId, userAgent string, config *Config) error { - versionsClient := config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.CryptoKeyVersions - - listCall := versionsClient.List(cryptoKeyId.cryptoKeyId()) - if config.UserProjectOverride { - listCall.Header().Set("X-Goog-User-Project", cryptoKeyId.KeyRingId.Project) - } - versionsResponse, err := listCall.Do() - - if err != nil { - return err - } - - for _, version := range versionsResponse.CryptoKeyVersions { - request := &kms_utils_cloudkms.DestroyCryptoKeyVersionRequest{} - destroyCall := versionsClient.Destroy(version.Name, request) - 
if config.UserProjectOverride { - destroyCall.Header().Set("X-Goog-User-Project", cryptoKeyId.KeyRingId.Project) - } - _, err = destroyCall.Do() - - if err != nil { - return err - } - } - - return nil -} - -func disableCryptoKeyRotation(cryptoKeyId *kmsCryptoKeyId, userAgent string, config *Config) error { - keyClient := config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys - patchCall := keyClient.Patch(cryptoKeyId.cryptoKeyId(), &kms_utils_cloudkms.CryptoKey{ - NullFields: []string{"rotationPeriod", "nextRotationTime"}, - }). - UpdateMask("rotationPeriod,nextRotationTime") - if config.UserProjectOverride { - patchCall.Header().Set("X-Goog-User-Project", cryptoKeyId.KeyRingId.Project) - } - _, err := patchCall.Do() - - return err -} - -var BillingAccountLoggingExclusionSchema = map[string]*logging_exclusion_billing_account_schema.Schema{ - "billing_account": { - Type: logging_exclusion_billing_account_schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type BillingAccountLoggingExclusionUpdater struct { - resourceType string - resourceId string - userAgent string - Config *Config -} - -func NewBillingAccountLoggingExclusionUpdater(d *logging_exclusion_billing_account_schema.ResourceData, config *Config) (ResourceLoggingExclusionUpdater, error) { - billingAccount := d.Get("billing_account").(string) - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - return &BillingAccountLoggingExclusionUpdater{ - resourceType: "billingAccounts", - resourceId: billingAccount, - userAgent: userAgent, - Config: config, - }, nil -} - -func billingAccountLoggingExclusionIdParseFunc(d *logging_exclusion_billing_account_schema.ResourceData, _ *Config) error { - loggingExclusionId, err := parseLoggingExclusionId(d.Id()) - if err != nil { - return err - } - - if "billingAccounts" != loggingExclusionId.resourceType { - return logging_exclusion_billing_account_fmt.Errorf("Error importing logging 
exclusion, invalid resourceType %#v", loggingExclusionId.resourceType) - } - - if err := d.Set("billing_account", loggingExclusionId.resourceId); err != nil { - return logging_exclusion_billing_account_fmt.Errorf("Error setting billing_account: %s", err) - } - return nil -} - -func (u *BillingAccountLoggingExclusionUpdater) CreateLoggingExclusion(parent string, exclusion *logging_exclusion_billing_account_logging.LogExclusion) error { - _, err := u.Config.NewLoggingClient(u.userAgent).BillingAccounts.Exclusions.Create(parent, exclusion).Do() - if err != nil { - return logging_exclusion_billing_account_errwrap.Wrapf(logging_exclusion_billing_account_fmt.Sprintf("Error creating logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BillingAccountLoggingExclusionUpdater) ReadLoggingExclusion(id string) (*logging_exclusion_billing_account_logging.LogExclusion, error) { - exclusion, err := u.Config.NewLoggingClient(u.userAgent).BillingAccounts.Exclusions.Get(id).Do() - - if err != nil { - return nil, logging_exclusion_billing_account_errwrap.Wrapf(logging_exclusion_billing_account_fmt.Sprintf("Error retrieving logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return exclusion, nil -} - -func (u *BillingAccountLoggingExclusionUpdater) UpdateLoggingExclusion(id string, exclusion *logging_exclusion_billing_account_logging.LogExclusion, updateMask string) error { - _, err := u.Config.NewLoggingClient(u.userAgent).BillingAccounts.Exclusions.Patch(id, exclusion).UpdateMask(updateMask).Do() - if err != nil { - return logging_exclusion_billing_account_errwrap.Wrapf(logging_exclusion_billing_account_fmt.Sprintf("Error updating logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *BillingAccountLoggingExclusionUpdater) DeleteLoggingExclusion(id string) error { - _, err := u.Config.NewLoggingClient(u.userAgent).BillingAccounts.Exclusions.Delete(id).Do() - if err != nil { - 
return logging_exclusion_billing_account_errwrap.Wrap(logging_exclusion_billing_account_fmt.Errorf("Error deleting logging exclusion for %s.", u.DescribeResource()), err) - } - - return nil -} - -func (u *BillingAccountLoggingExclusionUpdater) GetResourceType() string { - return u.resourceType -} - -func (u *BillingAccountLoggingExclusionUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *BillingAccountLoggingExclusionUpdater) DescribeResource() string { - return logging_exclusion_billing_account_fmt.Sprintf("%q %q", u.resourceType, u.resourceId) -} - -var FolderLoggingExclusionSchema = map[string]*logging_exclusion_folder_schema.Schema{ - "folder": { - Type: logging_exclusion_folder_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: optionalPrefixSuppress("folders/"), - }, -} - -type FolderLoggingExclusionUpdater struct { - resourceType string - resourceId string - userAgent string - Config *Config -} - -func NewFolderLoggingExclusionUpdater(d *logging_exclusion_folder_schema.ResourceData, config *Config) (ResourceLoggingExclusionUpdater, error) { - folder := parseFolderId(d.Get("folder")) - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - return &FolderLoggingExclusionUpdater{ - resourceType: "folders", - resourceId: folder, - userAgent: userAgent, - Config: config, - }, nil -} - -func folderLoggingExclusionIdParseFunc(d *logging_exclusion_folder_schema.ResourceData, _ *Config) error { - loggingExclusionId, err := parseLoggingExclusionId(d.Id()) - if err != nil { - return err - } - - if "folders" != loggingExclusionId.resourceType { - return logging_exclusion_folder_fmt.Errorf("Error importing logging exclusion, invalid resourceType %#v", loggingExclusionId.resourceType) - } - - if err := d.Set("folder", loggingExclusionId.resourceId); err != nil { - return logging_exclusion_folder_fmt.Errorf("Error setting folder: %s", err) - } - return nil -} - -func (u 
*FolderLoggingExclusionUpdater) CreateLoggingExclusion(parent string, exclusion *logging_exclusion_folder_logging.LogExclusion) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Folders.Exclusions.Create(parent, exclusion).Do() - if err != nil { - return logging_exclusion_folder_errwrap.Wrapf(logging_exclusion_folder_fmt.Sprintf("Error creating logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *FolderLoggingExclusionUpdater) ReadLoggingExclusion(id string) (*logging_exclusion_folder_logging.LogExclusion, error) { - exclusion, err := u.Config.NewLoggingClient(u.userAgent).Folders.Exclusions.Get(id).Do() - - if err != nil { - return nil, logging_exclusion_folder_errwrap.Wrapf(logging_exclusion_folder_fmt.Sprintf("Error retrieving logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return exclusion, nil -} - -func (u *FolderLoggingExclusionUpdater) UpdateLoggingExclusion(id string, exclusion *logging_exclusion_folder_logging.LogExclusion, updateMask string) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Folders.Exclusions.Patch(id, exclusion).UpdateMask(updateMask).Do() - if err != nil { - return logging_exclusion_folder_errwrap.Wrapf(logging_exclusion_folder_fmt.Sprintf("Error updating logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *FolderLoggingExclusionUpdater) DeleteLoggingExclusion(id string) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Folders.Exclusions.Delete(id).Do() - if err != nil { - return logging_exclusion_folder_errwrap.Wrap(logging_exclusion_folder_fmt.Errorf("Error deleting logging exclusion for %s.", u.DescribeResource()), err) - } - - return nil -} - -func (u *FolderLoggingExclusionUpdater) GetResourceType() string { - return u.resourceType -} - -func (u *FolderLoggingExclusionUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *FolderLoggingExclusionUpdater) DescribeResource() 
string { - return logging_exclusion_folder_fmt.Sprintf("%q %q", u.resourceType, u.resourceId) -} - -var OrganizationLoggingExclusionSchema = map[string]*logging_exclusion_organization_schema.Schema{ - "org_id": { - Type: logging_exclusion_organization_schema.TypeString, - Required: true, - ForceNew: true, - }, -} - -type OrganizationLoggingExclusionUpdater struct { - resourceType string - resourceId string - userAgent string - Config *Config -} - -func NewOrganizationLoggingExclusionUpdater(d *logging_exclusion_organization_schema.ResourceData, config *Config) (ResourceLoggingExclusionUpdater, error) { - organization := d.Get("org_id").(string) - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - return &OrganizationLoggingExclusionUpdater{ - resourceType: "organizations", - resourceId: organization, - userAgent: userAgent, - Config: config, - }, nil -} - -func organizationLoggingExclusionIdParseFunc(d *logging_exclusion_organization_schema.ResourceData, _ *Config) error { - loggingExclusionId, err := parseLoggingExclusionId(d.Id()) - if err != nil { - return err - } - - if "organizations" != loggingExclusionId.resourceType { - return logging_exclusion_organization_fmt.Errorf("Error importing logging exclusion, invalid resourceType %#v", loggingExclusionId.resourceType) - } - - if err := d.Set("org_id", loggingExclusionId.resourceId); err != nil { - return logging_exclusion_organization_fmt.Errorf("Error setting org_id: %s", err) - } - return nil -} - -func (u *OrganizationLoggingExclusionUpdater) CreateLoggingExclusion(parent string, exclusion *logging_exclusion_organization_logging.LogExclusion) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Organizations.Exclusions.Create(parent, exclusion).Do() - if err != nil { - return logging_exclusion_organization_errwrap.Wrapf(logging_exclusion_organization_fmt.Sprintf("Error creating logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - 
- return nil -} - -func (u *OrganizationLoggingExclusionUpdater) ReadLoggingExclusion(id string) (*logging_exclusion_organization_logging.LogExclusion, error) { - exclusion, err := u.Config.NewLoggingClient(u.userAgent).Organizations.Exclusions.Get(id).Do() - - if err != nil { - return nil, logging_exclusion_organization_errwrap.Wrapf(logging_exclusion_organization_fmt.Sprintf("Error retrieving logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return exclusion, nil -} - -func (u *OrganizationLoggingExclusionUpdater) UpdateLoggingExclusion(id string, exclusion *logging_exclusion_organization_logging.LogExclusion, updateMask string) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Organizations.Exclusions.Patch(id, exclusion).UpdateMask(updateMask).Do() - if err != nil { - return logging_exclusion_organization_errwrap.Wrapf(logging_exclusion_organization_fmt.Sprintf("Error updating logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *OrganizationLoggingExclusionUpdater) DeleteLoggingExclusion(id string) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Organizations.Exclusions.Delete(id).Do() - if err != nil { - return logging_exclusion_organization_errwrap.Wrap(logging_exclusion_organization_fmt.Errorf("Error deleting logging exclusion for %s.", u.DescribeResource()), err) - } - - return nil -} - -func (u *OrganizationLoggingExclusionUpdater) GetResourceType() string { - return u.resourceType -} - -func (u *OrganizationLoggingExclusionUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *OrganizationLoggingExclusionUpdater) DescribeResource() string { - return logging_exclusion_organization_fmt.Sprintf("%q %q", u.resourceType, u.resourceId) -} - -var ProjectLoggingExclusionSchema = map[string]*logging_exclusion_project_schema.Schema{ - "project": { - Type: logging_exclusion_project_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, -} - 
-type ProjectLoggingExclusionUpdater struct { - resourceType string - resourceId string - userAgent string - Config *Config -} - -func NewProjectLoggingExclusionUpdater(d *logging_exclusion_project_schema.ResourceData, config *Config) (ResourceLoggingExclusionUpdater, error) { - pid, err := getProject(d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - return &ProjectLoggingExclusionUpdater{ - resourceType: "projects", - resourceId: pid, - userAgent: userAgent, - Config: config, - }, nil -} - -func projectLoggingExclusionIdParseFunc(d *logging_exclusion_project_schema.ResourceData, config *Config) error { - loggingExclusionId, err := parseLoggingExclusionId(d.Id()) - if err != nil { - return err - } - - if "projects" != loggingExclusionId.resourceType { - return logging_exclusion_project_fmt.Errorf("Error importing logging exclusion, invalid resourceType %#v", loggingExclusionId.resourceType) - } - - if config.Project != loggingExclusionId.resourceId { - if err := d.Set("project", loggingExclusionId.resourceId); err != nil { - return logging_exclusion_project_fmt.Errorf("Error setting project: %s", err) - } - } - - return nil -} - -func (u *ProjectLoggingExclusionUpdater) CreateLoggingExclusion(parent string, exclusion *logging_exclusion_project_logging.LogExclusion) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Projects.Exclusions.Create(parent, exclusion).Do() - if err != nil { - return logging_exclusion_project_errwrap.Wrapf(logging_exclusion_project_fmt.Sprintf("Error creating logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ProjectLoggingExclusionUpdater) ReadLoggingExclusion(id string) (*logging_exclusion_project_logging.LogExclusion, error) { - exclusion, err := u.Config.NewLoggingClient(u.userAgent).Projects.Exclusions.Get(id).Do() - - if err != nil { - return nil, 
logging_exclusion_project_errwrap.Wrapf(logging_exclusion_project_fmt.Sprintf("Error retrieving logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return exclusion, nil -} - -func (u *ProjectLoggingExclusionUpdater) UpdateLoggingExclusion(id string, exclusion *logging_exclusion_project_logging.LogExclusion, updateMask string) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Projects.Exclusions.Patch(id, exclusion).UpdateMask(updateMask).Do() - if err != nil { - return logging_exclusion_project_errwrap.Wrapf(logging_exclusion_project_fmt.Sprintf("Error updating logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ProjectLoggingExclusionUpdater) DeleteLoggingExclusion(id string) error { - _, err := u.Config.NewLoggingClient(u.userAgent).Projects.Exclusions.Delete(id).Do() - if err != nil { - return logging_exclusion_project_errwrap.Wrapf(logging_exclusion_project_fmt.Sprintf("Error deleting logging exclusion for %s: {{err}}", u.DescribeResource()), err) - } - - return nil -} - -func (u *ProjectLoggingExclusionUpdater) GetResourceType() string { - return u.resourceType -} - -func (u *ProjectLoggingExclusionUpdater) GetResourceId() string { - return u.resourceId -} - -func (u *ProjectLoggingExclusionUpdater) DescribeResource() string { - return logging_exclusion_project_fmt.Sprintf("%q %q", u.resourceType, u.resourceId) -} - -var loggingSinkResourceTypes = []string{ - "billingAccounts", - "folders", - "organizations", - "projects", -} - -type LoggingSinkId struct { - resourceType string - resourceId string - name string -} - -var loggingSinkIdRegex = logging_utils_regexp.MustCompile("(.+)/(.+)/sinks/(.+)") - -func (l LoggingSinkId) canonicalId() string { - return logging_utils_fmt.Sprintf("%s/%s/sinks/%s", l.resourceType, l.resourceId, l.name) -} - -func (l LoggingSinkId) parent() string { - return logging_utils_fmt.Sprintf("%s/%s", l.resourceType, l.resourceId) -} - -func parseLoggingSinkId(id 
string) (*LoggingSinkId, error) { - parts := loggingSinkIdRegex.FindStringSubmatch(id) - if parts == nil { - return nil, logging_utils_fmt.Errorf("unable to parse logging sink id %#v", id) - } - - validLoggingSinkResourceType := false - for _, v := range loggingSinkResourceTypes { - if v == parts[1] { - validLoggingSinkResourceType = true - break - } - } - - if !validLoggingSinkResourceType { - return nil, logging_utils_fmt.Errorf("Logging resource type %s is not valid. Valid resource types: %#v", parts[1], - loggingSinkResourceTypes) - } - return &LoggingSinkId{ - resourceType: parts[1], - resourceId: parts[2], - name: parts[3], - }, nil -} - -type MemcacheOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *MemcacheOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, memcache_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := memcache_operation_fmt.Sprintf("https://memcache.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createMemcacheWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*MemcacheOperationWaiter, error) { - w := &MemcacheOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func memcacheOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout memcache_operation_time.Duration) error { - w, err := createMemcacheWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return memcache_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), 
response) -} - -func memcacheOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout memcache_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createMemcacheWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -const METADATA_FINGERPRINT_RETRIES = 10 - -func MetadataRetryWrapper(update func() error) error { - attempt := 0 - for attempt < METADATA_FINGERPRINT_RETRIES { - err := update() - if err == nil { - return nil - } - - if ok, _ := isFingerprintError(err); !ok { - - return err - } - - metadata_log.Printf("[DEBUG] Dismissed an error as retryable as a fingerprint mismatch: %s", err) - attempt++ - } - return metadata_fmt.Errorf("Failed to update metadata after %d retries", attempt) -} - -func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interface{}, serverMD *metadata_compute.Metadata) { - curMDMap := make(map[string]string) - - for _, kv := range serverMD.Items { - - _, okOld := oldMDMap[kv.Key] - _, okNew := newMDMap[kv.Key] - if okOld && !okNew { - continue - } else { - curMDMap[kv.Key] = *kv.Value - } - } - - for key, val := range newMDMap { - curMDMap[key] = val.(string) - } - - serverMD.Items = nil - for key, val := range curMDMap { - v := val - serverMD.Items = append(serverMD.Items, &metadata_compute.MetadataItems{ - Key: key, - Value: &v, - }) - } -} - -func BetaMetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interface{}, serverMD *metadata_compute.Metadata) { - curMDMap := make(map[string]string) - - for _, kv := range serverMD.Items { - - _, okOld := oldMDMap[kv.Key] - _, okNew := newMDMap[kv.Key] - if okOld && !okNew { - continue - } else { - curMDMap[kv.Key] = *kv.Value - } - } - - for key, val := range newMDMap { - curMDMap[key] = val.(string) - } - - serverMD.Items = nil - for key, val := range 
curMDMap { - v := val - serverMD.Items = append(serverMD.Items, &metadata_compute.MetadataItems{ - Key: key, - Value: &v, - }) - } -} - -func expandComputeMetadata(m map[string]interface{}) []*metadata_compute.MetadataItems { - metadata := make([]*metadata_compute.MetadataItems, len(m)) - var keys []string - for key := range m { - keys = append(keys, key) - } - metadata_sort.Strings(keys) - - for _, key := range keys { - v := m[key].(string) - metadata = append(metadata, &metadata_compute.MetadataItems{ - Key: key, - Value: &v, - }) - } - - return metadata -} - -func flattenMetadataBeta(metadata *metadata_compute.Metadata) map[string]string { - metadataMap := make(map[string]string) - for _, item := range metadata.Items { - metadataMap[item.Key] = *item.Value - } - return metadataMap -} - -func flattenMetadata(metadata *metadata_compute.Metadata) map[string]interface{} { - metadataMap := make(map[string]interface{}) - for _, item := range metadata.Items { - metadataMap[item.Key] = *item.Value - } - return metadataMap -} - -func resourceInstanceMetadata(d TerraformResourceData) (*metadata_compute.Metadata, error) { - m := &metadata_compute.Metadata{} - mdMap := d.Get("metadata").(map[string]interface{}) - if v, ok := d.GetOk("metadata_startup_script"); ok && v.(string) != "" { - if _, ok := mdMap["startup-script"]; ok { - return nil, metadata_errors.New("Cannot provide both metadata_startup_script and metadata.startup-script.") - } - mdMap["startup-script"] = v - } - if len(mdMap) > 0 { - m.Items = make([]*metadata_compute.MetadataItems, 0, len(mdMap)) - var keys []string - for k := range mdMap { - keys = append(keys, k) - } - metadata_sort.Strings(keys) - for _, k := range keys { - v := mdMap[k].(string) - m.Items = append(m.Items, &metadata_compute.MetadataItems{ - Key: k, - Value: &v, - }) - } - - m.Fingerprint = d.Get("metadata_fingerprint").(string) - } - - return m, nil -} - -type MLEngineOperationWaiter struct { - Config *Config - UserAgent string - Project 
string - CommonOperationWaiter -} - -func (w *MLEngineOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, ml_engine_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := ml_engine_operation_fmt.Sprintf("https://ml.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createMLEngineWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*MLEngineOperationWaiter, error) { - w := &MLEngineOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func mLEngineOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout ml_engine_operation_time.Duration) error { - w, err := createMLEngineWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return ml_engine_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func mLEngineOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout ml_engine_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createMLEngineWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -func isMtls() bool { - regularEndpoint := "https://mockservice.googleapis.com/v1/" - mtlsEndpoint := getMtlsEndpoint(regularEndpoint) - _, endpoint, err := mtls_util_transport.NewHTTPClient(mtls_util_context.Background(), - mtls_util_internaloption.WithDefaultEndpoint(regularEndpoint), - 
mtls_util_internaloption.WithDefaultMTLSEndpoint(mtlsEndpoint), - ) - if err != nil { - return false - } - isMtls := endpoint == mtlsEndpoint - return isMtls -} - -func getMtlsEndpoint(baseEndpoint string) string { - u, err := mtls_util_url.Parse(baseEndpoint) - if err != nil { - if mtls_util_strings.Contains(baseEndpoint, ".googleapis") { - return mtls_util_strings.Replace(baseEndpoint, ".googleapis", ".mtls.googleapis", 1) - } - return baseEndpoint - } - domainParts := mtls_util_strings.Split(u.Host, ".") - if len(domainParts) > 1 { - u.Host = mtls_util_fmt.Sprintf("%s.mtls.%s", domainParts[0], mtls_util_strings.Join(domainParts[1:], ".")) - } else { - u.Host = mtls_util_fmt.Sprintf("%s.mtls", domainParts[0]) - } - return u.String() -} - -type MutexKV struct { - lock mutexkv_sync.Mutex - store map[string]*mutexkv_sync.Mutex -} - -func (m *MutexKV) Lock(key string) { - mutexkv_log.Printf("[DEBUG] Locking %q", key) - m.get(key).Lock() - mutexkv_log.Printf("[DEBUG] Locked %q", key) -} - -func (m *MutexKV) Unlock(key string) { - mutexkv_log.Printf("[DEBUG] Unlocking %q", key) - m.get(key).Unlock() - mutexkv_log.Printf("[DEBUG] Unlocked %q", key) -} - -func (m *MutexKV) get(key string) *mutexkv_sync.Mutex { - m.lock.Lock() - defer m.lock.Unlock() - mutex, ok := m.store[key] - if !ok { - mutex = &mutexkv_sync.Mutex{} - m.store[key] = mutex - } - return mutex -} - -func NewMutexKV() *MutexKV { - return &MutexKV{ - store: make(map[string]*mutexkv_sync.Mutex), - } -} - -type NetworkManagementOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *NetworkManagementOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, network_management_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := network_management_operation_fmt.Sprintf("https://networkmanagement.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, 
url, w.UserAgent, nil) -} - -func createNetworkManagementWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*NetworkManagementOperationWaiter, error) { - w := &NetworkManagementOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func networkManagementOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout network_management_operation_time.Duration) error { - w, err := createNetworkManagementWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return network_management_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func networkManagementOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout network_management_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createNetworkManagementWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -type NetworkServicesOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *NetworkServicesOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, network_services_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := network_services_operation_fmt.Sprintf("https://networkservices.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createNetworkServicesWaiter(config *Config, op 
map[string]interface{}, project, activity, userAgent string) (*NetworkServicesOperationWaiter, error) { - w := &NetworkServicesOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func networkServicesOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout network_services_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createNetworkServicesWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -var defaultOauthScopes = []string{ - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/service.management.readonly", - "https://www.googleapis.com/auth/servicecontrol", - "https://www.googleapis.com/auth/trace.append", -} - -func schemaNodeConfig() *node_config_schema.Schema { - return &node_config_schema.Schema{ - Type: node_config_schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The configuration of the nodepool`, - MaxItems: 1, - Elem: &node_config_schema.Resource{ - Schema: map[string]*node_config_schema.Schema{ - "disk_size_gb": { - Type: node_config_schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: node_config_validation.IntAtLeast(10), - Description: `Size of the disk attached to each node, specified in GB. 
The smallest allowed disk size is 10GB.`, - }, - - "disk_type": { - Type: node_config_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: node_config_validation.StringInSlice([]string{"pd-standard", "pd-balanced", "pd-ssd"}, false), - Description: `Type of the disk attached to each node.`, - }, - - "guest_accelerator": { - Type: node_config_schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - - ConfigMode: node_config_schema.SchemaConfigModeAttr, - Description: `List of the type and count of accelerator cards attached to the instance.`, - Elem: &node_config_schema.Resource{ - Schema: map[string]*node_config_schema.Schema{ - "count": { - Type: node_config_schema.TypeInt, - Required: true, - ForceNew: true, - Description: `The number of the accelerator cards exposed to an instance.`, - }, - "type": { - Type: node_config_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The accelerator type resource name.`, - }, - "gpu_partition_size": { - Type: node_config_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Size of partitions to create on the GPU. Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)`, - }, - }, - }, - }, - - "image_type": { - Type: node_config_schema.TypeString, - Optional: true, - Computed: true, - DiffSuppressFunc: caseDiffSuppress, - Description: `The image type to use for this node. Note that for a given image type, the latest version of it will be used.`, - }, - - "labels": { - Type: node_config_schema.TypeMap, - Optional: true, - - Computed: true, - ForceNew: true, - Elem: &node_config_schema.Schema{Type: node_config_schema.TypeString}, - Description: `The map of Kubernetes labels (key/value pairs) to be applied to each node. 
These will added in addition to any default label(s) that Kubernetes may apply to the node.`, - }, - - "local_ssd_count": { - Type: node_config_schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: node_config_validation.IntAtLeast(0), - Description: `The number of local SSD disks to be attached to the node.`, - }, - - "gcfs_config": { - Type: node_config_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `GCFS configuration for this node.`, - ForceNew: true, - Elem: &node_config_schema.Resource{ - Schema: map[string]*node_config_schema.Schema{ - "enabled": { - Type: node_config_schema.TypeBool, - Required: true, - ForceNew: true, - Description: `Whether or not GCFS is enabled`, - }, - }, - }, - }, - - "machine_type": { - Type: node_config_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The name of a Google Compute Engine machine type.`, - }, - - "metadata": { - Type: node_config_schema.TypeMap, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &node_config_schema.Schema{Type: node_config_schema.TypeString}, - Description: `The metadata key/value pairs assigned to instances in the cluster.`, - }, - - "min_cpu_platform": { - Type: node_config_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Minimum CPU platform to be used by this instance. 
The instance may be scheduled on the specified or newer CPU platform.`, - }, - - "oauth_scopes": { - Type: node_config_schema.TypeSet, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The set of Google API scopes to be made available on all of the node VMs.`, - Elem: &node_config_schema.Schema{ - Type: node_config_schema.TypeString, - StateFunc: func(v interface{}) string { - return canonicalizeServiceScope(v.(string)) - }, - }, - DiffSuppressFunc: containerClusterAddedScopesSuppress, - Set: stringScopeHashcode, - }, - - "preemptible": { - Type: node_config_schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - Description: `Whether the nodes are created as preemptible VM instances.`, - }, - - "service_account": { - Type: node_config_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The Google Cloud Platform Service Account to be used by the node VMs.`, - }, - - "tags": { - Type: node_config_schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &node_config_schema.Schema{Type: node_config_schema.TypeString}, - Description: `The list of instance tags applied to all nodes.`, - }, - - "shielded_instance_config": { - Type: node_config_schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - Description: `Shielded Instance options.`, - MaxItems: 1, - Elem: &node_config_schema.Resource{ - Schema: map[string]*node_config_schema.Schema{ - "enable_secure_boot": { - Type: node_config_schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - Description: `Defines whether the instance has Secure Boot enabled.`, - }, - "enable_integrity_monitoring": { - Type: node_config_schema.TypeBool, - Optional: true, - ForceNew: true, - Default: true, - Description: `Defines whether the instance has integrity monitoring enabled.`, - }, - }, - }, - }, - - "taint": { - Type: node_config_schema.TypeList, - Optional: true, - - Computed: true, - ForceNew: true, - - ConfigMode: 
node_config_schema.SchemaConfigModeAttr, - Description: `List of Kubernetes taints to be applied to each node.`, - Elem: &node_config_schema.Resource{ - Schema: map[string]*node_config_schema.Schema{ - "key": { - Type: node_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Key for taint.`, - }, - "value": { - Type: node_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Value for taint.`, - }, - "effect": { - Type: node_config_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: node_config_validation.StringInSlice([]string{"NO_SCHEDULE", "PREFER_NO_SCHEDULE", "NO_EXECUTE"}, false), - Description: `Effect for taint.`, - }, - }, - }, - }, - - "workload_metadata_config": { - Computed: true, - Type: node_config_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `The workload metadata configuration for this node.`, - Elem: &node_config_schema.Resource{ - Schema: map[string]*node_config_schema.Schema{ - "mode": { - Type: node_config_schema.TypeString, - Required: true, - ValidateFunc: node_config_validation.StringInSlice([]string{"MODE_UNSPECIFIED", "GCE_METADATA", "GKE_METADATA"}, false), - Description: `Mode is the configuration for how to expose metadata to workloads running on the node.`, - }, - }, - }, - }, - }, - }, - } -} - -func expandNodeConfig(v interface{}) *node_config_container.NodeConfig { - nodeConfigs := v.([]interface{}) - nc := &node_config_container.NodeConfig{ - - OauthScopes: defaultOauthScopes, - } - if len(nodeConfigs) == 0 { - return nc - } - - nodeConfig := nodeConfigs[0].(map[string]interface{}) - - if v, ok := nodeConfig["machine_type"]; ok { - nc.MachineType = v.(string) - } - - if v, ok := nodeConfig["guest_accelerator"]; ok { - accels := v.([]interface{}) - guestAccelerators := make([]*node_config_container.AcceleratorConfig, 0, len(accels)) - for _, raw := range accels { - data := raw.(map[string]interface{}) - if data["count"].(int) == 0 { - continue - 
} - guestAccelerators = append(guestAccelerators, &node_config_container.AcceleratorConfig{ - AcceleratorCount: int64(data["count"].(int)), - AcceleratorType: data["type"].(string), - GpuPartitionSize: data["gpu_partition_size"].(string), - }) - } - nc.Accelerators = guestAccelerators - } - - if v, ok := nodeConfig["disk_size_gb"]; ok { - nc.DiskSizeGb = int64(v.(int)) - } - - if v, ok := nodeConfig["disk_type"]; ok { - nc.DiskType = v.(string) - } - - if v, ok := nodeConfig["local_ssd_count"]; ok { - nc.LocalSsdCount = int64(v.(int)) - } - - if v, ok := nodeConfig["gcfs_config"]; ok && len(v.([]interface{})) > 0 { - conf := v.([]interface{})[0].(map[string]interface{}) - nc.GcfsConfig = &node_config_container.GcfsConfig{ - Enabled: conf["enabled"].(bool), - } - } - - if scopes, ok := nodeConfig["oauth_scopes"]; ok { - scopesSet := scopes.(*node_config_schema.Set) - scopes := make([]string, scopesSet.Len()) - for i, scope := range scopesSet.List() { - scopes[i] = canonicalizeServiceScope(scope.(string)) - } - - nc.OauthScopes = scopes - } - - if v, ok := nodeConfig["service_account"]; ok { - nc.ServiceAccount = v.(string) - } - - if v, ok := nodeConfig["metadata"]; ok { - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - nc.Metadata = m - } - - if v, ok := nodeConfig["image_type"]; ok { - nc.ImageType = v.(string) - } - - if v, ok := nodeConfig["labels"]; ok { - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - nc.Labels = m - } - - if v, ok := nodeConfig["tags"]; ok { - tagsList := v.([]interface{}) - tags := []string{} - for _, v := range tagsList { - if v != nil { - tags = append(tags, v.(string)) - } - } - nc.Tags = tags - } - - if v, ok := nodeConfig["shielded_instance_config"]; ok && len(v.([]interface{})) > 0 { - conf := v.([]interface{})[0].(map[string]interface{}) - nc.ShieldedInstanceConfig = &node_config_container.ShieldedInstanceConfig{ - 
EnableSecureBoot: conf["enable_secure_boot"].(bool), - EnableIntegrityMonitoring: conf["enable_integrity_monitoring"].(bool), - } - } - - nc.Preemptible = nodeConfig["preemptible"].(bool) - - if v, ok := nodeConfig["min_cpu_platform"]; ok { - nc.MinCpuPlatform = v.(string) - } - - if v, ok := nodeConfig["taint"]; ok && len(v.([]interface{})) > 0 { - taints := v.([]interface{}) - nodeTaints := make([]*node_config_container.NodeTaint, 0, len(taints)) - for _, raw := range taints { - data := raw.(map[string]interface{}) - taint := &node_config_container.NodeTaint{ - Key: data["key"].(string), - Value: data["value"].(string), - Effect: data["effect"].(string), - } - nodeTaints = append(nodeTaints, taint) - } - nc.Taints = nodeTaints - } - - if v, ok := nodeConfig["workload_metadata_config"]; ok { - nc.WorkloadMetadataConfig = expandWorkloadMetadataConfig(v) - } - - return nc -} - -func expandWorkloadMetadataConfig(v interface{}) *node_config_container.WorkloadMetadataConfig { - if v == nil { - return nil - } - ls := v.([]interface{}) - if len(ls) == 0 { - return nil - } - wmc := &node_config_container.WorkloadMetadataConfig{} - - cfg := ls[0].(map[string]interface{}) - - if v, ok := cfg["mode"]; ok { - wmc.Mode = v.(string) - } - - return wmc -} - -func flattenNodeConfig(c *node_config_container.NodeConfig) []map[string]interface{} { - config := make([]map[string]interface{}, 0, 1) - - if c == nil { - return config - } - - config = append(config, map[string]interface{}{ - "machine_type": c.MachineType, - "disk_size_gb": c.DiskSizeGb, - "disk_type": c.DiskType, - "guest_accelerator": flattenContainerGuestAccelerators(c.Accelerators), - "local_ssd_count": c.LocalSsdCount, - "gcfs_config": flattenGcfsConfig(c.GcfsConfig), - "service_account": c.ServiceAccount, - "metadata": c.Metadata, - "image_type": c.ImageType, - "labels": c.Labels, - "tags": c.Tags, - "preemptible": c.Preemptible, - "min_cpu_platform": c.MinCpuPlatform, - "shielded_instance_config": 
flattenShieldedInstanceConfig(c.ShieldedInstanceConfig), - "taint": flattenTaints(c.Taints), - "workload_metadata_config": flattenWorkloadMetadataConfig(c.WorkloadMetadataConfig), - }) - - if len(c.OauthScopes) > 0 { - config[0]["oauth_scopes"] = node_config_schema.NewSet(stringScopeHashcode, convertStringArrToInterface(c.OauthScopes)) - } - - return config -} - -func flattenContainerGuestAccelerators(c []*node_config_container.AcceleratorConfig) []map[string]interface{} { - result := []map[string]interface{}{} - for _, accel := range c { - result = append(result, map[string]interface{}{ - "count": accel.AcceleratorCount, - "type": accel.AcceleratorType, - "gpu_partition_size": accel.GpuPartitionSize, - }) - } - return result -} - -func flattenShieldedInstanceConfig(c *node_config_container.ShieldedInstanceConfig) []map[string]interface{} { - result := []map[string]interface{}{} - if c != nil { - result = append(result, map[string]interface{}{ - "enable_secure_boot": c.EnableSecureBoot, - "enable_integrity_monitoring": c.EnableIntegrityMonitoring, - }) - } - return result -} - -func flattenGcfsConfig(c *node_config_container.GcfsConfig) []map[string]interface{} { - result := []map[string]interface{}{} - if c != nil { - result = append(result, map[string]interface{}{ - "enabled": c.Enabled, - }) - } - return result -} - -func flattenTaints(c []*node_config_container.NodeTaint) []map[string]interface{} { - result := []map[string]interface{}{} - for _, taint := range c { - result = append(result, map[string]interface{}{ - "key": taint.Key, - "value": taint.Value, - "effect": taint.Effect, - }) - } - return result -} - -func flattenWorkloadMetadataConfig(c *node_config_container.WorkloadMetadataConfig) []map[string]interface{} { - result := []map[string]interface{}{} - if c != nil { - result = append(result, map[string]interface{}{ - "mode": c.Mode, - }) - } - return result -} - -type NotebooksOperationWaiter struct { - Config *Config - UserAgent string - Project 
string - CommonOperationWaiter -} - -func (w *NotebooksOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, notebooks_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := notebooks_operation_fmt.Sprintf("https://notebooks.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createNotebooksWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*NotebooksOperationWaiter, error) { - w := &NotebooksOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func notebooksOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout notebooks_operation_time.Duration) error { - w, err := createNotebooksWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return notebooks_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func notebooksOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout notebooks_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createNotebooksWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -func resourceOrgPolicyPolicyCustomImport(d *orgpolicy_utils_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - if err := parseImportId([]string{ - "^(?P[^/]+/?[^/]*)/policies/(?P[^/]+)", - "^(?P[^/]+/?[^/]*)/(?P[^/]+)", - }, d, config); err != nil { - return err - } - - 
id, err := replaceVarsRecursive(d, config, "{{parent}}/policies/{{name}}", false, 0) - if err != nil { - return orgpolicy_utils_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return nil -} - -func pathOrContents(poc string) (string, bool, error) { - if len(poc) == 0 { - return poc, false, nil - } - - path := poc - if path[0] == '~' { - var err error - path, err = path_or_contents_homedir.Expand(path) - if err != nil { - return path, true, err - } - } - - if _, err := path_or_contents_os.Stat(path); err == nil { - contents, err := path_or_contents_ioutil.ReadFile(path) - if err != nil { - return string(contents), true, err - } - return string(contents), true, nil - } - - return poc, false, nil -} - -type PrivatecaOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *PrivatecaOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, privateca_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := privateca_operation_fmt.Sprintf("https://privateca.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createPrivatecaWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*PrivatecaOperationWaiter, error) { - w := &PrivatecaOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func privatecaOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout privateca_operation_time.Duration) error { - w, err := createPrivatecaWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - 
return privateca_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func privatecaOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout privateca_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createPrivatecaWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensions(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "critical": flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsCritical(original["critical"], d, config), - "value": flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsValue(original["value"], d, config), - "object_id": flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectId(original["objectId"], d, config), - }) - } - return transformed -} - -func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsCritical(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsValue(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectId(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["object_id_path"] = - flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectIdObjectIdPath(original["objectIdPath"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectIdObjectIdPath(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigPolicyIds(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "object_id_path": flattenPrivatecaCertificateConfigX509ConfigPolicyIdsObjectIdPath(original["objectIdPath"], d, config), - }) - } - return transformed -} - -func flattenPrivatecaCertificateConfigX509ConfigPolicyIdsObjectIdPath(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigAiaOcspServers(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigCaOptions(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - - if v == nil || len(v.(map[string]interface{})) == 0 { - v = make(map[string]interface{}) - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["is_ca"] = flattenPrivatecaCertificateConfigX509ConfigCaOptionsIsCa(original["isCa"], d, config) - return []interface{}{transformed} - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["is_ca"] = - flattenPrivatecaCertificateConfigX509ConfigCaOptionsIsCa(original["isCa"], d, 
config) - transformed["max_issuer_path_length"] = - flattenPrivatecaCertificateConfigX509ConfigCaOptionsMaxIssuerPathLength(original["maxIssuerPathLength"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateConfigX509ConfigCaOptionsIsCa(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigCaOptionsMaxIssuerPathLength(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := privateca_utils_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsage(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - if v == nil { - v = make(map[string]interface{}) - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["base_key_usage"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsage(original["baseKeyUsage"], d, config) - transformed["extended_key_usage"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsage(original["extendedKeyUsage"], d, config) - transformed["unknown_extended_key_usages"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsages(original["unknownExtendedKeyUsages"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsage(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - if v == nil { - v = make(map[string]interface{}) - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["digital_signature"] = - 
flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDigitalSignature(original["digitalSignature"], d, config) - transformed["content_commitment"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageContentCommitment(original["contentCommitment"], d, config) - transformed["key_encipherment"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageKeyEncipherment(original["keyEncipherment"], d, config) - transformed["data_encipherment"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDataEncipherment(original["dataEncipherment"], d, config) - transformed["key_agreement"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageKeyAgreement(original["keyAgreement"], d, config) - transformed["cert_sign"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageCertSign(original["certSign"], d, config) - transformed["crl_sign"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageCrlSign(original["crlSign"], d, config) - transformed["encipher_only"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageEncipherOnly(original["encipherOnly"], d, config) - transformed["decipher_only"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDecipherOnly(original["decipherOnly"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDigitalSignature(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageContentCommitment(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageKeyEncipherment(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDataEncipherment(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageKeyAgreement(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageCertSign(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageCrlSign(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageEncipherOnly(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDecipherOnly(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsage(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - if v == nil { - v = make(map[string]interface{}) - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["server_auth"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageServerAuth(original["serverAuth"], d, config) - transformed["client_auth"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageClientAuth(original["clientAuth"], d, config) - transformed["code_signing"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageCodeSigning(original["codeSigning"], d, config) - transformed["email_protection"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageEmailProtection(original["emailProtection"], d, config) - 
transformed["time_stamping"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageTimeStamping(original["timeStamping"], d, config) - transformed["ocsp_signing"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageOcspSigning(original["ocspSigning"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageServerAuth(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageClientAuth(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageCodeSigning(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageEmailProtection(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageTimeStamping(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageOcspSigning(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsages(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "object_id_path": 
flattenPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsagesObjectIdPath(original["objectIdPath"], d, config), - }) - } - return transformed -} - -func flattenPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsagesObjectIdPath(v interface{}, d *privateca_utils_schema.ResourceData, config *Config) interface{} { - return v -} - -const TestEnvVar = "TF_ACC" - -var mutexKV = NewMutexKV() - -func Provider() *provider_schema.Provider { - - if isMtls() { - - for key, bp := range DefaultBasePaths { - DefaultBasePaths[key] = getMtlsEndpoint(bp) - } - } - - provider := &provider_schema.Provider{ - Schema: map[string]*provider_schema.Schema{ - "credentials": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCredentials, - ConflictsWith: []string{"access_token"}, - }, - - "access_token": { - Type: provider_schema.TypeString, - Optional: true, - ConflictsWith: []string{"credentials"}, - }, - - "impersonate_service_account": { - Type: provider_schema.TypeString, - Optional: true, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT", - }, nil), - }, - - "impersonate_service_account_delegates": { - Type: provider_schema.TypeList, - Optional: true, - Elem: &provider_schema.Schema{Type: provider_schema.TypeString}, - }, - - "project": { - Type: provider_schema.TypeString, - Optional: true, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_PROJECT", - "GOOGLE_CLOUD_PROJECT", - "GCLOUD_PROJECT", - "CLOUDSDK_CORE_PROJECT", - }, nil), - }, - - "billing_project": { - Type: provider_schema.TypeString, - Optional: true, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BILLING_PROJECT", - }, nil), - }, - - "region": { - Type: provider_schema.TypeString, - Optional: true, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_REGION", - "GCLOUD_REGION", - "CLOUDSDK_COMPUTE_REGION", - }, nil), - }, - - "zone": { - Type: 
provider_schema.TypeString, - Optional: true, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ZONE", - "GCLOUD_ZONE", - "CLOUDSDK_COMPUTE_ZONE", - }, nil), - }, - - "scopes": { - Type: provider_schema.TypeList, - Optional: true, - Elem: &provider_schema.Schema{Type: provider_schema.TypeString}, - }, - - "batching": { - Type: provider_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &provider_schema.Resource{ - Schema: map[string]*provider_schema.Schema{ - "send_after": { - Type: provider_schema.TypeString, - Optional: true, - Default: "10s", - ValidateFunc: validateNonNegativeDuration(), - }, - "enable_batching": { - Type: provider_schema.TypeBool, - Optional: true, - Default: true, - }, - }, - }, - }, - - "user_project_override": { - Type: provider_schema.TypeBool, - Optional: true, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "USER_PROJECT_OVERRIDE", - }, nil), - }, - - "request_timeout": { - Type: provider_schema.TypeString, - Optional: true, - }, - - "request_reason": { - Type: provider_schema.TypeString, - Optional: true, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "CLOUDSDK_CORE_REQUEST_REASON", - }, nil), - }, - - "access_approval_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ACCESS_APPROVAL_CUSTOM_ENDPOINT", - }, DefaultBasePaths[AccessApprovalBasePathKey]), - }, - "access_context_manager_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ACCESS_CONTEXT_MANAGER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[AccessContextManagerBasePathKey]), - }, - "active_directory_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: 
provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ACTIVE_DIRECTORY_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ActiveDirectoryBasePathKey]), - }, - "apigee_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_APIGEE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ApigeeBasePathKey]), - }, - "app_engine_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_APP_ENGINE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[AppEngineBasePathKey]), - }, - "big_query_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BIG_QUERY_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BigQueryBasePathKey]), - }, - "bigquery_data_transfer_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BIGQUERY_DATA_TRANSFER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BigqueryDataTransferBasePathKey]), - }, - "bigquery_reservation_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BIGQUERY_RESERVATION_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BigqueryReservationBasePathKey]), - }, - "bigtable_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BIGTABLE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BigtableBasePathKey]), - }, - "billing_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - 
DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BILLING_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BillingBasePathKey]), - }, - "binary_authorization_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BINARY_AUTHORIZATION_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BinaryAuthorizationBasePathKey]), - }, - "cloud_asset_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_ASSET_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudAssetBasePathKey]), - }, - "cloud_build_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_BUILD_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudBuildBasePathKey]), - }, - "cloud_functions_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_FUNCTIONS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudFunctionsBasePathKey]), - }, - "cloud_identity_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_IDENTITY_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudIdentityBasePathKey]), - }, - "cloud_iot_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_IOT_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudIotBasePathKey]), - }, - "cloud_run_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: 
validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_RUN_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudRunBasePathKey]), - }, - "cloud_scheduler_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_SCHEDULER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudSchedulerBasePathKey]), - }, - "cloud_tasks_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_TASKS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudTasksBasePathKey]), - }, - "compute_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_COMPUTE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ComputeBasePathKey]), - }, - "container_analysis_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CONTAINER_ANALYSIS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ContainerAnalysisBasePathKey]), - }, - "data_catalog_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DATA_CATALOG_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DataCatalogBasePathKey]), - }, - "data_loss_prevention_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DATA_LOSS_PREVENTION_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DataLossPreventionBasePathKey]), - }, - "dataproc_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: 
true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DATAPROC_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DataprocBasePathKey]), - }, - "datastore_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DATASTORE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DatastoreBasePathKey]), - }, - "deployment_manager_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DEPLOYMENT_MANAGER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DeploymentManagerBasePathKey]), - }, - "dialogflow_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DIALOGFLOW_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DialogflowBasePathKey]), - }, - "dialogflow_cx_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DIALOGFLOW_CX_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DialogflowCXBasePathKey]), - }, - "dns_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DNS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DNSBasePathKey]), - }, - "essential_contacts_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ESSENTIAL_CONTACTS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[EssentialContactsBasePathKey]), - }, - "filestore_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - 
ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_FILESTORE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[FilestoreBasePathKey]), - }, - "firestore_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_FIRESTORE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[FirestoreBasePathKey]), - }, - "game_services_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_GAME_SERVICES_CUSTOM_ENDPOINT", - }, DefaultBasePaths[GameServicesBasePathKey]), - }, - "gke_hub_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_GKE_HUB_CUSTOM_ENDPOINT", - }, DefaultBasePaths[GKEHubBasePathKey]), - }, - "healthcare_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_HEALTHCARE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[HealthcareBasePathKey]), - }, - "iap_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_IAP_CUSTOM_ENDPOINT", - }, DefaultBasePaths[IapBasePathKey]), - }, - "identity_platform_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_IDENTITY_PLATFORM_CUSTOM_ENDPOINT", - }, DefaultBasePaths[IdentityPlatformBasePathKey]), - }, - "kms_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: 
provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_KMS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[KMSBasePathKey]), - }, - "logging_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_LOGGING_CUSTOM_ENDPOINT", - }, DefaultBasePaths[LoggingBasePathKey]), - }, - "memcache_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_MEMCACHE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[MemcacheBasePathKey]), - }, - "ml_engine_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ML_ENGINE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[MLEngineBasePathKey]), - }, - "monitoring_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_MONITORING_CUSTOM_ENDPOINT", - }, DefaultBasePaths[MonitoringBasePathKey]), - }, - "network_management_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_NETWORK_MANAGEMENT_CUSTOM_ENDPOINT", - }, DefaultBasePaths[NetworkManagementBasePathKey]), - }, - "network_services_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_NETWORK_SERVICES_CUSTOM_ENDPOINT", - }, DefaultBasePaths[NetworkServicesBasePathKey]), - }, - "notebooks_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: 
provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_NOTEBOOKS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[NotebooksBasePathKey]), - }, - "os_config_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_OS_CONFIG_CUSTOM_ENDPOINT", - }, DefaultBasePaths[OSConfigBasePathKey]), - }, - "os_login_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_OS_LOGIN_CUSTOM_ENDPOINT", - }, DefaultBasePaths[OSLoginBasePathKey]), - }, - "privateca_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_PRIVATECA_CUSTOM_ENDPOINT", - }, DefaultBasePaths[PrivatecaBasePathKey]), - }, - "pubsub_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_PUBSUB_CUSTOM_ENDPOINT", - }, DefaultBasePaths[PubsubBasePathKey]), - }, - "pubsub_lite_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_PUBSUB_LITE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[PubsubLiteBasePathKey]), - }, - "redis_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_REDIS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[RedisBasePathKey]), - }, - "resource_manager_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - 
"GOOGLE_RESOURCE_MANAGER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ResourceManagerBasePathKey]), - }, - "secret_manager_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SECRET_MANAGER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[SecretManagerBasePathKey]), - }, - "security_center_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SECURITY_CENTER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[SecurityCenterBasePathKey]), - }, - "service_management_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SERVICE_MANAGEMENT_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ServiceManagementBasePathKey]), - }, - "service_usage_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SERVICE_USAGE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ServiceUsageBasePathKey]), - }, - "source_repo_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SOURCE_REPO_CUSTOM_ENDPOINT", - }, DefaultBasePaths[SourceRepoBasePathKey]), - }, - "spanner_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SPANNER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[SpannerBasePathKey]), - }, - "sql_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: 
provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SQL_CUSTOM_ENDPOINT", - }, DefaultBasePaths[SQLBasePathKey]), - }, - "storage_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_STORAGE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[StorageBasePathKey]), - }, - "tags_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_TAGS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[TagsBasePathKey]), - }, - "tpu_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_TPU_CUSTOM_ENDPOINT", - }, DefaultBasePaths[TPUBasePathKey]), - }, - "vertex_ai_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_VERTEX_AI_CUSTOM_ENDPOINT", - }, DefaultBasePaths[VertexAIBasePathKey]), - }, - "vpc_access_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_VPC_ACCESS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[VPCAccessBasePathKey]), - }, - "workflows_custom_endpoint": { - Type: provider_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_WORKFLOWS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[WorkflowsBasePathKey]), - }, - - CloudBillingCustomEndpointEntryKey: CloudBillingCustomEndpointEntry, - ComposerCustomEndpointEntryKey: ComposerCustomEndpointEntry, - ContainerCustomEndpointEntryKey: ContainerCustomEndpointEntry, - DataflowCustomEndpointEntryKey: 
DataflowCustomEndpointEntry, - IamCredentialsCustomEndpointEntryKey: IamCredentialsCustomEndpointEntry, - ResourceManagerV2CustomEndpointEntryKey: ResourceManagerV2CustomEndpointEntry, - IAMCustomEndpointEntryKey: IAMCustomEndpointEntry, - ServiceNetworkingCustomEndpointEntryKey: ServiceNetworkingCustomEndpointEntry, - ServiceUsageCustomEndpointEntryKey: ServiceUsageCustomEndpointEntry, - StorageTransferCustomEndpointEntryKey: StorageTransferCustomEndpointEntry, - BigtableAdminCustomEndpointEntryKey: BigtableAdminCustomEndpointEntry, - - AssuredWorkloadsEndpointEntryKey: AssuredWorkloadsEndpointEntry, - CloudResourceManagerEndpointEntryKey: CloudResourceManagerEndpointEntry, - EventarcEndpointEntryKey: EventarcEndpointEntry, - GkeHubFeatureCustomEndpointEntryKey: GkeHubFeatureCustomEndpointEntry, - OrgPolicyEndpointEntryKey: OrgPolicyEndpointEntry, - PrivatecaCertificateTemplateEndpointEntryKey: PrivatecaCertificateTemplateCustomEndpointEntry, - }, - - ProviderMetaSchema: map[string]*provider_schema.Schema{ - "module_name": { - Type: provider_schema.TypeString, - Optional: true, - }, - }, - - DataSourcesMap: map[string]*provider_schema.Resource{ - "google_active_folder": dataSourceGoogleActiveFolder(), - "google_app_engine_default_service_account": dataSourceGoogleAppEngineDefaultServiceAccount(), - "google_billing_account": dataSourceGoogleBillingAccount(), - "google_bigquery_default_service_account": dataSourceGoogleBigqueryDefaultServiceAccount(), - "google_client_config": dataSourceGoogleClientConfig(), - "google_client_openid_userinfo": dataSourceGoogleClientOpenIDUserinfo(), - "google_cloudfunctions_function": dataSourceGoogleCloudFunctionsFunction(), - "google_cloud_identity_groups": dataSourceGoogleCloudIdentityGroups(), - "google_cloud_identity_group_memberships": dataSourceGoogleCloudIdentityGroupMemberships(), - "google_cloud_run_locations": dataSourceGoogleCloudRunLocations(), - "google_cloud_run_service": dataSourceGoogleCloudRunService(), - 
"google_composer_environment": dataSourceGoogleComposerEnvironment(), - "google_composer_image_versions": dataSourceGoogleComposerImageVersions(), - "google_compute_address": dataSourceGoogleComputeAddress(), - "google_compute_backend_service": dataSourceGoogleComputeBackendService(), - "google_compute_backend_bucket": dataSourceGoogleComputeBackendBucket(), - "google_compute_default_service_account": dataSourceGoogleComputeDefaultServiceAccount(), - "google_compute_forwarding_rule": dataSourceGoogleComputeForwardingRule(), - "google_compute_global_address": dataSourceGoogleComputeGlobalAddress(), - "google_compute_global_forwarding_rule": dataSourceGoogleComputeGlobalForwardingRule(), - "google_compute_ha_vpn_gateway": dataSourceGoogleComputeHaVpnGateway(), - "google_compute_health_check": dataSourceGoogleComputeHealthCheck(), - "google_compute_image": dataSourceGoogleComputeImage(), - "google_compute_instance": dataSourceGoogleComputeInstance(), - "google_compute_instance_group": dataSourceGoogleComputeInstanceGroup(), - "google_compute_instance_serial_port": dataSourceGoogleComputeInstanceSerialPort(), - "google_compute_instance_template": dataSourceGoogleComputeInstanceTemplate(), - "google_compute_lb_ip_ranges": dataSourceGoogleComputeLbIpRanges(), - "google_compute_network": dataSourceGoogleComputeNetwork(), - "google_compute_network_endpoint_group": dataSourceGoogleComputeNetworkEndpointGroup(), - "google_compute_node_types": dataSourceGoogleComputeNodeTypes(), - "google_compute_regions": dataSourceGoogleComputeRegions(), - "google_compute_region_instance_group": dataSourceGoogleComputeRegionInstanceGroup(), - "google_compute_region_ssl_certificate": dataSourceGoogleRegionComputeSslCertificate(), - "google_compute_resource_policy": dataSourceGoogleComputeResourcePolicy(), - "google_compute_router": dataSourceGoogleComputeRouter(), - "google_compute_router_status": dataSourceGoogleComputeRouterStatus(), - "google_compute_ssl_certificate": 
dataSourceGoogleComputeSslCertificate(), - "google_compute_ssl_policy": dataSourceGoogleComputeSslPolicy(), - "google_compute_subnetwork": dataSourceGoogleComputeSubnetwork(), - "google_compute_vpn_gateway": dataSourceGoogleComputeVpnGateway(), - "google_compute_zones": dataSourceGoogleComputeZones(), - "google_container_cluster": dataSourceGoogleContainerCluster(), - "google_container_engine_versions": dataSourceGoogleContainerEngineVersions(), - "google_container_registry_image": dataSourceGoogleContainerImage(), - "google_container_registry_repository": dataSourceGoogleContainerRepo(), - "google_dns_keys": dataSourceDNSKeys(), - "google_dns_managed_zone": dataSourceDnsManagedZone(), - "google_game_services_game_server_deployment_rollout": dataSourceGameServicesGameServerDeploymentRollout(), - "google_iam_policy": dataSourceGoogleIamPolicy(), - "google_iam_role": dataSourceGoogleIamRole(), - "google_iam_testable_permissions": dataSourceGoogleIamTestablePermissions(), - "google_iap_client": dataSourceGoogleIapClient(), - "google_kms_crypto_key": dataSourceGoogleKmsCryptoKey(), - "google_kms_crypto_key_version": dataSourceGoogleKmsCryptoKeyVersion(), - "google_kms_key_ring": dataSourceGoogleKmsKeyRing(), - "google_kms_secret": dataSourceGoogleKmsSecret(), - "google_kms_secret_ciphertext": dataSourceGoogleKmsSecretCiphertext(), - "google_folder": dataSourceGoogleFolder(), - "google_folder_organization_policy": dataSourceGoogleFolderOrganizationPolicy(), - "google_monitoring_notification_channel": dataSourceMonitoringNotificationChannel(), - "google_monitoring_cluster_istio_service": dataSourceMonitoringServiceClusterIstio(), - "google_monitoring_istio_canonical_service": dataSourceMonitoringIstioCanonicalService(), - "google_monitoring_mesh_istio_service": dataSourceMonitoringServiceMeshIstio(), - "google_monitoring_app_engine_service": dataSourceMonitoringServiceAppEngine(), - "google_monitoring_uptime_check_ips": dataSourceGoogleMonitoringUptimeCheckIps(), - 
"google_netblock_ip_ranges": dataSourceGoogleNetblockIpRanges(), - "google_organization": dataSourceGoogleOrganization(), - "google_project": dataSourceGoogleProject(), - "google_projects": dataSourceGoogleProjects(), - "google_project_organization_policy": dataSourceGoogleProjectOrganizationPolicy(), - "google_pubsub_topic": dataSourceGooglePubsubTopic(), - "google_secret_manager_secret": dataSourceSecretManagerSecret(), - "google_secret_manager_secret_version": dataSourceSecretManagerSecretVersion(), - "google_service_account": dataSourceGoogleServiceAccount(), - "google_service_account_access_token": dataSourceGoogleServiceAccountAccessToken(), - "google_service_account_id_token": dataSourceGoogleServiceAccountIdToken(), - "google_service_account_key": dataSourceGoogleServiceAccountKey(), - "google_sourcerepo_repository": dataSourceGoogleSourceRepoRepository(), - "google_spanner_instance": dataSourceSpannerInstance(), - "google_sql_ca_certs": dataSourceGoogleSQLCaCerts(), - "google_sql_backup_run": dataSourceSqlBackupRun(), - "google_sql_database_instance": dataSourceSqlDatabaseInstance(), - "google_service_networking_peered_dns_domain": dataSourceGoogleServiceNetworkingPeeredDNSDomain(), - "google_storage_bucket": dataSourceGoogleStorageBucket(), - "google_storage_bucket_object": dataSourceGoogleStorageBucketObject(), - "google_storage_bucket_object_content": dataSourceGoogleStorageBucketObjectContent(), - "google_storage_object_signed_url": dataSourceGoogleSignedUrl(), - "google_storage_project_service_account": dataSourceGoogleStorageProjectServiceAccount(), - "google_storage_transfer_project_service_account": dataSourceGoogleStorageTransferProjectServiceAccount(), - "google_tpu_tensorflow_versions": dataSourceTpuTensorflowVersions(), - "google_redis_instance": dataSourceGoogleRedisInstance(), - }, - - ResourcesMap: ResourceMap(), - } - - provider.ConfigureContextFunc = func(ctx provider_context.Context, d *provider_schema.ResourceData) (interface{}, 
provider_diag.Diagnostics) { - return providerConfigure(ctx, d, provider) - } - - return provider -} - -func ResourceMap() map[string]*provider_schema.Resource { - resourceMap, _ := ResourceMapWithErrors() - return resourceMap -} - -func ResourceMapWithErrors() (map[string]*provider_schema.Resource, error) { - return mergeResourceMaps( - map[string]*provider_schema.Resource{ - "google_folder_access_approval_settings": resourceAccessApprovalFolderSettings(), - "google_project_access_approval_settings": resourceAccessApprovalProjectSettings(), - "google_organization_access_approval_settings": resourceAccessApprovalOrganizationSettings(), - "google_access_context_manager_access_policy": resourceAccessContextManagerAccessPolicy(), - "google_access_context_manager_access_level": resourceAccessContextManagerAccessLevel(), - "google_access_context_manager_access_levels": resourceAccessContextManagerAccessLevels(), - "google_access_context_manager_access_level_condition": resourceAccessContextManagerAccessLevelCondition(), - "google_access_context_manager_service_perimeter": resourceAccessContextManagerServicePerimeter(), - "google_access_context_manager_service_perimeters": resourceAccessContextManagerServicePerimeters(), - "google_access_context_manager_service_perimeter_resource": resourceAccessContextManagerServicePerimeterResource(), - "google_access_context_manager_gcp_user_access_binding": resourceAccessContextManagerGcpUserAccessBinding(), - "google_active_directory_domain": resourceActiveDirectoryDomain(), - "google_active_directory_domain_trust": resourceActiveDirectoryDomainTrust(), - "google_apigee_organization": resourceApigeeOrganization(), - "google_apigee_instance": resourceApigeeInstance(), - "google_apigee_environment": resourceApigeeEnvironment(), - "google_apigee_envgroup": resourceApigeeEnvgroup(), - "google_apigee_instance_attachment": resourceApigeeInstanceAttachment(), - "google_apigee_envgroup_attachment": resourceApigeeEnvgroupAttachment(), - 
"google_app_engine_domain_mapping": resourceAppEngineDomainMapping(), - "google_app_engine_firewall_rule": resourceAppEngineFirewallRule(), - "google_app_engine_standard_app_version": resourceAppEngineStandardAppVersion(), - "google_app_engine_flexible_app_version": resourceAppEngineFlexibleAppVersion(), - "google_app_engine_application_url_dispatch_rules": resourceAppEngineApplicationUrlDispatchRules(), - "google_app_engine_service_split_traffic": resourceAppEngineServiceSplitTraffic(), - "google_app_engine_service_network_settings": resourceAppEngineServiceNetworkSettings(), - "google_bigquery_dataset": resourceBigQueryDataset(), - "google_bigquery_dataset_access": resourceBigQueryDatasetAccess(), - "google_bigquery_job": resourceBigQueryJob(), - "google_bigquery_table_iam_binding": ResourceIamBinding(BigQueryTableIamSchema, BigQueryTableIamUpdaterProducer, BigQueryTableIdParseFunc), - "google_bigquery_table_iam_member": ResourceIamMember(BigQueryTableIamSchema, BigQueryTableIamUpdaterProducer, BigQueryTableIdParseFunc), - "google_bigquery_table_iam_policy": ResourceIamPolicy(BigQueryTableIamSchema, BigQueryTableIamUpdaterProducer, BigQueryTableIdParseFunc), - "google_bigquery_routine": resourceBigQueryRoutine(), - "google_bigquery_data_transfer_config": resourceBigqueryDataTransferConfig(), - "google_bigquery_reservation": resourceBigqueryReservationReservation(), - "google_bigtable_app_profile": resourceBigtableAppProfile(), - "google_billing_budget": resourceBillingBudget(), - "google_binary_authorization_attestor": resourceBinaryAuthorizationAttestor(), - "google_binary_authorization_attestor_iam_binding": ResourceIamBinding(BinaryAuthorizationAttestorIamSchema, BinaryAuthorizationAttestorIamUpdaterProducer, BinaryAuthorizationAttestorIdParseFunc), - "google_binary_authorization_attestor_iam_member": ResourceIamMember(BinaryAuthorizationAttestorIamSchema, BinaryAuthorizationAttestorIamUpdaterProducer, BinaryAuthorizationAttestorIdParseFunc), - 
"google_binary_authorization_attestor_iam_policy": ResourceIamPolicy(BinaryAuthorizationAttestorIamSchema, BinaryAuthorizationAttestorIamUpdaterProducer, BinaryAuthorizationAttestorIdParseFunc), - "google_binary_authorization_policy": resourceBinaryAuthorizationPolicy(), - "google_cloud_asset_project_feed": resourceCloudAssetProjectFeed(), - "google_cloud_asset_folder_feed": resourceCloudAssetFolderFeed(), - "google_cloud_asset_organization_feed": resourceCloudAssetOrganizationFeed(), - "google_cloudbuild_trigger": resourceCloudBuildTrigger(), - "google_cloudfunctions_function_iam_binding": ResourceIamBinding(CloudFunctionsCloudFunctionIamSchema, CloudFunctionsCloudFunctionIamUpdaterProducer, CloudFunctionsCloudFunctionIdParseFunc), - "google_cloudfunctions_function_iam_member": ResourceIamMember(CloudFunctionsCloudFunctionIamSchema, CloudFunctionsCloudFunctionIamUpdaterProducer, CloudFunctionsCloudFunctionIdParseFunc), - "google_cloudfunctions_function_iam_policy": ResourceIamPolicy(CloudFunctionsCloudFunctionIamSchema, CloudFunctionsCloudFunctionIamUpdaterProducer, CloudFunctionsCloudFunctionIdParseFunc), - "google_cloud_identity_group": resourceCloudIdentityGroup(), - "google_cloud_identity_group_membership": resourceCloudIdentityGroupMembership(), - "google_cloudiot_registry": resourceCloudIotDeviceRegistry(), - "google_cloudiot_device": resourceCloudIotDevice(), - "google_cloud_run_domain_mapping": resourceCloudRunDomainMapping(), - "google_cloud_run_service": resourceCloudRunService(), - "google_cloud_run_service_iam_binding": ResourceIamBinding(CloudRunServiceIamSchema, CloudRunServiceIamUpdaterProducer, CloudRunServiceIdParseFunc), - "google_cloud_run_service_iam_member": ResourceIamMember(CloudRunServiceIamSchema, CloudRunServiceIamUpdaterProducer, CloudRunServiceIdParseFunc), - "google_cloud_run_service_iam_policy": ResourceIamPolicy(CloudRunServiceIamSchema, CloudRunServiceIamUpdaterProducer, CloudRunServiceIdParseFunc), - "google_cloud_scheduler_job": 
resourceCloudSchedulerJob(), - "google_cloud_tasks_queue": resourceCloudTasksQueue(), - "google_compute_address": resourceComputeAddress(), - "google_compute_autoscaler": resourceComputeAutoscaler(), - "google_compute_backend_bucket": resourceComputeBackendBucket(), - "google_compute_backend_bucket_signed_url_key": resourceComputeBackendBucketSignedUrlKey(), - "google_compute_backend_service": resourceComputeBackendService(), - "google_compute_region_backend_service": resourceComputeRegionBackendService(), - "google_compute_backend_service_signed_url_key": resourceComputeBackendServiceSignedUrlKey(), - "google_compute_region_disk_resource_policy_attachment": resourceComputeRegionDiskResourcePolicyAttachment(), - "google_compute_disk_resource_policy_attachment": resourceComputeDiskResourcePolicyAttachment(), - "google_compute_disk": resourceComputeDisk(), - "google_compute_disk_iam_binding": ResourceIamBinding(ComputeDiskIamSchema, ComputeDiskIamUpdaterProducer, ComputeDiskIdParseFunc), - "google_compute_disk_iam_member": ResourceIamMember(ComputeDiskIamSchema, ComputeDiskIamUpdaterProducer, ComputeDiskIdParseFunc), - "google_compute_disk_iam_policy": ResourceIamPolicy(ComputeDiskIamSchema, ComputeDiskIamUpdaterProducer, ComputeDiskIdParseFunc), - "google_compute_firewall": resourceComputeFirewall(), - "google_compute_forwarding_rule": resourceComputeForwardingRule(), - "google_compute_global_address": resourceComputeGlobalAddress(), - "google_compute_global_forwarding_rule": resourceComputeGlobalForwardingRule(), - "google_compute_http_health_check": resourceComputeHttpHealthCheck(), - "google_compute_https_health_check": resourceComputeHttpsHealthCheck(), - "google_compute_health_check": resourceComputeHealthCheck(), - "google_compute_image": resourceComputeImage(), - "google_compute_image_iam_binding": ResourceIamBinding(ComputeImageIamSchema, ComputeImageIamUpdaterProducer, ComputeImageIdParseFunc), - "google_compute_image_iam_member": 
ResourceIamMember(ComputeImageIamSchema, ComputeImageIamUpdaterProducer, ComputeImageIdParseFunc), - "google_compute_image_iam_policy": ResourceIamPolicy(ComputeImageIamSchema, ComputeImageIamUpdaterProducer, ComputeImageIdParseFunc), - "google_compute_instance_iam_binding": ResourceIamBinding(ComputeInstanceIamSchema, ComputeInstanceIamUpdaterProducer, ComputeInstanceIdParseFunc), - "google_compute_instance_iam_member": ResourceIamMember(ComputeInstanceIamSchema, ComputeInstanceIamUpdaterProducer, ComputeInstanceIdParseFunc), - "google_compute_instance_iam_policy": ResourceIamPolicy(ComputeInstanceIamSchema, ComputeInstanceIamUpdaterProducer, ComputeInstanceIdParseFunc), - "google_compute_instance_group_named_port": resourceComputeInstanceGroupNamedPort(), - "google_compute_interconnect_attachment": resourceComputeInterconnectAttachment(), - "google_compute_network": resourceComputeNetwork(), - "google_compute_network_endpoint": resourceComputeNetworkEndpoint(), - "google_compute_network_endpoint_group": resourceComputeNetworkEndpointGroup(), - "google_compute_global_network_endpoint": resourceComputeGlobalNetworkEndpoint(), - "google_compute_global_network_endpoint_group": resourceComputeGlobalNetworkEndpointGroup(), - "google_compute_region_network_endpoint_group": resourceComputeRegionNetworkEndpointGroup(), - "google_compute_node_group": resourceComputeNodeGroup(), - "google_compute_network_peering_routes_config": resourceComputeNetworkPeeringRoutesConfig(), - "google_compute_node_template": resourceComputeNodeTemplate(), - "google_compute_packet_mirroring": resourceComputePacketMirroring(), - "google_compute_per_instance_config": resourceComputePerInstanceConfig(), - "google_compute_region_per_instance_config": resourceComputeRegionPerInstanceConfig(), - "google_compute_region_autoscaler": resourceComputeRegionAutoscaler(), - "google_compute_region_disk": resourceComputeRegionDisk(), - "google_compute_region_disk_iam_binding": 
ResourceIamBinding(ComputeRegionDiskIamSchema, ComputeRegionDiskIamUpdaterProducer, ComputeRegionDiskIdParseFunc), - "google_compute_region_disk_iam_member": ResourceIamMember(ComputeRegionDiskIamSchema, ComputeRegionDiskIamUpdaterProducer, ComputeRegionDiskIdParseFunc), - "google_compute_region_disk_iam_policy": ResourceIamPolicy(ComputeRegionDiskIamSchema, ComputeRegionDiskIamUpdaterProducer, ComputeRegionDiskIdParseFunc), - "google_compute_region_url_map": resourceComputeRegionUrlMap(), - "google_compute_region_health_check": resourceComputeRegionHealthCheck(), - "google_compute_resource_policy": resourceComputeResourcePolicy(), - "google_compute_route": resourceComputeRoute(), - "google_compute_router": resourceComputeRouter(), - "google_compute_router_nat": resourceComputeRouterNat(), - "google_compute_router_peer": resourceComputeRouterBgpPeer(), - "google_compute_snapshot": resourceComputeSnapshot(), - "google_compute_ssl_certificate": resourceComputeSslCertificate(), - "google_compute_managed_ssl_certificate": resourceComputeManagedSslCertificate(), - "google_compute_region_ssl_certificate": resourceComputeRegionSslCertificate(), - "google_compute_reservation": resourceComputeReservation(), - "google_compute_service_attachment": resourceComputeServiceAttachment(), - "google_compute_ssl_policy": resourceComputeSslPolicy(), - "google_compute_subnetwork": resourceComputeSubnetwork(), - "google_compute_subnetwork_iam_binding": ResourceIamBinding(ComputeSubnetworkIamSchema, ComputeSubnetworkIamUpdaterProducer, ComputeSubnetworkIdParseFunc), - "google_compute_subnetwork_iam_member": ResourceIamMember(ComputeSubnetworkIamSchema, ComputeSubnetworkIamUpdaterProducer, ComputeSubnetworkIdParseFunc), - "google_compute_subnetwork_iam_policy": ResourceIamPolicy(ComputeSubnetworkIamSchema, ComputeSubnetworkIamUpdaterProducer, ComputeSubnetworkIdParseFunc), - "google_compute_target_http_proxy": resourceComputeTargetHttpProxy(), - "google_compute_target_https_proxy": 
resourceComputeTargetHttpsProxy(), - "google_compute_region_target_http_proxy": resourceComputeRegionTargetHttpProxy(), - "google_compute_region_target_https_proxy": resourceComputeRegionTargetHttpsProxy(), - "google_compute_target_instance": resourceComputeTargetInstance(), - "google_compute_target_ssl_proxy": resourceComputeTargetSslProxy(), - "google_compute_target_tcp_proxy": resourceComputeTargetTcpProxy(), - "google_compute_vpn_gateway": resourceComputeVpnGateway(), - "google_compute_ha_vpn_gateway": resourceComputeHaVpnGateway(), - "google_compute_external_vpn_gateway": resourceComputeExternalVpnGateway(), - "google_compute_url_map": resourceComputeUrlMap(), - "google_compute_vpn_tunnel": resourceComputeVpnTunnel(), - "google_compute_target_grpc_proxy": resourceComputeTargetGrpcProxy(), - "google_container_analysis_note": resourceContainerAnalysisNote(), - "google_container_analysis_occurrence": resourceContainerAnalysisOccurrence(), - "google_data_catalog_entry_group": resourceDataCatalogEntryGroup(), - "google_data_catalog_entry_group_iam_binding": ResourceIamBinding(DataCatalogEntryGroupIamSchema, DataCatalogEntryGroupIamUpdaterProducer, DataCatalogEntryGroupIdParseFunc), - "google_data_catalog_entry_group_iam_member": ResourceIamMember(DataCatalogEntryGroupIamSchema, DataCatalogEntryGroupIamUpdaterProducer, DataCatalogEntryGroupIdParseFunc), - "google_data_catalog_entry_group_iam_policy": ResourceIamPolicy(DataCatalogEntryGroupIamSchema, DataCatalogEntryGroupIamUpdaterProducer, DataCatalogEntryGroupIdParseFunc), - "google_data_catalog_entry": resourceDataCatalogEntry(), - "google_data_catalog_tag_template": resourceDataCatalogTagTemplate(), - "google_data_catalog_tag_template_iam_binding": ResourceIamBinding(DataCatalogTagTemplateIamSchema, DataCatalogTagTemplateIamUpdaterProducer, DataCatalogTagTemplateIdParseFunc), - "google_data_catalog_tag_template_iam_member": ResourceIamMember(DataCatalogTagTemplateIamSchema, 
DataCatalogTagTemplateIamUpdaterProducer, DataCatalogTagTemplateIdParseFunc), - "google_data_catalog_tag_template_iam_policy": ResourceIamPolicy(DataCatalogTagTemplateIamSchema, DataCatalogTagTemplateIamUpdaterProducer, DataCatalogTagTemplateIdParseFunc), - "google_data_catalog_tag": resourceDataCatalogTag(), - "google_data_loss_prevention_job_trigger": resourceDataLossPreventionJobTrigger(), - "google_data_loss_prevention_inspect_template": resourceDataLossPreventionInspectTemplate(), - "google_data_loss_prevention_stored_info_type": resourceDataLossPreventionStoredInfoType(), - "google_data_loss_prevention_deidentify_template": resourceDataLossPreventionDeidentifyTemplate(), - "google_dataproc_autoscaling_policy": resourceDataprocAutoscalingPolicy(), - "google_datastore_index": resourceDatastoreIndex(), - "google_deployment_manager_deployment": resourceDeploymentManagerDeployment(), - "google_dialogflow_agent": resourceDialogflowAgent(), - "google_dialogflow_intent": resourceDialogflowIntent(), - "google_dialogflow_entity_type": resourceDialogflowEntityType(), - "google_dialogflow_fulfillment": resourceDialogflowFulfillment(), - "google_dialogflow_cx_agent": resourceDialogflowCXAgent(), - "google_dialogflow_cx_intent": resourceDialogflowCXIntent(), - "google_dialogflow_cx_flow": resourceDialogflowCXFlow(), - "google_dialogflow_cx_version": resourceDialogflowCXVersion(), - "google_dialogflow_cx_page": resourceDialogflowCXPage(), - "google_dialogflow_cx_entity_type": resourceDialogflowCXEntityType(), - "google_dialogflow_cx_environment": resourceDialogflowCXEnvironment(), - "google_dns_managed_zone": resourceDNSManagedZone(), - "google_dns_policy": resourceDNSPolicy(), - "google_essential_contacts_contact": resourceEssentialContactsContact(), - "google_filestore_instance": resourceFilestoreInstance(), - "google_firestore_index": resourceFirestoreIndex(), - "google_firestore_document": resourceFirestoreDocument(), - "google_game_services_realm": 
resourceGameServicesRealm(), - "google_game_services_game_server_cluster": resourceGameServicesGameServerCluster(), - "google_game_services_game_server_deployment": resourceGameServicesGameServerDeployment(), - "google_game_services_game_server_config": resourceGameServicesGameServerConfig(), - "google_game_services_game_server_deployment_rollout": resourceGameServicesGameServerDeploymentRollout(), - "google_gke_hub_membership": resourceGKEHubMembership(), - "google_healthcare_dataset": resourceHealthcareDataset(), - "google_healthcare_dicom_store": resourceHealthcareDicomStore(), - "google_healthcare_fhir_store": resourceHealthcareFhirStore(), - "google_healthcare_hl7_v2_store": resourceHealthcareHl7V2Store(), - "google_healthcare_consent_store": resourceHealthcareConsentStore(), - "google_healthcare_consent_store_iam_binding": ResourceIamBinding(HealthcareConsentStoreIamSchema, HealthcareConsentStoreIamUpdaterProducer, HealthcareConsentStoreIdParseFunc), - "google_healthcare_consent_store_iam_member": ResourceIamMember(HealthcareConsentStoreIamSchema, HealthcareConsentStoreIamUpdaterProducer, HealthcareConsentStoreIdParseFunc), - "google_healthcare_consent_store_iam_policy": ResourceIamPolicy(HealthcareConsentStoreIamSchema, HealthcareConsentStoreIamUpdaterProducer, HealthcareConsentStoreIdParseFunc), - "google_iap_web_iam_binding": ResourceIamBinding(IapWebIamSchema, IapWebIamUpdaterProducer, IapWebIdParseFunc), - "google_iap_web_iam_member": ResourceIamMember(IapWebIamSchema, IapWebIamUpdaterProducer, IapWebIdParseFunc), - "google_iap_web_iam_policy": ResourceIamPolicy(IapWebIamSchema, IapWebIamUpdaterProducer, IapWebIdParseFunc), - "google_iap_web_type_compute_iam_binding": ResourceIamBinding(IapWebTypeComputeIamSchema, IapWebTypeComputeIamUpdaterProducer, IapWebTypeComputeIdParseFunc), - "google_iap_web_type_compute_iam_member": ResourceIamMember(IapWebTypeComputeIamSchema, IapWebTypeComputeIamUpdaterProducer, IapWebTypeComputeIdParseFunc), - 
"google_iap_web_type_compute_iam_policy": ResourceIamPolicy(IapWebTypeComputeIamSchema, IapWebTypeComputeIamUpdaterProducer, IapWebTypeComputeIdParseFunc), - "google_iap_web_type_app_engine_iam_binding": ResourceIamBinding(IapWebTypeAppEngineIamSchema, IapWebTypeAppEngineIamUpdaterProducer, IapWebTypeAppEngineIdParseFunc), - "google_iap_web_type_app_engine_iam_member": ResourceIamMember(IapWebTypeAppEngineIamSchema, IapWebTypeAppEngineIamUpdaterProducer, IapWebTypeAppEngineIdParseFunc), - "google_iap_web_type_app_engine_iam_policy": ResourceIamPolicy(IapWebTypeAppEngineIamSchema, IapWebTypeAppEngineIamUpdaterProducer, IapWebTypeAppEngineIdParseFunc), - "google_iap_app_engine_version_iam_binding": ResourceIamBinding(IapAppEngineVersionIamSchema, IapAppEngineVersionIamUpdaterProducer, IapAppEngineVersionIdParseFunc), - "google_iap_app_engine_version_iam_member": ResourceIamMember(IapAppEngineVersionIamSchema, IapAppEngineVersionIamUpdaterProducer, IapAppEngineVersionIdParseFunc), - "google_iap_app_engine_version_iam_policy": ResourceIamPolicy(IapAppEngineVersionIamSchema, IapAppEngineVersionIamUpdaterProducer, IapAppEngineVersionIdParseFunc), - "google_iap_app_engine_service_iam_binding": ResourceIamBinding(IapAppEngineServiceIamSchema, IapAppEngineServiceIamUpdaterProducer, IapAppEngineServiceIdParseFunc), - "google_iap_app_engine_service_iam_member": ResourceIamMember(IapAppEngineServiceIamSchema, IapAppEngineServiceIamUpdaterProducer, IapAppEngineServiceIdParseFunc), - "google_iap_app_engine_service_iam_policy": ResourceIamPolicy(IapAppEngineServiceIamSchema, IapAppEngineServiceIamUpdaterProducer, IapAppEngineServiceIdParseFunc), - "google_iap_web_backend_service_iam_binding": ResourceIamBinding(IapWebBackendServiceIamSchema, IapWebBackendServiceIamUpdaterProducer, IapWebBackendServiceIdParseFunc), - "google_iap_web_backend_service_iam_member": ResourceIamMember(IapWebBackendServiceIamSchema, IapWebBackendServiceIamUpdaterProducer, 
IapWebBackendServiceIdParseFunc), - "google_iap_web_backend_service_iam_policy": ResourceIamPolicy(IapWebBackendServiceIamSchema, IapWebBackendServiceIamUpdaterProducer, IapWebBackendServiceIdParseFunc), - "google_iap_tunnel_instance_iam_binding": ResourceIamBinding(IapTunnelInstanceIamSchema, IapTunnelInstanceIamUpdaterProducer, IapTunnelInstanceIdParseFunc), - "google_iap_tunnel_instance_iam_member": ResourceIamMember(IapTunnelInstanceIamSchema, IapTunnelInstanceIamUpdaterProducer, IapTunnelInstanceIdParseFunc), - "google_iap_tunnel_instance_iam_policy": ResourceIamPolicy(IapTunnelInstanceIamSchema, IapTunnelInstanceIamUpdaterProducer, IapTunnelInstanceIdParseFunc), - "google_iap_tunnel_iam_binding": ResourceIamBinding(IapTunnelIamSchema, IapTunnelIamUpdaterProducer, IapTunnelIdParseFunc), - "google_iap_tunnel_iam_member": ResourceIamMember(IapTunnelIamSchema, IapTunnelIamUpdaterProducer, IapTunnelIdParseFunc), - "google_iap_tunnel_iam_policy": ResourceIamPolicy(IapTunnelIamSchema, IapTunnelIamUpdaterProducer, IapTunnelIdParseFunc), - "google_iap_brand": resourceIapBrand(), - "google_iap_client": resourceIapClient(), - "google_identity_platform_default_supported_idp_config": resourceIdentityPlatformDefaultSupportedIdpConfig(), - "google_identity_platform_tenant_default_supported_idp_config": resourceIdentityPlatformTenantDefaultSupportedIdpConfig(), - "google_identity_platform_inbound_saml_config": resourceIdentityPlatformInboundSamlConfig(), - "google_identity_platform_tenant_inbound_saml_config": resourceIdentityPlatformTenantInboundSamlConfig(), - "google_identity_platform_oauth_idp_config": resourceIdentityPlatformOauthIdpConfig(), - "google_identity_platform_tenant_oauth_idp_config": resourceIdentityPlatformTenantOauthIdpConfig(), - "google_identity_platform_tenant": resourceIdentityPlatformTenant(), - "google_kms_key_ring": resourceKMSKeyRing(), - "google_kms_crypto_key": resourceKMSCryptoKey(), - "google_kms_key_ring_import_job": 
resourceKMSKeyRingImportJob(), - "google_kms_secret_ciphertext": resourceKMSSecretCiphertext(), - "google_logging_metric": resourceLoggingMetric(), - "google_memcache_instance": resourceMemcacheInstance(), - "google_ml_engine_model": resourceMLEngineModel(), - "google_monitoring_alert_policy": resourceMonitoringAlertPolicy(), - "google_monitoring_group": resourceMonitoringGroup(), - "google_monitoring_notification_channel": resourceMonitoringNotificationChannel(), - "google_monitoring_custom_service": resourceMonitoringService(), - "google_monitoring_slo": resourceMonitoringSlo(), - "google_monitoring_uptime_check_config": resourceMonitoringUptimeCheckConfig(), - "google_monitoring_metric_descriptor": resourceMonitoringMetricDescriptor(), - "google_network_management_connectivity_test": resourceNetworkManagementConnectivityTest(), - "google_network_services_edge_cache_keyset": resourceNetworkServicesEdgeCacheKeyset(), - "google_network_services_edge_cache_origin": resourceNetworkServicesEdgeCacheOrigin(), - "google_network_services_edge_cache_service": resourceNetworkServicesEdgeCacheService(), - "google_notebooks_environment": resourceNotebooksEnvironment(), - "google_notebooks_instance": resourceNotebooksInstance(), - "google_notebooks_instance_iam_binding": ResourceIamBinding(NotebooksInstanceIamSchema, NotebooksInstanceIamUpdaterProducer, NotebooksInstanceIdParseFunc), - "google_notebooks_instance_iam_member": ResourceIamMember(NotebooksInstanceIamSchema, NotebooksInstanceIamUpdaterProducer, NotebooksInstanceIdParseFunc), - "google_notebooks_instance_iam_policy": ResourceIamPolicy(NotebooksInstanceIamSchema, NotebooksInstanceIamUpdaterProducer, NotebooksInstanceIdParseFunc), - "google_notebooks_location": resourceNotebooksLocation(), - "google_os_config_patch_deployment": resourceOSConfigPatchDeployment(), - "google_os_login_ssh_public_key": resourceOSLoginSSHPublicKey(), - "google_privateca_certificate_authority": resourcePrivatecaCertificateAuthority(), - 
"google_privateca_certificate": resourcePrivatecaCertificate(), - "google_privateca_ca_pool": resourcePrivatecaCaPool(), - "google_privateca_ca_pool_iam_binding": ResourceIamBinding(PrivatecaCaPoolIamSchema, PrivatecaCaPoolIamUpdaterProducer, PrivatecaCaPoolIdParseFunc), - "google_privateca_ca_pool_iam_member": ResourceIamMember(PrivatecaCaPoolIamSchema, PrivatecaCaPoolIamUpdaterProducer, PrivatecaCaPoolIdParseFunc), - "google_privateca_ca_pool_iam_policy": ResourceIamPolicy(PrivatecaCaPoolIamSchema, PrivatecaCaPoolIamUpdaterProducer, PrivatecaCaPoolIdParseFunc), - "google_pubsub_topic": resourcePubsubTopic(), - "google_pubsub_topic_iam_binding": ResourceIamBinding(PubsubTopicIamSchema, PubsubTopicIamUpdaterProducer, PubsubTopicIdParseFunc), - "google_pubsub_topic_iam_member": ResourceIamMember(PubsubTopicIamSchema, PubsubTopicIamUpdaterProducer, PubsubTopicIdParseFunc), - "google_pubsub_topic_iam_policy": ResourceIamPolicy(PubsubTopicIamSchema, PubsubTopicIamUpdaterProducer, PubsubTopicIdParseFunc), - "google_pubsub_subscription": resourcePubsubSubscription(), - "google_pubsub_schema": resourcePubsubSchema(), - "google_pubsub_lite_reservation": resourcePubsubLiteReservation(), - "google_pubsub_lite_topic": resourcePubsubLiteTopic(), - "google_pubsub_lite_subscription": resourcePubsubLiteSubscription(), - "google_redis_instance": resourceRedisInstance(), - "google_resource_manager_lien": resourceResourceManagerLien(), - "google_secret_manager_secret": resourceSecretManagerSecret(), - "google_secret_manager_secret_iam_binding": ResourceIamBinding(SecretManagerSecretIamSchema, SecretManagerSecretIamUpdaterProducer, SecretManagerSecretIdParseFunc), - "google_secret_manager_secret_iam_member": ResourceIamMember(SecretManagerSecretIamSchema, SecretManagerSecretIamUpdaterProducer, SecretManagerSecretIdParseFunc), - "google_secret_manager_secret_iam_policy": ResourceIamPolicy(SecretManagerSecretIamSchema, SecretManagerSecretIamUpdaterProducer, 
SecretManagerSecretIdParseFunc), - "google_secret_manager_secret_version": resourceSecretManagerSecretVersion(), - "google_scc_source": resourceSecurityCenterSource(), - "google_scc_notification_config": resourceSecurityCenterNotificationConfig(), - "google_endpoints_service_iam_binding": ResourceIamBinding(ServiceManagementServiceIamSchema, ServiceManagementServiceIamUpdaterProducer, ServiceManagementServiceIdParseFunc), - "google_endpoints_service_iam_member": ResourceIamMember(ServiceManagementServiceIamSchema, ServiceManagementServiceIamUpdaterProducer, ServiceManagementServiceIdParseFunc), - "google_endpoints_service_iam_policy": ResourceIamPolicy(ServiceManagementServiceIamSchema, ServiceManagementServiceIamUpdaterProducer, ServiceManagementServiceIdParseFunc), - "google_sourcerepo_repository": resourceSourceRepoRepository(), - "google_sourcerepo_repository_iam_binding": ResourceIamBinding(SourceRepoRepositoryIamSchema, SourceRepoRepositoryIamUpdaterProducer, SourceRepoRepositoryIdParseFunc), - "google_sourcerepo_repository_iam_member": ResourceIamMember(SourceRepoRepositoryIamSchema, SourceRepoRepositoryIamUpdaterProducer, SourceRepoRepositoryIdParseFunc), - "google_sourcerepo_repository_iam_policy": ResourceIamPolicy(SourceRepoRepositoryIamSchema, SourceRepoRepositoryIamUpdaterProducer, SourceRepoRepositoryIdParseFunc), - "google_spanner_instance": resourceSpannerInstance(), - "google_spanner_database": resourceSpannerDatabase(), - "google_sql_database": resourceSQLDatabase(), - "google_sql_source_representation_instance": resourceSQLSourceRepresentationInstance(), - "google_storage_bucket_iam_binding": ResourceIamBinding(StorageBucketIamSchema, StorageBucketIamUpdaterProducer, StorageBucketIdParseFunc), - "google_storage_bucket_iam_member": ResourceIamMember(StorageBucketIamSchema, StorageBucketIamUpdaterProducer, StorageBucketIdParseFunc), - "google_storage_bucket_iam_policy": ResourceIamPolicy(StorageBucketIamSchema, StorageBucketIamUpdaterProducer, 
StorageBucketIdParseFunc), - "google_storage_bucket_access_control": resourceStorageBucketAccessControl(), - "google_storage_object_access_control": resourceStorageObjectAccessControl(), - "google_storage_default_object_access_control": resourceStorageDefaultObjectAccessControl(), - "google_storage_hmac_key": resourceStorageHmacKey(), - "google_tags_tag_key": resourceTagsTagKey(), - "google_tags_tag_key_iam_binding": ResourceIamBinding(TagsTagKeyIamSchema, TagsTagKeyIamUpdaterProducer, TagsTagKeyIdParseFunc), - "google_tags_tag_key_iam_member": ResourceIamMember(TagsTagKeyIamSchema, TagsTagKeyIamUpdaterProducer, TagsTagKeyIdParseFunc), - "google_tags_tag_key_iam_policy": ResourceIamPolicy(TagsTagKeyIamSchema, TagsTagKeyIamUpdaterProducer, TagsTagKeyIdParseFunc), - "google_tags_tag_value": resourceTagsTagValue(), - "google_tags_tag_value_iam_binding": ResourceIamBinding(TagsTagValueIamSchema, TagsTagValueIamUpdaterProducer, TagsTagValueIdParseFunc), - "google_tags_tag_value_iam_member": ResourceIamMember(TagsTagValueIamSchema, TagsTagValueIamUpdaterProducer, TagsTagValueIdParseFunc), - "google_tags_tag_value_iam_policy": ResourceIamPolicy(TagsTagValueIamSchema, TagsTagValueIamUpdaterProducer, TagsTagValueIdParseFunc), - "google_tags_tag_binding": resourceTagsTagBinding(), - "google_tpu_node": resourceTPUNode(), - "google_vertex_ai_dataset": resourceVertexAIDataset(), - "google_vpc_access_connector": resourceVPCAccessConnector(), - "google_workflows_workflow": resourceWorkflowsWorkflow(), - }, - map[string]*provider_schema.Resource{ - "google_app_engine_application": resourceAppEngineApplication(), - "google_bigquery_table": resourceBigQueryTable(), - "google_bigtable_gc_policy": resourceBigtableGCPolicy(), - "google_bigtable_instance": resourceBigtableInstance(), - "google_bigtable_table": resourceBigtableTable(), - "google_billing_subaccount": resourceBillingSubaccount(), - "google_cloudfunctions_function": resourceCloudFunctionsFunction(), - 
"google_composer_environment": resourceComposerEnvironment(), - "google_compute_attached_disk": resourceComputeAttachedDisk(), - "google_compute_instance": resourceComputeInstance(), - "google_compute_instance_from_template": resourceComputeInstanceFromTemplate(), - "google_compute_instance_group": resourceComputeInstanceGroup(), - "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(), - "google_compute_instance_template": resourceComputeInstanceTemplate(), - "google_compute_network_peering": resourceComputeNetworkPeering(), - "google_compute_project_default_network_tier": resourceComputeProjectDefaultNetworkTier(), - "google_compute_project_metadata": resourceComputeProjectMetadata(), - "google_compute_project_metadata_item": resourceComputeProjectMetadataItem(), - "google_compute_region_instance_group_manager": resourceComputeRegionInstanceGroupManager(), - "google_compute_router_interface": resourceComputeRouterInterface(), - "google_compute_security_policy": resourceComputeSecurityPolicy(), - "google_compute_shared_vpc_host_project": resourceComputeSharedVpcHostProject(), - "google_compute_shared_vpc_service_project": resourceComputeSharedVpcServiceProject(), - "google_compute_target_pool": resourceComputeTargetPool(), - "google_container_cluster": resourceContainerCluster(), - "google_container_node_pool": resourceContainerNodePool(), - "google_container_registry": resourceContainerRegistry(), - "google_dataflow_job": resourceDataflowJob(), - "google_dataproc_cluster": resourceDataprocCluster(), - "google_dataproc_job": resourceDataprocJob(), - "google_dns_record_set": resourceDnsRecordSet(), - "google_endpoints_service": resourceEndpointsService(), - "google_folder": resourceGoogleFolder(), - "google_folder_organization_policy": resourceGoogleFolderOrganizationPolicy(), - "google_logging_billing_account_sink": resourceLoggingBillingAccountSink(), - "google_logging_billing_account_exclusion": 
ResourceLoggingExclusion(BillingAccountLoggingExclusionSchema, NewBillingAccountLoggingExclusionUpdater, billingAccountLoggingExclusionIdParseFunc), - "google_logging_billing_account_bucket_config": ResourceLoggingBillingAccountBucketConfig(), - "google_logging_organization_sink": resourceLoggingOrganizationSink(), - "google_logging_organization_exclusion": ResourceLoggingExclusion(OrganizationLoggingExclusionSchema, NewOrganizationLoggingExclusionUpdater, organizationLoggingExclusionIdParseFunc), - "google_logging_organization_bucket_config": ResourceLoggingOrganizationBucketConfig(), - "google_logging_folder_sink": resourceLoggingFolderSink(), - "google_logging_folder_exclusion": ResourceLoggingExclusion(FolderLoggingExclusionSchema, NewFolderLoggingExclusionUpdater, folderLoggingExclusionIdParseFunc), - "google_logging_folder_bucket_config": ResourceLoggingFolderBucketConfig(), - "google_logging_project_sink": resourceLoggingProjectSink(), - "google_logging_project_exclusion": ResourceLoggingExclusion(ProjectLoggingExclusionSchema, NewProjectLoggingExclusionUpdater, projectLoggingExclusionIdParseFunc), - "google_logging_project_bucket_config": ResourceLoggingProjectBucketConfig(), - "google_monitoring_dashboard": resourceMonitoringDashboard(), - "google_service_networking_connection": resourceServiceNetworkingConnection(), - "google_sql_database_instance": resourceSqlDatabaseInstance(), - "google_sql_ssl_cert": resourceSqlSslCert(), - "google_sql_user": resourceSqlUser(), - "google_organization_iam_custom_role": resourceGoogleOrganizationIamCustomRole(), - "google_organization_policy": resourceGoogleOrganizationPolicy(), - "google_project": resourceGoogleProject(), - "google_project_default_service_accounts": resourceGoogleProjectDefaultServiceAccounts(), - "google_project_service": resourceGoogleProjectService(), - "google_project_iam_custom_role": resourceGoogleProjectIamCustomRole(), - "google_project_organization_policy": 
resourceGoogleProjectOrganizationPolicy(), - "google_project_usage_export_bucket": resourceProjectUsageBucket(), - "google_service_account": resourceGoogleServiceAccount(), - "google_service_account_key": resourceGoogleServiceAccountKey(), - "google_service_networking_peered_dns_domain": resourceGoogleServiceNetworkingPeeredDNSDomain(), - "google_storage_bucket": resourceStorageBucket(), - "google_storage_bucket_acl": resourceStorageBucketAcl(), - "google_storage_bucket_object": resourceStorageBucketObject(), - "google_storage_object_acl": resourceStorageObjectAcl(), - "google_storage_default_object_acl": resourceStorageDefaultObjectAcl(), - "google_storage_notification": resourceStorageNotification(), - "google_storage_transfer_job": resourceStorageTransferJob(), - }, - - map[string]*provider_schema.Resource{ - "google_assured_workloads_workload": resourceAssuredWorkloadsWorkload(), - "google_compute_firewall_policy_association": resourceComputeFirewallPolicyAssociation(), - "google_compute_firewall_policy": resourceComputeFirewallPolicy(), - "google_compute_firewall_policy_rule": resourceComputeFirewallPolicyRule(), - "google_dataproc_workflow_template": resourceDataprocWorkflowTemplate(), - "google_eventarc_trigger": resourceEventarcTrigger(), - "google_org_policy_policy": resourceOrgPolicyPolicy(), - "google_privateca_certificate_template": resourcePrivatecaCertificateTemplate(), - }, - - map[string]*provider_schema.Resource{ - "google_bigtable_instance_iam_binding": ResourceIamBinding(IamBigtableInstanceSchema, NewBigtableInstanceUpdater, BigtableInstanceIdParseFunc), - "google_bigtable_instance_iam_member": ResourceIamMember(IamBigtableInstanceSchema, NewBigtableInstanceUpdater, BigtableInstanceIdParseFunc), - "google_bigtable_instance_iam_policy": ResourceIamPolicy(IamBigtableInstanceSchema, NewBigtableInstanceUpdater, BigtableInstanceIdParseFunc), - "google_bigtable_table_iam_binding": ResourceIamBinding(IamBigtableTableSchema, NewBigtableTableUpdater, 
BigtableTableIdParseFunc), - "google_bigtable_table_iam_member": ResourceIamMember(IamBigtableTableSchema, NewBigtableTableUpdater, BigtableTableIdParseFunc), - "google_bigtable_table_iam_policy": ResourceIamPolicy(IamBigtableTableSchema, NewBigtableTableUpdater, BigtableTableIdParseFunc), - "google_bigquery_dataset_iam_binding": ResourceIamBinding(IamBigqueryDatasetSchema, NewBigqueryDatasetIamUpdater, BigqueryDatasetIdParseFunc), - "google_bigquery_dataset_iam_member": ResourceIamMember(IamBigqueryDatasetSchema, NewBigqueryDatasetIamUpdater, BigqueryDatasetIdParseFunc), - "google_bigquery_dataset_iam_policy": ResourceIamPolicy(IamBigqueryDatasetSchema, NewBigqueryDatasetIamUpdater, BigqueryDatasetIdParseFunc), - "google_billing_account_iam_binding": ResourceIamBinding(IamBillingAccountSchema, NewBillingAccountIamUpdater, BillingAccountIdParseFunc), - "google_billing_account_iam_member": ResourceIamMember(IamBillingAccountSchema, NewBillingAccountIamUpdater, BillingAccountIdParseFunc), - "google_billing_account_iam_policy": ResourceIamPolicy(IamBillingAccountSchema, NewBillingAccountIamUpdater, BillingAccountIdParseFunc), - "google_dataproc_cluster_iam_binding": ResourceIamBinding(IamDataprocClusterSchema, NewDataprocClusterUpdater, DataprocClusterIdParseFunc), - "google_dataproc_cluster_iam_member": ResourceIamMember(IamDataprocClusterSchema, NewDataprocClusterUpdater, DataprocClusterIdParseFunc), - "google_dataproc_cluster_iam_policy": ResourceIamPolicy(IamDataprocClusterSchema, NewDataprocClusterUpdater, DataprocClusterIdParseFunc), - "google_dataproc_job_iam_binding": ResourceIamBinding(IamDataprocJobSchema, NewDataprocJobUpdater, DataprocJobIdParseFunc), - "google_dataproc_job_iam_member": ResourceIamMember(IamDataprocJobSchema, NewDataprocJobUpdater, DataprocJobIdParseFunc), - "google_dataproc_job_iam_policy": ResourceIamPolicy(IamDataprocJobSchema, NewDataprocJobUpdater, DataprocJobIdParseFunc), - "google_folder_iam_binding": 
ResourceIamBinding(IamFolderSchema, NewFolderIamUpdater, FolderIdParseFunc), - "google_folder_iam_member": ResourceIamMember(IamFolderSchema, NewFolderIamUpdater, FolderIdParseFunc), - "google_folder_iam_policy": ResourceIamPolicy(IamFolderSchema, NewFolderIamUpdater, FolderIdParseFunc), - "google_folder_iam_audit_config": ResourceIamAuditConfig(IamFolderSchema, NewFolderIamUpdater, FolderIdParseFunc), - "google_healthcare_dataset_iam_binding": ResourceIamBindingWithBatching(IamHealthcareDatasetSchema, NewHealthcareDatasetIamUpdater, DatasetIdParseFunc, IamBatchingEnabled), - "google_healthcare_dataset_iam_member": ResourceIamMemberWithBatching(IamHealthcareDatasetSchema, NewHealthcareDatasetIamUpdater, DatasetIdParseFunc, IamBatchingEnabled), - "google_healthcare_dataset_iam_policy": ResourceIamPolicy(IamHealthcareDatasetSchema, NewHealthcareDatasetIamUpdater, DatasetIdParseFunc), - "google_healthcare_dicom_store_iam_binding": ResourceIamBindingWithBatching(IamHealthcareDicomStoreSchema, NewHealthcareDicomStoreIamUpdater, DicomStoreIdParseFunc, IamBatchingEnabled), - "google_healthcare_dicom_store_iam_member": ResourceIamMemberWithBatching(IamHealthcareDicomStoreSchema, NewHealthcareDicomStoreIamUpdater, DicomStoreIdParseFunc, IamBatchingEnabled), - "google_healthcare_dicom_store_iam_policy": ResourceIamPolicy(IamHealthcareDicomStoreSchema, NewHealthcareDicomStoreIamUpdater, DicomStoreIdParseFunc), - "google_healthcare_fhir_store_iam_binding": ResourceIamBindingWithBatching(IamHealthcareFhirStoreSchema, NewHealthcareFhirStoreIamUpdater, FhirStoreIdParseFunc, IamBatchingEnabled), - "google_healthcare_fhir_store_iam_member": ResourceIamMemberWithBatching(IamHealthcareFhirStoreSchema, NewHealthcareFhirStoreIamUpdater, FhirStoreIdParseFunc, IamBatchingEnabled), - "google_healthcare_fhir_store_iam_policy": ResourceIamPolicy(IamHealthcareFhirStoreSchema, NewHealthcareFhirStoreIamUpdater, FhirStoreIdParseFunc), - "google_healthcare_hl7_v2_store_iam_binding": 
ResourceIamBindingWithBatching(IamHealthcareHl7V2StoreSchema, NewHealthcareHl7V2StoreIamUpdater, Hl7V2StoreIdParseFunc, IamBatchingEnabled), - "google_healthcare_hl7_v2_store_iam_member": ResourceIamMemberWithBatching(IamHealthcareHl7V2StoreSchema, NewHealthcareHl7V2StoreIamUpdater, Hl7V2StoreIdParseFunc, IamBatchingEnabled), - "google_healthcare_hl7_v2_store_iam_policy": ResourceIamPolicy(IamHealthcareHl7V2StoreSchema, NewHealthcareHl7V2StoreIamUpdater, Hl7V2StoreIdParseFunc), - "google_kms_key_ring_iam_binding": ResourceIamBinding(IamKmsKeyRingSchema, NewKmsKeyRingIamUpdater, KeyRingIdParseFunc), - "google_kms_key_ring_iam_member": ResourceIamMember(IamKmsKeyRingSchema, NewKmsKeyRingIamUpdater, KeyRingIdParseFunc), - "google_kms_key_ring_iam_policy": ResourceIamPolicy(IamKmsKeyRingSchema, NewKmsKeyRingIamUpdater, KeyRingIdParseFunc), - "google_kms_crypto_key_iam_binding": ResourceIamBinding(IamKmsCryptoKeySchema, NewKmsCryptoKeyIamUpdater, CryptoIdParseFunc), - "google_kms_crypto_key_iam_member": ResourceIamMember(IamKmsCryptoKeySchema, NewKmsCryptoKeyIamUpdater, CryptoIdParseFunc), - "google_kms_crypto_key_iam_policy": ResourceIamPolicy(IamKmsCryptoKeySchema, NewKmsCryptoKeyIamUpdater, CryptoIdParseFunc), - "google_spanner_instance_iam_binding": ResourceIamBinding(IamSpannerInstanceSchema, NewSpannerInstanceIamUpdater, SpannerInstanceIdParseFunc), - "google_spanner_instance_iam_member": ResourceIamMember(IamSpannerInstanceSchema, NewSpannerInstanceIamUpdater, SpannerInstanceIdParseFunc), - "google_spanner_instance_iam_policy": ResourceIamPolicy(IamSpannerInstanceSchema, NewSpannerInstanceIamUpdater, SpannerInstanceIdParseFunc), - "google_spanner_database_iam_binding": ResourceIamBinding(IamSpannerDatabaseSchema, NewSpannerDatabaseIamUpdater, SpannerDatabaseIdParseFunc), - "google_spanner_database_iam_member": ResourceIamMember(IamSpannerDatabaseSchema, NewSpannerDatabaseIamUpdater, SpannerDatabaseIdParseFunc), - "google_spanner_database_iam_policy": 
ResourceIamPolicy(IamSpannerDatabaseSchema, NewSpannerDatabaseIamUpdater, SpannerDatabaseIdParseFunc), - "google_organization_iam_binding": ResourceIamBinding(IamOrganizationSchema, NewOrganizationIamUpdater, OrgIdParseFunc), - "google_organization_iam_member": ResourceIamMember(IamOrganizationSchema, NewOrganizationIamUpdater, OrgIdParseFunc), - "google_organization_iam_policy": ResourceIamPolicy(IamOrganizationSchema, NewOrganizationIamUpdater, OrgIdParseFunc), - "google_organization_iam_audit_config": ResourceIamAuditConfig(IamOrganizationSchema, NewOrganizationIamUpdater, OrgIdParseFunc), - "google_project_iam_policy": ResourceIamPolicy(IamProjectSchema, NewProjectIamUpdater, ProjectIdParseFunc), - "google_project_iam_binding": ResourceIamBindingWithBatching(IamProjectSchema, NewProjectIamUpdater, ProjectIdParseFunc, IamBatchingEnabled), - "google_project_iam_member": ResourceIamMemberWithBatching(IamProjectSchema, NewProjectIamUpdater, ProjectIdParseFunc, IamBatchingEnabled), - "google_project_iam_audit_config": ResourceIamAuditConfigWithBatching(IamProjectSchema, NewProjectIamUpdater, ProjectIdParseFunc, IamBatchingEnabled), - "google_pubsub_subscription_iam_binding": ResourceIamBinding(IamPubsubSubscriptionSchema, NewPubsubSubscriptionIamUpdater, PubsubSubscriptionIdParseFunc), - "google_pubsub_subscription_iam_member": ResourceIamMember(IamPubsubSubscriptionSchema, NewPubsubSubscriptionIamUpdater, PubsubSubscriptionIdParseFunc), - "google_pubsub_subscription_iam_policy": ResourceIamPolicy(IamPubsubSubscriptionSchema, NewPubsubSubscriptionIamUpdater, PubsubSubscriptionIdParseFunc), - "google_service_account_iam_binding": ResourceIamBinding(IamServiceAccountSchema, NewServiceAccountIamUpdater, ServiceAccountIdParseFunc), - "google_service_account_iam_member": ResourceIamMember(IamServiceAccountSchema, NewServiceAccountIamUpdater, ServiceAccountIdParseFunc), - "google_service_account_iam_policy": ResourceIamPolicy(IamServiceAccountSchema, 
NewServiceAccountIamUpdater, ServiceAccountIdParseFunc), - }, - ) -} - -func providerConfigure(ctx provider_context.Context, d *provider_schema.ResourceData, p *provider_schema.Provider) (interface{}, provider_diag.Diagnostics) { - config := Config{ - Project: d.Get("project").(string), - Region: d.Get("region").(string), - Zone: d.Get("zone").(string), - UserProjectOverride: d.Get("user_project_override").(bool), - BillingProject: d.Get("billing_project").(string), - userAgent: p.UserAgent("terraform-provider-google", provider_version.ProviderVersion), - } - - if ext := provider_os.Getenv("GOOGLE_TERRAFORM_USERAGENT_EXTENSION"); ext != "" { - ua := config.userAgent - config.userAgent = provider_fmt.Sprintf("%s %s", ua, ext) - } - - if v, ok := d.GetOk("request_timeout"); ok { - var err error - config.RequestTimeout, err = provider_time.ParseDuration(v.(string)) - if err != nil { - return nil, provider_diag.FromErr(err) - } - } - - if v, ok := d.GetOk("request_reason"); ok { - config.RequestReason = v.(string) - } - - if v, ok := d.GetOk("access_token"); ok { - config.AccessToken = v.(string) - } - - if v, ok := d.GetOk("credentials"); ok { - config.Credentials = v.(string) - } - - if config.AccessToken == "" && config.Credentials == "" { - config.Credentials = multiEnvSearch([]string{ - "GOOGLE_CREDENTIALS", - "GOOGLE_CLOUD_KEYFILE_JSON", - "GCLOUD_KEYFILE_JSON", - }) - - config.AccessToken = multiEnvSearch([]string{ - "GOOGLE_OAUTH_ACCESS_TOKEN", - }) - } - - if v, ok := d.GetOk("impersonate_service_account"); ok { - config.ImpersonateServiceAccount = v.(string) - } - - delegates := d.Get("impersonate_service_account_delegates").([]interface{}) - if len(delegates) > 0 { - config.ImpersonateServiceAccountDelegates = make([]string, len(delegates)) - } - for i, delegate := range delegates { - config.ImpersonateServiceAccountDelegates[i] = delegate.(string) - } - - scopes := d.Get("scopes").([]interface{}) - if len(scopes) > 0 { - config.Scopes = make([]string, 
len(scopes)) - } - for i, scope := range scopes { - config.Scopes[i] = scope.(string) - } - - batchCfg, err := expandProviderBatchingConfig(d.Get("batching")) - if err != nil { - return nil, provider_diag.FromErr(err) - } - config.BatchingConfig = batchCfg - - config.AccessApprovalBasePath = d.Get("access_approval_custom_endpoint").(string) - config.AccessContextManagerBasePath = d.Get("access_context_manager_custom_endpoint").(string) - config.ActiveDirectoryBasePath = d.Get("active_directory_custom_endpoint").(string) - config.ApigeeBasePath = d.Get("apigee_custom_endpoint").(string) - config.AppEngineBasePath = d.Get("app_engine_custom_endpoint").(string) - config.BigQueryBasePath = d.Get("big_query_custom_endpoint").(string) - config.BigqueryDataTransferBasePath = d.Get("bigquery_data_transfer_custom_endpoint").(string) - config.BigqueryReservationBasePath = d.Get("bigquery_reservation_custom_endpoint").(string) - config.BigtableBasePath = d.Get("bigtable_custom_endpoint").(string) - config.BillingBasePath = d.Get("billing_custom_endpoint").(string) - config.BinaryAuthorizationBasePath = d.Get("binary_authorization_custom_endpoint").(string) - config.CloudAssetBasePath = d.Get("cloud_asset_custom_endpoint").(string) - config.CloudBuildBasePath = d.Get("cloud_build_custom_endpoint").(string) - config.CloudFunctionsBasePath = d.Get("cloud_functions_custom_endpoint").(string) - config.CloudIdentityBasePath = d.Get("cloud_identity_custom_endpoint").(string) - config.CloudIotBasePath = d.Get("cloud_iot_custom_endpoint").(string) - config.CloudRunBasePath = d.Get("cloud_run_custom_endpoint").(string) - config.CloudSchedulerBasePath = d.Get("cloud_scheduler_custom_endpoint").(string) - config.CloudTasksBasePath = d.Get("cloud_tasks_custom_endpoint").(string) - config.ComputeBasePath = d.Get("compute_custom_endpoint").(string) - config.ContainerAnalysisBasePath = d.Get("container_analysis_custom_endpoint").(string) - config.DataCatalogBasePath = 
d.Get("data_catalog_custom_endpoint").(string) - config.DataLossPreventionBasePath = d.Get("data_loss_prevention_custom_endpoint").(string) - config.DataprocBasePath = d.Get("dataproc_custom_endpoint").(string) - config.DatastoreBasePath = d.Get("datastore_custom_endpoint").(string) - config.DeploymentManagerBasePath = d.Get("deployment_manager_custom_endpoint").(string) - config.DialogflowBasePath = d.Get("dialogflow_custom_endpoint").(string) - config.DialogflowCXBasePath = d.Get("dialogflow_cx_custom_endpoint").(string) - config.DNSBasePath = d.Get("dns_custom_endpoint").(string) - config.EssentialContactsBasePath = d.Get("essential_contacts_custom_endpoint").(string) - config.FilestoreBasePath = d.Get("filestore_custom_endpoint").(string) - config.FirestoreBasePath = d.Get("firestore_custom_endpoint").(string) - config.GameServicesBasePath = d.Get("game_services_custom_endpoint").(string) - config.GKEHubBasePath = d.Get("gke_hub_custom_endpoint").(string) - config.HealthcareBasePath = d.Get("healthcare_custom_endpoint").(string) - config.IapBasePath = d.Get("iap_custom_endpoint").(string) - config.IdentityPlatformBasePath = d.Get("identity_platform_custom_endpoint").(string) - config.KMSBasePath = d.Get("kms_custom_endpoint").(string) - config.LoggingBasePath = d.Get("logging_custom_endpoint").(string) - config.MemcacheBasePath = d.Get("memcache_custom_endpoint").(string) - config.MLEngineBasePath = d.Get("ml_engine_custom_endpoint").(string) - config.MonitoringBasePath = d.Get("monitoring_custom_endpoint").(string) - config.NetworkManagementBasePath = d.Get("network_management_custom_endpoint").(string) - config.NetworkServicesBasePath = d.Get("network_services_custom_endpoint").(string) - config.NotebooksBasePath = d.Get("notebooks_custom_endpoint").(string) - config.OSConfigBasePath = d.Get("os_config_custom_endpoint").(string) - config.OSLoginBasePath = d.Get("os_login_custom_endpoint").(string) - config.PrivatecaBasePath = 
d.Get("privateca_custom_endpoint").(string) - config.PubsubBasePath = d.Get("pubsub_custom_endpoint").(string) - config.PubsubLiteBasePath = d.Get("pubsub_lite_custom_endpoint").(string) - config.RedisBasePath = d.Get("redis_custom_endpoint").(string) - config.ResourceManagerBasePath = d.Get("resource_manager_custom_endpoint").(string) - config.SecretManagerBasePath = d.Get("secret_manager_custom_endpoint").(string) - config.SecurityCenterBasePath = d.Get("security_center_custom_endpoint").(string) - config.ServiceManagementBasePath = d.Get("service_management_custom_endpoint").(string) - config.ServiceUsageBasePath = d.Get("service_usage_custom_endpoint").(string) - config.SourceRepoBasePath = d.Get("source_repo_custom_endpoint").(string) - config.SpannerBasePath = d.Get("spanner_custom_endpoint").(string) - config.SQLBasePath = d.Get("sql_custom_endpoint").(string) - config.StorageBasePath = d.Get("storage_custom_endpoint").(string) - config.TagsBasePath = d.Get("tags_custom_endpoint").(string) - config.TPUBasePath = d.Get("tpu_custom_endpoint").(string) - config.VertexAIBasePath = d.Get("vertex_ai_custom_endpoint").(string) - config.VPCAccessBasePath = d.Get("vpc_access_custom_endpoint").(string) - config.WorkflowsBasePath = d.Get("workflows_custom_endpoint").(string) - - config.CloudBillingBasePath = d.Get(CloudBillingCustomEndpointEntryKey).(string) - config.ComposerBasePath = d.Get(ComposerCustomEndpointEntryKey).(string) - config.ContainerBasePath = d.Get(ContainerCustomEndpointEntryKey).(string) - config.DataflowBasePath = d.Get(DataflowCustomEndpointEntryKey).(string) - config.IamCredentialsBasePath = d.Get(IamCredentialsCustomEndpointEntryKey).(string) - config.ResourceManagerV2BasePath = d.Get(ResourceManagerV2CustomEndpointEntryKey).(string) - config.IAMBasePath = d.Get(IAMCustomEndpointEntryKey).(string) - config.ServiceNetworkingBasePath = d.Get(ServiceNetworkingCustomEndpointEntryKey).(string) - config.ServiceUsageBasePath = 
d.Get(ServiceUsageCustomEndpointEntryKey).(string) - config.StorageTransferBasePath = d.Get(StorageTransferCustomEndpointEntryKey).(string) - config.BigtableAdminBasePath = d.Get(BigtableAdminCustomEndpointEntryKey).(string) - - config.AssuredWorkloadsBasePath = d.Get(AssuredWorkloadsEndpointEntryKey).(string) - config.CloudResourceManagerBasePath = d.Get(CloudResourceManagerEndpointEntryKey).(string) - config.EventarcBasePath = d.Get(EventarcEndpointEntryKey).(string) - config.GkeHubBasePath = d.Get(GkeHubFeatureCustomEndpointEntryKey).(string) - config.OrgPolicyBasePath = d.Get(OrgPolicyEndpointEntryKey).(string) - config.PrivatecaBasePath = d.Get(PrivatecaCertificateTemplateEndpointEntryKey).(string) - - stopCtx, ok := provider_schema.StopContext(ctx) - if !ok { - stopCtx = ctx - } - if err := config.LoadAndValidate(stopCtx); err != nil { - return nil, provider_diag.FromErr(err) - } - - return &config, nil -} - -func validateCredentials(v interface{}, k string) (warnings []string, errors []error) { - if v == nil || v.(string) == "" { - return - } - creds := v.(string) - - if _, err := provider_os.Stat(creds); err == nil { - return - } - if _, err := provider_googlegoogleoauth.CredentialsFromJSON(provider_context.Background(), []byte(creds)); err != nil { - errors = append(errors, - provider_fmt.Errorf("JSON credentials in %q are not valid: %s", creds, err)) - } - - return -} - -func NewDCLAssuredWorkloadsClient(config *Config, userAgent, billingProject string, timeout provider_dcl_client_creation_time.Duration) *provider_dcl_client_creation_assuredworkloadsassuredworkloads.Client { - configOptions := []provider_dcl_client_creation_dcldcl.ConfigOption{ - provider_dcl_client_creation_dcldcl.WithHTTPClient(config.client), - provider_dcl_client_creation_dcldcl.WithUserAgent(userAgent), - provider_dcl_client_creation_dcldcl.WithLogger(dclLogger{}), - provider_dcl_client_creation_dcldcl.WithBasePath(config.AssuredWorkloadsBasePath), - } - - if timeout != 0 { - 
configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithTimeout(timeout)) - } - - if config.UserProjectOverride { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithUserProjectOverride()) - if billingProject != "" { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithBillingProject(billingProject)) - } - } - - dclConfig := provider_dcl_client_creation_dcldcl.NewConfig(configOptions...) - return provider_dcl_client_creation_assuredworkloadsassuredworkloads.NewClient(dclConfig) -} - -func NewDCLCloudResourceManagerClient(config *Config, userAgent, billingProject string, timeout provider_dcl_client_creation_time.Duration) *provider_dcl_client_creation_cloudresourcemanagercloudresourcemanager.Client { - configOptions := []provider_dcl_client_creation_dcldcl.ConfigOption{ - provider_dcl_client_creation_dcldcl.WithHTTPClient(config.client), - provider_dcl_client_creation_dcldcl.WithUserAgent(userAgent), - provider_dcl_client_creation_dcldcl.WithLogger(dclLogger{}), - provider_dcl_client_creation_dcldcl.WithBasePath(config.CloudResourceManagerBasePath), - } - - if timeout != 0 { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithTimeout(timeout)) - } - - if config.UserProjectOverride { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithUserProjectOverride()) - if billingProject != "" { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithBillingProject(billingProject)) - } - } - - dclConfig := provider_dcl_client_creation_dcldcl.NewConfig(configOptions...) 
- return provider_dcl_client_creation_cloudresourcemanagercloudresourcemanager.NewClient(dclConfig) -} - -func NewDCLComputeClient(config *Config, userAgent, billingProject string, timeout provider_dcl_client_creation_time.Duration) *provider_dcl_client_creation_computecompute.Client { - configOptions := []provider_dcl_client_creation_dcldcl.ConfigOption{ - provider_dcl_client_creation_dcldcl.WithHTTPClient(config.client), - provider_dcl_client_creation_dcldcl.WithUserAgent(userAgent), - provider_dcl_client_creation_dcldcl.WithLogger(dclLogger{}), - provider_dcl_client_creation_dcldcl.WithBasePath(config.ComputeBasePath), - } - - if timeout != 0 { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithTimeout(timeout)) - } - - if config.UserProjectOverride { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithUserProjectOverride()) - if billingProject != "" { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithBillingProject(billingProject)) - } - } - - dclConfig := provider_dcl_client_creation_dcldcl.NewConfig(configOptions...) 
- return provider_dcl_client_creation_computecompute.NewClient(dclConfig) -} - -func NewDCLDataprocClient(config *Config, userAgent, billingProject string, timeout provider_dcl_client_creation_time.Duration) *provider_dcl_client_creation_dataprocdataproc.Client { - configOptions := []provider_dcl_client_creation_dcldcl.ConfigOption{ - provider_dcl_client_creation_dcldcl.WithHTTPClient(config.client), - provider_dcl_client_creation_dcldcl.WithUserAgent(userAgent), - provider_dcl_client_creation_dcldcl.WithLogger(dclLogger{}), - provider_dcl_client_creation_dcldcl.WithBasePath(config.DataprocBasePath), - } - - if timeout != 0 { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithTimeout(timeout)) - } - - if config.UserProjectOverride { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithUserProjectOverride()) - if billingProject != "" { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithBillingProject(billingProject)) - } - } - - dclConfig := provider_dcl_client_creation_dcldcl.NewConfig(configOptions...) 
- return provider_dcl_client_creation_dataprocdataproc.NewClient(dclConfig) -} - -func NewDCLEventarcClient(config *Config, userAgent, billingProject string, timeout provider_dcl_client_creation_time.Duration) *provider_dcl_client_creation_eventarceventarc.Client { - configOptions := []provider_dcl_client_creation_dcldcl.ConfigOption{ - provider_dcl_client_creation_dcldcl.WithHTTPClient(config.client), - provider_dcl_client_creation_dcldcl.WithUserAgent(userAgent), - provider_dcl_client_creation_dcldcl.WithLogger(dclLogger{}), - provider_dcl_client_creation_dcldcl.WithBasePath(config.EventarcBasePath), - } - - if timeout != 0 { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithTimeout(timeout)) - } - - if config.UserProjectOverride { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithUserProjectOverride()) - if billingProject != "" { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithBillingProject(billingProject)) - } - } - - dclConfig := provider_dcl_client_creation_dcldcl.NewConfig(configOptions...) 
- return provider_dcl_client_creation_eventarceventarc.NewClient(dclConfig) -} - -func NewDCLOrgPolicyClient(config *Config, userAgent, billingProject string, timeout provider_dcl_client_creation_time.Duration) *provider_dcl_client_creation_orgpolicyorgpolicy.Client { - configOptions := []provider_dcl_client_creation_dcldcl.ConfigOption{ - provider_dcl_client_creation_dcldcl.WithHTTPClient(config.client), - provider_dcl_client_creation_dcldcl.WithUserAgent(userAgent), - provider_dcl_client_creation_dcldcl.WithLogger(dclLogger{}), - provider_dcl_client_creation_dcldcl.WithBasePath(config.OrgPolicyBasePath), - } - - if timeout != 0 { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithTimeout(timeout)) - } - - if config.UserProjectOverride { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithUserProjectOverride()) - if billingProject != "" { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithBillingProject(billingProject)) - } - } - - dclConfig := provider_dcl_client_creation_dcldcl.NewConfig(configOptions...) 
- return provider_dcl_client_creation_orgpolicyorgpolicy.NewClient(dclConfig) -} - -func NewDCLPrivatecaClient(config *Config, userAgent, billingProject string, timeout provider_dcl_client_creation_time.Duration) *provider_dcl_client_creation_privatecaprivateca.Client { - configOptions := []provider_dcl_client_creation_dcldcl.ConfigOption{ - provider_dcl_client_creation_dcldcl.WithHTTPClient(config.client), - provider_dcl_client_creation_dcldcl.WithUserAgent(userAgent), - provider_dcl_client_creation_dcldcl.WithLogger(dclLogger{}), - provider_dcl_client_creation_dcldcl.WithBasePath(config.PrivatecaBasePath), - } - - if timeout != 0 { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithTimeout(timeout)) - } - - if config.UserProjectOverride { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithUserProjectOverride()) - if billingProject != "" { - configOptions = append(configOptions, provider_dcl_client_creation_dcldcl.WithBillingProject(billingProject)) - } - } - - dclConfig := provider_dcl_client_creation_dcldcl.NewConfig(configOptions...) 
- return provider_dcl_client_creation_privatecaprivateca.NewClient(dclConfig) -} - -var AssuredWorkloadsEndpointEntryKey = "assured_workloads_custom_endpoint" - -var AssuredWorkloadsEndpointEntry = &provider_dcl_endpoints_schema.Schema{ - Type: provider_dcl_endpoints_schema.TypeString, - Optional: true, - DefaultFunc: provider_dcl_endpoints_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ASSURED_WORKLOADS_CUSTOM_ENDPOINT", - }, ""), -} - -var CloudResourceManagerEndpointEntryKey = "cloud_resource_manager_custom_endpoint" - -var CloudResourceManagerEndpointEntry = &provider_dcl_endpoints_schema.Schema{ - Type: provider_dcl_endpoints_schema.TypeString, - Optional: true, - DefaultFunc: provider_dcl_endpoints_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_RESOURCE_MANAGER_CUSTOM_ENDPOINT", - }, ""), -} - -var ComputeEndpointEntryKey = "compute_custom_endpoint" - -var ComputeEndpointEntry = &provider_dcl_endpoints_schema.Schema{ - Type: provider_dcl_endpoints_schema.TypeString, - Optional: true, - DefaultFunc: provider_dcl_endpoints_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_COMPUTE_CUSTOM_ENDPOINT", - }, ""), -} - -var EventarcEndpointEntryKey = "eventarc_custom_endpoint" - -var EventarcEndpointEntry = &provider_dcl_endpoints_schema.Schema{ - Type: provider_dcl_endpoints_schema.TypeString, - Optional: true, - DefaultFunc: provider_dcl_endpoints_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_EVENTARC_CUSTOM_ENDPOINT", - }, ""), -} - -var OrgPolicyEndpointEntryKey = "org_policy_custom_endpoint" - -var OrgPolicyEndpointEntry = &provider_dcl_endpoints_schema.Schema{ - Type: provider_dcl_endpoints_schema.TypeString, - Optional: true, - DefaultFunc: provider_dcl_endpoints_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_ORG_POLICY_CUSTOM_ENDPOINT", - }, ""), -} - -var PrivatecaEndpointEntryKey = "privateca_custom_endpoint" - -var PrivatecaEndpointEntry = &provider_dcl_endpoints_schema.Schema{ - Type: provider_dcl_endpoints_schema.TypeString, - Optional: true, - 
DefaultFunc: provider_dcl_endpoints_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_PRIVATECA_CUSTOM_ENDPOINT", - }, ""), -} - -var CloudBillingCustomEndpointEntryKey = "cloud_billing_custom_endpoint" - -var CloudBillingCustomEndpointEntry = &provider_handwritten_endpoint_schema.Schema{ - Type: provider_handwritten_endpoint_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_handwritten_endpoint_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CLOUD_BILLING_CUSTOM_ENDPOINT", - }, DefaultBasePaths[CloudBillingBasePathKey]), -} - -var ComposerCustomEndpointEntryKey = "composer_custom_endpoint" - -var ComposerCustomEndpointEntry = &provider_handwritten_endpoint_schema.Schema{ - Type: provider_handwritten_endpoint_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_handwritten_endpoint_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_COMPOSER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ComposerBasePathKey]), -} - -var ContainerCustomEndpointEntryKey = "container_custom_endpoint" - -var ContainerCustomEndpointEntry = &provider_handwritten_endpoint_schema.Schema{ - Type: provider_handwritten_endpoint_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_handwritten_endpoint_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_CONTAINER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ContainerBasePathKey]), -} - -var DataflowCustomEndpointEntryKey = "dataflow_custom_endpoint" - -var DataflowCustomEndpointEntry = &provider_handwritten_endpoint_schema.Schema{ - Type: provider_handwritten_endpoint_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_handwritten_endpoint_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_DATAFLOW_CUSTOM_ENDPOINT", - }, DefaultBasePaths[DataflowBasePathKey]), -} - -var IAMCustomEndpointEntryKey = "iam_custom_endpoint" - -var IAMCustomEndpointEntry = 
&provider_handwritten_endpoint_schema.Schema{ - Type: provider_handwritten_endpoint_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_handwritten_endpoint_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_IAM_CUSTOM_ENDPOINT", - }, DefaultBasePaths[IAMBasePathKey]), -} - -var IamCredentialsCustomEndpointEntryKey = "iam_credentials_custom_endpoint" - -var IamCredentialsCustomEndpointEntry = &provider_handwritten_endpoint_schema.Schema{ - Type: provider_handwritten_endpoint_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_handwritten_endpoint_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_IAM_CREDENTIALS_CUSTOM_ENDPOINT", - }, DefaultBasePaths[IamCredentialsBasePathKey]), -} - -var ResourceManagerV2CustomEndpointEntryKey = "resource_manager_v2_custom_endpoint" - -var ResourceManagerV2CustomEndpointEntry = &provider_handwritten_endpoint_schema.Schema{ - Type: provider_handwritten_endpoint_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_handwritten_endpoint_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_RESOURCE_MANAGER_V2_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ResourceManagerV2BasePathKey]), -} - -var ServiceNetworkingCustomEndpointEntryKey = "service_networking_custom_endpoint" - -var ServiceNetworkingCustomEndpointEntry = &provider_handwritten_endpoint_schema.Schema{ - Type: provider_handwritten_endpoint_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_handwritten_endpoint_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SERVICE_NETWORKING_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ServiceNetworkingBasePathKey]), -} - -var ServiceUsageCustomEndpointEntryKey = "service_usage_custom_endpoint" - -var ServiceUsageCustomEndpointEntry = &provider_handwritten_endpoint_schema.Schema{ - Type: provider_handwritten_endpoint_schema.TypeString, - Optional: true, - ValidateFunc: 
validateCustomEndpoint, - DefaultFunc: provider_handwritten_endpoint_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_SERVICE_USAGE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[ServiceUsageBasePathKey]), -} - -var StorageTransferCustomEndpointEntryKey = "storage_transfer_custom_endpoint" - -var StorageTransferCustomEndpointEntry = &provider_handwritten_endpoint_schema.Schema{ - Type: provider_handwritten_endpoint_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_handwritten_endpoint_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_STORAGE_TRANSFER_CUSTOM_ENDPOINT", - }, DefaultBasePaths[StorageTransferBasePathKey]), -} - -var BigtableAdminCustomEndpointEntryKey = "bigtable_custom_endpoint" - -var BigtableAdminCustomEndpointEntry = &provider_handwritten_endpoint_schema.Schema{ - Type: provider_handwritten_endpoint_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_handwritten_endpoint_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_BIGTABLE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[BigtableAdminBasePathKey]), -} - -var GkeHubFeatureCustomEndpointEntryKey = "gkehub_feature_custom_endpoint" - -var GkeHubFeatureCustomEndpointEntry = &provider_handwritten_endpoint_schema.Schema{ - Type: provider_handwritten_endpoint_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_handwritten_endpoint_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_GKEHUB_FEATURE_CUSTOM_ENDPOINT", - }, DefaultBasePaths[GkeHubFeatureBasePathKey]), -} - -var PrivatecaCertificateTemplateEndpointEntryKey = "privateca_custom_endpoint" - -var PrivatecaCertificateTemplateCustomEndpointEntry = &provider_handwritten_endpoint_schema.Schema{ - Type: provider_handwritten_endpoint_schema.TypeString, - Optional: true, - ValidateFunc: validateCustomEndpoint, - DefaultFunc: provider_handwritten_endpoint_schema.MultiEnvDefaultFunc([]string{ - "GOOGLE_PRIVATECA_CUSTOM_ENDPOINT", - }, 
DefaultBasePaths[PrivatecaBasePathKey]), -} - -func validateCustomEndpoint(v interface{}, k string) (ws []string, errors []error) { - re := `.*/[^/]+/$` - return validateRegexp(re)(v, k) -} - -const PubsubTopicRegex = "projects\\/.*\\/topics\\/.*" - -func getComputedSubscriptionName(project, subscription string) string { - match, _ := pubsub_utils_regexp.MatchString("projects\\/.*\\/subscriptions\\/.*", subscription) - if match { - return subscription - } - return pubsub_utils_fmt.Sprintf("projects/%s/subscriptions/%s", project, subscription) -} - -func getComputedTopicName(project, topic string) string { - match, _ := pubsub_utils_regexp.MatchString(PubsubTopicRegex, topic) - if match { - return topic - } - return pubsub_utils_fmt.Sprintf("projects/%s/topics/%s", project, topic) -} - -type RedisOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *RedisOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, redis_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := redis_operation_fmt.Sprintf("https://redis.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createRedisWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*RedisOperationWaiter, error) { - w := &RedisOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func redisOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout redis_operation_time.Duration) error { - w, err := createRedisWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != 
nil { - return err - } - return redis_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func redisOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout redis_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createRedisWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -func isZone(location string) bool { - return len(regional_utils_strings.Split(location, "-")) == 3 -} - -func getLocation(d TerraformResourceData, config *Config) (string, error) { - if v, ok := d.GetOk("location"); ok { - return v.(string), nil - } else if v, isRegionalCluster := d.GetOk("region"); isRegionalCluster { - return v.(string), nil - } else { - - return getZone(d, config) - } -} - -func getZone(d TerraformResourceData, config *Config) (string, error) { - res, ok := d.GetOk("zone") - if !ok { - if config.Zone != "" { - return config.Zone, nil - } - return "", regional_utils_fmt.Errorf("Cannot determine zone: set in this resource, or set provider-level zone.") - } - return GetResourceNameFromSelfLink(res.(string)), nil -} - -func resourceAccessContextManagerAccessLevel() *resource_access_context_manager_access_level_schema.Resource { - return &resource_access_context_manager_access_level_schema.Resource{ - Create: resourceAccessContextManagerAccessLevelCreate, - Read: resourceAccessContextManagerAccessLevelRead, - Update: resourceAccessContextManagerAccessLevelUpdate, - Delete: resourceAccessContextManagerAccessLevelDelete, - - Importer: &resource_access_context_manager_access_level_schema.ResourceImporter{ - State: resourceAccessContextManagerAccessLevelImport, - }, - - Timeouts: &resource_access_context_manager_access_level_schema.ResourceTimeout{ - Create: resource_access_context_manager_access_level_schema.DefaultTimeout(6 * 
resource_access_context_manager_access_level_time.Minute), - Update: resource_access_context_manager_access_level_schema.DefaultTimeout(6 * resource_access_context_manager_access_level_time.Minute), - Delete: resource_access_context_manager_access_level_schema.DefaultTimeout(6 * resource_access_context_manager_access_level_time.Minute), - }, - - Schema: map[string]*resource_access_context_manager_access_level_schema.Schema{ - "name": { - Type: resource_access_context_manager_access_level_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Resource name for the Access Level. The short_name component must begin -with a letter and only include alphanumeric and '_'. -Format: accessPolicies/{policy_id}/accessLevels/{short_name}`, - }, - "parent": { - Type: resource_access_context_manager_access_level_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The AccessPolicy this AccessLevel lives in. -Format: accessPolicies/{policy_id}`, - }, - "title": { - Type: resource_access_context_manager_access_level_schema.TypeString, - Required: true, - Description: `Human readable title. 
Must be unique within the Policy.`, - }, - "basic": { - Type: resource_access_context_manager_access_level_schema.TypeList, - Optional: true, - Description: `A set of predefined conditions for the access level and a combining function.`, - MaxItems: 1, - Elem: &resource_access_context_manager_access_level_schema.Resource{ - Schema: map[string]*resource_access_context_manager_access_level_schema.Schema{ - "conditions": { - Type: resource_access_context_manager_access_level_schema.TypeList, - Required: true, - Description: `A set of requirements for the AccessLevel to be granted.`, - MinItems: 1, - Elem: &resource_access_context_manager_access_level_schema.Resource{ - Schema: map[string]*resource_access_context_manager_access_level_schema.Schema{ - "device_policy": { - Type: resource_access_context_manager_access_level_schema.TypeList, - Optional: true, - Description: `Device specific restrictions, all restrictions must hold for -the Condition to be true. If not specified, all devices are -allowed.`, - MaxItems: 1, - Elem: &resource_access_context_manager_access_level_schema.Resource{ - Schema: map[string]*resource_access_context_manager_access_level_schema.Schema{ - "allowed_device_management_levels": { - Type: resource_access_context_manager_access_level_schema.TypeList, - Optional: true, - Description: `A list of allowed device management levels. -An empty list allows all management levels. Possible values: ["MANAGEMENT_UNSPECIFIED", "NONE", "BASIC", "COMPLETE"]`, - Elem: &resource_access_context_manager_access_level_schema.Schema{ - Type: resource_access_context_manager_access_level_schema.TypeString, - ValidateFunc: resource_access_context_manager_access_level_validation.StringInSlice([]string{"MANAGEMENT_UNSPECIFIED", "NONE", "BASIC", "COMPLETE"}, false), - }, - }, - "allowed_encryption_statuses": { - Type: resource_access_context_manager_access_level_schema.TypeList, - Optional: true, - Description: `A list of allowed encryptions statuses. 
-An empty list allows all statuses. Possible values: ["ENCRYPTION_UNSPECIFIED", "ENCRYPTION_UNSUPPORTED", "UNENCRYPTED", "ENCRYPTED"]`, - Elem: &resource_access_context_manager_access_level_schema.Schema{ - Type: resource_access_context_manager_access_level_schema.TypeString, - ValidateFunc: resource_access_context_manager_access_level_validation.StringInSlice([]string{"ENCRYPTION_UNSPECIFIED", "ENCRYPTION_UNSUPPORTED", "UNENCRYPTED", "ENCRYPTED"}, false), - }, - }, - "os_constraints": { - Type: resource_access_context_manager_access_level_schema.TypeList, - Optional: true, - Description: `A list of allowed OS versions. -An empty list allows all types and all versions.`, - Elem: &resource_access_context_manager_access_level_schema.Resource{ - Schema: map[string]*resource_access_context_manager_access_level_schema.Schema{ - "os_type": { - Type: resource_access_context_manager_access_level_schema.TypeString, - Required: true, - ValidateFunc: resource_access_context_manager_access_level_validation.StringInSlice([]string{"OS_UNSPECIFIED", "DESKTOP_MAC", "DESKTOP_WINDOWS", "DESKTOP_LINUX", "DESKTOP_CHROME_OS", "ANDROID", "IOS"}, false), - Description: `The operating system type of the device. Possible values: ["OS_UNSPECIFIED", "DESKTOP_MAC", "DESKTOP_WINDOWS", "DESKTOP_LINUX", "DESKTOP_CHROME_OS", "ANDROID", "IOS"]`, - }, - "minimum_version": { - Type: resource_access_context_manager_access_level_schema.TypeString, - Optional: true, - Description: `The minimum allowed OS version. If not set, any version -of this OS satisfies the constraint. 
-Format: "major.minor.patch" such as "10.5.301", "9.2.1".`, - }, - "require_verified_chrome_os": { - Type: resource_access_context_manager_access_level_schema.TypeBool, - Optional: true, - Description: `If you specify DESKTOP_CHROME_OS for osType, you can optionally include requireVerifiedChromeOs to require Chrome Verified Access.`, - }, - }, - }, - }, - "require_admin_approval": { - Type: resource_access_context_manager_access_level_schema.TypeBool, - Optional: true, - Description: `Whether the device needs to be approved by the customer admin.`, - }, - "require_corp_owned": { - Type: resource_access_context_manager_access_level_schema.TypeBool, - Optional: true, - Description: `Whether the device needs to be corp owned.`, - }, - "require_screen_lock": { - Type: resource_access_context_manager_access_level_schema.TypeBool, - Optional: true, - Description: `Whether or not screenlock is required for the DevicePolicy -to be true. Defaults to false.`, - }, - }, - }, - }, - "ip_subnetworks": { - Type: resource_access_context_manager_access_level_schema.TypeList, - Optional: true, - Description: `A list of CIDR block IP subnetwork specification. May be IPv4 -or IPv6. -Note that for a CIDR IP address block, the specified IP address -portion must be properly truncated (i.e. all the host bits must -be zero) or the input is considered malformed. For example, -"192.0.2.0/24" is accepted but "192.0.2.1/24" is not. Similarly, -for IPv6, "2001:db8::/32" is accepted whereas "2001:db8::1/32" -is not. The originating IP of a request must be in one of the -listed subnets in order for this Condition to be true. -If empty, all IP addresses are allowed.`, - Elem: &resource_access_context_manager_access_level_schema.Schema{ - Type: resource_access_context_manager_access_level_schema.TypeString, - }, - }, - "members": { - Type: resource_access_context_manager_access_level_schema.TypeList, - Optional: true, - Description: `An allowed list of members (users, service accounts). 
-Using groups is not supported yet. - -The signed-in user originating the request must be a part of one -of the provided members. If not specified, a request may come -from any user (logged in/not logged in, not present in any -groups, etc.). -Formats: 'user:{emailid}', 'serviceAccount:{emailid}'`, - Elem: &resource_access_context_manager_access_level_schema.Schema{ - Type: resource_access_context_manager_access_level_schema.TypeString, - }, - }, - "negate": { - Type: resource_access_context_manager_access_level_schema.TypeBool, - Optional: true, - Description: `Whether to negate the Condition. If true, the Condition becomes -a NAND over its non-empty fields, each field must be false for -the Condition overall to be satisfied. Defaults to false.`, - }, - "regions": { - Type: resource_access_context_manager_access_level_schema.TypeList, - Optional: true, - Description: `The request must originate from one of the provided -countries/regions. -Format: A valid ISO 3166-1 alpha-2 code.`, - Elem: &resource_access_context_manager_access_level_schema.Schema{ - Type: resource_access_context_manager_access_level_schema.TypeString, - }, - }, - "required_access_levels": { - Type: resource_access_context_manager_access_level_schema.TypeList, - Optional: true, - Description: `A list of other access levels defined in the same Policy, -referenced by resource name. Referencing an AccessLevel which -does not exist is an error. All access levels listed must be -granted for the Condition to be true. 
-Format: accessPolicies/{policy_id}/accessLevels/{short_name}`, - Elem: &resource_access_context_manager_access_level_schema.Schema{ - Type: resource_access_context_manager_access_level_schema.TypeString, - }, - }, - }, - }, - }, - "combining_function": { - Type: resource_access_context_manager_access_level_schema.TypeString, - Optional: true, - ValidateFunc: resource_access_context_manager_access_level_validation.StringInSlice([]string{"AND", "OR", ""}, false), - Description: `How the conditions list should be combined to determine if a request -is granted this AccessLevel. If AND is used, each Condition in -conditions must be satisfied for the AccessLevel to be applied. If -OR is used, at least one Condition in conditions must be satisfied -for the AccessLevel to be applied. Default value: "AND" Possible values: ["AND", "OR"]`, - Default: "AND", - }, - }, - }, - ConflictsWith: []string{"custom"}, - }, - "custom": { - Type: resource_access_context_manager_access_level_schema.TypeList, - Optional: true, - Description: `Custom access level conditions are set using the Cloud Common Expression Language to represent the necessary conditions for the level to apply to a request. -See CEL spec at: https://github.com/google/cel-spec.`, - MaxItems: 1, - Elem: &resource_access_context_manager_access_level_schema.Resource{ - Schema: map[string]*resource_access_context_manager_access_level_schema.Schema{ - "expr": { - Type: resource_access_context_manager_access_level_schema.TypeList, - Required: true, - Description: `Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. 
-This page details the objects and attributes that are used to the build the CEL expressions for -custom access levels - https://cloud.google.com/access-context-manager/docs/custom-access-level-spec.`, - MaxItems: 1, - Elem: &resource_access_context_manager_access_level_schema.Resource{ - Schema: map[string]*resource_access_context_manager_access_level_schema.Schema{ - "expression": { - Type: resource_access_context_manager_access_level_schema.TypeString, - Required: true, - Description: `Textual representation of an expression in Common Expression Language syntax.`, - }, - "description": { - Type: resource_access_context_manager_access_level_schema.TypeString, - Optional: true, - Description: `Description of the expression`, - }, - "location": { - Type: resource_access_context_manager_access_level_schema.TypeString, - Optional: true, - Description: `String indicating the location of the expression for error reporting, e.g. a file name and a position in the file`, - }, - "title": { - Type: resource_access_context_manager_access_level_schema.TypeString, - Optional: true, - Description: `Title for the expression, i.e. a short string describing its purpose.`, - }, - }, - }, - }, - }, - }, - ConflictsWith: []string{"basic"}, - }, - "description": { - Type: resource_access_context_manager_access_level_schema.TypeString, - Optional: true, - Description: `Description of the AccessLevel and its use. 
Does not affect behavior.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAccessContextManagerAccessLevelCreate(d *resource_access_context_manager_access_level_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - titleProp, err := expandAccessContextManagerAccessLevelTitle(d.Get("title"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("title"); !isEmptyValue(resource_access_context_manager_access_level_reflect.ValueOf(titleProp)) && (ok || !resource_access_context_manager_access_level_reflect.DeepEqual(v, titleProp)) { - obj["title"] = titleProp - } - descriptionProp, err := expandAccessContextManagerAccessLevelDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_access_context_manager_access_level_reflect.ValueOf(descriptionProp)) && (ok || !resource_access_context_manager_access_level_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - basicProp, err := expandAccessContextManagerAccessLevelBasic(d.Get("basic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("basic"); !isEmptyValue(resource_access_context_manager_access_level_reflect.ValueOf(basicProp)) && (ok || !resource_access_context_manager_access_level_reflect.DeepEqual(v, basicProp)) { - obj["basic"] = basicProp - } - customProp, err := expandAccessContextManagerAccessLevelCustom(d.Get("custom"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("custom"); !isEmptyValue(resource_access_context_manager_access_level_reflect.ValueOf(customProp)) && (ok || !resource_access_context_manager_access_level_reflect.DeepEqual(v, customProp)) { - obj["custom"] = customProp - } - parentProp, err := 
expandAccessContextManagerAccessLevelParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(resource_access_context_manager_access_level_reflect.ValueOf(parentProp)) && (ok || !resource_access_context_manager_access_level_reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - nameProp, err := expandAccessContextManagerAccessLevelName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_access_context_manager_access_level_reflect.ValueOf(nameProp)) && (ok || !resource_access_context_manager_access_level_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - obj, err = resourceAccessContextManagerAccessLevelEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/accessLevels") - if err != nil { - return err - } - - resource_access_context_manager_access_level_log.Printf("[DEBUG] Creating new AccessLevel: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_access_level_schema.TimeoutCreate)) - if err != nil { - return resource_access_context_manager_access_level_fmt.Errorf("Error creating AccessLevel: %s", err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_access_context_manager_access_level_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = accessContextManagerOperationWaitTimeWithResponse( - config, res, &opRes, "Creating AccessLevel", userAgent, - d.Timeout(resource_access_context_manager_access_level_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return 
resource_access_context_manager_access_level_fmt.Errorf("Error waiting to create AccessLevel: %s", err) - } - - if err := d.Set("name", flattenAccessContextManagerAccessLevelName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{name}}") - if err != nil { - return resource_access_context_manager_access_level_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_access_context_manager_access_level_log.Printf("[DEBUG] Finished creating AccessLevel %q: %#v", d.Id(), res) - - return resourceAccessContextManagerAccessLevelRead(d, meta) -} - -func resourceAccessContextManagerAccessLevelRead(d *resource_access_context_manager_access_level_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_access_context_manager_access_level_fmt.Sprintf("AccessContextManagerAccessLevel %q", d.Id())) - } - - if err := d.Set("title", flattenAccessContextManagerAccessLevelTitle(res["title"], d, config)); err != nil { - return resource_access_context_manager_access_level_fmt.Errorf("Error reading AccessLevel: %s", err) - } - if err := d.Set("description", flattenAccessContextManagerAccessLevelDescription(res["description"], d, config)); err != nil { - return resource_access_context_manager_access_level_fmt.Errorf("Error reading AccessLevel: %s", err) - } - if err := d.Set("basic", flattenAccessContextManagerAccessLevelBasic(res["basic"], d, config)); err != nil { - return resource_access_context_manager_access_level_fmt.Errorf("Error 
reading AccessLevel: %s", err) - } - if err := d.Set("custom", flattenAccessContextManagerAccessLevelCustom(res["custom"], d, config)); err != nil { - return resource_access_context_manager_access_level_fmt.Errorf("Error reading AccessLevel: %s", err) - } - if err := d.Set("name", flattenAccessContextManagerAccessLevelName(res["name"], d, config)); err != nil { - return resource_access_context_manager_access_level_fmt.Errorf("Error reading AccessLevel: %s", err) - } - - return nil -} - -func resourceAccessContextManagerAccessLevelUpdate(d *resource_access_context_manager_access_level_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - titleProp, err := expandAccessContextManagerAccessLevelTitle(d.Get("title"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("title"); !isEmptyValue(resource_access_context_manager_access_level_reflect.ValueOf(v)) && (ok || !resource_access_context_manager_access_level_reflect.DeepEqual(v, titleProp)) { - obj["title"] = titleProp - } - descriptionProp, err := expandAccessContextManagerAccessLevelDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_access_context_manager_access_level_reflect.ValueOf(v)) && (ok || !resource_access_context_manager_access_level_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - basicProp, err := expandAccessContextManagerAccessLevelBasic(d.Get("basic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("basic"); !isEmptyValue(resource_access_context_manager_access_level_reflect.ValueOf(v)) && (ok || !resource_access_context_manager_access_level_reflect.DeepEqual(v, basicProp)) { - obj["basic"] = basicProp - } - customProp, err := 
expandAccessContextManagerAccessLevelCustom(d.Get("custom"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("custom"); !isEmptyValue(resource_access_context_manager_access_level_reflect.ValueOf(v)) && (ok || !resource_access_context_manager_access_level_reflect.DeepEqual(v, customProp)) { - obj["custom"] = customProp - } - - obj, err = resourceAccessContextManagerAccessLevelEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") - if err != nil { - return err - } - - resource_access_context_manager_access_level_log.Printf("[DEBUG] Updating AccessLevel %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("title") { - updateMask = append(updateMask, "title") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("basic") { - updateMask = append(updateMask, "basic") - } - - if d.HasChange("custom") { - updateMask = append(updateMask, "custom") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_access_context_manager_access_level_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_access_level_schema.TimeoutUpdate)) - - if err != nil { - return resource_access_context_manager_access_level_fmt.Errorf("Error updating AccessLevel %q: %s", d.Id(), err) - } else { - resource_access_context_manager_access_level_log.Printf("[DEBUG] Finished updating AccessLevel %q: %#v", d.Id(), res) - } - - err = accessContextManagerOperationWaitTime( - config, res, "Updating AccessLevel", userAgent, - d.Timeout(resource_access_context_manager_access_level_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return 
resourceAccessContextManagerAccessLevelRead(d, meta) -} - -func resourceAccessContextManagerAccessLevelDelete(d *resource_access_context_manager_access_level_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_access_context_manager_access_level_log.Printf("[DEBUG] Deleting AccessLevel %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_access_level_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "AccessLevel") - } - - err = accessContextManagerOperationWaitTime( - config, res, "Deleting AccessLevel", userAgent, - d.Timeout(resource_access_context_manager_access_level_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_access_context_manager_access_level_log.Printf("[DEBUG] Finished deleting AccessLevel %q: %#v", d.Id(), res) - return nil -} - -func resourceAccessContextManagerAccessLevelImport(d *resource_access_context_manager_access_level_schema.ResourceData, meta interface{}) ([]*resource_access_context_manager_access_level_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - stringParts := resource_access_context_manager_access_level_strings.Split(d.Get("name").(string), "/") - if len(stringParts) < 2 { - return nil, resource_access_context_manager_access_level_fmt.Errorf("Error parsing parent name. 
Should be in form accessPolicies/{{policy_id}}/accessLevels/{{short_name}}") - } - if err := d.Set("parent", resource_access_context_manager_access_level_fmt.Sprintf("%s/%s", stringParts[0], stringParts[1])); err != nil { - return nil, resource_access_context_manager_access_level_fmt.Errorf("Error setting parent, %s", err) - } - return []*resource_access_context_manager_access_level_schema.ResourceData{d}, nil -} - -func flattenAccessContextManagerAccessLevelTitle(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelDescription(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelBasic(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["combining_function"] = - flattenAccessContextManagerAccessLevelBasicCombiningFunction(original["combiningFunction"], d, config) - transformed["conditions"] = - flattenAccessContextManagerAccessLevelBasicConditions(original["conditions"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerAccessLevelBasicCombiningFunction(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(resource_access_context_manager_access_level_reflect.ValueOf(v)) { - return "AND" - } - - return v -} - -func flattenAccessContextManagerAccessLevelBasicConditions(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 
0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "ip_subnetworks": flattenAccessContextManagerAccessLevelBasicConditionsIpSubnetworks(original["ipSubnetworks"], d, config), - "required_access_levels": flattenAccessContextManagerAccessLevelBasicConditionsRequiredAccessLevels(original["requiredAccessLevels"], d, config), - "members": flattenAccessContextManagerAccessLevelBasicConditionsMembers(original["members"], d, config), - "negate": flattenAccessContextManagerAccessLevelBasicConditionsNegate(original["negate"], d, config), - "device_policy": flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicy(original["devicePolicy"], d, config), - "regions": flattenAccessContextManagerAccessLevelBasicConditionsRegions(original["regions"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerAccessLevelBasicConditionsIpSubnetworks(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelBasicConditionsRequiredAccessLevels(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelBasicConditionsMembers(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelBasicConditionsNegate(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicy(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - 
if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["require_screen_lock"] = - flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireScreenLock(original["requireScreenlock"], d, config) - transformed["allowed_encryption_statuses"] = - flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedEncryptionStatuses(original["allowedEncryptionStatuses"], d, config) - transformed["allowed_device_management_levels"] = - flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedDeviceManagementLevels(original["allowedDeviceManagementLevels"], d, config) - transformed["os_constraints"] = - flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraints(original["osConstraints"], d, config) - transformed["require_admin_approval"] = - flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireAdminApproval(original["requireAdminApproval"], d, config) - transformed["require_corp_owned"] = - flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireCorpOwned(original["requireCorpOwned"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireScreenLock(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedEncryptionStatuses(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedDeviceManagementLevels(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraints(v interface{}, d 
*resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "minimum_version": flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsMinimumVersion(original["minimumVersion"], d, config), - "require_verified_chrome_os": flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsRequireVerifiedChromeOs(original["requireVerifiedChromeOs"], d, config), - "os_type": flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsOsType(original["osType"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsMinimumVersion(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsRequireVerifiedChromeOs(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsOsType(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireAdminApproval(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireCorpOwned(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} 
- -func flattenAccessContextManagerAccessLevelBasicConditionsRegions(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelCustom(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["expr"] = - flattenAccessContextManagerAccessLevelCustomExpr(original["expr"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerAccessLevelCustomExpr(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["expression"] = - flattenAccessContextManagerAccessLevelCustomExprExpression(original["expression"], d, config) - transformed["title"] = - flattenAccessContextManagerAccessLevelCustomExprTitle(original["title"], d, config) - transformed["description"] = - flattenAccessContextManagerAccessLevelCustomExprDescription(original["description"], d, config) - transformed["location"] = - flattenAccessContextManagerAccessLevelCustomExprLocation(original["location"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerAccessLevelCustomExprExpression(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelCustomExprTitle(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelCustomExprDescription(v interface{}, d 
*resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelCustomExprLocation(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelName(v interface{}, d *resource_access_context_manager_access_level_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAccessContextManagerAccessLevelTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelBasic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCombiningFunction, err := expandAccessContextManagerAccessLevelBasicCombiningFunction(original["combining_function"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedCombiningFunction); val.IsValid() && !isEmptyValue(val) { - transformed["combiningFunction"] = transformedCombiningFunction - } - - transformedConditions, err := expandAccessContextManagerAccessLevelBasicConditions(original["conditions"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedConditions); val.IsValid() && !isEmptyValue(val) { - transformed["conditions"] = transformedConditions - } - - return transformed, nil -} - -func expandAccessContextManagerAccessLevelBasicCombiningFunction(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelBasicConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIpSubnetworks, err := expandAccessContextManagerAccessLevelBasicConditionsIpSubnetworks(original["ip_subnetworks"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedIpSubnetworks); val.IsValid() && !isEmptyValue(val) { - transformed["ipSubnetworks"] = transformedIpSubnetworks - } - - transformedRequiredAccessLevels, err := expandAccessContextManagerAccessLevelBasicConditionsRequiredAccessLevels(original["required_access_levels"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedRequiredAccessLevels); val.IsValid() && !isEmptyValue(val) { - transformed["requiredAccessLevels"] = transformedRequiredAccessLevels - } - - transformedMembers, err := expandAccessContextManagerAccessLevelBasicConditionsMembers(original["members"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedMembers); val.IsValid() && !isEmptyValue(val) { - transformed["members"] = transformedMembers - } - - transformedNegate, err := expandAccessContextManagerAccessLevelBasicConditionsNegate(original["negate"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedNegate); val.IsValid() && !isEmptyValue(val) { - transformed["negate"] = transformedNegate - } - - transformedDevicePolicy, err := 
expandAccessContextManagerAccessLevelBasicConditionsDevicePolicy(original["device_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedDevicePolicy); val.IsValid() && !isEmptyValue(val) { - transformed["devicePolicy"] = transformedDevicePolicy - } - - transformedRegions, err := expandAccessContextManagerAccessLevelBasicConditionsRegions(original["regions"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedRegions); val.IsValid() && !isEmptyValue(val) { - transformed["regions"] = transformedRegions - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerAccessLevelBasicConditionsIpSubnetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelBasicConditionsRequiredAccessLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelBasicConditionsMembers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelBasicConditionsNegate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequireScreenLock, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireScreenLock(original["require_screen_lock"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_access_context_manager_access_level_reflect.ValueOf(transformedRequireScreenLock); val.IsValid() && !isEmptyValue(val) { - transformed["requireScreenlock"] = transformedRequireScreenLock - } - - transformedAllowedEncryptionStatuses, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedEncryptionStatuses(original["allowed_encryption_statuses"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedAllowedEncryptionStatuses); val.IsValid() && !isEmptyValue(val) { - transformed["allowedEncryptionStatuses"] = transformedAllowedEncryptionStatuses - } - - transformedAllowedDeviceManagementLevels, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedDeviceManagementLevels(original["allowed_device_management_levels"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedAllowedDeviceManagementLevels); val.IsValid() && !isEmptyValue(val) { - transformed["allowedDeviceManagementLevels"] = transformedAllowedDeviceManagementLevels - } - - transformedOsConstraints, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraints(original["os_constraints"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedOsConstraints); val.IsValid() && !isEmptyValue(val) { - transformed["osConstraints"] = transformedOsConstraints - } - - transformedRequireAdminApproval, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireAdminApproval(original["require_admin_approval"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedRequireAdminApproval); val.IsValid() && !isEmptyValue(val) { - transformed["requireAdminApproval"] = transformedRequireAdminApproval - 
} - - transformedRequireCorpOwned, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireCorpOwned(original["require_corp_owned"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedRequireCorpOwned); val.IsValid() && !isEmptyValue(val) { - transformed["requireCorpOwned"] = transformedRequireCorpOwned - } - - return transformed, nil -} - -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireScreenLock(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedEncryptionStatuses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyAllowedDeviceManagementLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraints(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMinimumVersion, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsMinimumVersion(original["minimum_version"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedMinimumVersion); val.IsValid() && !isEmptyValue(val) { - transformed["minimumVersion"] = transformedMinimumVersion - } - - transformedRequireVerifiedChromeOs, err := 
expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsRequireVerifiedChromeOs(original["require_verified_chrome_os"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedRequireVerifiedChromeOs); val.IsValid() && !isEmptyValue(val) { - transformed["requireVerifiedChromeOs"] = transformedRequireVerifiedChromeOs - } - - transformedOsType, err := expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsOsType(original["os_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedOsType); val.IsValid() && !isEmptyValue(val) { - transformed["osType"] = transformedOsType - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsMinimumVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsRequireVerifiedChromeOs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyOsConstraintsOsType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireAdminApproval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelBasicConditionsDevicePolicyRequireCorpOwned(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelBasicConditionsRegions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandAccessContextManagerAccessLevelCustom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExpr, err := expandAccessContextManagerAccessLevelCustomExpr(original["expr"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedExpr); val.IsValid() && !isEmptyValue(val) { - transformed["expr"] = transformedExpr - } - - return transformed, nil -} - -func expandAccessContextManagerAccessLevelCustomExpr(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExpression, err := expandAccessContextManagerAccessLevelCustomExprExpression(original["expression"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedExpression); val.IsValid() && !isEmptyValue(val) { - transformed["expression"] = transformedExpression - } - - transformedTitle, err := expandAccessContextManagerAccessLevelCustomExprTitle(original["title"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { - transformed["title"] = transformedTitle - } - - transformedDescription, err := expandAccessContextManagerAccessLevelCustomExprDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - 
transformed["description"] = transformedDescription - } - - transformedLocation, err := expandAccessContextManagerAccessLevelCustomExprLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - return transformed, nil -} - -func expandAccessContextManagerAccessLevelCustomExprExpression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelCustomExprTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelCustomExprDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelCustomExprLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceAccessContextManagerAccessLevelEncoder(d *resource_access_context_manager_access_level_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - delete(obj, "parent") - return obj, nil -} - -func resourceAccessContextManagerAccessLevelCondition() *resource_access_context_manager_access_level_condition_schema.Resource { - return &resource_access_context_manager_access_level_condition_schema.Resource{ - Create: resourceAccessContextManagerAccessLevelConditionCreate, - Read: resourceAccessContextManagerAccessLevelConditionRead, - Delete: 
resourceAccessContextManagerAccessLevelConditionDelete, - - Timeouts: &resource_access_context_manager_access_level_condition_schema.ResourceTimeout{ - Create: resource_access_context_manager_access_level_condition_schema.DefaultTimeout(4 * resource_access_context_manager_access_level_condition_time.Minute), - Delete: resource_access_context_manager_access_level_condition_schema.DefaultTimeout(4 * resource_access_context_manager_access_level_condition_time.Minute), - }, - - Schema: map[string]*resource_access_context_manager_access_level_condition_schema.Schema{ - "access_level": { - Type: resource_access_context_manager_access_level_condition_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the Access Level to add this condition to.`, - }, - "device_policy": { - Type: resource_access_context_manager_access_level_condition_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Device specific restrictions, all restrictions must hold for -the Condition to be true. If not specified, all devices are -allowed.`, - MaxItems: 1, - Elem: &resource_access_context_manager_access_level_condition_schema.Resource{ - Schema: map[string]*resource_access_context_manager_access_level_condition_schema.Schema{ - "allowed_device_management_levels": { - Type: resource_access_context_manager_access_level_condition_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A list of allowed device management levels. -An empty list allows all management levels. 
Possible values: ["MANAGEMENT_UNSPECIFIED", "NONE", "BASIC", "COMPLETE"]`, - Elem: &resource_access_context_manager_access_level_condition_schema.Schema{ - Type: resource_access_context_manager_access_level_condition_schema.TypeString, - ValidateFunc: resource_access_context_manager_access_level_condition_validation.StringInSlice([]string{"MANAGEMENT_UNSPECIFIED", "NONE", "BASIC", "COMPLETE"}, false), - }, - }, - "allowed_encryption_statuses": { - Type: resource_access_context_manager_access_level_condition_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A list of allowed encryptions statuses. -An empty list allows all statuses. Possible values: ["ENCRYPTION_UNSPECIFIED", "ENCRYPTION_UNSUPPORTED", "UNENCRYPTED", "ENCRYPTED"]`, - Elem: &resource_access_context_manager_access_level_condition_schema.Schema{ - Type: resource_access_context_manager_access_level_condition_schema.TypeString, - ValidateFunc: resource_access_context_manager_access_level_condition_validation.StringInSlice([]string{"ENCRYPTION_UNSPECIFIED", "ENCRYPTION_UNSUPPORTED", "UNENCRYPTED", "ENCRYPTED"}, false), - }, - }, - "os_constraints": { - Type: resource_access_context_manager_access_level_condition_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A list of allowed OS versions. -An empty list allows all types and all versions.`, - Elem: &resource_access_context_manager_access_level_condition_schema.Resource{ - Schema: map[string]*resource_access_context_manager_access_level_condition_schema.Schema{ - "os_type": { - Type: resource_access_context_manager_access_level_condition_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_access_context_manager_access_level_condition_validation.StringInSlice([]string{"OS_UNSPECIFIED", "DESKTOP_MAC", "DESKTOP_WINDOWS", "DESKTOP_LINUX", "DESKTOP_CHROME_OS", "ANDROID", "IOS"}, false), - Description: `The operating system type of the device. 
Possible values: ["OS_UNSPECIFIED", "DESKTOP_MAC", "DESKTOP_WINDOWS", "DESKTOP_LINUX", "DESKTOP_CHROME_OS", "ANDROID", "IOS"]`, - }, - "minimum_version": { - Type: resource_access_context_manager_access_level_condition_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The minimum allowed OS version. If not set, any version -of this OS satisfies the constraint. -Format: "major.minor.patch" such as "10.5.301", "9.2.1".`, - }, - }, - }, - }, - "require_admin_approval": { - Type: resource_access_context_manager_access_level_condition_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether the device needs to be approved by the customer admin.`, - }, - "require_corp_owned": { - Type: resource_access_context_manager_access_level_condition_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether the device needs to be corp owned.`, - }, - "require_screen_lock": { - Type: resource_access_context_manager_access_level_condition_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether or not screenlock is required for the DevicePolicy -to be true. Defaults to false.`, - }, - }, - }, - }, - "ip_subnetworks": { - Type: resource_access_context_manager_access_level_condition_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A list of CIDR block IP subnetwork specification. May be IPv4 -or IPv6. -Note that for a CIDR IP address block, the specified IP address -portion must be properly truncated (i.e. all the host bits must -be zero) or the input is considered malformed. For example, -"192.0.2.0/24" is accepted but "192.0.2.1/24" is not. Similarly, -for IPv6, "2001:db8::/32" is accepted whereas "2001:db8::1/32" -is not. The originating IP of a request must be in one of the -listed subnets in order for this Condition to be true. 
-If empty, all IP addresses are allowed.`, - Elem: &resource_access_context_manager_access_level_condition_schema.Schema{ - Type: resource_access_context_manager_access_level_condition_schema.TypeString, - }, - }, - "members": { - Type: resource_access_context_manager_access_level_condition_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `An allowed list of members (users, service accounts). -Using groups is not supported yet. - -The signed-in user originating the request must be a part of one -of the provided members. If not specified, a request may come -from any user (logged in/not logged in, not present in any -groups, etc.). -Formats: 'user:{emailid}', 'serviceAccount:{emailid}'`, - Elem: &resource_access_context_manager_access_level_condition_schema.Schema{ - Type: resource_access_context_manager_access_level_condition_schema.TypeString, - }, - }, - "negate": { - Type: resource_access_context_manager_access_level_condition_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether to negate the Condition. If true, the Condition becomes -a NAND over its non-empty fields, each field must be false for -the Condition overall to be satisfied. Defaults to false.`, - }, - "regions": { - Type: resource_access_context_manager_access_level_condition_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The request must originate from one of the provided -countries/regions. -Format: A valid ISO 3166-1 alpha-2 code.`, - Elem: &resource_access_context_manager_access_level_condition_schema.Schema{ - Type: resource_access_context_manager_access_level_condition_schema.TypeString, - }, - }, - "required_access_levels": { - Type: resource_access_context_manager_access_level_condition_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A list of other access levels defined in the same Policy, -referenced by resource name. Referencing an AccessLevel which -does not exist is an error. 
All access levels listed must be -granted for the Condition to be true. -Format: accessPolicies/{policy_id}/accessLevels/{short_name}`, - Elem: &resource_access_context_manager_access_level_condition_schema.Schema{ - Type: resource_access_context_manager_access_level_condition_schema.TypeString, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAccessContextManagerAccessLevelConditionCreate(d *resource_access_context_manager_access_level_condition_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - ipSubnetworksProp, err := expandNestedAccessContextManagerAccessLevelConditionIpSubnetworks(d.Get("ip_subnetworks"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_subnetworks"); !isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(ipSubnetworksProp)) && (ok || !resource_access_context_manager_access_level_condition_reflect.DeepEqual(v, ipSubnetworksProp)) { - obj["ipSubnetworks"] = ipSubnetworksProp - } - requiredAccessLevelsProp, err := expandNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(d.Get("required_access_levels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("required_access_levels"); !isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(requiredAccessLevelsProp)) && (ok || !resource_access_context_manager_access_level_condition_reflect.DeepEqual(v, requiredAccessLevelsProp)) { - obj["requiredAccessLevels"] = requiredAccessLevelsProp - } - membersProp, err := expandNestedAccessContextManagerAccessLevelConditionMembers(d.Get("members"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("members"); !isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(membersProp)) && (ok || 
!resource_access_context_manager_access_level_condition_reflect.DeepEqual(v, membersProp)) { - obj["members"] = membersProp - } - negateProp, err := expandNestedAccessContextManagerAccessLevelConditionNegate(d.Get("negate"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("negate"); !isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(negateProp)) && (ok || !resource_access_context_manager_access_level_condition_reflect.DeepEqual(v, negateProp)) { - obj["negate"] = negateProp - } - devicePolicyProp, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicy(d.Get("device_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("device_policy"); !isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(devicePolicyProp)) && (ok || !resource_access_context_manager_access_level_condition_reflect.DeepEqual(v, devicePolicyProp)) { - obj["devicePolicy"] = devicePolicyProp - } - regionsProp, err := expandNestedAccessContextManagerAccessLevelConditionRegions(d.Get("regions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("regions"); !isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(regionsProp)) && (ok || !resource_access_context_manager_access_level_condition_reflect.DeepEqual(v, regionsProp)) { - obj["regions"] = regionsProp - } - - lockName, err := replaceVars(d, config, "{{access_level}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{access_level}}") - if err != nil { - return err - } - - resource_access_context_manager_access_level_condition_log.Printf("[DEBUG] Creating new AccessLevelCondition: %#v", obj) - - obj, err = resourceAccessContextManagerAccessLevelConditionPatchCreateEncoder(d, meta, obj) - if err != nil { - return err - } - url, err = 
addQueryParams(url, map[string]string{"updateMask": "basic.conditions"}) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_access_level_condition_schema.TimeoutCreate)) - if err != nil { - return resource_access_context_manager_access_level_condition_fmt.Errorf("Error creating AccessLevelCondition: %s", err) - } - - id, err := replaceVars(d, config, "{{access_level}}") - if err != nil { - return resource_access_context_manager_access_level_condition_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = PollingWaitTime(resourceAccessContextManagerAccessLevelConditionPollRead(d, meta), PollCheckForExistence, "Creating AccessLevelCondition", d.Timeout(resource_access_context_manager_access_level_condition_schema.TimeoutCreate), 1) - if err != nil { - return resource_access_context_manager_access_level_condition_fmt.Errorf("Error waiting to create AccessLevelCondition: %s", err) - } - - resource_access_context_manager_access_level_condition_log.Printf("[DEBUG] Finished creating AccessLevelCondition %q: %#v", d.Id(), res) - - return resourceAccessContextManagerAccessLevelConditionRead(d, meta) -} - -func resourceAccessContextManagerAccessLevelConditionPollRead(d *resource_access_context_manager_access_level_condition_schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{access_level}}") - if err != nil { - return nil, err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, 
"GET", billingProject, url, userAgent, nil) - if err != nil { - return res, err - } - res, err = flattenNestedAccessContextManagerAccessLevelCondition(d, meta, res) - if err != nil { - return nil, err - } - - if res == nil { - - return nil, &resource_access_context_manager_access_level_condition_googleapi.Error{ - Code: 404, - Message: "nested object AccessContextManagerAccessLevelCondition not found", - } - } - - return res, nil - } -} - -func resourceAccessContextManagerAccessLevelConditionRead(d *resource_access_context_manager_access_level_condition_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{access_level}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_access_context_manager_access_level_condition_fmt.Sprintf("AccessContextManagerAccessLevelCondition %q", d.Id())) - } - - res, err = flattenNestedAccessContextManagerAccessLevelCondition(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_access_context_manager_access_level_condition_log.Printf("[DEBUG] Removing AccessContextManagerAccessLevelCondition because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("ip_subnetworks", flattenNestedAccessContextManagerAccessLevelConditionIpSubnetworks(res["ipSubnetworks"], d, config)); err != nil { - return resource_access_context_manager_access_level_condition_fmt.Errorf("Error reading AccessLevelCondition: %s", err) - } - if err := d.Set("required_access_levels", flattenNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(res["requiredAccessLevels"], d, 
config)); err != nil { - return resource_access_context_manager_access_level_condition_fmt.Errorf("Error reading AccessLevelCondition: %s", err) - } - if err := d.Set("members", flattenNestedAccessContextManagerAccessLevelConditionMembers(res["members"], d, config)); err != nil { - return resource_access_context_manager_access_level_condition_fmt.Errorf("Error reading AccessLevelCondition: %s", err) - } - if err := d.Set("negate", flattenNestedAccessContextManagerAccessLevelConditionNegate(res["negate"], d, config)); err != nil { - return resource_access_context_manager_access_level_condition_fmt.Errorf("Error reading AccessLevelCondition: %s", err) - } - if err := d.Set("device_policy", flattenNestedAccessContextManagerAccessLevelConditionDevicePolicy(res["devicePolicy"], d, config)); err != nil { - return resource_access_context_manager_access_level_condition_fmt.Errorf("Error reading AccessLevelCondition: %s", err) - } - if err := d.Set("regions", flattenNestedAccessContextManagerAccessLevelConditionRegions(res["regions"], d, config)); err != nil { - return resource_access_context_manager_access_level_condition_fmt.Errorf("Error reading AccessLevelCondition: %s", err) - } - - return nil -} - -func resourceAccessContextManagerAccessLevelConditionDelete(d *resource_access_context_manager_access_level_condition_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "{{access_level}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{access_level}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - obj, err = resourceAccessContextManagerAccessLevelConditionPatchDeleteEncoder(d, meta, obj) - if err != nil { - return handleNotFoundError(err, 
d, "AccessLevelCondition") - } - url, err = addQueryParams(url, map[string]string{"updateMask": "basic.conditions"}) - if err != nil { - return err - } - resource_access_context_manager_access_level_condition_log.Printf("[DEBUG] Deleting AccessLevelCondition %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_access_level_condition_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "AccessLevelCondition") - } - - resource_access_context_manager_access_level_condition_log.Printf("[DEBUG] Finished deleting AccessLevelCondition %q: %#v", d.Id(), res) - return nil -} - -func flattenNestedAccessContextManagerAccessLevelConditionIpSubnetworks(v interface{}, d *resource_access_context_manager_access_level_condition_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(v interface{}, d *resource_access_context_manager_access_level_condition_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionMembers(v interface{}, d *resource_access_context_manager_access_level_condition_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionNegate(v interface{}, d *resource_access_context_manager_access_level_condition_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicy(v interface{}, d *resource_access_context_manager_access_level_condition_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["require_screen_lock"] = - flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireScreenLock(original["requireScreenlock"], d, config) - transformed["allowed_encryption_statuses"] = - flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedEncryptionStatuses(original["allowedEncryptionStatuses"], d, config) - transformed["allowed_device_management_levels"] = - flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedDeviceManagementLevels(original["allowedDeviceManagementLevels"], d, config) - transformed["os_constraints"] = - flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraints(original["osConstraints"], d, config) - transformed["require_admin_approval"] = - flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireAdminApproval(original["requireAdminApproval"], d, config) - transformed["require_corp_owned"] = - flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireCorpOwned(original["requireCorpOwned"], d, config) - return []interface{}{transformed} -} - -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireScreenLock(v interface{}, d *resource_access_context_manager_access_level_condition_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedEncryptionStatuses(v interface{}, d *resource_access_context_manager_access_level_condition_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedDeviceManagementLevels(v interface{}, d *resource_access_context_manager_access_level_condition_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraints(v interface{}, d *resource_access_context_manager_access_level_condition_schema.ResourceData, config *Config) 
interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "minimum_version": flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsMinimumVersion(original["minimumVersion"], d, config), - "os_type": flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsOsType(original["osType"], d, config), - }) - } - return transformed -} - -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsMinimumVersion(v interface{}, d *resource_access_context_manager_access_level_condition_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsOsType(v interface{}, d *resource_access_context_manager_access_level_condition_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireAdminApproval(v interface{}, d *resource_access_context_manager_access_level_condition_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireCorpOwned(v interface{}, d *resource_access_context_manager_access_level_condition_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedAccessContextManagerAccessLevelConditionRegions(v interface{}, d *resource_access_context_manager_access_level_condition_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedAccessContextManagerAccessLevelConditionIpSubnetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionMembers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionNegate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionDevicePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequireScreenLock, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireScreenLock(original["require_screen_lock"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_condition_reflect.ValueOf(transformedRequireScreenLock); val.IsValid() && !isEmptyValue(val) { - transformed["requireScreenlock"] = transformedRequireScreenLock - } - - transformedAllowedEncryptionStatuses, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedEncryptionStatuses(original["allowed_encryption_statuses"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_condition_reflect.ValueOf(transformedAllowedEncryptionStatuses); val.IsValid() && !isEmptyValue(val) { - transformed["allowedEncryptionStatuses"] = transformedAllowedEncryptionStatuses - } - - transformedAllowedDeviceManagementLevels, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedDeviceManagementLevels(original["allowed_device_management_levels"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_access_context_manager_access_level_condition_reflect.ValueOf(transformedAllowedDeviceManagementLevels); val.IsValid() && !isEmptyValue(val) { - transformed["allowedDeviceManagementLevels"] = transformedAllowedDeviceManagementLevels - } - - transformedOsConstraints, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraints(original["os_constraints"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_condition_reflect.ValueOf(transformedOsConstraints); val.IsValid() && !isEmptyValue(val) { - transformed["osConstraints"] = transformedOsConstraints - } - - transformedRequireAdminApproval, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireAdminApproval(original["require_admin_approval"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_condition_reflect.ValueOf(transformedRequireAdminApproval); val.IsValid() && !isEmptyValue(val) { - transformed["requireAdminApproval"] = transformedRequireAdminApproval - } - - transformedRequireCorpOwned, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireCorpOwned(original["require_corp_owned"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_condition_reflect.ValueOf(transformedRequireCorpOwned); val.IsValid() && !isEmptyValue(val) { - transformed["requireCorpOwned"] = transformedRequireCorpOwned - } - - return transformed, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireScreenLock(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedEncryptionStatuses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandNestedAccessContextManagerAccessLevelConditionDevicePolicyAllowedDeviceManagementLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraints(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMinimumVersion, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsMinimumVersion(original["minimum_version"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_condition_reflect.ValueOf(transformedMinimumVersion); val.IsValid() && !isEmptyValue(val) { - transformed["minimumVersion"] = transformedMinimumVersion - } - - transformedOsType, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsOsType(original["os_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_level_condition_reflect.ValueOf(transformedOsType); val.IsValid() && !isEmptyValue(val) { - transformed["osType"] = transformedOsType - } - - req = append(req, transformed) - } - return req, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsMinimumVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyOsConstraintsOsType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireAdminApproval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionDevicePolicyRequireCorpOwned(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedAccessContextManagerAccessLevelConditionRegions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func flattenNestedAccessContextManagerAccessLevelCondition(d *resource_access_context_manager_access_level_condition_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["basic"] - if !ok || v == nil { - return nil, nil - } - res = v.(map[string]interface{}) - - v, ok = res["conditions"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_access_context_manager_access_level_condition_fmt.Errorf("expected list or map for value basic.conditions. 
Actual value: %v", v) - } - - _, item, err := resourceAccessContextManagerAccessLevelConditionFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceAccessContextManagerAccessLevelConditionFindNestedObjectInList(d *resource_access_context_manager_access_level_condition_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedIpSubnetworks, err := expandNestedAccessContextManagerAccessLevelConditionIpSubnetworks(d.Get("ip_subnetworks"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedIpSubnetworks := flattenNestedAccessContextManagerAccessLevelConditionIpSubnetworks(expectedIpSubnetworks, d, meta.(*Config)) - expectedRequiredAccessLevels, err := expandNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(d.Get("required_access_levels"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedRequiredAccessLevels := flattenNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(expectedRequiredAccessLevels, d, meta.(*Config)) - expectedMembers, err := expandNestedAccessContextManagerAccessLevelConditionMembers(d.Get("members"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedMembers := flattenNestedAccessContextManagerAccessLevelConditionMembers(expectedMembers, d, meta.(*Config)) - expectedNegate, err := expandNestedAccessContextManagerAccessLevelConditionNegate(d.Get("negate"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedNegate := flattenNestedAccessContextManagerAccessLevelConditionNegate(expectedNegate, d, meta.(*Config)) - expectedDevicePolicy, err := expandNestedAccessContextManagerAccessLevelConditionDevicePolicy(d.Get("device_policy"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedDevicePolicy := 
flattenNestedAccessContextManagerAccessLevelConditionDevicePolicy(expectedDevicePolicy, d, meta.(*Config)) - expectedRegions, err := expandNestedAccessContextManagerAccessLevelConditionRegions(d.Get("regions"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedRegions := flattenNestedAccessContextManagerAccessLevelConditionRegions(expectedRegions, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemIpSubnetworks := flattenNestedAccessContextManagerAccessLevelConditionIpSubnetworks(item["ipSubnetworks"], d, meta.(*Config)) - - if !(isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(itemIpSubnetworks)) && isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(expectedFlattenedIpSubnetworks))) && !resource_access_context_manager_access_level_condition_reflect.DeepEqual(itemIpSubnetworks, expectedFlattenedIpSubnetworks) { - resource_access_context_manager_access_level_condition_log.Printf("[DEBUG] Skipping item with ipSubnetworks= %#v, looking for %#v)", itemIpSubnetworks, expectedFlattenedIpSubnetworks) - continue - } - itemRequiredAccessLevels := flattenNestedAccessContextManagerAccessLevelConditionRequiredAccessLevels(item["requiredAccessLevels"], d, meta.(*Config)) - - if !(isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(itemRequiredAccessLevels)) && isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(expectedFlattenedRequiredAccessLevels))) && !resource_access_context_manager_access_level_condition_reflect.DeepEqual(itemRequiredAccessLevels, expectedFlattenedRequiredAccessLevels) { - resource_access_context_manager_access_level_condition_log.Printf("[DEBUG] Skipping item with requiredAccessLevels= %#v, looking for %#v)", itemRequiredAccessLevels, expectedFlattenedRequiredAccessLevels) - continue - } - itemMembers 
:= flattenNestedAccessContextManagerAccessLevelConditionMembers(item["members"], d, meta.(*Config)) - - if !(isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(itemMembers)) && isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(expectedFlattenedMembers))) && !resource_access_context_manager_access_level_condition_reflect.DeepEqual(itemMembers, expectedFlattenedMembers) { - resource_access_context_manager_access_level_condition_log.Printf("[DEBUG] Skipping item with members= %#v, looking for %#v)", itemMembers, expectedFlattenedMembers) - continue - } - itemNegate := flattenNestedAccessContextManagerAccessLevelConditionNegate(item["negate"], d, meta.(*Config)) - - if !(isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(itemNegate)) && isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(expectedFlattenedNegate))) && !resource_access_context_manager_access_level_condition_reflect.DeepEqual(itemNegate, expectedFlattenedNegate) { - resource_access_context_manager_access_level_condition_log.Printf("[DEBUG] Skipping item with negate= %#v, looking for %#v)", itemNegate, expectedFlattenedNegate) - continue - } - itemDevicePolicy := flattenNestedAccessContextManagerAccessLevelConditionDevicePolicy(item["devicePolicy"], d, meta.(*Config)) - - if !(isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(itemDevicePolicy)) && isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(expectedFlattenedDevicePolicy))) && !resource_access_context_manager_access_level_condition_reflect.DeepEqual(itemDevicePolicy, expectedFlattenedDevicePolicy) { - resource_access_context_manager_access_level_condition_log.Printf("[DEBUG] Skipping item with devicePolicy= %#v, looking for %#v)", itemDevicePolicy, expectedFlattenedDevicePolicy) - continue - } - itemRegions := 
flattenNestedAccessContextManagerAccessLevelConditionRegions(item["regions"], d, meta.(*Config)) - - if !(isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(itemRegions)) && isEmptyValue(resource_access_context_manager_access_level_condition_reflect.ValueOf(expectedFlattenedRegions))) && !resource_access_context_manager_access_level_condition_reflect.DeepEqual(itemRegions, expectedFlattenedRegions) { - resource_access_context_manager_access_level_condition_log.Printf("[DEBUG] Skipping item with regions= %#v, looking for %#v)", itemRegions, expectedFlattenedRegions) - continue - } - resource_access_context_manager_access_level_condition_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func resourceAccessContextManagerAccessLevelConditionPatchCreateEncoder(d *resource_access_context_manager_access_level_condition_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceAccessContextManagerAccessLevelConditionListForPatch(d, meta) - if err != nil { - return nil, err - } - - _, found, err := resourceAccessContextManagerAccessLevelConditionFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - - if found != nil { - return nil, resource_access_context_manager_access_level_condition_fmt.Errorf("Unable to create AccessLevelCondition, existing object already found: %+v", found) - } - - res := map[string]interface{}{ - "conditions": append(currItems, obj), - } - wrapped := map[string]interface{}{ - "basic": res, - } - res = wrapped - - return res, nil -} - -func resourceAccessContextManagerAccessLevelConditionPatchDeleteEncoder(d *resource_access_context_manager_access_level_condition_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceAccessContextManagerAccessLevelConditionListForPatch(d, 
meta) - if err != nil { - return nil, err - } - - idx, item, err := resourceAccessContextManagerAccessLevelConditionFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - if item == nil { - - return nil, &resource_access_context_manager_access_level_condition_googleapi.Error{ - Code: 404, - Message: "AccessLevelCondition not found in list", - } - } - - updatedItems := append(currItems[:idx], currItems[idx+1:]...) - res := map[string]interface{}{ - "conditions": updatedItems, - } - wrapped := map[string]interface{}{ - "basic": res, - } - res = wrapped - - return res, nil -} - -func resourceAccessContextManagerAccessLevelConditionListForPatch(d *resource_access_context_manager_access_level_condition_schema.ResourceData, meta interface{}) ([]interface{}, error) { - config := meta.(*Config) - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{access_level}}") - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", "", url, userAgent, nil) - if err != nil { - return nil, err - } - - var v interface{} - var ok bool - if v, ok = res["basic"]; ok && v != nil { - res = v.(map[string]interface{}) - } else { - return nil, nil - } - - v, ok = res["conditions"] - if ok && v != nil { - ls, lsOk := v.([]interface{}) - if !lsOk { - return nil, resource_access_context_manager_access_level_condition_fmt.Errorf(`expected list for nested field "conditions"`) - } - return ls, nil - } - return nil, nil -} - -func resourceAccessContextManagerAccessLevels() *resource_access_context_manager_access_levels_schema.Resource { - return &resource_access_context_manager_access_levels_schema.Resource{ - Create: resourceAccessContextManagerAccessLevelsCreate, - Read: resourceAccessContextManagerAccessLevelsRead, - Update: resourceAccessContextManagerAccessLevelsUpdate, - Delete: 
resourceAccessContextManagerAccessLevelsDelete, - - Importer: &resource_access_context_manager_access_levels_schema.ResourceImporter{ - State: resourceAccessContextManagerAccessLevelsImport, - }, - - Timeouts: &resource_access_context_manager_access_levels_schema.ResourceTimeout{ - Create: resource_access_context_manager_access_levels_schema.DefaultTimeout(6 * resource_access_context_manager_access_levels_time.Minute), - Update: resource_access_context_manager_access_levels_schema.DefaultTimeout(6 * resource_access_context_manager_access_levels_time.Minute), - Delete: resource_access_context_manager_access_levels_schema.DefaultTimeout(6 * resource_access_context_manager_access_levels_time.Minute), - }, - - Schema: map[string]*resource_access_context_manager_access_levels_schema.Schema{ - "parent": { - Type: resource_access_context_manager_access_levels_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The AccessPolicy this AccessLevel lives in. -Format: accessPolicies/{policy_id}`, - }, - "access_levels": { - Type: resource_access_context_manager_access_levels_schema.TypeSet, - Optional: true, - Description: `The desired Access Levels that should replace all existing Access Levels in the Access Policy.`, - Elem: accesscontextmanagerAccessLevelsAccessLevelsSchema(), - }, - }, - UseJSONNumber: true, - } -} - -func accesscontextmanagerAccessLevelsAccessLevelsSchema() *resource_access_context_manager_access_levels_schema.Resource { - return &resource_access_context_manager_access_levels_schema.Resource{ - Schema: map[string]*resource_access_context_manager_access_levels_schema.Schema{ - "name": { - Type: resource_access_context_manager_access_levels_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Resource name for the Access Level. The short_name component must begin -with a letter and only include alphanumeric and '_'. 
-Format: accessPolicies/{policy_id}/accessLevels/{short_name}`, - }, - "title": { - Type: resource_access_context_manager_access_levels_schema.TypeString, - Required: true, - Description: `Human readable title. Must be unique within the Policy.`, - }, - "basic": { - Type: resource_access_context_manager_access_levels_schema.TypeList, - Optional: true, - Description: `A set of predefined conditions for the access level and a combining function.`, - MaxItems: 1, - Elem: &resource_access_context_manager_access_levels_schema.Resource{ - Schema: map[string]*resource_access_context_manager_access_levels_schema.Schema{ - "conditions": { - Type: resource_access_context_manager_access_levels_schema.TypeList, - Required: true, - Description: `A set of requirements for the AccessLevel to be granted.`, - MinItems: 1, - Elem: &resource_access_context_manager_access_levels_schema.Resource{ - Schema: map[string]*resource_access_context_manager_access_levels_schema.Schema{ - "device_policy": { - Type: resource_access_context_manager_access_levels_schema.TypeList, - Optional: true, - Description: `Device specific restrictions, all restrictions must hold for -the Condition to be true. If not specified, all devices are -allowed.`, - MaxItems: 1, - Elem: &resource_access_context_manager_access_levels_schema.Resource{ - Schema: map[string]*resource_access_context_manager_access_levels_schema.Schema{ - "allowed_device_management_levels": { - Type: resource_access_context_manager_access_levels_schema.TypeList, - Optional: true, - Description: `A list of allowed device management levels. -An empty list allows all management levels. 
Possible values: ["MANAGEMENT_UNSPECIFIED", "NONE", "BASIC", "COMPLETE"]`, - Elem: &resource_access_context_manager_access_levels_schema.Schema{ - Type: resource_access_context_manager_access_levels_schema.TypeString, - ValidateFunc: resource_access_context_manager_access_levels_validation.StringInSlice([]string{"MANAGEMENT_UNSPECIFIED", "NONE", "BASIC", "COMPLETE"}, false), - }, - }, - "allowed_encryption_statuses": { - Type: resource_access_context_manager_access_levels_schema.TypeList, - Optional: true, - Description: `A list of allowed encryptions statuses. -An empty list allows all statuses. Possible values: ["ENCRYPTION_UNSPECIFIED", "ENCRYPTION_UNSUPPORTED", "UNENCRYPTED", "ENCRYPTED"]`, - Elem: &resource_access_context_manager_access_levels_schema.Schema{ - Type: resource_access_context_manager_access_levels_schema.TypeString, - ValidateFunc: resource_access_context_manager_access_levels_validation.StringInSlice([]string{"ENCRYPTION_UNSPECIFIED", "ENCRYPTION_UNSUPPORTED", "UNENCRYPTED", "ENCRYPTED"}, false), - }, - }, - "os_constraints": { - Type: resource_access_context_manager_access_levels_schema.TypeList, - Optional: true, - Description: `A list of allowed OS versions. -An empty list allows all types and all versions.`, - Elem: &resource_access_context_manager_access_levels_schema.Resource{ - Schema: map[string]*resource_access_context_manager_access_levels_schema.Schema{ - "os_type": { - Type: resource_access_context_manager_access_levels_schema.TypeString, - Required: true, - ValidateFunc: resource_access_context_manager_access_levels_validation.StringInSlice([]string{"OS_UNSPECIFIED", "DESKTOP_MAC", "DESKTOP_WINDOWS", "DESKTOP_LINUX", "DESKTOP_CHROME_OS", "ANDROID", "IOS"}, false), - Description: `The operating system type of the device. 
Possible values: ["OS_UNSPECIFIED", "DESKTOP_MAC", "DESKTOP_WINDOWS", "DESKTOP_LINUX", "DESKTOP_CHROME_OS", "ANDROID", "IOS"]`, - }, - "minimum_version": { - Type: resource_access_context_manager_access_levels_schema.TypeString, - Optional: true, - Description: `The minimum allowed OS version. If not set, any version -of this OS satisfies the constraint. -Format: "major.minor.patch" such as "10.5.301", "9.2.1".`, - }, - }, - }, - }, - "require_admin_approval": { - Type: resource_access_context_manager_access_levels_schema.TypeBool, - Optional: true, - Description: `Whether the device needs to be approved by the customer admin.`, - }, - "require_corp_owned": { - Type: resource_access_context_manager_access_levels_schema.TypeBool, - Optional: true, - Description: `Whether the device needs to be corp owned.`, - }, - "require_screen_lock": { - Type: resource_access_context_manager_access_levels_schema.TypeBool, - Optional: true, - Description: `Whether or not screenlock is required for the DevicePolicy -to be true. Defaults to false.`, - }, - }, - }, - }, - "ip_subnetworks": { - Type: resource_access_context_manager_access_levels_schema.TypeList, - Optional: true, - Description: `A list of CIDR block IP subnetwork specification. May be IPv4 -or IPv6. -Note that for a CIDR IP address block, the specified IP address -portion must be properly truncated (i.e. all the host bits must -be zero) or the input is considered malformed. For example, -"192.0.2.0/24" is accepted but "192.0.2.1/24" is not. Similarly, -for IPv6, "2001:db8::/32" is accepted whereas "2001:db8::1/32" -is not. The originating IP of a request must be in one of the -listed subnets in order for this Condition to be true. 
-If empty, all IP addresses are allowed.`, - Elem: &resource_access_context_manager_access_levels_schema.Schema{ - Type: resource_access_context_manager_access_levels_schema.TypeString, - }, - }, - "members": { - Type: resource_access_context_manager_access_levels_schema.TypeList, - Optional: true, - Description: `An allowed list of members (users, service accounts). -Using groups is not supported yet. - -The signed-in user originating the request must be a part of one -of the provided members. If not specified, a request may come -from any user (logged in/not logged in, not present in any -groups, etc.). -Formats: 'user:{emailid}', 'serviceAccount:{emailid}'`, - Elem: &resource_access_context_manager_access_levels_schema.Schema{ - Type: resource_access_context_manager_access_levels_schema.TypeString, - }, - }, - "negate": { - Type: resource_access_context_manager_access_levels_schema.TypeBool, - Optional: true, - Description: `Whether to negate the Condition. If true, the Condition becomes -a NAND over its non-empty fields, each field must be false for -the Condition overall to be satisfied. Defaults to false.`, - }, - "regions": { - Type: resource_access_context_manager_access_levels_schema.TypeList, - Optional: true, - Description: `The request must originate from one of the provided -countries/regions. -Format: A valid ISO 3166-1 alpha-2 code.`, - Elem: &resource_access_context_manager_access_levels_schema.Schema{ - Type: resource_access_context_manager_access_levels_schema.TypeString, - }, - }, - "required_access_levels": { - Type: resource_access_context_manager_access_levels_schema.TypeList, - Optional: true, - Description: `A list of other access levels defined in the same Policy, -referenced by resource name. Referencing an AccessLevel which -does not exist is an error. All access levels listed must be -granted for the Condition to be true. 
-Format: accessPolicies/{policy_id}/accessLevels/{short_name}`, - Elem: &resource_access_context_manager_access_levels_schema.Schema{ - Type: resource_access_context_manager_access_levels_schema.TypeString, - }, - }, - }, - }, - }, - "combining_function": { - Type: resource_access_context_manager_access_levels_schema.TypeString, - Optional: true, - ValidateFunc: resource_access_context_manager_access_levels_validation.StringInSlice([]string{"AND", "OR", ""}, false), - Description: `How the conditions list should be combined to determine if a request -is granted this AccessLevel. If AND is used, each Condition in -conditions must be satisfied for the AccessLevel to be applied. If -OR is used, at least one Condition in conditions must be satisfied -for the AccessLevel to be applied. Default value: "AND" Possible values: ["AND", "OR"]`, - Default: "AND", - }, - }, - }, - }, - "custom": { - Type: resource_access_context_manager_access_levels_schema.TypeList, - Optional: true, - Description: `Custom access level conditions are set using the Cloud Common Expression Language to represent the necessary conditions for the level to apply to a request. -See CEL spec at: https://github.com/google/cel-spec.`, - MaxItems: 1, - Elem: &resource_access_context_manager_access_levels_schema.Resource{ - Schema: map[string]*resource_access_context_manager_access_levels_schema.Schema{ - "expr": { - Type: resource_access_context_manager_access_levels_schema.TypeList, - Required: true, - Description: `Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. 
-This page details the objects and attributes that are used to the build the CEL expressions for -custom access levels - https://cloud.google.com/access-context-manager/docs/custom-access-level-spec.`, - MaxItems: 1, - Elem: &resource_access_context_manager_access_levels_schema.Resource{ - Schema: map[string]*resource_access_context_manager_access_levels_schema.Schema{ - "expression": { - Type: resource_access_context_manager_access_levels_schema.TypeString, - Required: true, - Description: `Textual representation of an expression in Common Expression Language syntax.`, - }, - "description": { - Type: resource_access_context_manager_access_levels_schema.TypeString, - Optional: true, - Description: `Description of the expression`, - }, - "location": { - Type: resource_access_context_manager_access_levels_schema.TypeString, - Optional: true, - Description: `String indicating the location of the expression for error reporting, e.g. a file name and a position in the file`, - }, - "title": { - Type: resource_access_context_manager_access_levels_schema.TypeString, - Optional: true, - Description: `Title for the expression, i.e. a short string describing its purpose.`, - }, - }, - }, - }, - }, - }, - }, - "description": { - Type: resource_access_context_manager_access_levels_schema.TypeString, - Optional: true, - Description: `Description of the AccessLevel and its use. 
Does not affect behavior.`, - }, - }, - } -} - -func resourceAccessContextManagerAccessLevelsCreate(d *resource_access_context_manager_access_levels_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - accessLevelsProp, err := expandAccessContextManagerAccessLevelsAccessLevels(d.Get("access_levels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("access_levels"); !isEmptyValue(resource_access_context_manager_access_levels_reflect.ValueOf(accessLevelsProp)) && (ok || !resource_access_context_manager_access_levels_reflect.DeepEqual(v, accessLevelsProp)) { - obj["accessLevels"] = accessLevelsProp - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/accessLevels:replaceAll") - if err != nil { - return err - } - - resource_access_context_manager_access_levels_log.Printf("[DEBUG] Creating new AccessLevels: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_access_levels_schema.TimeoutCreate)) - if err != nil { - return resource_access_context_manager_access_levels_fmt.Errorf("Error creating AccessLevels: %s", err) - } - - id, err := replaceVars(d, config, "{{parent}}/accessLevels") - if err != nil { - return resource_access_context_manager_access_levels_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = accessContextManagerOperationWaitTime( - config, res, "Creating AccessLevels", userAgent, - d.Timeout(resource_access_context_manager_access_levels_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_access_context_manager_access_levels_fmt.Errorf("Error waiting to create AccessLevels: %s", err) - } - 
- resource_access_context_manager_access_levels_log.Printf("[DEBUG] Finished creating AccessLevels %q: %#v", d.Id(), res) - - return resourceAccessContextManagerAccessLevelsRead(d, meta) -} - -func resourceAccessContextManagerAccessLevelsRead(d *resource_access_context_manager_access_levels_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/accessLevels") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_access_context_manager_access_levels_fmt.Sprintf("AccessContextManagerAccessLevels %q", d.Id())) - } - - if err := d.Set("access_levels", flattenAccessContextManagerAccessLevelsAccessLevels(res["accessLevels"], d, config)); err != nil { - return resource_access_context_manager_access_levels_fmt.Errorf("Error reading AccessLevels: %s", err) - } - - return nil -} - -func resourceAccessContextManagerAccessLevelsUpdate(d *resource_access_context_manager_access_levels_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - accessLevelsProp, err := expandAccessContextManagerAccessLevelsAccessLevels(d.Get("access_levels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("access_levels"); !isEmptyValue(resource_access_context_manager_access_levels_reflect.ValueOf(v)) && (ok || !resource_access_context_manager_access_levels_reflect.DeepEqual(v, accessLevelsProp)) { - obj["accessLevels"] = accessLevelsProp - } - - url, err := 
replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/accessLevels:replaceAll") - if err != nil { - return err - } - - resource_access_context_manager_access_levels_log.Printf("[DEBUG] Updating AccessLevels %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_access_levels_schema.TimeoutUpdate)) - - if err != nil { - return resource_access_context_manager_access_levels_fmt.Errorf("Error updating AccessLevels %q: %s", d.Id(), err) - } else { - resource_access_context_manager_access_levels_log.Printf("[DEBUG] Finished updating AccessLevels %q: %#v", d.Id(), res) - } - - err = accessContextManagerOperationWaitTime( - config, res, "Updating AccessLevels", userAgent, - d.Timeout(resource_access_context_manager_access_levels_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAccessContextManagerAccessLevelsRead(d, meta) -} - -func resourceAccessContextManagerAccessLevelsDelete(d *resource_access_context_manager_access_levels_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["accessLevels"] = []string{} - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/accessLevels:replaceAll") - if err != nil { - return err - } - - resource_access_context_manager_access_levels_log.Printf("[DEBUG] Deleting AccessLevels %q: %#v", d.Id(), obj) - res, err := sendRequestWithTimeout(config, "POST", "", url, userAgent, obj, d.Timeout(resource_access_context_manager_access_levels_schema.TimeoutUpdate)) - - if err != nil { - return resource_access_context_manager_access_levels_fmt.Errorf("Error deleting AccessLevels %q: %s", d.Id(), err) - } else { - 
resource_access_context_manager_access_levels_log.Printf("[DEBUG] Finished deleting AccessLevels %q: %#v", d.Id(), res) - } - - err = accessContextManagerOperationWaitTime( - config, res, "Updating AccessLevels", userAgent, - d.Timeout(resource_access_context_manager_access_levels_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return nil -} - -func resourceAccessContextManagerAccessLevelsImport(d *resource_access_context_manager_access_levels_schema.ResourceData, meta interface{}) ([]*resource_access_context_manager_access_levels_schema.ResourceData, error) { - config := meta.(*Config) - - parts, err := getImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)/(.+)"}, d, config, d.Id()) - if err != nil { - return nil, err - } - - if err := d.Set("parent", resource_access_context_manager_access_levels_fmt.Sprintf("accessPolicies/%s", parts["accessPolicy"])); err != nil { - return nil, resource_access_context_manager_access_levels_fmt.Errorf("Error setting parent: %s", err) - } - return []*resource_access_context_manager_access_levels_schema.ResourceData{d}, nil -} - -func flattenAccessContextManagerAccessLevelsAccessLevels(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_access_context_manager_access_levels_schema.NewSet(resource_access_context_manager_access_levels_schema.HashResource(accesscontextmanagerAccessLevelsAccessLevelsSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "name": flattenAccessContextManagerAccessLevelsAccessLevelsName(original["name"], d, config), - "title": flattenAccessContextManagerAccessLevelsAccessLevelsTitle(original["title"], d, config), - "description": flattenAccessContextManagerAccessLevelsAccessLevelsDescription(original["description"], d, 
config), - "basic": flattenAccessContextManagerAccessLevelsAccessLevelsBasic(original["basic"], d, config), - "custom": flattenAccessContextManagerAccessLevelsAccessLevelsCustom(original["custom"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsName(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsTitle(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsDescription(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsBasic(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["combining_function"] = - flattenAccessContextManagerAccessLevelsAccessLevelsBasicCombiningFunction(original["combiningFunction"], d, config) - transformed["conditions"] = - flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditions(original["conditions"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicCombiningFunction(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(resource_access_context_manager_access_levels_reflect.ValueOf(v)) { - return "AND" - } - - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditions(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config 
*Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "ip_subnetworks": flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsIpSubnetworks(original["ipSubnetworks"], d, config), - "required_access_levels": flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRequiredAccessLevels(original["requiredAccessLevels"], d, config), - "members": flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsMembers(original["members"], d, config), - "negate": flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsNegate(original["negate"], d, config), - "device_policy": flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicy(original["devicePolicy"], d, config), - "regions": flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRegions(original["regions"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsIpSubnetworks(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRequiredAccessLevels(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsMembers(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsNegate(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicy(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["require_screen_lock"] = - flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireScreenLock(original["requireScreenlock"], d, config) - transformed["allowed_encryption_statuses"] = - flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedEncryptionStatuses(original["allowedEncryptionStatuses"], d, config) - transformed["allowed_device_management_levels"] = - flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedDeviceManagementLevels(original["allowedDeviceManagementLevels"], d, config) - transformed["os_constraints"] = - flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraints(original["osConstraints"], d, config) - transformed["require_admin_approval"] = - flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireAdminApproval(original["requireAdminApproval"], d, config) - transformed["require_corp_owned"] = - flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireCorpOwned(original["requireCorpOwned"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireScreenLock(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedEncryptionStatuses(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - 
-func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedDeviceManagementLevels(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraints(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "minimum_version": flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsMinimumVersion(original["minimumVersion"], d, config), - "os_type": flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsOsType(original["osType"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsMinimumVersion(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsOsType(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireAdminApproval(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireCorpOwned(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRegions(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsCustom(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["expr"] = - flattenAccessContextManagerAccessLevelsAccessLevelsCustomExpr(original["expr"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsCustomExpr(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["expression"] = - flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprExpression(original["expression"], d, config) - transformed["title"] = - flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprTitle(original["title"], d, config) - transformed["description"] = - flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprDescription(original["description"], d, config) - transformed["location"] = - flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprLocation(original["location"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprExpression(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprTitle(v interface{}, d 
*resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprDescription(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessLevelsAccessLevelsCustomExprLocation(v interface{}, d *resource_access_context_manager_access_levels_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAccessContextManagerAccessLevelsAccessLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_access_context_manager_access_levels_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandAccessContextManagerAccessLevelsAccessLevelsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedTitle, err := expandAccessContextManagerAccessLevelsAccessLevelsTitle(original["title"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { - transformed["title"] = transformedTitle - } - - transformedDescription, err := expandAccessContextManagerAccessLevelsAccessLevelsDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - 
transformedBasic, err := expandAccessContextManagerAccessLevelsAccessLevelsBasic(original["basic"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedBasic); val.IsValid() && !isEmptyValue(val) { - transformed["basic"] = transformedBasic - } - - transformedCustom, err := expandAccessContextManagerAccessLevelsAccessLevelsCustom(original["custom"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedCustom); val.IsValid() && !isEmptyValue(val) { - transformed["custom"] = transformedCustom - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsBasic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCombiningFunction, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicCombiningFunction(original["combining_function"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedCombiningFunction); val.IsValid() && !isEmptyValue(val) { - transformed["combiningFunction"] = transformedCombiningFunction - } - - transformedConditions, err := 
expandAccessContextManagerAccessLevelsAccessLevelsBasicConditions(original["conditions"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedConditions); val.IsValid() && !isEmptyValue(val) { - transformed["conditions"] = transformedConditions - } - - return transformed, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsBasicCombiningFunction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIpSubnetworks, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsIpSubnetworks(original["ip_subnetworks"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedIpSubnetworks); val.IsValid() && !isEmptyValue(val) { - transformed["ipSubnetworks"] = transformedIpSubnetworks - } - - transformedRequiredAccessLevels, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRequiredAccessLevels(original["required_access_levels"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedRequiredAccessLevels); val.IsValid() && !isEmptyValue(val) { - transformed["requiredAccessLevels"] = transformedRequiredAccessLevels - } - - transformedMembers, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsMembers(original["members"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_access_context_manager_access_levels_reflect.ValueOf(transformedMembers); val.IsValid() && !isEmptyValue(val) { - transformed["members"] = transformedMembers - } - - transformedNegate, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsNegate(original["negate"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedNegate); val.IsValid() && !isEmptyValue(val) { - transformed["negate"] = transformedNegate - } - - transformedDevicePolicy, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicy(original["device_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedDevicePolicy); val.IsValid() && !isEmptyValue(val) { - transformed["devicePolicy"] = transformedDevicePolicy - } - - transformedRegions, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRegions(original["regions"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedRegions); val.IsValid() && !isEmptyValue(val) { - transformed["regions"] = transformedRegions - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsIpSubnetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRequiredAccessLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsMembers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsNegate(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequireScreenLock, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireScreenLock(original["require_screen_lock"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedRequireScreenLock); val.IsValid() && !isEmptyValue(val) { - transformed["requireScreenlock"] = transformedRequireScreenLock - } - - transformedAllowedEncryptionStatuses, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedEncryptionStatuses(original["allowed_encryption_statuses"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedAllowedEncryptionStatuses); val.IsValid() && !isEmptyValue(val) { - transformed["allowedEncryptionStatuses"] = transformedAllowedEncryptionStatuses - } - - transformedAllowedDeviceManagementLevels, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedDeviceManagementLevels(original["allowed_device_management_levels"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedAllowedDeviceManagementLevels); val.IsValid() && !isEmptyValue(val) { - transformed["allowedDeviceManagementLevels"] = transformedAllowedDeviceManagementLevels - } - - transformedOsConstraints, err := 
expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraints(original["os_constraints"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedOsConstraints); val.IsValid() && !isEmptyValue(val) { - transformed["osConstraints"] = transformedOsConstraints - } - - transformedRequireAdminApproval, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireAdminApproval(original["require_admin_approval"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedRequireAdminApproval); val.IsValid() && !isEmptyValue(val) { - transformed["requireAdminApproval"] = transformedRequireAdminApproval - } - - transformedRequireCorpOwned, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireCorpOwned(original["require_corp_owned"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedRequireCorpOwned); val.IsValid() && !isEmptyValue(val) { - transformed["requireCorpOwned"] = transformedRequireCorpOwned - } - - return transformed, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireScreenLock(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedEncryptionStatuses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyAllowedDeviceManagementLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraints(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMinimumVersion, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsMinimumVersion(original["minimum_version"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedMinimumVersion); val.IsValid() && !isEmptyValue(val) { - transformed["minimumVersion"] = transformedMinimumVersion - } - - transformedOsType, err := expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsOsType(original["os_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedOsType); val.IsValid() && !isEmptyValue(val) { - transformed["osType"] = transformedOsType - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsMinimumVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyOsConstraintsOsType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireAdminApproval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsDevicePolicyRequireCorpOwned(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsBasicConditionsRegions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsCustom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExpr, err := expandAccessContextManagerAccessLevelsAccessLevelsCustomExpr(original["expr"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedExpr); val.IsValid() && !isEmptyValue(val) { - transformed["expr"] = transformedExpr - } - - return transformed, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsCustomExpr(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExpression, err := expandAccessContextManagerAccessLevelsAccessLevelsCustomExprExpression(original["expression"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedExpression); val.IsValid() && !isEmptyValue(val) { - transformed["expression"] = transformedExpression - } - - transformedTitle, err := expandAccessContextManagerAccessLevelsAccessLevelsCustomExprTitle(original["title"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { - transformed["title"] = 
transformedTitle - } - - transformedDescription, err := expandAccessContextManagerAccessLevelsAccessLevelsCustomExprDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedLocation, err := expandAccessContextManagerAccessLevelsAccessLevelsCustomExprLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_access_levels_reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - return transformed, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsCustomExprExpression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsCustomExprTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsCustomExprDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessLevelsAccessLevelsCustomExprLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceAccessContextManagerAccessPolicy() *resource_access_context_manager_access_policy_schema.Resource { - return &resource_access_context_manager_access_policy_schema.Resource{ - Create: resourceAccessContextManagerAccessPolicyCreate, - Read: resourceAccessContextManagerAccessPolicyRead, - Update: resourceAccessContextManagerAccessPolicyUpdate, - Delete: resourceAccessContextManagerAccessPolicyDelete, - - Importer: 
&resource_access_context_manager_access_policy_schema.ResourceImporter{ - State: resourceAccessContextManagerAccessPolicyImport, - }, - - Timeouts: &resource_access_context_manager_access_policy_schema.ResourceTimeout{ - Create: resource_access_context_manager_access_policy_schema.DefaultTimeout(6 * resource_access_context_manager_access_policy_time.Minute), - Update: resource_access_context_manager_access_policy_schema.DefaultTimeout(6 * resource_access_context_manager_access_policy_time.Minute), - Delete: resource_access_context_manager_access_policy_schema.DefaultTimeout(6 * resource_access_context_manager_access_policy_time.Minute), - }, - - Schema: map[string]*resource_access_context_manager_access_policy_schema.Schema{ - "parent": { - Type: resource_access_context_manager_access_policy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The parent of this AccessPolicy in the Cloud Resource Hierarchy. -Format: organizations/{organization_id}`, - }, - "title": { - Type: resource_access_context_manager_access_policy_schema.TypeString, - Required: true, - Description: `Human readable title. Does not affect behavior.`, - }, - "create_time": { - Type: resource_access_context_manager_access_policy_schema.TypeString, - Computed: true, - Description: `Time the AccessPolicy was created in UTC.`, - }, - "name": { - Type: resource_access_context_manager_access_policy_schema.TypeString, - Computed: true, - Description: `Resource name of the AccessPolicy. 
Format: {policy_id}`, - }, - "update_time": { - Type: resource_access_context_manager_access_policy_schema.TypeString, - Computed: true, - Description: `Time the AccessPolicy was updated in UTC.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAccessContextManagerAccessPolicyCreate(d *resource_access_context_manager_access_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - parentProp, err := expandAccessContextManagerAccessPolicyParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(resource_access_context_manager_access_policy_reflect.ValueOf(parentProp)) && (ok || !resource_access_context_manager_access_policy_reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - titleProp, err := expandAccessContextManagerAccessPolicyTitle(d.Get("title"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("title"); !isEmptyValue(resource_access_context_manager_access_policy_reflect.ValueOf(titleProp)) && (ok || !resource_access_context_manager_access_policy_reflect.DeepEqual(v, titleProp)) { - obj["title"] = titleProp - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}accessPolicies") - if err != nil { - return err - } - - resource_access_context_manager_access_policy_log.Printf("[DEBUG] Creating new AccessPolicy: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_access_policy_schema.TimeoutCreate)) - if err != nil { - return resource_access_context_manager_access_policy_fmt.Errorf("Error creating AccessPolicy: %s", err) - } - - id, err := replaceVars(d, config, 
"{{name}}") - if err != nil { - return resource_access_context_manager_access_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = accessContextManagerOperationWaitTimeWithResponse( - config, res, &opRes, "Creating AccessPolicy", userAgent, - d.Timeout(resource_access_context_manager_access_policy_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_access_context_manager_access_policy_fmt.Errorf("Error waiting to create AccessPolicy: %s", err) - } - - if err := d.Set("name", flattenAccessContextManagerAccessPolicyName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{name}}") - if err != nil { - return resource_access_context_manager_access_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resp := res["response"].(map[string]interface{}) - name := GetResourceNameFromSelfLink(resp["name"].(string)) - resource_access_context_manager_access_policy_log.Printf("[DEBUG] Setting AccessPolicy name, id to %s", name) - if err := d.Set("name", name); err != nil { - return resource_access_context_manager_access_policy_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name) - - resource_access_context_manager_access_policy_log.Printf("[DEBUG] Finished creating AccessPolicy %q: %#v", d.Id(), res) - - return resourceAccessContextManagerAccessPolicyRead(d, meta) -} - -func resourceAccessContextManagerAccessPolicyRead(d *resource_access_context_manager_access_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}accessPolicies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", 
billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_access_context_manager_access_policy_fmt.Sprintf("AccessContextManagerAccessPolicy %q", d.Id())) - } - - if err := d.Set("name", flattenAccessContextManagerAccessPolicyName(res["name"], d, config)); err != nil { - return resource_access_context_manager_access_policy_fmt.Errorf("Error reading AccessPolicy: %s", err) - } - if err := d.Set("create_time", flattenAccessContextManagerAccessPolicyCreateTime(res["createTime"], d, config)); err != nil { - return resource_access_context_manager_access_policy_fmt.Errorf("Error reading AccessPolicy: %s", err) - } - if err := d.Set("update_time", flattenAccessContextManagerAccessPolicyUpdateTime(res["updateTime"], d, config)); err != nil { - return resource_access_context_manager_access_policy_fmt.Errorf("Error reading AccessPolicy: %s", err) - } - if err := d.Set("parent", flattenAccessContextManagerAccessPolicyParent(res["parent"], d, config)); err != nil { - return resource_access_context_manager_access_policy_fmt.Errorf("Error reading AccessPolicy: %s", err) - } - if err := d.Set("title", flattenAccessContextManagerAccessPolicyTitle(res["title"], d, config)); err != nil { - return resource_access_context_manager_access_policy_fmt.Errorf("Error reading AccessPolicy: %s", err) - } - - return nil -} - -func resourceAccessContextManagerAccessPolicyUpdate(d *resource_access_context_manager_access_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - titleProp, err := expandAccessContextManagerAccessPolicyTitle(d.Get("title"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("title"); !isEmptyValue(resource_access_context_manager_access_policy_reflect.ValueOf(v)) && (ok || 
!resource_access_context_manager_access_policy_reflect.DeepEqual(v, titleProp)) { - obj["title"] = titleProp - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}accessPolicies/{{name}}") - if err != nil { - return err - } - - resource_access_context_manager_access_policy_log.Printf("[DEBUG] Updating AccessPolicy %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("title") { - updateMask = append(updateMask, "title") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_access_context_manager_access_policy_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_access_policy_schema.TimeoutUpdate)) - - if err != nil { - return resource_access_context_manager_access_policy_fmt.Errorf("Error updating AccessPolicy %q: %s", d.Id(), err) - } else { - resource_access_context_manager_access_policy_log.Printf("[DEBUG] Finished updating AccessPolicy %q: %#v", d.Id(), res) - } - - err = accessContextManagerOperationWaitTime( - config, res, "Updating AccessPolicy", userAgent, - d.Timeout(resource_access_context_manager_access_policy_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAccessContextManagerAccessPolicyRead(d, meta) -} - -func resourceAccessContextManagerAccessPolicyDelete(d *resource_access_context_manager_access_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}accessPolicies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - 
resource_access_context_manager_access_policy_log.Printf("[DEBUG] Deleting AccessPolicy %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_access_policy_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "AccessPolicy") - } - - err = accessContextManagerOperationWaitTime( - config, res, "Deleting AccessPolicy", userAgent, - d.Timeout(resource_access_context_manager_access_policy_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_access_context_manager_access_policy_log.Printf("[DEBUG] Finished deleting AccessPolicy %q: %#v", d.Id(), res) - return nil -} - -func resourceAccessContextManagerAccessPolicyImport(d *resource_access_context_manager_access_policy_schema.ResourceData, meta interface{}) ([]*resource_access_context_manager_access_policy_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return nil, resource_access_context_manager_access_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_access_context_manager_access_policy_schema.ResourceData{d}, nil -} - -func flattenAccessContextManagerAccessPolicyName(v interface{}, d *resource_access_context_manager_access_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenAccessContextManagerAccessPolicyCreateTime(v interface{}, d *resource_access_context_manager_access_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessPolicyUpdateTime(v interface{}, d *resource_access_context_manager_access_policy_schema.ResourceData, 
config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessPolicyParent(v interface{}, d *resource_access_context_manager_access_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerAccessPolicyTitle(v interface{}, d *resource_access_context_manager_access_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAccessContextManagerAccessPolicyParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerAccessPolicyTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceAccessContextManagerGcpUserAccessBinding() *resource_access_context_manager_gcp_user_access_binding_schema.Resource { - return &resource_access_context_manager_gcp_user_access_binding_schema.Resource{ - Create: resourceAccessContextManagerGcpUserAccessBindingCreate, - Read: resourceAccessContextManagerGcpUserAccessBindingRead, - Update: resourceAccessContextManagerGcpUserAccessBindingUpdate, - Delete: resourceAccessContextManagerGcpUserAccessBindingDelete, - - Importer: &resource_access_context_manager_gcp_user_access_binding_schema.ResourceImporter{ - State: resourceAccessContextManagerGcpUserAccessBindingImport, - }, - - Timeouts: &resource_access_context_manager_gcp_user_access_binding_schema.ResourceTimeout{ - Create: resource_access_context_manager_gcp_user_access_binding_schema.DefaultTimeout(4 * resource_access_context_manager_gcp_user_access_binding_time.Minute), - Update: resource_access_context_manager_gcp_user_access_binding_schema.DefaultTimeout(4 * resource_access_context_manager_gcp_user_access_binding_time.Minute), - Delete: resource_access_context_manager_gcp_user_access_binding_schema.DefaultTimeout(4 * resource_access_context_manager_gcp_user_access_binding_time.Minute), - }, - - Schema: 
map[string]*resource_access_context_manager_gcp_user_access_binding_schema.Schema{ - "access_levels": { - Type: resource_access_context_manager_gcp_user_access_binding_schema.TypeList, - Required: true, - Description: `Required. Access level that a user must have to be granted access. Only one access level is supported, not multiple. This repeated field must have exactly one element. Example: "accessPolicies/9522/accessLevels/device_trusted"`, - MinItems: 1, - MaxItems: 1, - Elem: &resource_access_context_manager_gcp_user_access_binding_schema.Schema{ - Type: resource_access_context_manager_gcp_user_access_binding_schema.TypeString, - }, - }, - "group_key": { - Type: resource_access_context_manager_gcp_user_access_binding_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Required. Immutable. Google Group id whose members are subject to this binding's restrictions. See "id" in the G Suite Directory API's Groups resource. If a group's email address/alias is changed, this resource will continue to point at the changed group. This field does not accept group email addresses or aliases. Example: "01d520gv4vjcrht"`, - }, - "organization_id": { - Type: resource_access_context_manager_gcp_user_access_binding_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Required. ID of the parent organization.`, - }, - "name": { - Type: resource_access_context_manager_gcp_user_access_binding_schema.TypeString, - Computed: true, - Description: `Immutable. Assigned by the server during creation. The last segment has an arbitrary length and has only URI unreserved characters (as defined by RFC 3986 Section 2.3). Should not be specified by the client during creation. 
Example: "organizations/256/gcpUserAccessBindings/b3-BhcX_Ud5N"`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAccessContextManagerGcpUserAccessBindingCreate(d *resource_access_context_manager_gcp_user_access_binding_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - groupKeyProp, err := expandAccessContextManagerGcpUserAccessBindingGroupKey(d.Get("group_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("group_key"); !isEmptyValue(resource_access_context_manager_gcp_user_access_binding_reflect.ValueOf(groupKeyProp)) && (ok || !resource_access_context_manager_gcp_user_access_binding_reflect.DeepEqual(v, groupKeyProp)) { - obj["groupKey"] = groupKeyProp - } - accessLevelsProp, err := expandAccessContextManagerGcpUserAccessBindingAccessLevels(d.Get("access_levels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("access_levels"); !isEmptyValue(resource_access_context_manager_gcp_user_access_binding_reflect.ValueOf(accessLevelsProp)) && (ok || !resource_access_context_manager_gcp_user_access_binding_reflect.DeepEqual(v, accessLevelsProp)) { - obj["accessLevels"] = accessLevelsProp - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}organizations/{{organization_id}}/gcpUserAccessBindings") - if err != nil { - return err - } - - resource_access_context_manager_gcp_user_access_binding_log.Printf("[DEBUG] Creating new GcpUserAccessBinding: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_gcp_user_access_binding_schema.TimeoutCreate)) - if err != nil { - return 
resource_access_context_manager_gcp_user_access_binding_fmt.Errorf("Error creating GcpUserAccessBinding: %s", err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_access_context_manager_gcp_user_access_binding_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = accessContextManagerOperationWaitTimeWithResponse( - config, res, &opRes, "Creating GcpUserAccessBinding", userAgent, - d.Timeout(resource_access_context_manager_gcp_user_access_binding_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_access_context_manager_gcp_user_access_binding_fmt.Errorf("Error waiting to create GcpUserAccessBinding: %s", err) - } - - if err := d.Set("name", flattenAccessContextManagerGcpUserAccessBindingName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{name}}") - if err != nil { - return resource_access_context_manager_gcp_user_access_binding_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_access_context_manager_gcp_user_access_binding_log.Printf("[DEBUG] Finished creating GcpUserAccessBinding %q: %#v", d.Id(), res) - - return resourceAccessContextManagerGcpUserAccessBindingRead(d, meta) -} - -func resourceAccessContextManagerGcpUserAccessBindingRead(d *resource_access_context_manager_gcp_user_access_binding_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, 
resource_access_context_manager_gcp_user_access_binding_fmt.Sprintf("AccessContextManagerGcpUserAccessBinding %q", d.Id())) - } - - if err := d.Set("name", flattenAccessContextManagerGcpUserAccessBindingName(res["name"], d, config)); err != nil { - return resource_access_context_manager_gcp_user_access_binding_fmt.Errorf("Error reading GcpUserAccessBinding: %s", err) - } - if err := d.Set("group_key", flattenAccessContextManagerGcpUserAccessBindingGroupKey(res["groupKey"], d, config)); err != nil { - return resource_access_context_manager_gcp_user_access_binding_fmt.Errorf("Error reading GcpUserAccessBinding: %s", err) - } - if err := d.Set("access_levels", flattenAccessContextManagerGcpUserAccessBindingAccessLevels(res["accessLevels"], d, config)); err != nil { - return resource_access_context_manager_gcp_user_access_binding_fmt.Errorf("Error reading GcpUserAccessBinding: %s", err) - } - - return nil -} - -func resourceAccessContextManagerGcpUserAccessBindingUpdate(d *resource_access_context_manager_gcp_user_access_binding_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - accessLevelsProp, err := expandAccessContextManagerGcpUserAccessBindingAccessLevels(d.Get("access_levels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("access_levels"); !isEmptyValue(resource_access_context_manager_gcp_user_access_binding_reflect.ValueOf(v)) && (ok || !resource_access_context_manager_gcp_user_access_binding_reflect.DeepEqual(v, accessLevelsProp)) { - obj["accessLevels"] = accessLevelsProp - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") - if err != nil { - return err - } - - resource_access_context_manager_gcp_user_access_binding_log.Printf("[DEBUG] Updating GcpUserAccessBinding %q: %#v", d.Id(), obj) - updateMask := 
[]string{} - - if d.HasChange("access_levels") { - updateMask = append(updateMask, "accessLevels") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_access_context_manager_gcp_user_access_binding_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_gcp_user_access_binding_schema.TimeoutUpdate)) - - if err != nil { - return resource_access_context_manager_gcp_user_access_binding_fmt.Errorf("Error updating GcpUserAccessBinding %q: %s", d.Id(), err) - } else { - resource_access_context_manager_gcp_user_access_binding_log.Printf("[DEBUG] Finished updating GcpUserAccessBinding %q: %#v", d.Id(), res) - } - - err = accessContextManagerOperationWaitTime( - config, res, "Updating GcpUserAccessBinding", userAgent, - d.Timeout(resource_access_context_manager_gcp_user_access_binding_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAccessContextManagerGcpUserAccessBindingRead(d, meta) -} - -func resourceAccessContextManagerGcpUserAccessBindingDelete(d *resource_access_context_manager_gcp_user_access_binding_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_access_context_manager_gcp_user_access_binding_log.Printf("[DEBUG] Deleting GcpUserAccessBinding %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, 
d.Timeout(resource_access_context_manager_gcp_user_access_binding_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GcpUserAccessBinding") - } - - err = accessContextManagerOperationWaitTime( - config, res, "Deleting GcpUserAccessBinding", userAgent, - d.Timeout(resource_access_context_manager_gcp_user_access_binding_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_access_context_manager_gcp_user_access_binding_log.Printf("[DEBUG] Finished deleting GcpUserAccessBinding %q: %#v", d.Id(), res) - return nil -} - -func resourceAccessContextManagerGcpUserAccessBindingImport(d *resource_access_context_manager_gcp_user_access_binding_schema.ResourceData, meta interface{}) ([]*resource_access_context_manager_gcp_user_access_binding_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - name := d.Get("name").(string) - - if err := d.Set("name", name); err != nil { - return nil, resource_access_context_manager_gcp_user_access_binding_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name) - return []*resource_access_context_manager_gcp_user_access_binding_schema.ResourceData{d}, nil -} - -func flattenAccessContextManagerGcpUserAccessBindingName(v interface{}, d *resource_access_context_manager_gcp_user_access_binding_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerGcpUserAccessBindingGroupKey(v interface{}, d *resource_access_context_manager_gcp_user_access_binding_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerGcpUserAccessBindingAccessLevels(v interface{}, d *resource_access_context_manager_gcp_user_access_binding_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAccessContextManagerGcpUserAccessBindingGroupKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} - -func expandAccessContextManagerGcpUserAccessBindingAccessLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceAccessContextManagerServicePerimeter() *resource_access_context_manager_service_perimeter_schema.Resource { - return &resource_access_context_manager_service_perimeter_schema.Resource{ - Create: resourceAccessContextManagerServicePerimeterCreate, - Read: resourceAccessContextManagerServicePerimeterRead, - Update: resourceAccessContextManagerServicePerimeterUpdate, - Delete: resourceAccessContextManagerServicePerimeterDelete, - - Importer: &resource_access_context_manager_service_perimeter_schema.ResourceImporter{ - State: resourceAccessContextManagerServicePerimeterImport, - }, - - Timeouts: &resource_access_context_manager_service_perimeter_schema.ResourceTimeout{ - Create: resource_access_context_manager_service_perimeter_schema.DefaultTimeout(6 * resource_access_context_manager_service_perimeter_time.Minute), - Update: resource_access_context_manager_service_perimeter_schema.DefaultTimeout(6 * resource_access_context_manager_service_perimeter_time.Minute), - Delete: resource_access_context_manager_service_perimeter_schema.DefaultTimeout(6 * resource_access_context_manager_service_perimeter_time.Minute), - }, - - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "name": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Resource name for the ServicePerimeter. The short_name component must -begin with a letter and only include alphanumeric and '_'. -Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}`, - }, - "parent": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The AccessPolicy this ServicePerimeter lives in. 
-Format: accessPolicies/{policy_id}`, - }, - "title": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Required: true, - Description: `Human readable title. Must be unique within the Policy.`, - }, - "description": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `Description of the ServicePerimeter and its use. Does not affect -behavior.`, - }, - "perimeter_type": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_access_context_manager_service_perimeter_validation.StringInSlice([]string{"PERIMETER_TYPE_REGULAR", "PERIMETER_TYPE_BRIDGE", ""}, false), - Description: `Specifies the type of the Perimeter. There are two types: regular and -bridge. Regular Service Perimeter contains resources, access levels, -and restricted services. Every resource can be in at most -ONE regular Service Perimeter. - -In addition to being in a regular service perimeter, a resource can also -be in zero or more perimeter bridges. A perimeter bridge only contains -resources. Cross project operations are permitted if all effected -resources share some perimeter (whether bridge or regular). Perimeter -Bridge does not contain access levels or services: those are governed -entirely by the regular perimeter that resource is in. - -Perimeter Bridges are typically useful when building more complex -topologies with many independent perimeters that need to share some data -with a common perimeter, but should not be able to share data among -themselves. Default value: "PERIMETER_TYPE_REGULAR" Possible values: ["PERIMETER_TYPE_REGULAR", "PERIMETER_TYPE_BRIDGE"]`, - Default: "PERIMETER_TYPE_REGULAR", - }, - "spec": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `Proposed (or dry run) ServicePerimeter configuration. 
-This configuration allows to specify and test ServicePerimeter configuration -without enforcing actual access restrictions. Only allowed to be set when -the 'useExplicitDryRunSpec' flag is set.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "access_levels": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `A list of AccessLevel resource names that allow resources within -the ServicePerimeter to be accessed from the internet. -AccessLevels listed must be in the same policy as this -ServicePerimeter. Referencing a nonexistent AccessLevel is a -syntax error. If no AccessLevel names are listed, resources within -the perimeter can only be accessed via GCP calls with request -origins within the perimeter. For Service Perimeter Bridge, must -be empty. - -Format: accessPolicies/{policy_id}/accessLevels/{access_level_name}`, - Elem: &resource_access_context_manager_service_perimeter_schema.Schema{ - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - }, - AtLeastOneOf: []string{"status.0.resources", "status.0.access_levels", "status.0.restricted_services"}, - }, - "egress_policies": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `List of EgressPolicies to apply to the perimeter. A perimeter may -have multiple EgressPolicies, each of which is evaluated separately. -Access is granted if any EgressPolicy grants it. 
Must be empty for -a perimeter bridge.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "egress_from": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `Defines conditions on the source of a request causing this 'EgressPolicy' to apply.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "identities": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `A list of identities that are allowed access through this 'EgressPolicy'. -Should be in the format of email address. The email address should -represent individual user or service account only.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Schema{ - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - }, - }, - "identity_type": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - ValidateFunc: resource_access_context_manager_service_perimeter_validation.StringInSlice([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}, false), - Description: `Specifies the type of identities that are allowed access to outside the -perimeter. If left unspecified, then members of 'identities' field will -be allowed access. 
Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, - }, - }, - }, - }, - "egress_to": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `Defines the conditions on the 'ApiOperation' and destination resources that -cause this 'EgressPolicy' to apply.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "operations": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `A list of 'ApiOperations' that this egress rule applies to. A request matches -if it contains an operation/service in this list.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "method_selectors": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `API methods or permissions to allow. Method or permission must belong -to the service specified by 'serviceName' field. A single MethodSelector -entry with '*' specified for the 'method' field will allow all methods -AND permissions for the service specified in 'serviceName'.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "method": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `Value for 'method' should be a valid method name for the corresponding -'serviceName' in 'ApiOperation'. 
If '*' used as value for method, -then ALL methods and permissions are allowed.`, - }, - "permission": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `Value for permission should be a valid Cloud IAM permission for the -corresponding 'serviceName' in 'ApiOperation'.`, - }, - }, - }, - }, - "service_name": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `The name of the API whose methods or permissions the 'IngressPolicy' or -'EgressPolicy' want to allow. A single 'ApiOperation' with serviceName -field set to '*' will allow all methods AND permissions for all services.`, - }, - }, - }, - }, - "resources": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `A list of resources, currently only projects in the form -'projects/', that match this to stanza. A request matches -if it contains a resource in this list. If * is specified for resources, -then this 'EgressTo' rule will authorize access to all resources outside -the perimeter.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Schema{ - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "ingress_policies": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `List of 'IngressPolicies' to apply to the perimeter. A perimeter may -have multiple 'IngressPolicies', each of which is evaluated -separately. Access is granted if any 'Ingress Policy' grants it. 
-Must be empty for a perimeter bridge.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "ingress_from": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `Defines the conditions on the source of a request causing this 'IngressPolicy' -to apply.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "identities": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `A list of identities that are allowed access through this ingress policy. -Should be in the format of email address. The email address should represent -individual user or service account only.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Schema{ - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - }, - }, - "identity_type": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - ValidateFunc: resource_access_context_manager_service_perimeter_validation.StringInSlice([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}, false), - Description: `Specifies the type of identities that are allowed access from outside the -perimeter. If left unspecified, then members of 'identities' field will be -allowed access. 
Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, - }, - "sources": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `Sources that this 'IngressPolicy' authorizes access from.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "access_level": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `An 'AccessLevel' resource name that allow resources within the -'ServicePerimeters' to be accessed from the internet. 'AccessLevels' listed -must be in the same policy as this 'ServicePerimeter'. Referencing a nonexistent -'AccessLevel' will cause an error. If no 'AccessLevel' names are listed, -resources within the perimeter can only be accessed via Google Cloud calls -with request origins within the perimeter. -Example 'accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.' -If * is specified, then all IngressSources will be allowed.`, - }, - "resource": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `A Google Cloud resource that is allowed to ingress the perimeter. -Requests from these resources will be allowed to access perimeter data. -Currently only projects are allowed. Format 'projects/{project_number}' -The project may be in any Google Cloud organization, not just the -organization that the perimeter is defined in. 
'*' is not allowed, the case -of allowing all Google Cloud resources only is not supported.`, - }, - }, - }, - }, - }, - }, - }, - "ingress_to": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `Defines the conditions on the 'ApiOperation' and request destination that cause -this 'IngressPolicy' to apply.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "operations": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `A list of 'ApiOperations' the sources specified in corresponding 'IngressFrom' -are allowed to perform in this 'ServicePerimeter'.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "method_selectors": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `API methods or permissions to allow. Method or permission must belong to -the service specified by serviceName field. A single 'MethodSelector' entry -with '*' specified for the method field will allow all methods AND -permissions for the service specified in 'serviceName'.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "method": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `Value for method should be a valid method name for the corresponding -serviceName in 'ApiOperation'. 
If '*' used as value for 'method', then -ALL methods and permissions are allowed.`, - }, - "permission": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `Value for permission should be a valid Cloud IAM permission for the -corresponding 'serviceName' in 'ApiOperation'.`, - }, - }, - }, - }, - "service_name": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `The name of the API whose methods or permissions the 'IngressPolicy' or -'EgressPolicy' want to allow. A single 'ApiOperation' with 'serviceName' -field set to '*' will allow all methods AND permissions for all services.`, - }, - }, - }, - }, - "resources": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `A list of resources, currently only projects in the form -'projects/', protected by this 'ServicePerimeter' -that are allowed to be accessed by sources defined in the -corresponding 'IngressFrom'. A request matches if it contains -a resource in this list. If '*' is specified for resources, -then this 'IngressTo' rule will authorize access to all -resources inside the perimeter, provided that the request -also matches the 'operations' field.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Schema{ - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "resources": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `A list of GCP resources that are inside of the service perimeter. -Currently only projects are allowed. 
-Format: projects/{project_number}`, - Elem: &resource_access_context_manager_service_perimeter_schema.Schema{ - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - }, - AtLeastOneOf: []string{"status.0.resources", "status.0.access_levels", "status.0.restricted_services"}, - }, - "restricted_services": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `GCP services that are subject to the Service Perimeter -restrictions. Must contain a list of services. For example, if -'storage.googleapis.com' is specified, access to the storage -buckets inside the perimeter must meet the perimeter's access -restrictions.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Schema{ - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - }, - AtLeastOneOf: []string{"status.0.resources", "status.0.access_levels", "status.0.restricted_services"}, - }, - "vpc_accessible_services": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `Specifies how APIs are allowed to communicate within the Service -Perimeter.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "allowed_services": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `The list of APIs usable within the Service Perimeter. 
-Must be empty unless 'enableRestriction' is True.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Schema{ - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - }, - }, - "enable_restriction": { - Type: resource_access_context_manager_service_perimeter_schema.TypeBool, - Optional: true, - Description: `Whether to restrict API calls within the Service Perimeter to the -list of APIs specified in 'allowedServices'.`, - }, - }, - }, - }, - }, - }, - }, - "status": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `ServicePerimeter configuration. Specifies sets of resources, -restricted services and access levels that determine -perimeter content and boundaries.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "access_levels": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `A list of AccessLevel resource names that allow resources within -the ServicePerimeter to be accessed from the internet. -AccessLevels listed must be in the same policy as this -ServicePerimeter. Referencing a nonexistent AccessLevel is a -syntax error. If no AccessLevel names are listed, resources within -the perimeter can only be accessed via GCP calls with request -origins within the perimeter. For Service Perimeter Bridge, must -be empty. 
- -Format: accessPolicies/{policy_id}/accessLevels/{access_level_name}`, - Elem: &resource_access_context_manager_service_perimeter_schema.Schema{ - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - }, - AtLeastOneOf: []string{"status.0.resources", "status.0.access_levels", "status.0.restricted_services"}, - }, - "egress_policies": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `List of EgressPolicies to apply to the perimeter. A perimeter may -have multiple EgressPolicies, each of which is evaluated separately. -Access is granted if any EgressPolicy grants it. Must be empty for -a perimeter bridge.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "egress_from": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `Defines conditions on the source of a request causing this 'EgressPolicy' to apply.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "identities": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `A list of identities that are allowed access through this 'EgressPolicy'. -Should be in the format of email address. 
The email address should -represent individual user or service account only.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Schema{ - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - }, - }, - "identity_type": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - ValidateFunc: resource_access_context_manager_service_perimeter_validation.StringInSlice([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}, false), - Description: `Specifies the type of identities that are allowed access to outside the -perimeter. If left unspecified, then members of 'identities' field will -be allowed access. Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, - }, - }, - }, - }, - "egress_to": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `Defines the conditions on the 'ApiOperation' and destination resources that -cause this 'EgressPolicy' to apply.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "operations": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `A list of 'ApiOperations' that this egress rule applies to. A request matches -if it contains an operation/service in this list.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "method_selectors": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `API methods or permissions to allow. Method or permission must belong -to the service specified by 'serviceName' field. 
A single MethodSelector -entry with '*' specified for the 'method' field will allow all methods -AND permissions for the service specified in 'serviceName'.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "method": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `Value for 'method' should be a valid method name for the corresponding -'serviceName' in 'ApiOperation'. If '*' used as value for method, -then ALL methods and permissions are allowed.`, - }, - "permission": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `Value for permission should be a valid Cloud IAM permission for the -corresponding 'serviceName' in 'ApiOperation'.`, - }, - }, - }, - }, - "service_name": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `The name of the API whose methods or permissions the 'IngressPolicy' or -'EgressPolicy' want to allow. A single 'ApiOperation' with serviceName -field set to '*' will allow all methods AND permissions for all services.`, - }, - }, - }, - }, - "resources": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `A list of resources, currently only projects in the form -'projects/', that match this to stanza. A request matches -if it contains a resource in this list. 
If * is specified for resources, -then this 'EgressTo' rule will authorize access to all resources outside -the perimeter.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Schema{ - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "ingress_policies": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `List of 'IngressPolicies' to apply to the perimeter. A perimeter may -have multiple 'IngressPolicies', each of which is evaluated -separately. Access is granted if any 'Ingress Policy' grants it. -Must be empty for a perimeter bridge.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "ingress_from": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `Defines the conditions on the source of a request causing this 'IngressPolicy' -to apply.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "identities": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `A list of identities that are allowed access through this ingress policy. -Should be in the format of email address. 
The email address should represent -individual user or service account only.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Schema{ - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - }, - }, - "identity_type": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - ValidateFunc: resource_access_context_manager_service_perimeter_validation.StringInSlice([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}, false), - Description: `Specifies the type of identities that are allowed access from outside the -perimeter. If left unspecified, then members of 'identities' field will be -allowed access. Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, - }, - "sources": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `Sources that this 'IngressPolicy' authorizes access from.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "access_level": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `An 'AccessLevel' resource name that allow resources within the -'ServicePerimeters' to be accessed from the internet. 'AccessLevels' listed -must be in the same policy as this 'ServicePerimeter'. Referencing a nonexistent -'AccessLevel' will cause an error. If no 'AccessLevel' names are listed, -resources within the perimeter can only be accessed via Google Cloud calls -with request origins within the perimeter. -Example 'accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.' 
-If * is specified, then all IngressSources will be allowed.`, - }, - "resource": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `A Google Cloud resource that is allowed to ingress the perimeter. -Requests from these resources will be allowed to access perimeter data. -Currently only projects are allowed. Format 'projects/{project_number}' -The project may be in any Google Cloud organization, not just the -organization that the perimeter is defined in. '*' is not allowed, the case -of allowing all Google Cloud resources only is not supported.`, - }, - }, - }, - }, - }, - }, - }, - "ingress_to": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `Defines the conditions on the 'ApiOperation' and request destination that cause -this 'IngressPolicy' to apply.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "operations": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `A list of 'ApiOperations' the sources specified in corresponding 'IngressFrom' -are allowed to perform in this 'ServicePerimeter'.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "method_selectors": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `API methods or permissions to allow. Method or permission must belong to -the service specified by serviceName field. 
A single 'MethodSelector' entry -with '*' specified for the method field will allow all methods AND -permissions for the service specified in 'serviceName'.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "method": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `Value for method should be a valid method name for the corresponding -serviceName in 'ApiOperation'. If '*' used as value for 'method', then -ALL methods and permissions are allowed.`, - }, - "permission": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `Value for permission should be a valid Cloud IAM permission for the -corresponding 'serviceName' in 'ApiOperation'.`, - }, - }, - }, - }, - "service_name": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Optional: true, - Description: `The name of the API whose methods or permissions the 'IngressPolicy' or -'EgressPolicy' want to allow. A single 'ApiOperation' with 'serviceName' -field set to '*' will allow all methods AND permissions for all services.`, - }, - }, - }, - }, - "resources": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `A list of resources, currently only projects in the form -'projects/', protected by this 'ServicePerimeter' -that are allowed to be accessed by sources defined in the -corresponding 'IngressFrom'. A request matches if it contains -a resource in this list. 
If '*' is specified for resources, -then this 'IngressTo' rule will authorize access to all -resources inside the perimeter, provided that the request -also matches the 'operations' field.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Schema{ - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "resources": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `A list of GCP resources that are inside of the service perimeter. -Currently only projects are allowed. -Format: projects/{project_number}`, - Elem: &resource_access_context_manager_service_perimeter_schema.Schema{ - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - }, - AtLeastOneOf: []string{"status.0.resources", "status.0.access_levels", "status.0.restricted_services"}, - }, - "restricted_services": { - Type: resource_access_context_manager_service_perimeter_schema.TypeSet, - Optional: true, - Description: `GCP services that are subject to the Service Perimeter -restrictions. Must contain a list of services. 
For example, if -'storage.googleapis.com' is specified, access to the storage -buckets inside the perimeter must meet the perimeter's access -restrictions.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Schema{ - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - }, - Set: resource_access_context_manager_service_perimeter_schema.HashString, - AtLeastOneOf: []string{"status.0.resources", "status.0.access_levels", "status.0.restricted_services"}, - }, - "vpc_accessible_services": { - Type: resource_access_context_manager_service_perimeter_schema.TypeList, - Optional: true, - Description: `Specifies how APIs are allowed to communicate within the Service -Perimeter.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeter_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeter_schema.Schema{ - "allowed_services": { - Type: resource_access_context_manager_service_perimeter_schema.TypeSet, - Optional: true, - Description: `The list of APIs usable within the Service Perimeter. -Must be empty unless 'enableRestriction' is True.`, - Elem: &resource_access_context_manager_service_perimeter_schema.Schema{ - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - }, - Set: resource_access_context_manager_service_perimeter_schema.HashString, - }, - "enable_restriction": { - Type: resource_access_context_manager_service_perimeter_schema.TypeBool, - Optional: true, - Description: `Whether to restrict API calls within the Service Perimeter to the -list of APIs specified in 'allowedServices'.`, - }, - }, - }, - }, - }, - }, - }, - "use_explicit_dry_run_spec": { - Type: resource_access_context_manager_service_perimeter_schema.TypeBool, - Optional: true, - Description: `Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists -for all Service Perimeters, and that spec is identical to the status for those -Service Perimeters. 
When this flag is set, it inhibits the generation of the -implicit spec, thereby allowing the user to explicitly provide a -configuration ("spec") to use in a dry-run version of the Service Perimeter. -This allows the user to test changes to the enforced config ("status") without -actually enforcing them. This testing is done through analyzing the differences -between currently enforced and suggested restrictions. useExplicitDryRunSpec must -bet set to True if any of the fields in the spec are set to non-default values.`, - }, - "create_time": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Computed: true, - Description: `Time the AccessPolicy was created in UTC.`, - }, - "update_time": { - Type: resource_access_context_manager_service_perimeter_schema.TypeString, - Computed: true, - Description: `Time the AccessPolicy was updated in UTC.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAccessContextManagerServicePerimeterCreate(d *resource_access_context_manager_service_perimeter_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - titleProp, err := expandAccessContextManagerServicePerimeterTitle(d.Get("title"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("title"); !isEmptyValue(resource_access_context_manager_service_perimeter_reflect.ValueOf(titleProp)) && (ok || !resource_access_context_manager_service_perimeter_reflect.DeepEqual(v, titleProp)) { - obj["title"] = titleProp - } - descriptionProp, err := expandAccessContextManagerServicePerimeterDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_access_context_manager_service_perimeter_reflect.ValueOf(descriptionProp)) && (ok || 
!resource_access_context_manager_service_perimeter_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - perimeterTypeProp, err := expandAccessContextManagerServicePerimeterPerimeterType(d.Get("perimeter_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("perimeter_type"); !isEmptyValue(resource_access_context_manager_service_perimeter_reflect.ValueOf(perimeterTypeProp)) && (ok || !resource_access_context_manager_service_perimeter_reflect.DeepEqual(v, perimeterTypeProp)) { - obj["perimeterType"] = perimeterTypeProp - } - statusProp, err := expandAccessContextManagerServicePerimeterStatus(d.Get("status"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("status"); !isEmptyValue(resource_access_context_manager_service_perimeter_reflect.ValueOf(statusProp)) && (ok || !resource_access_context_manager_service_perimeter_reflect.DeepEqual(v, statusProp)) { - obj["status"] = statusProp - } - specProp, err := expandAccessContextManagerServicePerimeterSpec(d.Get("spec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("spec"); !isEmptyValue(resource_access_context_manager_service_perimeter_reflect.ValueOf(specProp)) && (ok || !resource_access_context_manager_service_perimeter_reflect.DeepEqual(v, specProp)) { - obj["spec"] = specProp - } - useExplicitDryRunSpecProp, err := expandAccessContextManagerServicePerimeterUseExplicitDryRunSpec(d.Get("use_explicit_dry_run_spec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("use_explicit_dry_run_spec"); !isEmptyValue(resource_access_context_manager_service_perimeter_reflect.ValueOf(useExplicitDryRunSpecProp)) && (ok || !resource_access_context_manager_service_perimeter_reflect.DeepEqual(v, useExplicitDryRunSpecProp)) { - obj["useExplicitDryRunSpec"] = useExplicitDryRunSpecProp - } - parentProp, err := expandAccessContextManagerServicePerimeterParent(d.Get("parent"), d, config) - if 
err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(resource_access_context_manager_service_perimeter_reflect.ValueOf(parentProp)) && (ok || !resource_access_context_manager_service_perimeter_reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - nameProp, err := expandAccessContextManagerServicePerimeterName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_access_context_manager_service_perimeter_reflect.ValueOf(nameProp)) && (ok || !resource_access_context_manager_service_perimeter_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - obj, err = resourceAccessContextManagerServicePerimeterEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "{{name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/servicePerimeters") - if err != nil { - return err - } - - resource_access_context_manager_service_perimeter_log.Printf("[DEBUG] Creating new ServicePerimeter: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_service_perimeter_schema.TimeoutCreate)) - if err != nil { - return resource_access_context_manager_service_perimeter_fmt.Errorf("Error creating ServicePerimeter: %s", err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_access_context_manager_service_perimeter_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = accessContextManagerOperationWaitTimeWithResponse( - config, res, &opRes, "Creating ServicePerimeter", userAgent, - 
d.Timeout(resource_access_context_manager_service_perimeter_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_access_context_manager_service_perimeter_fmt.Errorf("Error waiting to create ServicePerimeter: %s", err) - } - - if err := d.Set("name", flattenAccessContextManagerServicePerimeterName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{name}}") - if err != nil { - return resource_access_context_manager_service_perimeter_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_access_context_manager_service_perimeter_log.Printf("[DEBUG] Finished creating ServicePerimeter %q: %#v", d.Id(), res) - - return resourceAccessContextManagerServicePerimeterRead(d, meta) -} - -func resourceAccessContextManagerServicePerimeterRead(d *resource_access_context_manager_service_perimeter_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_access_context_manager_service_perimeter_fmt.Sprintf("AccessContextManagerServicePerimeter %q", d.Id())) - } - - if err := d.Set("title", flattenAccessContextManagerServicePerimeterTitle(res["title"], d, config)); err != nil { - return resource_access_context_manager_service_perimeter_fmt.Errorf("Error reading ServicePerimeter: %s", err) - } - if err := d.Set("description", flattenAccessContextManagerServicePerimeterDescription(res["description"], d, config)); err != nil { - return resource_access_context_manager_service_perimeter_fmt.Errorf("Error reading 
ServicePerimeter: %s", err) - } - if err := d.Set("create_time", flattenAccessContextManagerServicePerimeterCreateTime(res["createTime"], d, config)); err != nil { - return resource_access_context_manager_service_perimeter_fmt.Errorf("Error reading ServicePerimeter: %s", err) - } - if err := d.Set("update_time", flattenAccessContextManagerServicePerimeterUpdateTime(res["updateTime"], d, config)); err != nil { - return resource_access_context_manager_service_perimeter_fmt.Errorf("Error reading ServicePerimeter: %s", err) - } - if err := d.Set("perimeter_type", flattenAccessContextManagerServicePerimeterPerimeterType(res["perimeterType"], d, config)); err != nil { - return resource_access_context_manager_service_perimeter_fmt.Errorf("Error reading ServicePerimeter: %s", err) - } - if err := d.Set("status", flattenAccessContextManagerServicePerimeterStatus(res["status"], d, config)); err != nil { - return resource_access_context_manager_service_perimeter_fmt.Errorf("Error reading ServicePerimeter: %s", err) - } - if err := d.Set("spec", flattenAccessContextManagerServicePerimeterSpec(res["spec"], d, config)); err != nil { - return resource_access_context_manager_service_perimeter_fmt.Errorf("Error reading ServicePerimeter: %s", err) - } - if err := d.Set("use_explicit_dry_run_spec", flattenAccessContextManagerServicePerimeterUseExplicitDryRunSpec(res["useExplicitDryRunSpec"], d, config)); err != nil { - return resource_access_context_manager_service_perimeter_fmt.Errorf("Error reading ServicePerimeter: %s", err) - } - if err := d.Set("name", flattenAccessContextManagerServicePerimeterName(res["name"], d, config)); err != nil { - return resource_access_context_manager_service_perimeter_fmt.Errorf("Error reading ServicePerimeter: %s", err) - } - - return nil -} - -func resourceAccessContextManagerServicePerimeterUpdate(d *resource_access_context_manager_service_perimeter_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - titleProp, err := expandAccessContextManagerServicePerimeterTitle(d.Get("title"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("title"); !isEmptyValue(resource_access_context_manager_service_perimeter_reflect.ValueOf(v)) && (ok || !resource_access_context_manager_service_perimeter_reflect.DeepEqual(v, titleProp)) { - obj["title"] = titleProp - } - descriptionProp, err := expandAccessContextManagerServicePerimeterDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_access_context_manager_service_perimeter_reflect.ValueOf(v)) && (ok || !resource_access_context_manager_service_perimeter_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - statusProp, err := expandAccessContextManagerServicePerimeterStatus(d.Get("status"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("status"); !isEmptyValue(resource_access_context_manager_service_perimeter_reflect.ValueOf(v)) && (ok || !resource_access_context_manager_service_perimeter_reflect.DeepEqual(v, statusProp)) { - obj["status"] = statusProp - } - specProp, err := expandAccessContextManagerServicePerimeterSpec(d.Get("spec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("spec"); !isEmptyValue(resource_access_context_manager_service_perimeter_reflect.ValueOf(v)) && (ok || !resource_access_context_manager_service_perimeter_reflect.DeepEqual(v, specProp)) { - obj["spec"] = specProp - } - useExplicitDryRunSpecProp, err := expandAccessContextManagerServicePerimeterUseExplicitDryRunSpec(d.Get("use_explicit_dry_run_spec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("use_explicit_dry_run_spec"); 
!isEmptyValue(resource_access_context_manager_service_perimeter_reflect.ValueOf(v)) && (ok || !resource_access_context_manager_service_perimeter_reflect.DeepEqual(v, useExplicitDryRunSpecProp)) { - obj["useExplicitDryRunSpec"] = useExplicitDryRunSpecProp - } - - obj, err = resourceAccessContextManagerServicePerimeterEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "{{name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") - if err != nil { - return err - } - - resource_access_context_manager_service_perimeter_log.Printf("[DEBUG] Updating ServicePerimeter %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("title") { - updateMask = append(updateMask, "title") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("status") { - updateMask = append(updateMask, "status") - } - - if d.HasChange("spec") { - updateMask = append(updateMask, "spec") - } - - if d.HasChange("use_explicit_dry_run_spec") { - updateMask = append(updateMask, "useExplicitDryRunSpec") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_access_context_manager_service_perimeter_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_service_perimeter_schema.TimeoutUpdate)) - - if err != nil { - return resource_access_context_manager_service_perimeter_fmt.Errorf("Error updating ServicePerimeter %q: %s", d.Id(), err) - } else { - resource_access_context_manager_service_perimeter_log.Printf("[DEBUG] Finished updating ServicePerimeter %q: %#v", d.Id(), res) - } - - err = 
accessContextManagerOperationWaitTime( - config, res, "Updating ServicePerimeter", userAgent, - d.Timeout(resource_access_context_manager_service_perimeter_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAccessContextManagerServicePerimeterRead(d, meta) -} - -func resourceAccessContextManagerServicePerimeterDelete(d *resource_access_context_manager_service_perimeter_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "{{name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_access_context_manager_service_perimeter_log.Printf("[DEBUG] Deleting ServicePerimeter %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_service_perimeter_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ServicePerimeter") - } - - err = accessContextManagerOperationWaitTime( - config, res, "Deleting ServicePerimeter", userAgent, - d.Timeout(resource_access_context_manager_service_perimeter_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_access_context_manager_service_perimeter_log.Printf("[DEBUG] Finished deleting ServicePerimeter %q: %#v", d.Id(), res) - return nil -} - -func resourceAccessContextManagerServicePerimeterImport(d *resource_access_context_manager_service_perimeter_schema.ResourceData, meta interface{}) ([]*resource_access_context_manager_service_perimeter_schema.ResourceData, error) { - config := meta.(*Config) - - if 
err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - stringParts := resource_access_context_manager_service_perimeter_strings.Split(d.Get("name").(string), "/") - if len(stringParts) < 2 { - return nil, resource_access_context_manager_service_perimeter_fmt.Errorf("Error parsing parent name. Should be in form accessPolicies/{{policy_id}}/servicePerimeters/{{short_name}}") - } - if err := d.Set("parent", resource_access_context_manager_service_perimeter_fmt.Sprintf("%s/%s", stringParts[0], stringParts[1])); err != nil { - return nil, resource_access_context_manager_service_perimeter_fmt.Errorf("Error setting parent, %s", err) - } - return []*resource_access_context_manager_service_perimeter_schema.ResourceData{d}, nil -} - -func flattenAccessContextManagerServicePerimeterTitle(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterDescription(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterCreateTime(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterUpdateTime(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterPerimeterType(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(resource_access_context_manager_service_perimeter_reflect.ValueOf(v)) { - return "PERIMETER_TYPE_REGULAR" - } - - return v -} - -func flattenAccessContextManagerServicePerimeterStatus(v interface{}, d 
*resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resources"] = - flattenAccessContextManagerServicePerimeterStatusResources(original["resources"], d, config) - transformed["access_levels"] = - flattenAccessContextManagerServicePerimeterStatusAccessLevels(original["accessLevels"], d, config) - transformed["restricted_services"] = - flattenAccessContextManagerServicePerimeterStatusRestrictedServices(original["restrictedServices"], d, config) - transformed["vpc_accessible_services"] = - flattenAccessContextManagerServicePerimeterStatusVPCAccessibleServices(original["vpcAccessibleServices"], d, config) - transformed["ingress_policies"] = - flattenAccessContextManagerServicePerimeterStatusIngressPolicies(original["ingressPolicies"], d, config) - transformed["egress_policies"] = - flattenAccessContextManagerServicePerimeterStatusEgressPolicies(original["egressPolicies"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimeterStatusResources(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterStatusAccessLevels(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterStatusRestrictedServices(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_access_context_manager_service_perimeter_schema.NewSet(resource_access_context_manager_service_perimeter_schema.HashString, v.([]interface{})) -} - -func 
flattenAccessContextManagerServicePerimeterStatusVPCAccessibleServices(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enable_restriction"] = - flattenAccessContextManagerServicePerimeterStatusVPCAccessibleServicesEnableRestriction(original["enableRestriction"], d, config) - transformed["allowed_services"] = - flattenAccessContextManagerServicePerimeterStatusVPCAccessibleServicesAllowedServices(original["allowedServices"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimeterStatusVPCAccessibleServicesEnableRestriction(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterStatusVPCAccessibleServicesAllowedServices(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_access_context_manager_service_perimeter_schema.NewSet(resource_access_context_manager_service_perimeter_schema.HashString, v.([]interface{})) -} - -func flattenAccessContextManagerServicePerimeterStatusIngressPolicies(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "ingress_from": flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFrom(original["ingressFrom"], d, config), - "ingress_to": 
flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressTo(original["ingressTo"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFrom(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["identity_type"] = - flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentityType(original["identityType"], d, config) - transformed["identities"] = - flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentities(original["identities"], d, config) - transformed["sources"] = - flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSources(original["sources"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentityType(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentities(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSources(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "access_level": 
flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesAccessLevel(original["accessLevel"], d, config), - "resource": flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesResource(original["resource"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesResource(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressTo(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resources"] = - flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToResources(original["resources"], d, config) - transformed["operations"] = - flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperations(original["operations"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToResources(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperations(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 
0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "service_name": flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsServiceName(original["serviceName"], d, config), - "method_selectors": flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectors(original["methodSelectors"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsServiceName(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "method": flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(original["method"], d, config), - "permission": flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(original["permission"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d 
*resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterStatusEgressPolicies(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "egress_from": flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFrom(original["egressFrom"], d, config), - "egress_to": flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressTo(original["egressTo"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFrom(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["identity_type"] = - flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentityType(original["identityType"], d, config) - transformed["identities"] = - flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentities(original["identities"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentityType(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentities(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressTo(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resources"] = - flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToResources(original["resources"], d, config) - transformed["operations"] = - flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperations(original["operations"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToResources(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperations(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "service_name": flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsServiceName(original["serviceName"], d, config), - "method_selectors": flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectors(original["methodSelectors"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsServiceName(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return 
v -} - -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "method": flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(original["method"], d, config), - "permission": flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(original["permission"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpec(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resources"] = - flattenAccessContextManagerServicePerimeterSpecResources(original["resources"], d, config) - transformed["access_levels"] = - flattenAccessContextManagerServicePerimeterSpecAccessLevels(original["accessLevels"], d, config) - transformed["restricted_services"] = - 
flattenAccessContextManagerServicePerimeterSpecRestrictedServices(original["restrictedServices"], d, config) - transformed["vpc_accessible_services"] = - flattenAccessContextManagerServicePerimeterSpecVPCAccessibleServices(original["vpcAccessibleServices"], d, config) - transformed["ingress_policies"] = - flattenAccessContextManagerServicePerimeterSpecIngressPolicies(original["ingressPolicies"], d, config) - transformed["egress_policies"] = - flattenAccessContextManagerServicePerimeterSpecEgressPolicies(original["egressPolicies"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimeterSpecResources(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecAccessLevels(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecRestrictedServices(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecVPCAccessibleServices(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enable_restriction"] = - flattenAccessContextManagerServicePerimeterSpecVPCAccessibleServicesEnableRestriction(original["enableRestriction"], d, config) - transformed["allowed_services"] = - flattenAccessContextManagerServicePerimeterSpecVPCAccessibleServicesAllowedServices(original["allowedServices"], d, config) - return []interface{}{transformed} -} - -func 
flattenAccessContextManagerServicePerimeterSpecVPCAccessibleServicesEnableRestriction(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecVPCAccessibleServicesAllowedServices(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecIngressPolicies(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "ingress_from": flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFrom(original["ingressFrom"], d, config), - "ingress_to": flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressTo(original["ingressTo"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFrom(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["identity_type"] = - flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentityType(original["identityType"], d, config) - transformed["identities"] = - flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentities(original["identities"], d, config) - transformed["sources"] = - flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSources(original["sources"], 
d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentityType(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentities(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSources(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "access_level": flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesAccessLevel(original["accessLevel"], d, config), - "resource": flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesResource(original["resource"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesResource(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressTo(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - 
original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resources"] = - flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToResources(original["resources"], d, config) - transformed["operations"] = - flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperations(original["operations"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToResources(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperations(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "service_name": flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsServiceName(original["serviceName"], d, config), - "method_selectors": flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectors(original["methodSelectors"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsServiceName(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - 
l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "method": flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(original["method"], d, config), - "permission": flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(original["permission"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecEgressPolicies(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "egress_from": flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFrom(original["egressFrom"], d, config), - "egress_to": flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressTo(original["egressTo"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFrom(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) 
interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["identity_type"] = - flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentityType(original["identityType"], d, config) - transformed["identities"] = - flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentities(original["identities"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentityType(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentities(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressTo(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resources"] = - flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToResources(original["resources"], d, config) - transformed["operations"] = - flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperations(original["operations"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToResources(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperations(v interface{}, d 
*resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "service_name": flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsServiceName(original["serviceName"], d, config), - "method_selectors": flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectors(original["methodSelectors"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsServiceName(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "method": flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(original["method"], d, config), - "permission": flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(original["permission"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterUseExplicitDryRunSpec(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimeterName(v interface{}, d *resource_access_context_manager_service_perimeter_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAccessContextManagerServicePerimeterTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterPerimeterType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResources, err := expandAccessContextManagerServicePerimeterStatusResources(original["resources"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { - transformed["resources"] = transformedResources - } - - transformedAccessLevels, err := expandAccessContextManagerServicePerimeterStatusAccessLevels(original["access_levels"], d, config) - if err != nil { - return nil, err - 
} else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedAccessLevels); val.IsValid() && !isEmptyValue(val) { - transformed["accessLevels"] = transformedAccessLevels - } - - transformedRestrictedServices, err := expandAccessContextManagerServicePerimeterStatusRestrictedServices(original["restricted_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedRestrictedServices); val.IsValid() && !isEmptyValue(val) { - transformed["restrictedServices"] = transformedRestrictedServices - } - - transformedVPCAccessibleServices, err := expandAccessContextManagerServicePerimeterStatusVPCAccessibleServices(original["vpc_accessible_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedVPCAccessibleServices); val.IsValid() && !isEmptyValue(val) { - transformed["vpcAccessibleServices"] = transformedVPCAccessibleServices - } - - transformedIngressPolicies, err := expandAccessContextManagerServicePerimeterStatusIngressPolicies(original["ingress_policies"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedIngressPolicies); val.IsValid() && !isEmptyValue(val) { - transformed["ingressPolicies"] = transformedIngressPolicies - } - - transformedEgressPolicies, err := expandAccessContextManagerServicePerimeterStatusEgressPolicies(original["egress_policies"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedEgressPolicies); val.IsValid() && !isEmptyValue(val) { - transformed["egressPolicies"] = transformedEgressPolicies - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimeterStatusResources(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusAccessLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusRestrictedServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_access_context_manager_service_perimeter_schema.Set).List() - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusVPCAccessibleServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnableRestriction, err := expandAccessContextManagerServicePerimeterStatusVPCAccessibleServicesEnableRestriction(original["enable_restriction"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedEnableRestriction); val.IsValid() && !isEmptyValue(val) { - transformed["enableRestriction"] = transformedEnableRestriction - } - - transformedAllowedServices, err := expandAccessContextManagerServicePerimeterStatusVPCAccessibleServicesAllowedServices(original["allowed_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedAllowedServices); val.IsValid() && !isEmptyValue(val) { - transformed["allowedServices"] = transformedAllowedServices - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimeterStatusVPCAccessibleServicesEnableRestriction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusVPCAccessibleServicesAllowedServices(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_access_context_manager_service_perimeter_schema.Set).List() - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusIngressPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIngressFrom, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFrom(original["ingress_from"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedIngressFrom); val.IsValid() && !isEmptyValue(val) { - transformed["ingressFrom"] = transformedIngressFrom - } - - transformedIngressTo, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressTo(original["ingress_to"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedIngressTo); val.IsValid() && !isEmptyValue(val) { - transformed["ingressTo"] = transformedIngressTo - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIdentityType, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentityType(original["identity_type"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedIdentityType); val.IsValid() && !isEmptyValue(val) { - transformed["identityType"] = transformedIdentityType - } - - transformedIdentities, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentities(original["identities"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedIdentities); val.IsValid() && !isEmptyValue(val) { - transformed["identities"] = transformedIdentities - } - - transformedSources, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSources(original["sources"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedSources); val.IsValid() && !isEmptyValue(val) { - transformed["sources"] = transformedSources - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAccessLevel, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesAccessLevel(original["access_level"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedAccessLevel); val.IsValid() && !isEmptyValue(val) { - transformed["accessLevel"] = transformedAccessLevel - } - - transformedResource, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesResource(original["resource"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedResource); val.IsValid() && !isEmptyValue(val) { - transformed["resource"] = transformedResource - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressFromSourcesResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressTo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResources, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToResources(original["resources"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { - transformed["resources"] = transformedResources - } - - transformedOperations, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperations(original["operations"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedOperations); val.IsValid() && !isEmptyValue(val) { - transformed["operations"] = transformedOperations - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServiceName, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsServiceName(original["service_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedServiceName); val.IsValid() && !isEmptyValue(val) { - transformed["serviceName"] = transformedServiceName - } - - transformedMethodSelectors, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectors(original["method_selectors"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !isEmptyValue(val) { - transformed["methodSelectors"] = transformedMethodSelectors - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectors(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMethod, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(original["method"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { - transformed["method"] = transformedMethod - } - - transformedPermission, err := expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(original["permission"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedPermission); val.IsValid() && !isEmptyValue(val) { - transformed["permission"] = transformedPermission - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusEgressPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEgressFrom, err := 
expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFrom(original["egress_from"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedEgressFrom); val.IsValid() && !isEmptyValue(val) { - transformed["egressFrom"] = transformedEgressFrom - } - - transformedEgressTo, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressTo(original["egress_to"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedEgressTo); val.IsValid() && !isEmptyValue(val) { - transformed["egressTo"] = transformedEgressTo - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIdentityType, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentityType(original["identity_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedIdentityType); val.IsValid() && !isEmptyValue(val) { - transformed["identityType"] = transformedIdentityType - } - - transformedIdentities, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentities(original["identities"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedIdentities); val.IsValid() && !isEmptyValue(val) { - transformed["identities"] = transformedIdentities - } - - return transformed, nil -} - -func 
expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressFromIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressTo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResources, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToResources(original["resources"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { - transformed["resources"] = transformedResources - } - - transformedOperations, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperations(original["operations"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedOperations); val.IsValid() && !isEmptyValue(val) { - transformed["operations"] = transformedOperations - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - 
original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServiceName, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsServiceName(original["service_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedServiceName); val.IsValid() && !isEmptyValue(val) { - transformed["serviceName"] = transformedServiceName - } - - transformedMethodSelectors, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectors(original["method_selectors"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !isEmptyValue(val) { - transformed["methodSelectors"] = transformedMethodSelectors - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMethod, err := expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(original["method"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { - transformed["method"] = transformedMethod - } - - transformedPermission, err 
:= expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(original["permission"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedPermission); val.IsValid() && !isEmptyValue(val) { - transformed["permission"] = transformedPermission - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResources, err := expandAccessContextManagerServicePerimeterSpecResources(original["resources"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { - transformed["resources"] = transformedResources - } - - transformedAccessLevels, err := expandAccessContextManagerServicePerimeterSpecAccessLevels(original["access_levels"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedAccessLevels); val.IsValid() && !isEmptyValue(val) { - transformed["accessLevels"] = transformedAccessLevels - } - - transformedRestrictedServices, err := 
expandAccessContextManagerServicePerimeterSpecRestrictedServices(original["restricted_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedRestrictedServices); val.IsValid() && !isEmptyValue(val) { - transformed["restrictedServices"] = transformedRestrictedServices - } - - transformedVPCAccessibleServices, err := expandAccessContextManagerServicePerimeterSpecVPCAccessibleServices(original["vpc_accessible_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedVPCAccessibleServices); val.IsValid() && !isEmptyValue(val) { - transformed["vpcAccessibleServices"] = transformedVPCAccessibleServices - } - - transformedIngressPolicies, err := expandAccessContextManagerServicePerimeterSpecIngressPolicies(original["ingress_policies"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedIngressPolicies); val.IsValid() && !isEmptyValue(val) { - transformed["ingressPolicies"] = transformedIngressPolicies - } - - transformedEgressPolicies, err := expandAccessContextManagerServicePerimeterSpecEgressPolicies(original["egress_policies"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedEgressPolicies); val.IsValid() && !isEmptyValue(val) { - transformed["egressPolicies"] = transformedEgressPolicies - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimeterSpecResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecAccessLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandAccessContextManagerServicePerimeterSpecRestrictedServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecVPCAccessibleServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnableRestriction, err := expandAccessContextManagerServicePerimeterSpecVPCAccessibleServicesEnableRestriction(original["enable_restriction"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedEnableRestriction); val.IsValid() && !isEmptyValue(val) { - transformed["enableRestriction"] = transformedEnableRestriction - } - - transformedAllowedServices, err := expandAccessContextManagerServicePerimeterSpecVPCAccessibleServicesAllowedServices(original["allowed_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedAllowedServices); val.IsValid() && !isEmptyValue(val) { - transformed["allowedServices"] = transformedAllowedServices - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimeterSpecVPCAccessibleServicesEnableRestriction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecVPCAccessibleServicesAllowedServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecIngressPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { 
- if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIngressFrom, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFrom(original["ingress_from"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedIngressFrom); val.IsValid() && !isEmptyValue(val) { - transformed["ingressFrom"] = transformedIngressFrom - } - - transformedIngressTo, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressTo(original["ingress_to"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedIngressTo); val.IsValid() && !isEmptyValue(val) { - transformed["ingressTo"] = transformedIngressTo - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIdentityType, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentityType(original["identity_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedIdentityType); val.IsValid() && !isEmptyValue(val) { - transformed["identityType"] = transformedIdentityType - } - - transformedIdentities, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentities(original["identities"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedIdentities); 
val.IsValid() && !isEmptyValue(val) { - transformed["identities"] = transformedIdentities - } - - transformedSources, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSources(original["sources"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedSources); val.IsValid() && !isEmptyValue(val) { - transformed["sources"] = transformedSources - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAccessLevel, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesAccessLevel(original["access_level"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedAccessLevel); val.IsValid() && !isEmptyValue(val) { - transformed["accessLevel"] = transformedAccessLevel - } - - transformedResource, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesResource(original["resource"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedResource); val.IsValid() && !isEmptyValue(val) { - 
transformed["resource"] = transformedResource - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressFromSourcesResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressTo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResources, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToResources(original["resources"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { - transformed["resources"] = transformedResources - } - - transformedOperations, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperations(original["operations"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedOperations); val.IsValid() && !isEmptyValue(val) { - transformed["operations"] = transformedOperations - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { 
- l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServiceName, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsServiceName(original["service_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedServiceName); val.IsValid() && !isEmptyValue(val) { - transformed["serviceName"] = transformedServiceName - } - - transformedMethodSelectors, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectors(original["method_selectors"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !isEmptyValue(val) { - transformed["methodSelectors"] = transformedMethodSelectors - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMethod, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(original["method"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { - transformed["method"] = transformedMethod - } - - transformedPermission, err := expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(original["permission"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedPermission); val.IsValid() && !isEmptyValue(val) { - transformed["permission"] = transformedPermission - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecEgressPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEgressFrom, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFrom(original["egress_from"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedEgressFrom); val.IsValid() && !isEmptyValue(val) { - transformed["egressFrom"] = transformedEgressFrom - } - - transformedEgressTo, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressTo(original["egress_to"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedEgressTo); val.IsValid() && !isEmptyValue(val) { - transformed["egressTo"] = transformedEgressTo - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIdentityType, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentityType(original["identity_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedIdentityType); val.IsValid() && !isEmptyValue(val) { - transformed["identityType"] = transformedIdentityType - } - - transformedIdentities, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentities(original["identities"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedIdentities); val.IsValid() && !isEmptyValue(val) { - transformed["identities"] = transformedIdentities - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressFromIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressTo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - 
return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResources, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToResources(original["resources"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { - transformed["resources"] = transformedResources - } - - transformedOperations, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperations(original["operations"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedOperations); val.IsValid() && !isEmptyValue(val) { - transformed["operations"] = transformedOperations - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServiceName, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsServiceName(original["service_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedServiceName); val.IsValid() && !isEmptyValue(val) { - transformed["serviceName"] = transformedServiceName - } - - transformedMethodSelectors, err := 
expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectors(original["method_selectors"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !isEmptyValue(val) { - transformed["methodSelectors"] = transformedMethodSelectors - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMethod, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(original["method"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { - transformed["method"] = transformedMethod - } - - transformedPermission, err := expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(original["permission"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeter_reflect.ValueOf(transformedPermission); val.IsValid() && !isEmptyValue(val) { - transformed["permission"] = transformedPermission - } - - req = append(req, transformed) - } - return req, nil -} - -func 
expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterUseExplicitDryRunSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimeterName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceAccessContextManagerServicePerimeterEncoder(d *resource_access_context_manager_service_perimeter_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - delete(obj, "parent") - return obj, nil -} - -func resourceAccessContextManagerServicePerimeterResource() *resource_access_context_manager_service_perimeter_resource_schema.Resource { - return &resource_access_context_manager_service_perimeter_resource_schema.Resource{ - Create: resourceAccessContextManagerServicePerimeterResourceCreate, - Read: resourceAccessContextManagerServicePerimeterResourceRead, - Delete: resourceAccessContextManagerServicePerimeterResourceDelete, - - Importer: &resource_access_context_manager_service_perimeter_resource_schema.ResourceImporter{ - State: resourceAccessContextManagerServicePerimeterResourceImport, - }, - - Timeouts: &resource_access_context_manager_service_perimeter_resource_schema.ResourceTimeout{ - Create: resource_access_context_manager_service_perimeter_resource_schema.DefaultTimeout(4 * 
resource_access_context_manager_service_perimeter_resource_time.Minute), - Delete: resource_access_context_manager_service_perimeter_resource_schema.DefaultTimeout(4 * resource_access_context_manager_service_perimeter_resource_time.Minute), - }, - - Schema: map[string]*resource_access_context_manager_service_perimeter_resource_schema.Schema{ - "perimeter_name": { - Type: resource_access_context_manager_service_perimeter_resource_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the Service Perimeter to add this resource to.`, - }, - "resource": { - Type: resource_access_context_manager_service_perimeter_resource_schema.TypeString, - Required: true, - ForceNew: true, - Description: `A GCP resource that is inside of the service perimeter. -Currently only projects are allowed. -Format: projects/{project_number}`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAccessContextManagerServicePerimeterResourceCreate(d *resource_access_context_manager_service_perimeter_resource_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - resourceProp, err := expandNestedAccessContextManagerServicePerimeterResourceResource(d.Get("resource"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("resource"); !isEmptyValue(resource_access_context_manager_service_perimeter_resource_reflect.ValueOf(resourceProp)) && (ok || !resource_access_context_manager_service_perimeter_resource_reflect.DeepEqual(v, resourceProp)) { - obj["resource"] = resourceProp - } - - lockName, err := replaceVars(d, config, "{{perimeter_name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter_name}}") - if err != 
nil { - return err - } - - resource_access_context_manager_service_perimeter_resource_log.Printf("[DEBUG] Creating new ServicePerimeterResource: %#v", obj) - - obj, err = resourceAccessContextManagerServicePerimeterResourcePatchCreateEncoder(d, meta, obj) - if err != nil { - return err - } - url, err = addQueryParams(url, map[string]string{"updateMask": "status.resources"}) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_service_perimeter_resource_schema.TimeoutCreate)) - if err != nil { - return resource_access_context_manager_service_perimeter_resource_fmt.Errorf("Error creating ServicePerimeterResource: %s", err) - } - - id, err := replaceVars(d, config, "{{perimeter_name}}/{{resource}}") - if err != nil { - return resource_access_context_manager_service_perimeter_resource_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = accessContextManagerOperationWaitTimeWithResponse( - config, res, &opRes, "Creating ServicePerimeterResource", userAgent, - d.Timeout(resource_access_context_manager_service_perimeter_resource_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_access_context_manager_service_perimeter_resource_fmt.Errorf("Error waiting to create ServicePerimeterResource: %s", err) - } - - if _, ok := opRes["status"]; ok { - opRes, err = flattenNestedAccessContextManagerServicePerimeterResource(d, meta, opRes) - if err != nil { - return resource_access_context_manager_service_perimeter_resource_fmt.Errorf("Error getting nested object from operation response: %s", err) - } - if opRes == nil { - - return resource_access_context_manager_service_perimeter_resource_fmt.Errorf("Error decoding response from operation, could not find nested object") - } - } - if 
err := d.Set("resource", flattenNestedAccessContextManagerServicePerimeterResourceResource(opRes["resource"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{perimeter_name}}/{{resource}}") - if err != nil { - return resource_access_context_manager_service_perimeter_resource_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_access_context_manager_service_perimeter_resource_log.Printf("[DEBUG] Finished creating ServicePerimeterResource %q: %#v", d.Id(), res) - - return resourceAccessContextManagerServicePerimeterResourceRead(d, meta) -} - -func resourceAccessContextManagerServicePerimeterResourceRead(d *resource_access_context_manager_service_perimeter_resource_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter_name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_access_context_manager_service_perimeter_resource_fmt.Sprintf("AccessContextManagerServicePerimeterResource %q", d.Id())) - } - - res, err = flattenNestedAccessContextManagerServicePerimeterResource(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_access_context_manager_service_perimeter_resource_log.Printf("[DEBUG] Removing AccessContextManagerServicePerimeterResource because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("resource", flattenNestedAccessContextManagerServicePerimeterResourceResource(res["resource"], d, config)); err != nil { - return resource_access_context_manager_service_perimeter_resource_fmt.Errorf("Error reading 
ServicePerimeterResource: %s", err) - } - - return nil -} - -func resourceAccessContextManagerServicePerimeterResourceDelete(d *resource_access_context_manager_service_perimeter_resource_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "{{perimeter_name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter_name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - obj, err = resourceAccessContextManagerServicePerimeterResourcePatchDeleteEncoder(d, meta, obj) - if err != nil { - return handleNotFoundError(err, d, "ServicePerimeterResource") - } - url, err = addQueryParams(url, map[string]string{"updateMask": "status.resources"}) - if err != nil { - return err - } - resource_access_context_manager_service_perimeter_resource_log.Printf("[DEBUG] Deleting ServicePerimeterResource %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_service_perimeter_resource_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ServicePerimeterResource") - } - - err = accessContextManagerOperationWaitTime( - config, res, "Deleting ServicePerimeterResource", userAgent, - d.Timeout(resource_access_context_manager_service_perimeter_resource_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_access_context_manager_service_perimeter_resource_log.Printf("[DEBUG] Finished deleting ServicePerimeterResource %q: %#v", d.Id(), res) - return nil -} - -func resourceAccessContextManagerServicePerimeterResourceImport(d 
*resource_access_context_manager_service_perimeter_resource_schema.ResourceData, meta interface{}) ([]*resource_access_context_manager_service_perimeter_resource_schema.ResourceData, error) { - config := meta.(*Config) - - parts, err := getImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)/servicePerimeters/(?P[^/]+)/(?P.+)"}, d, config, d.Id()) - if err != nil { - return nil, err - } - - if err := d.Set("perimeter_name", resource_access_context_manager_service_perimeter_resource_fmt.Sprintf("accessPolicies/%s/servicePerimeters/%s", parts["accessPolicy"], parts["perimeter"])); err != nil { - return nil, resource_access_context_manager_service_perimeter_resource_fmt.Errorf("Error setting perimeter_name: %s", err) - } - if err := d.Set("resource", parts["resource"]); err != nil { - return nil, resource_access_context_manager_service_perimeter_resource_fmt.Errorf("Error setting resource: %s", err) - } - return []*resource_access_context_manager_service_perimeter_resource_schema.ResourceData{d}, nil -} - -func flattenNestedAccessContextManagerServicePerimeterResourceResource(v interface{}, d *resource_access_context_manager_service_perimeter_resource_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedAccessContextManagerServicePerimeterResourceResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func flattenNestedAccessContextManagerServicePerimeterResource(d *resource_access_context_manager_service_perimeter_resource_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["status"] - if !ok || v == nil { - return nil, nil - } - res = v.(map[string]interface{}) - - v, ok = res["resources"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, 
resource_access_context_manager_service_perimeter_resource_fmt.Errorf("expected list or map for value status.resources. Actual value: %v", v) - } - - _, item, err := resourceAccessContextManagerServicePerimeterResourceFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceAccessContextManagerServicePerimeterResourceFindNestedObjectInList(d *resource_access_context_manager_service_perimeter_resource_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedResource, err := expandNestedAccessContextManagerServicePerimeterResourceResource(d.Get("resource"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedResource := flattenNestedAccessContextManagerServicePerimeterResourceResource(expectedResource, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - - item := map[string]interface{}{ - "resource": itemRaw, - } - - itemResource := flattenNestedAccessContextManagerServicePerimeterResourceResource(item["resource"], d, meta.(*Config)) - - if !(isEmptyValue(resource_access_context_manager_service_perimeter_resource_reflect.ValueOf(itemResource)) && isEmptyValue(resource_access_context_manager_service_perimeter_resource_reflect.ValueOf(expectedFlattenedResource))) && !resource_access_context_manager_service_perimeter_resource_reflect.DeepEqual(itemResource, expectedFlattenedResource) { - resource_access_context_manager_service_perimeter_resource_log.Printf("[DEBUG] Skipping item with resource= %#v, looking for %#v)", itemResource, expectedFlattenedResource) - continue - } - resource_access_context_manager_service_perimeter_resource_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func resourceAccessContextManagerServicePerimeterResourcePatchCreateEncoder(d 
*resource_access_context_manager_service_perimeter_resource_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceAccessContextManagerServicePerimeterResourceListForPatch(d, meta) - if err != nil { - return nil, err - } - - _, found, err := resourceAccessContextManagerServicePerimeterResourceFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - - if found != nil { - return nil, resource_access_context_manager_service_perimeter_resource_fmt.Errorf("Unable to create ServicePerimeterResource, existing object already found: %+v", found) - } - - res := map[string]interface{}{ - "resources": append(currItems, obj["resource"]), - } - wrapped := map[string]interface{}{ - "status": res, - } - res = wrapped - - return res, nil -} - -func resourceAccessContextManagerServicePerimeterResourcePatchDeleteEncoder(d *resource_access_context_manager_service_perimeter_resource_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceAccessContextManagerServicePerimeterResourceListForPatch(d, meta) - if err != nil { - return nil, err - } - - idx, item, err := resourceAccessContextManagerServicePerimeterResourceFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - if item == nil { - - return nil, &resource_access_context_manager_service_perimeter_resource_googleapi.Error{ - Code: 404, - Message: "ServicePerimeterResource not found in list", - } - } - - updatedItems := append(currItems[:idx], currItems[idx+1:]...) 
- res := map[string]interface{}{ - "resources": updatedItems, - } - wrapped := map[string]interface{}{ - "status": res, - } - res = wrapped - - return res, nil -} - -func resourceAccessContextManagerServicePerimeterResourceListForPatch(d *resource_access_context_manager_service_perimeter_resource_schema.ResourceData, meta interface{}) ([]interface{}, error) { - config := meta.(*Config) - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{perimeter_name}}") - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", "", url, userAgent, nil) - if err != nil { - return nil, err - } - - var v interface{} - var ok bool - if v, ok = res["status"]; ok && v != nil { - res = v.(map[string]interface{}) - } else { - return nil, nil - } - - v, ok = res["resources"] - if ok && v != nil { - ls, lsOk := v.([]interface{}) - if !lsOk { - return nil, resource_access_context_manager_service_perimeter_resource_fmt.Errorf(`expected list for nested field "resources"`) - } - return ls, nil - } - return nil, nil -} - -func resourceAccessContextManagerServicePerimeters() *resource_access_context_manager_service_perimeters_schema.Resource { - return &resource_access_context_manager_service_perimeters_schema.Resource{ - Create: resourceAccessContextManagerServicePerimetersCreate, - Read: resourceAccessContextManagerServicePerimetersRead, - Update: resourceAccessContextManagerServicePerimetersUpdate, - Delete: resourceAccessContextManagerServicePerimetersDelete, - - Importer: &resource_access_context_manager_service_perimeters_schema.ResourceImporter{ - State: resourceAccessContextManagerServicePerimetersImport, - }, - - Timeouts: &resource_access_context_manager_service_perimeters_schema.ResourceTimeout{ - Create: resource_access_context_manager_service_perimeters_schema.DefaultTimeout(6 * 
resource_access_context_manager_service_perimeters_time.Minute), - Update: resource_access_context_manager_service_perimeters_schema.DefaultTimeout(6 * resource_access_context_manager_service_perimeters_time.Minute), - Delete: resource_access_context_manager_service_perimeters_schema.DefaultTimeout(6 * resource_access_context_manager_service_perimeters_time.Minute), - }, - - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "parent": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The AccessPolicy this ServicePerimeter lives in. -Format: accessPolicies/{policy_id}`, - }, - "service_perimeters": { - Type: resource_access_context_manager_service_perimeters_schema.TypeSet, - Optional: true, - Description: `The desired Service Perimeters that should replace all existing Service Perimeters in the Access Policy.`, - Elem: accesscontextmanagerServicePerimetersServicePerimetersSchema(), - }, - }, - UseJSONNumber: true, - } -} - -func accesscontextmanagerServicePerimetersServicePerimetersSchema() *resource_access_context_manager_service_perimeters_schema.Resource { - return &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "name": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Resource name for the ServicePerimeter. The short_name component must -begin with a letter and only include alphanumeric and '_'. -Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}`, - }, - "title": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Required: true, - Description: `Human readable title. 
Must be unique within the Policy.`, - }, - "description": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `Description of the ServicePerimeter and its use. Does not affect -behavior.`, - }, - "perimeter_type": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_access_context_manager_service_perimeters_validation.StringInSlice([]string{"PERIMETER_TYPE_REGULAR", "PERIMETER_TYPE_BRIDGE", ""}, false), - Description: `Specifies the type of the Perimeter. There are two types: regular and -bridge. Regular Service Perimeter contains resources, access levels, -and restricted services. Every resource can be in at most -ONE regular Service Perimeter. - -In addition to being in a regular service perimeter, a resource can also -be in zero or more perimeter bridges. A perimeter bridge only contains -resources. Cross project operations are permitted if all effected -resources share some perimeter (whether bridge or regular). Perimeter -Bridge does not contain access levels or services: those are governed -entirely by the regular perimeter that resource is in. - -Perimeter Bridges are typically useful when building more complex -topologies with many independent perimeters that need to share some data -with a common perimeter, but should not be able to share data among -themselves. Default value: "PERIMETER_TYPE_REGULAR" Possible values: ["PERIMETER_TYPE_REGULAR", "PERIMETER_TYPE_BRIDGE"]`, - Default: "PERIMETER_TYPE_REGULAR", - }, - "spec": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `Proposed (or dry run) ServicePerimeter configuration. -This configuration allows to specify and test ServicePerimeter configuration -without enforcing actual access restrictions. 
Only allowed to be set when -the 'useExplicitDryRunSpec' flag is set.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "access_levels": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `A list of AccessLevel resource names that allow resources within -the ServicePerimeter to be accessed from the internet. -AccessLevels listed must be in the same policy as this -ServicePerimeter. Referencing a nonexistent AccessLevel is a -syntax error. If no AccessLevel names are listed, resources within -the perimeter can only be accessed via GCP calls with request -origins within the perimeter. For Service Perimeter Bridge, must -be empty. - -Format: accessPolicies/{policy_id}/accessLevels/{access_level_name}`, - Elem: &resource_access_context_manager_service_perimeters_schema.Schema{ - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - }, - }, - "egress_policies": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `List of EgressPolicies to apply to the perimeter. A perimeter may -have multiple EgressPolicies, each of which is evaluated separately. -Access is granted if any EgressPolicy grants it. 
Must be empty for -a perimeter bridge.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "egress_from": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `Defines conditions on the source of a request causing this 'EgressPolicy' to apply.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "identities": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `A list of identities that are allowed access through this 'EgressPolicy'. -Should be in the format of email address. The email address should -represent individual user or service account only.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Schema{ - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - }, - }, - "identity_type": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - ValidateFunc: resource_access_context_manager_service_perimeters_validation.StringInSlice([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}, false), - Description: `Specifies the type of identities that are allowed access to outside the -perimeter. If left unspecified, then members of 'identities' field will -be allowed access. 
Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, - }, - }, - }, - }, - "egress_to": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `Defines the conditions on the 'ApiOperation' and destination resources that -cause this 'EgressPolicy' to apply.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "operations": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `A list of 'ApiOperations' that this egress rule applies to. A request matches -if it contains an operation/service in this list.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "method_selectors": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `API methods or permissions to allow. Method or permission must belong -to the service specified by 'serviceName' field. A single MethodSelector -entry with '*' specified for the 'method' field will allow all methods -AND permissions for the service specified in 'serviceName'.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "method": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `Value for 'method' should be a valid method name for the corresponding -'serviceName' in 'ApiOperation'. 
If '*' used as value for method, -then ALL methods and permissions are allowed.`, - }, - "permission": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `Value for permission should be a valid Cloud IAM permission for the -corresponding 'serviceName' in 'ApiOperation'.`, - }, - }, - }, - }, - "service_name": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `The name of the API whose methods or permissions the 'IngressPolicy' or -'EgressPolicy' want to allow. A single 'ApiOperation' with serviceName -field set to '*' will allow all methods AND permissions for all services.`, - }, - }, - }, - }, - "resources": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `A list of resources, currently only projects in the form -'projects/', that match this to stanza. A request matches -if it contains a resource in this list. If * is specified for resources, -then this 'EgressTo' rule will authorize access to all resources outside -the perimeter.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Schema{ - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "ingress_policies": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `List of 'IngressPolicies' to apply to the perimeter. A perimeter may -have multiple 'IngressPolicies', each of which is evaluated -separately. Access is granted if any 'Ingress Policy' grants it. 
-Must be empty for a perimeter bridge.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "ingress_from": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `Defines the conditions on the source of a request causing this 'IngressPolicy' -to apply.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "identities": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `A list of identities that are allowed access through this ingress policy. -Should be in the format of email address. The email address should represent -individual user or service account only.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Schema{ - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - }, - }, - "identity_type": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - ValidateFunc: resource_access_context_manager_service_perimeters_validation.StringInSlice([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}, false), - Description: `Specifies the type of identities that are allowed access from outside the -perimeter. If left unspecified, then members of 'identities' field will be -allowed access. 
Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, - }, - "sources": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `Sources that this 'IngressPolicy' authorizes access from.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "access_level": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `An 'AccessLevel' resource name that allow resources within the -'ServicePerimeters' to be accessed from the internet. 'AccessLevels' listed -must be in the same policy as this 'ServicePerimeter'. Referencing a nonexistent -'AccessLevel' will cause an error. If no 'AccessLevel' names are listed, -resources within the perimeter can only be accessed via Google Cloud calls -with request origins within the perimeter. -Example 'accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.' -If * is specified, then all IngressSources will be allowed.`, - }, - "resource": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `A Google Cloud resource that is allowed to ingress the perimeter. -Requests from these resources will be allowed to access perimeter data. -Currently only projects are allowed. Format 'projects/{project_number}' -The project may be in any Google Cloud organization, not just the -organization that the perimeter is defined in. 
'*' is not allowed, the case -of allowing all Google Cloud resources only is not supported.`, - }, - }, - }, - }, - }, - }, - }, - "ingress_to": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `Defines the conditions on the 'ApiOperation' and request destination that cause -this 'IngressPolicy' to apply.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "operations": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `A list of 'ApiOperations' the sources specified in corresponding 'IngressFrom' -are allowed to perform in this 'ServicePerimeter'.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "method_selectors": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `API methods or permissions to allow. Method or permission must belong to -the service specified by serviceName field. A single 'MethodSelector' entry -with '*' specified for the method field will allow all methods AND -permissions for the service specified in 'serviceName'.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "method": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `Value for method should be a valid method name for the corresponding -serviceName in 'ApiOperation'. 
If '*' used as value for 'method', then -ALL methods and permissions are allowed.`, - }, - "permission": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `Value for permission should be a valid Cloud IAM permission for the -corresponding 'serviceName' in 'ApiOperation'.`, - }, - }, - }, - }, - "service_name": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `The name of the API whose methods or permissions the 'IngressPolicy' or -'EgressPolicy' want to allow. A single 'ApiOperation' with 'serviceName' -field set to '*' will allow all methods AND permissions for all services.`, - }, - }, - }, - }, - "resources": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `A list of resources, currently only projects in the form -'projects/', protected by this 'ServicePerimeter' -that are allowed to be accessed by sources defined in the -corresponding 'IngressFrom'. A request matches if it contains -a resource in this list. If '*' is specified for resources, -then this 'IngressTo' rule will authorize access to all -resources inside the perimeter, provided that the request -also matches the 'operations' field.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Schema{ - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "resources": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `A list of GCP resources that are inside of the service perimeter. -Currently only projects are allowed. 
-Format: projects/{project_number}`, - Elem: &resource_access_context_manager_service_perimeters_schema.Schema{ - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - }, - }, - "restricted_services": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `GCP services that are subject to the Service Perimeter -restrictions. Must contain a list of services. For example, if -'storage.googleapis.com' is specified, access to the storage -buckets inside the perimeter must meet the perimeter's access -restrictions.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Schema{ - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - }, - }, - "vpc_accessible_services": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `Specifies how APIs are allowed to communicate within the Service -Perimeter.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "allowed_services": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `The list of APIs usable within the Service Perimeter. 
-Must be empty unless 'enableRestriction' is True.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Schema{ - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - }, - }, - "enable_restriction": { - Type: resource_access_context_manager_service_perimeters_schema.TypeBool, - Optional: true, - Description: `Whether to restrict API calls within the Service Perimeter to the -list of APIs specified in 'allowedServices'.`, - }, - }, - }, - }, - }, - }, - }, - "status": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `ServicePerimeter configuration. Specifies sets of resources, -restricted services and access levels that determine -perimeter content and boundaries.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "access_levels": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `A list of AccessLevel resource names that allow resources within -the ServicePerimeter to be accessed from the internet. -AccessLevels listed must be in the same policy as this -ServicePerimeter. Referencing a nonexistent AccessLevel is a -syntax error. If no AccessLevel names are listed, resources within -the perimeter can only be accessed via GCP calls with request -origins within the perimeter. For Service Perimeter Bridge, must -be empty. - -Format: accessPolicies/{policy_id}/accessLevels/{access_level_name}`, - Elem: &resource_access_context_manager_service_perimeters_schema.Schema{ - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - }, - }, - "egress_policies": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `List of EgressPolicies to apply to the perimeter. 
A perimeter may -have multiple EgressPolicies, each of which is evaluated separately. -Access is granted if any EgressPolicy grants it. Must be empty for -a perimeter bridge.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "egress_from": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `Defines conditions on the source of a request causing this 'EgressPolicy' to apply.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "identities": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `A list of identities that are allowed access through this 'EgressPolicy'. -Should be in the format of email address. The email address should -represent individual user or service account only.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Schema{ - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - }, - }, - "identity_type": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - ValidateFunc: resource_access_context_manager_service_perimeters_validation.StringInSlice([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}, false), - Description: `Specifies the type of identities that are allowed access to outside the -perimeter. If left unspecified, then members of 'identities' field will -be allowed access. 
Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, - }, - }, - }, - }, - "egress_to": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `Defines the conditions on the 'ApiOperation' and destination resources that -cause this 'EgressPolicy' to apply.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "operations": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `A list of 'ApiOperations' that this egress rule applies to. A request matches -if it contains an operation/service in this list.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "method_selectors": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `API methods or permissions to allow. Method or permission must belong -to the service specified by 'serviceName' field. A single MethodSelector -entry with '*' specified for the 'method' field will allow all methods -AND permissions for the service specified in 'serviceName'.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "method": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `Value for 'method' should be a valid method name for the corresponding -'serviceName' in 'ApiOperation'. 
If '*' used as value for method, -then ALL methods and permissions are allowed.`, - }, - "permission": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `Value for permission should be a valid Cloud IAM permission for the -corresponding 'serviceName' in 'ApiOperation'.`, - }, - }, - }, - }, - "service_name": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `The name of the API whose methods or permissions the 'IngressPolicy' or -'EgressPolicy' want to allow. A single 'ApiOperation' with serviceName -field set to '*' will allow all methods AND permissions for all services.`, - }, - }, - }, - }, - "resources": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `A list of resources, currently only projects in the form -'projects/', that match this to stanza. A request matches -if it contains a resource in this list. If * is specified for resources, -then this 'EgressTo' rule will authorize access to all resources outside -the perimeter.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Schema{ - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "ingress_policies": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `List of 'IngressPolicies' to apply to the perimeter. A perimeter may -have multiple 'IngressPolicies', each of which is evaluated -separately. Access is granted if any 'Ingress Policy' grants it. 
-Must be empty for a perimeter bridge.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "ingress_from": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `Defines the conditions on the source of a request causing this 'IngressPolicy' -to apply.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "identities": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `A list of identities that are allowed access through this ingress policy. -Should be in the format of email address. The email address should represent -individual user or service account only.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Schema{ - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - }, - }, - "identity_type": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - ValidateFunc: resource_access_context_manager_service_perimeters_validation.StringInSlice([]string{"IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT", ""}, false), - Description: `Specifies the type of identities that are allowed access from outside the -perimeter. If left unspecified, then members of 'identities' field will be -allowed access. 
Possible values: ["IDENTITY_TYPE_UNSPECIFIED", "ANY_IDENTITY", "ANY_USER_ACCOUNT", "ANY_SERVICE_ACCOUNT"]`, - }, - "sources": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `Sources that this 'IngressPolicy' authorizes access from.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "access_level": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `An 'AccessLevel' resource name that allow resources within the -'ServicePerimeters' to be accessed from the internet. 'AccessLevels' listed -must be in the same policy as this 'ServicePerimeter'. Referencing a nonexistent -'AccessLevel' will cause an error. If no 'AccessLevel' names are listed, -resources within the perimeter can only be accessed via Google Cloud calls -with request origins within the perimeter. -Example 'accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.' -If * is specified, then all IngressSources will be allowed.`, - }, - "resource": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `A Google Cloud resource that is allowed to ingress the perimeter. -Requests from these resources will be allowed to access perimeter data. -Currently only projects are allowed. Format 'projects/{project_number}' -The project may be in any Google Cloud organization, not just the -organization that the perimeter is defined in. 
'*' is not allowed, the case -of allowing all Google Cloud resources only is not supported.`, - }, - }, - }, - }, - }, - }, - }, - "ingress_to": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `Defines the conditions on the 'ApiOperation' and request destination that cause -this 'IngressPolicy' to apply.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "operations": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `A list of 'ApiOperations' the sources specified in corresponding 'IngressFrom' -are allowed to perform in this 'ServicePerimeter'.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "method_selectors": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `API methods or permissions to allow. Method or permission must belong to -the service specified by serviceName field. A single 'MethodSelector' entry -with '*' specified for the method field will allow all methods AND -permissions for the service specified in 'serviceName'.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "method": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `Value for method should be a valid method name for the corresponding -serviceName in 'ApiOperation'. 
If '*' used as value for 'method', then -ALL methods and permissions are allowed.`, - }, - "permission": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `Value for permission should be a valid Cloud IAM permission for the -corresponding 'serviceName' in 'ApiOperation'.`, - }, - }, - }, - }, - "service_name": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Optional: true, - Description: `The name of the API whose methods or permissions the 'IngressPolicy' or -'EgressPolicy' want to allow. A single 'ApiOperation' with 'serviceName' -field set to '*' will allow all methods AND permissions for all services.`, - }, - }, - }, - }, - "resources": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `A list of resources, currently only projects in the form -'projects/', protected by this 'ServicePerimeter' -that are allowed to be accessed by sources defined in the -corresponding 'IngressFrom'. A request matches if it contains -a resource in this list. If '*' is specified for resources, -then this 'IngressTo' rule will authorize access to all -resources inside the perimeter, provided that the request -also matches the 'operations' field.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Schema{ - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "resources": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `A list of GCP resources that are inside of the service perimeter. -Currently only projects are allowed. 
-Format: projects/{project_number}`, - Elem: &resource_access_context_manager_service_perimeters_schema.Schema{ - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - }, - }, - "restricted_services": { - Type: resource_access_context_manager_service_perimeters_schema.TypeSet, - Optional: true, - Description: `GCP services that are subject to the Service Perimeter -restrictions. Must contain a list of services. For example, if -'storage.googleapis.com' is specified, access to the storage -buckets inside the perimeter must meet the perimeter's access -restrictions.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Schema{ - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - }, - Set: resource_access_context_manager_service_perimeters_schema.HashString, - }, - "vpc_accessible_services": { - Type: resource_access_context_manager_service_perimeters_schema.TypeList, - Optional: true, - Description: `Specifies how APIs are allowed to communicate within the Service -Perimeter.`, - MaxItems: 1, - Elem: &resource_access_context_manager_service_perimeters_schema.Resource{ - Schema: map[string]*resource_access_context_manager_service_perimeters_schema.Schema{ - "allowed_services": { - Type: resource_access_context_manager_service_perimeters_schema.TypeSet, - Optional: true, - Description: `The list of APIs usable within the Service Perimeter. 
-Must be empty unless 'enableRestriction' is True.`, - Elem: &resource_access_context_manager_service_perimeters_schema.Schema{ - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - }, - Set: resource_access_context_manager_service_perimeters_schema.HashString, - }, - "enable_restriction": { - Type: resource_access_context_manager_service_perimeters_schema.TypeBool, - Optional: true, - Description: `Whether to restrict API calls within the Service Perimeter to the -list of APIs specified in 'allowedServices'.`, - }, - }, - }, - }, - }, - }, - }, - "use_explicit_dry_run_spec": { - Type: resource_access_context_manager_service_perimeters_schema.TypeBool, - Optional: true, - Description: `Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists -for all Service Perimeters, and that spec is identical to the status for those -Service Perimeters. When this flag is set, it inhibits the generation of the -implicit spec, thereby allowing the user to explicitly provide a -configuration ("spec") to use in a dry-run version of the Service Perimeter. -This allows the user to test changes to the enforced config ("status") without -actually enforcing them. This testing is done through analyzing the differences -between currently enforced and suggested restrictions. 
useExplicitDryRunSpec must -bet set to True if any of the fields in the spec are set to non-default values.`, - }, - "create_time": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Computed: true, - Description: `Time the AccessPolicy was created in UTC.`, - }, - "update_time": { - Type: resource_access_context_manager_service_perimeters_schema.TypeString, - Computed: true, - Description: `Time the AccessPolicy was updated in UTC.`, - }, - }, - } -} - -func resourceAccessContextManagerServicePerimetersCreate(d *resource_access_context_manager_service_perimeters_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - servicePerimetersProp, err := expandAccessContextManagerServicePerimetersServicePerimeters(d.Get("service_perimeters"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_perimeters"); !isEmptyValue(resource_access_context_manager_service_perimeters_reflect.ValueOf(servicePerimetersProp)) && (ok || !resource_access_context_manager_service_perimeters_reflect.DeepEqual(v, servicePerimetersProp)) { - obj["servicePerimeters"] = servicePerimetersProp - } - parentProp, err := expandAccessContextManagerServicePerimetersParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(resource_access_context_manager_service_perimeters_reflect.ValueOf(parentProp)) && (ok || !resource_access_context_manager_service_perimeters_reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/servicePerimeters:replaceAll") - if err != nil { - return err - } - - resource_access_context_manager_service_perimeters_log.Printf("[DEBUG] Creating new ServicePerimeters: %#v", obj) - billingProject := "" - 
- if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_access_context_manager_service_perimeters_schema.TimeoutCreate)) - if err != nil { - return resource_access_context_manager_service_perimeters_fmt.Errorf("Error creating ServicePerimeters: %s", err) - } - - id, err := replaceVars(d, config, "{{parent}}/servicePerimeters") - if err != nil { - return resource_access_context_manager_service_perimeters_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = accessContextManagerOperationWaitTime( - config, res, "Creating ServicePerimeters", userAgent, - d.Timeout(resource_access_context_manager_service_perimeters_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_access_context_manager_service_perimeters_fmt.Errorf("Error waiting to create ServicePerimeters: %s", err) - } - - resource_access_context_manager_service_perimeters_log.Printf("[DEBUG] Finished creating ServicePerimeters %q: %#v", d.Id(), res) - - return resourceAccessContextManagerServicePerimetersRead(d, meta) -} - -func resourceAccessContextManagerServicePerimetersRead(d *resource_access_context_manager_service_perimeters_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/servicePerimeters") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_access_context_manager_service_perimeters_fmt.Sprintf("AccessContextManagerServicePerimeters %q", d.Id())) - } - - if err := 
d.Set("service_perimeters", flattenAccessContextManagerServicePerimetersServicePerimeters(res["servicePerimeters"], d, config)); err != nil { - return resource_access_context_manager_service_perimeters_fmt.Errorf("Error reading ServicePerimeters: %s", err) - } - - return nil -} - -func resourceAccessContextManagerServicePerimetersUpdate(d *resource_access_context_manager_service_perimeters_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - servicePerimetersProp, err := expandAccessContextManagerServicePerimetersServicePerimeters(d.Get("service_perimeters"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_perimeters"); !isEmptyValue(resource_access_context_manager_service_perimeters_reflect.ValueOf(v)) && (ok || !resource_access_context_manager_service_perimeters_reflect.DeepEqual(v, servicePerimetersProp)) { - obj["servicePerimeters"] = servicePerimetersProp - } - parentProp, err := expandAccessContextManagerServicePerimetersParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(resource_access_context_manager_service_perimeters_reflect.ValueOf(v)) && (ok || !resource_access_context_manager_service_perimeters_reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/servicePerimeters:replaceAll") - if err != nil { - return err - } - - resource_access_context_manager_service_perimeters_log.Printf("[DEBUG] Updating ServicePerimeters %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, 
d.Timeout(resource_access_context_manager_service_perimeters_schema.TimeoutUpdate)) - - if err != nil { - return resource_access_context_manager_service_perimeters_fmt.Errorf("Error updating ServicePerimeters %q: %s", d.Id(), err) - } else { - resource_access_context_manager_service_perimeters_log.Printf("[DEBUG] Finished updating ServicePerimeters %q: %#v", d.Id(), res) - } - - err = accessContextManagerOperationWaitTime( - config, res, "Updating ServicePerimeters", userAgent, - d.Timeout(resource_access_context_manager_service_perimeters_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAccessContextManagerServicePerimetersRead(d, meta) -} - -func resourceAccessContextManagerServicePerimetersDelete(d *resource_access_context_manager_service_perimeters_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["servicePerimeters"] = []string{} - - url, err := replaceVars(d, config, "{{AccessContextManagerBasePath}}{{parent}}/servicePerimeters:replaceAll") - if err != nil { - return err - } - - resource_access_context_manager_service_perimeters_log.Printf("[DEBUG] Deleting servicePerimeters %q: %#v", d.Id(), obj) - res, err := sendRequestWithTimeout(config, "POST", "", url, userAgent, obj, d.Timeout(resource_access_context_manager_service_perimeters_schema.TimeoutUpdate)) - - if err != nil { - return resource_access_context_manager_service_perimeters_fmt.Errorf("Error deleting ServicePerimeters %q: %s", d.Id(), err) - } else { - resource_access_context_manager_service_perimeters_log.Printf("[DEBUG] Finished deleting ServicePerimeters %q: %#v", d.Id(), res) - } - - err = accessContextManagerOperationWaitTime( - config, res, "Updating ServicePerimeters", userAgent, - d.Timeout(resource_access_context_manager_service_perimeters_schema.TimeoutUpdate)) - - if err != nil { - return 
err - } - - return nil -} - -func resourceAccessContextManagerServicePerimetersImport(d *resource_access_context_manager_service_perimeters_schema.ResourceData, meta interface{}) ([]*resource_access_context_manager_service_perimeters_schema.ResourceData, error) { - config := meta.(*Config) - - parts, err := getImportIdQualifiers([]string{"accessPolicies/(?P[^/]+)/(.+)"}, d, config, d.Id()) - if err != nil { - return nil, err - } - - if err := d.Set("parent", resource_access_context_manager_service_perimeters_fmt.Sprintf("accessPolicies/%s", parts["accessPolicy"])); err != nil { - return nil, resource_access_context_manager_service_perimeters_fmt.Errorf("Error setting parent: %s", err) - } - return []*resource_access_context_manager_service_perimeters_schema.ResourceData{d}, nil -} - -func flattenAccessContextManagerServicePerimetersServicePerimeters(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_access_context_manager_service_perimeters_schema.NewSet(resource_access_context_manager_service_perimeters_schema.HashResource(accesscontextmanagerServicePerimetersServicePerimetersSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "name": flattenAccessContextManagerServicePerimetersServicePerimetersName(original["name"], d, config), - "title": flattenAccessContextManagerServicePerimetersServicePerimetersTitle(original["title"], d, config), - "description": flattenAccessContextManagerServicePerimetersServicePerimetersDescription(original["description"], d, config), - "create_time": flattenAccessContextManagerServicePerimetersServicePerimetersCreateTime(original["createTime"], d, config), - "update_time": 
flattenAccessContextManagerServicePerimetersServicePerimetersUpdateTime(original["updateTime"], d, config), - "perimeter_type": flattenAccessContextManagerServicePerimetersServicePerimetersPerimeterType(original["perimeterType"], d, config), - "status": flattenAccessContextManagerServicePerimetersServicePerimetersStatus(original["status"], d, config), - "spec": flattenAccessContextManagerServicePerimetersServicePerimetersSpec(original["spec"], d, config), - "use_explicit_dry_run_spec": flattenAccessContextManagerServicePerimetersServicePerimetersUseExplicitDryRunSpec(original["useExplicitDryRunSpec"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersName(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersTitle(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersDescription(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersCreateTime(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersUpdateTime(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersPerimeterType(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil || 
isEmptyValue(resource_access_context_manager_service_perimeters_reflect.ValueOf(v)) { - return "PERIMETER_TYPE_REGULAR" - } - - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatus(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resources"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusResources(original["resources"], d, config) - transformed["access_levels"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusAccessLevels(original["accessLevels"], d, config) - transformed["restricted_services"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusRestrictedServices(original["restrictedServices"], d, config) - transformed["vpc_accessible_services"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServices(original["vpcAccessibleServices"], d, config) - transformed["ingress_policies"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPolicies(original["ingressPolicies"], d, config) - transformed["egress_policies"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPolicies(original["egressPolicies"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusResources(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusAccessLevels(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenAccessContextManagerServicePerimetersServicePerimetersStatusRestrictedServices(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_access_context_manager_service_perimeters_schema.NewSet(resource_access_context_manager_service_perimeters_schema.HashString, v.([]interface{})) -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServices(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enable_restriction"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServicesEnableRestriction(original["enableRestriction"], d, config) - transformed["allowed_services"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServicesAllowedServices(original["allowedServices"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServicesEnableRestriction(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServicesAllowedServices(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_access_context_manager_service_perimeters_schema.NewSet(resource_access_context_manager_service_perimeters_schema.HashString, v.([]interface{})) -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPolicies(v interface{}, d 
*resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "ingress_from": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFrom(original["ingressFrom"], d, config), - "ingress_to": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressTo(original["ingressTo"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFrom(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["identity_type"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentityType(original["identityType"], d, config) - transformed["identities"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentities(original["identities"], d, config) - transformed["sources"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSources(original["sources"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentityType(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentities(v 
interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSources(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "access_level": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesAccessLevel(original["accessLevel"], d, config), - "resource": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesResource(original["resource"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesResource(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressTo(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resources"] = - 
flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToResources(original["resources"], d, config) - transformed["operations"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperations(original["operations"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToResources(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperations(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "service_name": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsServiceName(original["serviceName"], d, config), - "method_selectors": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectors(original["methodSelectors"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsServiceName(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v 
- } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "method": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(original["method"], d, config), - "permission": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(original["permission"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPolicies(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "egress_from": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFrom(original["egressFrom"], d, config), - "egress_to": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressTo(original["egressTo"], d, config), - }) - } - return transformed -} - -func 
flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFrom(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["identity_type"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentityType(original["identityType"], d, config) - transformed["identities"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentities(original["identities"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentityType(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentities(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressTo(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resources"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToResources(original["resources"], d, config) - transformed["operations"] = - flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperations(original["operations"], d, config) - return []interface{}{transformed} -} 
- -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToResources(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperations(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "service_name": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsServiceName(original["serviceName"], d, config), - "method_selectors": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectors(original["methodSelectors"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsServiceName(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "method": 
flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(original["method"], d, config), - "permission": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(original["permission"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpec(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resources"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecResources(original["resources"], d, config) - transformed["access_levels"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecAccessLevels(original["accessLevels"], d, config) - transformed["restricted_services"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecRestrictedServices(original["restrictedServices"], d, config) - transformed["vpc_accessible_services"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServices(original["vpcAccessibleServices"], d, config) - transformed["ingress_policies"] = - 
flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPolicies(original["ingressPolicies"], d, config) - transformed["egress_policies"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolicies(original["egressPolicies"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecResources(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecAccessLevels(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecRestrictedServices(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServices(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enable_restriction"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServicesEnableRestriction(original["enableRestriction"], d, config) - transformed["allowed_services"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServicesAllowedServices(original["allowedServices"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServicesEnableRestriction(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServicesAllowedServices(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPolicies(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "ingress_from": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFrom(original["ingressFrom"], d, config), - "ingress_to": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressTo(original["ingressTo"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFrom(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["identity_type"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentityType(original["identityType"], d, config) - transformed["identities"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentities(original["identities"], d, config) - transformed["sources"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSources(original["sources"], d, config) - return []interface{}{transformed} -} - -func 
flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentityType(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentities(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSources(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "access_level": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesAccessLevel(original["accessLevel"], d, config), - "resource": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesResource(original["resource"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesResource(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressTo(v interface{}, d 
*resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resources"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToResources(original["resources"], d, config) - transformed["operations"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperations(original["operations"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToResources(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperations(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "service_name": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsServiceName(original["serviceName"], d, config), - "method_selectors": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectors(original["methodSelectors"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsServiceName(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "method": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(original["method"], d, config), - "permission": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(original["permission"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolicies(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "egress_from": 
flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFrom(original["egressFrom"], d, config), - "egress_to": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressTo(original["egressTo"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFrom(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["identity_type"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentityType(original["identityType"], d, config) - transformed["identities"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentities(original["identities"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentityType(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentities(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressTo(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resources"] = - 
flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToResources(original["resources"], d, config) - transformed["operations"] = - flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperations(original["operations"], d, config) - return []interface{}{transformed} -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToResources(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperations(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "service_name": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsServiceName(original["serviceName"], d, config), - "method_selectors": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectors(original["methodSelectors"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsServiceName(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - 
transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "method": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(original["method"], d, config), - "permission": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(original["permission"], d, config), - }) - } - return transformed -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessContextManagerServicePerimetersServicePerimetersUseExplicitDryRunSpec(v interface{}, d *resource_access_context_manager_service_perimeters_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAccessContextManagerServicePerimetersServicePerimeters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_access_context_manager_service_perimeters_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandAccessContextManagerServicePerimetersServicePerimetersName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedTitle, err := expandAccessContextManagerServicePerimetersServicePerimetersTitle(original["title"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { - transformed["title"] = transformedTitle - } - - transformedDescription, err := expandAccessContextManagerServicePerimetersServicePerimetersDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedCreateTime, err := expandAccessContextManagerServicePerimetersServicePerimetersCreateTime(original["create_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedCreateTime); val.IsValid() && !isEmptyValue(val) { - transformed["createTime"] = transformedCreateTime - } - - transformedUpdateTime, err := expandAccessContextManagerServicePerimetersServicePerimetersUpdateTime(original["update_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedUpdateTime); val.IsValid() && !isEmptyValue(val) { - transformed["updateTime"] = transformedUpdateTime - } - - transformedPerimeterType, err := expandAccessContextManagerServicePerimetersServicePerimetersPerimeterType(original["perimeter_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedPerimeterType); val.IsValid() && !isEmptyValue(val) 
{ - transformed["perimeterType"] = transformedPerimeterType - } - - transformedStatus, err := expandAccessContextManagerServicePerimetersServicePerimetersStatus(original["status"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedStatus); val.IsValid() && !isEmptyValue(val) { - transformed["status"] = transformedStatus - } - - transformedSpec, err := expandAccessContextManagerServicePerimetersServicePerimetersSpec(original["spec"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedSpec); val.IsValid() && !isEmptyValue(val) { - transformed["spec"] = transformedSpec - } - - transformedUseExplicitDryRunSpec, err := expandAccessContextManagerServicePerimetersServicePerimetersUseExplicitDryRunSpec(original["use_explicit_dry_run_spec"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedUseExplicitDryRunSpec); val.IsValid() && !isEmptyValue(val) { - transformed["useExplicitDryRunSpec"] = transformedUseExplicitDryRunSpec - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersCreateTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandAccessContextManagerServicePerimetersServicePerimetersUpdateTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersPerimeterType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResources, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusResources(original["resources"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { - transformed["resources"] = transformedResources - } - - transformedAccessLevels, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusAccessLevels(original["access_levels"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedAccessLevels); val.IsValid() && !isEmptyValue(val) { - transformed["accessLevels"] = transformedAccessLevels - } - - transformedRestrictedServices, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusRestrictedServices(original["restricted_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedRestrictedServices); val.IsValid() && !isEmptyValue(val) { - transformed["restrictedServices"] = transformedRestrictedServices - } - - transformedVPCAccessibleServices, err := 
expandAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServices(original["vpc_accessible_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedVPCAccessibleServices); val.IsValid() && !isEmptyValue(val) { - transformed["vpcAccessibleServices"] = transformedVPCAccessibleServices - } - - transformedIngressPolicies, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPolicies(original["ingress_policies"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedIngressPolicies); val.IsValid() && !isEmptyValue(val) { - transformed["ingressPolicies"] = transformedIngressPolicies - } - - transformedEgressPolicies, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPolicies(original["egress_policies"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedEgressPolicies); val.IsValid() && !isEmptyValue(val) { - transformed["egressPolicies"] = transformedEgressPolicies - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusAccessLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusRestrictedServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_access_context_manager_service_perimeters_schema.Set).List() - return v, nil -} - -func 
expandAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnableRestriction, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServicesEnableRestriction(original["enable_restriction"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedEnableRestriction); val.IsValid() && !isEmptyValue(val) { - transformed["enableRestriction"] = transformedEnableRestriction - } - - transformedAllowedServices, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServicesAllowedServices(original["allowed_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedAllowedServices); val.IsValid() && !isEmptyValue(val) { - transformed["allowedServices"] = transformedAllowedServices - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServicesEnableRestriction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusVPCAccessibleServicesAllowedServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_access_context_manager_service_perimeters_schema.Set).List() - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, 
len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIngressFrom, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFrom(original["ingress_from"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedIngressFrom); val.IsValid() && !isEmptyValue(val) { - transformed["ingressFrom"] = transformedIngressFrom - } - - transformedIngressTo, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressTo(original["ingress_to"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedIngressTo); val.IsValid() && !isEmptyValue(val) { - transformed["ingressTo"] = transformedIngressTo - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIdentityType, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentityType(original["identity_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedIdentityType); val.IsValid() && !isEmptyValue(val) { - transformed["identityType"] = transformedIdentityType - } - - transformedIdentities, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentities(original["identities"], d, config) - if err != nil { 
- return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedIdentities); val.IsValid() && !isEmptyValue(val) { - transformed["identities"] = transformedIdentities - } - - transformedSources, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSources(original["sources"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedSources); val.IsValid() && !isEmptyValue(val) { - transformed["sources"] = transformedSources - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAccessLevel, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesAccessLevel(original["access_level"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedAccessLevel); val.IsValid() && !isEmptyValue(val) { - transformed["accessLevel"] = transformedAccessLevel - } - - transformedResource, err := 
expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesResource(original["resource"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedResource); val.IsValid() && !isEmptyValue(val) { - transformed["resource"] = transformedResource - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressTo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResources, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToResources(original["resources"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { - transformed["resources"] = transformedResources - } - - transformedOperations, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperations(original["operations"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedOperations); val.IsValid() && !isEmptyValue(val) { - 
transformed["operations"] = transformedOperations - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServiceName, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsServiceName(original["service_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedServiceName); val.IsValid() && !isEmptyValue(val) { - transformed["serviceName"] = transformedServiceName - } - - transformedMethodSelectors, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectors(original["method_selectors"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !isEmptyValue(val) { - transformed["methodSelectors"] = transformedMethodSelectors - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMethod, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(original["method"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { - transformed["method"] = transformedMethod - } - - transformedPermission, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(original["permission"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedPermission); val.IsValid() && !isEmptyValue(val) { - transformed["permission"] = transformedPermission - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedEgressFrom, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFrom(original["egress_from"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedEgressFrom); val.IsValid() && !isEmptyValue(val) { - transformed["egressFrom"] = transformedEgressFrom - } - - transformedEgressTo, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressTo(original["egress_to"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedEgressTo); val.IsValid() && !isEmptyValue(val) { - transformed["egressTo"] = transformedEgressTo - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIdentityType, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentityType(original["identity_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedIdentityType); val.IsValid() && !isEmptyValue(val) { - transformed["identityType"] = transformedIdentityType - } - - transformedIdentities, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentities(original["identities"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedIdentities); val.IsValid() && 
!isEmptyValue(val) { - transformed["identities"] = transformedIdentities - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressTo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResources, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToResources(original["resources"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { - transformed["resources"] = transformedResources - } - - transformedOperations, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperations(original["operations"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedOperations); val.IsValid() && !isEmptyValue(val) { - transformed["operations"] = transformedOperations - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServiceName, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsServiceName(original["service_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedServiceName); val.IsValid() && !isEmptyValue(val) { - transformed["serviceName"] = transformedServiceName - } - - transformedMethodSelectors, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectors(original["method_selectors"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !isEmptyValue(val) { - transformed["methodSelectors"] = transformedMethodSelectors - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMethod, err := 
expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(original["method"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { - transformed["method"] = transformedMethod - } - - transformedPermission, err := expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(original["permission"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedPermission); val.IsValid() && !isEmptyValue(val) { - transformed["permission"] = transformedPermission - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResources, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecResources(original["resources"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { - 
transformed["resources"] = transformedResources - } - - transformedAccessLevels, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecAccessLevels(original["access_levels"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedAccessLevels); val.IsValid() && !isEmptyValue(val) { - transformed["accessLevels"] = transformedAccessLevels - } - - transformedRestrictedServices, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecRestrictedServices(original["restricted_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedRestrictedServices); val.IsValid() && !isEmptyValue(val) { - transformed["restrictedServices"] = transformedRestrictedServices - } - - transformedVPCAccessibleServices, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServices(original["vpc_accessible_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedVPCAccessibleServices); val.IsValid() && !isEmptyValue(val) { - transformed["vpcAccessibleServices"] = transformedVPCAccessibleServices - } - - transformedIngressPolicies, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPolicies(original["ingress_policies"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedIngressPolicies); val.IsValid() && !isEmptyValue(val) { - transformed["ingressPolicies"] = transformedIngressPolicies - } - - transformedEgressPolicies, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolicies(original["egress_policies"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedEgressPolicies); val.IsValid() && !isEmptyValue(val) { - transformed["egressPolicies"] = transformedEgressPolicies - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecAccessLevels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecRestrictedServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnableRestriction, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServicesEnableRestriction(original["enable_restriction"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedEnableRestriction); val.IsValid() && !isEmptyValue(val) { - transformed["enableRestriction"] = transformedEnableRestriction - } - - transformedAllowedServices, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServicesAllowedServices(original["allowed_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedAllowedServices); val.IsValid() && !isEmptyValue(val) { - transformed["allowedServices"] = 
transformedAllowedServices - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServicesEnableRestriction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecVPCAccessibleServicesAllowedServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIngressFrom, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFrom(original["ingress_from"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedIngressFrom); val.IsValid() && !isEmptyValue(val) { - transformed["ingressFrom"] = transformedIngressFrom - } - - transformedIngressTo, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressTo(original["ingress_to"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedIngressTo); val.IsValid() && !isEmptyValue(val) { - transformed["ingressTo"] = transformedIngressTo - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - 
original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIdentityType, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentityType(original["identity_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedIdentityType); val.IsValid() && !isEmptyValue(val) { - transformed["identityType"] = transformedIdentityType - } - - transformedIdentities, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentities(original["identities"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedIdentities); val.IsValid() && !isEmptyValue(val) { - transformed["identities"] = transformedIdentities - } - - transformedSources, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSources(original["sources"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedSources); val.IsValid() && !isEmptyValue(val) { - transformed["sources"] = transformedSources - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 
0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAccessLevel, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesAccessLevel(original["access_level"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedAccessLevel); val.IsValid() && !isEmptyValue(val) { - transformed["accessLevel"] = transformedAccessLevel - } - - transformedResource, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesResource(original["resource"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedResource); val.IsValid() && !isEmptyValue(val) { - transformed["resource"] = transformedResource - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressTo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResources, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToResources(original["resources"], d, config) 
- if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { - transformed["resources"] = transformedResources - } - - transformedOperations, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperations(original["operations"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedOperations); val.IsValid() && !isEmptyValue(val) { - transformed["operations"] = transformedOperations - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServiceName, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsServiceName(original["service_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedServiceName); val.IsValid() && !isEmptyValue(val) { - transformed["serviceName"] = transformedServiceName - } - - transformedMethodSelectors, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectors(original["method_selectors"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !isEmptyValue(val) { - transformed["methodSelectors"] = transformedMethodSelectors - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMethod, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(original["method"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { - transformed["method"] = transformedMethod - } - - transformedPermission, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(original["permission"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedPermission); val.IsValid() && !isEmptyValue(val) { - transformed["permission"] = transformedPermission - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { 
- return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolicies(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEgressFrom, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFrom(original["egress_from"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedEgressFrom); val.IsValid() && !isEmptyValue(val) { - transformed["egressFrom"] = transformedEgressFrom - } - - transformedEgressTo, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressTo(original["egress_to"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedEgressTo); val.IsValid() && !isEmptyValue(val) { - transformed["egressTo"] = transformedEgressTo - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIdentityType, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentityType(original["identity_type"], 
d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedIdentityType); val.IsValid() && !isEmptyValue(val) { - transformed["identityType"] = transformedIdentityType - } - - transformedIdentities, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentities(original["identities"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedIdentities); val.IsValid() && !isEmptyValue(val) { - transformed["identities"] = transformedIdentities - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressTo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResources, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToResources(original["resources"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { - transformed["resources"] = transformedResources - } - - transformedOperations, err := 
expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperations(original["operations"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedOperations); val.IsValid() && !isEmptyValue(val) { - transformed["operations"] = transformedOperations - } - - return transformed, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServiceName, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsServiceName(original["service_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedServiceName); val.IsValid() && !isEmptyValue(val) { - transformed["serviceName"] = transformedServiceName - } - - transformedMethodSelectors, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectors(original["method_selectors"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedMethodSelectors); val.IsValid() && !isEmptyValue(val) { - transformed["methodSelectors"] = transformedMethodSelectors - } - - req = append(req, transformed) - } - return req, nil -} - -func 
expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMethod, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(original["method"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { - transformed["method"] = transformedMethod - } - - transformedPermission, err := expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(original["permission"], d, config) - if err != nil { - return nil, err - } else if val := resource_access_context_manager_service_perimeters_reflect.ValueOf(transformedPermission); val.IsValid() && !isEmptyValue(val) { - transformed["permission"] = transformedPermission - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandAccessContextManagerServicePerimetersServicePerimetersUseExplicitDryRunSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessContextManagerServicePerimetersParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceActiveDirectoryDomain() *resource_active_directory_domain_schema.Resource { - return &resource_active_directory_domain_schema.Resource{ - Create: resourceActiveDirectoryDomainCreate, - Read: resourceActiveDirectoryDomainRead, - Update: resourceActiveDirectoryDomainUpdate, - Delete: resourceActiveDirectoryDomainDelete, - - Importer: &resource_active_directory_domain_schema.ResourceImporter{ - State: resourceActiveDirectoryDomainImport, - }, - - Timeouts: &resource_active_directory_domain_schema.ResourceTimeout{ - Create: resource_active_directory_domain_schema.DefaultTimeout(60 * resource_active_directory_domain_time.Minute), - Update: resource_active_directory_domain_schema.DefaultTimeout(60 * resource_active_directory_domain_time.Minute), - Delete: resource_active_directory_domain_schema.DefaultTimeout(60 * resource_active_directory_domain_time.Minute), - }, - - Schema: map[string]*resource_active_directory_domain_schema.Schema{ - "domain_name": { - Type: resource_active_directory_domain_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateADDomainName(), - Description: `The fully qualified domain name. e.g. mydomain.myorganization.com, with the restrictions, -https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains.`, - }, - "locations": { - Type: resource_active_directory_domain_schema.TypeList, - Required: true, - Description: `Locations where domain needs to be provisioned. [regions][compute/docs/regions-zones/] -e.g. us-west1 or us-east4 Service supports up to 4 locations at once. 
Each location will use a /26 block.`, - Elem: &resource_active_directory_domain_schema.Schema{ - Type: resource_active_directory_domain_schema.TypeString, - }, - }, - "reserved_ip_range": { - Type: resource_active_directory_domain_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The CIDR range of internal addresses that are reserved for this domain. Reserved networks must be /24 or larger. -Ranges must be unique and non-overlapping with existing subnets in authorizedNetworks`, - }, - "admin": { - Type: resource_active_directory_domain_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The name of delegated administrator account used to perform Active Directory operations. -If not specified, setupadmin will be used.`, - Default: "setupadmin", - }, - "authorized_networks": { - Type: resource_active_directory_domain_schema.TypeSet, - Optional: true, - Description: `The full names of the Google Compute Engine networks the domain instance is connected to. The domain is only available on networks listed in authorizedNetworks. -If CIDR subnets overlap between networks, domain creation will fail.`, - Elem: &resource_active_directory_domain_schema.Schema{ - Type: resource_active_directory_domain_schema.TypeString, - }, - Set: resource_active_directory_domain_schema.HashString, - }, - "labels": { - Type: resource_active_directory_domain_schema.TypeMap, - Optional: true, - Description: `Resource labels that can contain user-provided metadata`, - Elem: &resource_active_directory_domain_schema.Schema{Type: resource_active_directory_domain_schema.TypeString}, - }, - "fqdn": { - Type: resource_active_directory_domain_schema.TypeString, - Computed: true, - Description: `The fully-qualified domain name of the exposed domain used by clients to connect to the service. 
-Similar to what would be chosen for an Active Directory set up on an internal network.`, - }, - "name": { - Type: resource_active_directory_domain_schema.TypeString, - Computed: true, - Description: `The unique name of the domain using the format: 'projects/{project}/locations/global/domains/{domainName}'.`, - }, - "project": { - Type: resource_active_directory_domain_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceActiveDirectoryDomainCreate(d *resource_active_directory_domain_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandActiveDirectoryDomainLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_active_directory_domain_reflect.ValueOf(labelsProp)) && (ok || !resource_active_directory_domain_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - authorizedNetworksProp, err := expandActiveDirectoryDomainAuthorizedNetworks(d.Get("authorized_networks"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("authorized_networks"); !isEmptyValue(resource_active_directory_domain_reflect.ValueOf(authorizedNetworksProp)) && (ok || !resource_active_directory_domain_reflect.DeepEqual(v, authorizedNetworksProp)) { - obj["authorizedNetworks"] = authorizedNetworksProp - } - reservedIpRangeProp, err := expandActiveDirectoryDomainReservedIpRange(d.Get("reserved_ip_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("reserved_ip_range"); !isEmptyValue(resource_active_directory_domain_reflect.ValueOf(reservedIpRangeProp)) && (ok || !resource_active_directory_domain_reflect.DeepEqual(v, reservedIpRangeProp)) { - obj["reservedIpRange"] = reservedIpRangeProp - } 
- locationsProp, err := expandActiveDirectoryDomainLocations(d.Get("locations"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("locations"); !isEmptyValue(resource_active_directory_domain_reflect.ValueOf(locationsProp)) && (ok || !resource_active_directory_domain_reflect.DeepEqual(v, locationsProp)) { - obj["locations"] = locationsProp - } - adminProp, err := expandActiveDirectoryDomainAdmin(d.Get("admin"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("admin"); !isEmptyValue(resource_active_directory_domain_reflect.ValueOf(adminProp)) && (ok || !resource_active_directory_domain_reflect.DeepEqual(v, adminProp)) { - obj["admin"] = adminProp - } - - url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains?domainName={{domain_name}}") - if err != nil { - return err - } - - resource_active_directory_domain_log.Printf("[DEBUG] Creating new Domain: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_active_directory_domain_fmt.Errorf("Error fetching project for Domain: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_active_directory_domain_schema.TimeoutCreate)) - if err != nil { - return resource_active_directory_domain_fmt.Errorf("Error creating Domain: %s", err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_active_directory_domain_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = activeDirectoryOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Domain", userAgent, - d.Timeout(resource_active_directory_domain_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return 
resource_active_directory_domain_fmt.Errorf("Error waiting to create Domain: %s", err) - } - - if err := d.Set("name", flattenActiveDirectoryDomainName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{name}}") - if err != nil { - return resource_active_directory_domain_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_active_directory_domain_log.Printf("[DEBUG] Finished creating Domain %q: %#v", d.Id(), res) - - return resourceActiveDirectoryDomainRead(d, meta) -} - -func resourceActiveDirectoryDomainRead(d *resource_active_directory_domain_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_active_directory_domain_fmt.Errorf("Error fetching project for Domain: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_active_directory_domain_fmt.Sprintf("ActiveDirectoryDomain %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_active_directory_domain_fmt.Errorf("Error reading Domain: %s", err) - } - - if err := d.Set("name", flattenActiveDirectoryDomainName(res["name"], d, config)); err != nil { - return resource_active_directory_domain_fmt.Errorf("Error reading Domain: %s", err) - } - if err := d.Set("labels", flattenActiveDirectoryDomainLabels(res["labels"], d, config)); err != nil { - return resource_active_directory_domain_fmt.Errorf("Error reading Domain: %s", err) - } - if err := 
d.Set("authorized_networks", flattenActiveDirectoryDomainAuthorizedNetworks(res["authorizedNetworks"], d, config)); err != nil { - return resource_active_directory_domain_fmt.Errorf("Error reading Domain: %s", err) - } - if err := d.Set("reserved_ip_range", flattenActiveDirectoryDomainReservedIpRange(res["reservedIpRange"], d, config)); err != nil { - return resource_active_directory_domain_fmt.Errorf("Error reading Domain: %s", err) - } - if err := d.Set("locations", flattenActiveDirectoryDomainLocations(res["locations"], d, config)); err != nil { - return resource_active_directory_domain_fmt.Errorf("Error reading Domain: %s", err) - } - if err := d.Set("admin", flattenActiveDirectoryDomainAdmin(res["admin"], d, config)); err != nil { - return resource_active_directory_domain_fmt.Errorf("Error reading Domain: %s", err) - } - if err := d.Set("fqdn", flattenActiveDirectoryDomainFqdn(res["fqdn"], d, config)); err != nil { - return resource_active_directory_domain_fmt.Errorf("Error reading Domain: %s", err) - } - - return nil -} - -func resourceActiveDirectoryDomainUpdate(d *resource_active_directory_domain_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_active_directory_domain_fmt.Errorf("Error fetching project for Domain: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - labelsProp, err := expandActiveDirectoryDomainLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_active_directory_domain_reflect.ValueOf(v)) && (ok || !resource_active_directory_domain_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - authorizedNetworksProp, err := expandActiveDirectoryDomainAuthorizedNetworks(d.Get("authorized_networks"), 
d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("authorized_networks"); !isEmptyValue(resource_active_directory_domain_reflect.ValueOf(v)) && (ok || !resource_active_directory_domain_reflect.DeepEqual(v, authorizedNetworksProp)) { - obj["authorizedNetworks"] = authorizedNetworksProp - } - locationsProp, err := expandActiveDirectoryDomainLocations(d.Get("locations"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("locations"); !isEmptyValue(resource_active_directory_domain_reflect.ValueOf(v)) && (ok || !resource_active_directory_domain_reflect.DeepEqual(v, locationsProp)) { - obj["locations"] = locationsProp - } - - url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}{{name}}") - if err != nil { - return err - } - - resource_active_directory_domain_log.Printf("[DEBUG] Updating Domain %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("authorized_networks") { - updateMask = append(updateMask, "authorizedNetworks") - } - - if d.HasChange("locations") { - updateMask = append(updateMask, "locations") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_active_directory_domain_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_active_directory_domain_schema.TimeoutUpdate)) - - if err != nil { - return resource_active_directory_domain_fmt.Errorf("Error updating Domain %q: %s", d.Id(), err) - } else { - resource_active_directory_domain_log.Printf("[DEBUG] Finished updating Domain %q: %#v", d.Id(), res) - } - - err = activeDirectoryOperationWaitTime( - config, res, project, "Updating Domain", userAgent, - d.Timeout(resource_active_directory_domain_schema.TimeoutUpdate)) - 
- if err != nil { - return err - } - - return resourceActiveDirectoryDomainRead(d, meta) -} - -func resourceActiveDirectoryDomainDelete(d *resource_active_directory_domain_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_active_directory_domain_fmt.Errorf("Error fetching project for Domain: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains/{{domain_name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_active_directory_domain_log.Printf("[DEBUG] Deleting Domain %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_active_directory_domain_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Domain") - } - - err = activeDirectoryOperationWaitTime( - config, res, project, "Deleting Domain", userAgent, - d.Timeout(resource_active_directory_domain_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_active_directory_domain_log.Printf("[DEBUG] Finished deleting Domain %q: %#v", d.Id(), res) - return nil -} - -func resourceActiveDirectoryDomainImport(d *resource_active_directory_domain_schema.ResourceData, meta interface{}) ([]*resource_active_directory_domain_schema.ResourceData, error) { - - config := meta.(*Config) - - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*resource_active_directory_domain_schema.ResourceData{d}, nil -} - -func flattenActiveDirectoryDomainName(v interface{}, d 
*resource_active_directory_domain_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenActiveDirectoryDomainLabels(v interface{}, d *resource_active_directory_domain_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenActiveDirectoryDomainAuthorizedNetworks(v interface{}, d *resource_active_directory_domain_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_active_directory_domain_schema.NewSet(resource_active_directory_domain_schema.HashString, v.([]interface{})) -} - -func flattenActiveDirectoryDomainReservedIpRange(v interface{}, d *resource_active_directory_domain_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenActiveDirectoryDomainLocations(v interface{}, d *resource_active_directory_domain_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenActiveDirectoryDomainAdmin(v interface{}, d *resource_active_directory_domain_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenActiveDirectoryDomainFqdn(v interface{}, d *resource_active_directory_domain_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandActiveDirectoryDomainLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandActiveDirectoryDomainAuthorizedNetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_active_directory_domain_schema.Set).List() - return v, nil -} - -func expandActiveDirectoryDomainReservedIpRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandActiveDirectoryDomainLocations(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandActiveDirectoryDomainAdmin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceActiveDirectoryDomainTrust() *resource_active_directory_domain_trust_schema.Resource { - return &resource_active_directory_domain_trust_schema.Resource{ - Create: resourceActiveDirectoryDomainTrustCreate, - Read: resourceActiveDirectoryDomainTrustRead, - Update: resourceActiveDirectoryDomainTrustUpdate, - Delete: resourceActiveDirectoryDomainTrustDelete, - - Importer: &resource_active_directory_domain_trust_schema.ResourceImporter{ - State: resourceActiveDirectoryDomainTrustImport, - }, - - Timeouts: &resource_active_directory_domain_trust_schema.ResourceTimeout{ - Create: resource_active_directory_domain_trust_schema.DefaultTimeout(10 * resource_active_directory_domain_trust_time.Minute), - Update: resource_active_directory_domain_trust_schema.DefaultTimeout(10 * resource_active_directory_domain_trust_time.Minute), - Delete: resource_active_directory_domain_trust_schema.DefaultTimeout(10 * resource_active_directory_domain_trust_time.Minute), - }, - - Schema: map[string]*resource_active_directory_domain_trust_schema.Schema{ - "domain": { - Type: resource_active_directory_domain_trust_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The fully qualified domain name. e.g. 
mydomain.myorganization.com, with the restrictions, -https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains.`, - }, - "target_dns_ip_addresses": { - Type: resource_active_directory_domain_trust_schema.TypeSet, - Required: true, - Description: `The target DNS server IP addresses which can resolve the remote domain involved in the trust.`, - Elem: &resource_active_directory_domain_trust_schema.Schema{ - Type: resource_active_directory_domain_trust_schema.TypeString, - }, - Set: resource_active_directory_domain_trust_schema.HashString, - }, - "target_domain_name": { - Type: resource_active_directory_domain_trust_schema.TypeString, - Required: true, - Description: `The fully qualified target domain name which will be in trust with the current domain.`, - }, - "trust_direction": { - Type: resource_active_directory_domain_trust_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_active_directory_domain_trust_validation.StringInSlice([]string{"INBOUND", "OUTBOUND", "BIDIRECTIONAL"}, false), - Description: `The trust direction, which decides if the current domain is trusted, trusting, or both. Possible values: ["INBOUND", "OUTBOUND", "BIDIRECTIONAL"]`, - }, - "trust_handshake_secret": { - Type: resource_active_directory_domain_trust_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The trust secret used for the handshake with the target domain. This will not be stored.`, - Sensitive: true, - }, - "trust_type": { - Type: resource_active_directory_domain_trust_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_active_directory_domain_trust_validation.StringInSlice([]string{"FOREST", "EXTERNAL"}, false), - Description: `The type of trust represented by the trust resource. 
Possible values: ["FOREST", "EXTERNAL"]`, - }, - "selective_authentication": { - Type: resource_active_directory_domain_trust_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether the trusted side has forest/domain wide access or selective access to an approved set of resources.`, - }, - "project": { - Type: resource_active_directory_domain_trust_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceActiveDirectoryDomainTrustCreate(d *resource_active_directory_domain_trust_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - targetDomainNameProp, err := expandNestedActiveDirectoryDomainTrustTargetDomainName(d.Get("target_domain_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_domain_name"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(targetDomainNameProp)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, targetDomainNameProp)) { - obj["targetDomainName"] = targetDomainNameProp - } - trustTypeProp, err := expandNestedActiveDirectoryDomainTrustTrustType(d.Get("trust_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_type"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(trustTypeProp)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, trustTypeProp)) { - obj["trustType"] = trustTypeProp - } - trustDirectionProp, err := expandNestedActiveDirectoryDomainTrustTrustDirection(d.Get("trust_direction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_direction"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(trustDirectionProp)) && (ok || 
!resource_active_directory_domain_trust_reflect.DeepEqual(v, trustDirectionProp)) { - obj["trustDirection"] = trustDirectionProp - } - selectiveAuthenticationProp, err := expandNestedActiveDirectoryDomainTrustSelectiveAuthentication(d.Get("selective_authentication"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("selective_authentication"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(selectiveAuthenticationProp)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, selectiveAuthenticationProp)) { - obj["selectiveAuthentication"] = selectiveAuthenticationProp - } - targetDnsIpAddressesProp, err := expandNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(d.Get("target_dns_ip_addresses"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_dns_ip_addresses"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(targetDnsIpAddressesProp)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, targetDnsIpAddressesProp)) { - obj["targetDnsIpAddresses"] = targetDnsIpAddressesProp - } - trustHandshakeSecretProp, err := expandNestedActiveDirectoryDomainTrustTrustHandshakeSecret(d.Get("trust_handshake_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_handshake_secret"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(trustHandshakeSecretProp)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, trustHandshakeSecretProp)) { - obj["trustHandshakeSecret"] = trustHandshakeSecretProp - } - - obj, err = resourceActiveDirectoryDomainTrustEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains/{{domain}}:attachTrust") - if err != nil { - return err - } - - resource_active_directory_domain_trust_log.Printf("[DEBUG] Creating new DomainTrust: %#v", obj) 
- billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_active_directory_domain_trust_fmt.Errorf("Error fetching project for DomainTrust: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_active_directory_domain_trust_schema.TimeoutCreate)) - if err != nil { - return resource_active_directory_domain_trust_fmt.Errorf("Error creating DomainTrust: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/domains/{{domain}}/{{target_domain_name}}") - if err != nil { - return resource_active_directory_domain_trust_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = activeDirectoryOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating DomainTrust", userAgent, - d.Timeout(resource_active_directory_domain_trust_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_active_directory_domain_trust_fmt.Errorf("Error waiting to create DomainTrust: %s", err) - } - - opRes, err = resourceActiveDirectoryDomainTrustDecoder(d, meta, opRes) - if err != nil { - return resource_active_directory_domain_trust_fmt.Errorf("Error decoding response from operation: %s", err) - } - if opRes == nil { - return resource_active_directory_domain_trust_fmt.Errorf("Error decoding response from operation, could not find object") - } - - if _, ok := opRes["trusts"]; ok { - opRes, err = flattenNestedActiveDirectoryDomainTrust(d, meta, opRes) - if err != nil { - return resource_active_directory_domain_trust_fmt.Errorf("Error getting nested object from operation response: %s", err) - } - if opRes == nil { - - return resource_active_directory_domain_trust_fmt.Errorf("Error decoding response from operation, could not find nested object") - } - } - if 
err := d.Set("target_domain_name", flattenNestedActiveDirectoryDomainTrustTargetDomainName(opRes["targetDomainName"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "projects/{{project}}/locations/global/domains/{{domain}}/{{target_domain_name}}") - if err != nil { - return resource_active_directory_domain_trust_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_active_directory_domain_trust_log.Printf("[DEBUG] Finished creating DomainTrust %q: %#v", d.Id(), res) - - return resourceActiveDirectoryDomainTrustRead(d, meta) -} - -func resourceActiveDirectoryDomainTrustRead(d *resource_active_directory_domain_trust_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains/{{domain}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_active_directory_domain_trust_fmt.Errorf("Error fetching project for DomainTrust: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_active_directory_domain_trust_fmt.Sprintf("ActiveDirectoryDomainTrust %q", d.Id())) - } - - res, err = flattenNestedActiveDirectoryDomainTrust(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_active_directory_domain_trust_log.Printf("[DEBUG] Removing ActiveDirectoryDomainTrust because it couldn't be matched.") - d.SetId("") - return nil - } - - res, err = resourceActiveDirectoryDomainTrustDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - 
resource_active_directory_domain_trust_log.Printf("[DEBUG] Removing ActiveDirectoryDomainTrust because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_active_directory_domain_trust_fmt.Errorf("Error reading DomainTrust: %s", err) - } - - if err := d.Set("target_domain_name", flattenNestedActiveDirectoryDomainTrustTargetDomainName(res["targetDomainName"], d, config)); err != nil { - return resource_active_directory_domain_trust_fmt.Errorf("Error reading DomainTrust: %s", err) - } - if err := d.Set("trust_type", flattenNestedActiveDirectoryDomainTrustTrustType(res["trustType"], d, config)); err != nil { - return resource_active_directory_domain_trust_fmt.Errorf("Error reading DomainTrust: %s", err) - } - if err := d.Set("trust_direction", flattenNestedActiveDirectoryDomainTrustTrustDirection(res["trustDirection"], d, config)); err != nil { - return resource_active_directory_domain_trust_fmt.Errorf("Error reading DomainTrust: %s", err) - } - if err := d.Set("selective_authentication", flattenNestedActiveDirectoryDomainTrustSelectiveAuthentication(res["selectiveAuthentication"], d, config)); err != nil { - return resource_active_directory_domain_trust_fmt.Errorf("Error reading DomainTrust: %s", err) - } - if err := d.Set("target_dns_ip_addresses", flattenNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(res["targetDnsIpAddresses"], d, config)); err != nil { - return resource_active_directory_domain_trust_fmt.Errorf("Error reading DomainTrust: %s", err) - } - - return nil -} - -func resourceActiveDirectoryDomainTrustUpdate(d *resource_active_directory_domain_trust_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_active_directory_domain_trust_fmt.Errorf("Error 
fetching project for DomainTrust: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - targetDomainNameProp, err := expandNestedActiveDirectoryDomainTrustTargetDomainName(d.Get("target_domain_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_domain_name"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(v)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, targetDomainNameProp)) { - obj["targetDomainName"] = targetDomainNameProp - } - trustTypeProp, err := expandNestedActiveDirectoryDomainTrustTrustType(d.Get("trust_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_type"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(v)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, trustTypeProp)) { - obj["trustType"] = trustTypeProp - } - trustDirectionProp, err := expandNestedActiveDirectoryDomainTrustTrustDirection(d.Get("trust_direction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_direction"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(v)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, trustDirectionProp)) { - obj["trustDirection"] = trustDirectionProp - } - selectiveAuthenticationProp, err := expandNestedActiveDirectoryDomainTrustSelectiveAuthentication(d.Get("selective_authentication"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("selective_authentication"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(v)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, selectiveAuthenticationProp)) { - obj["selectiveAuthentication"] = selectiveAuthenticationProp - } - targetDnsIpAddressesProp, err := expandNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(d.Get("target_dns_ip_addresses"), d, config) - if err != nil { 
- return err - } else if v, ok := d.GetOkExists("target_dns_ip_addresses"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(v)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, targetDnsIpAddressesProp)) { - obj["targetDnsIpAddresses"] = targetDnsIpAddressesProp - } - trustHandshakeSecretProp, err := expandNestedActiveDirectoryDomainTrustTrustHandshakeSecret(d.Get("trust_handshake_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_handshake_secret"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(v)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, trustHandshakeSecretProp)) { - obj["trustHandshakeSecret"] = trustHandshakeSecretProp - } - - obj, err = resourceActiveDirectoryDomainTrustUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains/{{domain}}:reconfigureTrust") - if err != nil { - return err - } - - resource_active_directory_domain_trust_log.Printf("[DEBUG] Updating DomainTrust %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_active_directory_domain_trust_schema.TimeoutUpdate)) - - if err != nil { - return resource_active_directory_domain_trust_fmt.Errorf("Error updating DomainTrust %q: %s", d.Id(), err) - } else { - resource_active_directory_domain_trust_log.Printf("[DEBUG] Finished updating DomainTrust %q: %#v", d.Id(), res) - } - - err = activeDirectoryOperationWaitTime( - config, res, project, "Updating DomainTrust", userAgent, - d.Timeout(resource_active_directory_domain_trust_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceActiveDirectoryDomainTrustRead(d, meta) -} - -func 
resourceActiveDirectoryDomainTrustDelete(d *resource_active_directory_domain_trust_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ActiveDirectoryBasePath}}projects/{{project}}/locations/global/domains/{{domain}}:detachTrust") - if err != nil { - return err - } - - obj := make(map[string]interface{}) - targetDomainNameProp, err := expandNestedActiveDirectoryDomainTrustTargetDomainName(d.Get("target_domain_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_domain_name"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(v)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, targetDomainNameProp)) { - obj["targetDomainName"] = targetDomainNameProp - } - trustTypeProp, err := expandNestedActiveDirectoryDomainTrustTrustType(d.Get("trust_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_type"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(v)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, trustTypeProp)) { - obj["trustType"] = trustTypeProp - } - trustDirectionProp, err := expandNestedActiveDirectoryDomainTrustTrustDirection(d.Get("trust_direction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_direction"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(v)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, trustDirectionProp)) { - obj["trustDirection"] = trustDirectionProp - } - selectiveAuthenticationProp, err := expandNestedActiveDirectoryDomainTrustSelectiveAuthentication(d.Get("selective_authentication"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("selective_authentication"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(v)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, selectiveAuthenticationProp)) { - obj["selectiveAuthentication"] = selectiveAuthenticationProp - } - targetDnsIpAddressesProp, err := expandNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(d.Get("target_dns_ip_addresses"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_dns_ip_addresses"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(v)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, targetDnsIpAddressesProp)) { - obj["targetDnsIpAddresses"] = targetDnsIpAddressesProp - } - trustHandshakeSecretProp, err := expandNestedActiveDirectoryDomainTrustTrustHandshakeSecret(d.Get("trust_handshake_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trust_handshake_secret"); !isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(v)) && (ok || !resource_active_directory_domain_trust_reflect.DeepEqual(v, trustHandshakeSecretProp)) { - obj["trustHandshakeSecret"] = trustHandshakeSecretProp - } - - obj, err = resourceActiveDirectoryDomainTrustEncoder(d, meta, obj) - if err != nil { - return err - } - - resource_active_directory_domain_trust_log.Printf("[DEBUG] Deleting DomainTrust %q", d.Id()) - - res, err := sendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(resource_active_directory_domain_trust_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DomainTrust") - } - - err = activeDirectoryOperationWaitTime( - config, res, project, "Deleting DomainTrust", userAgent, - d.Timeout(resource_active_directory_domain_trust_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_active_directory_domain_trust_log.Printf("[DEBUG] Finished deleting DomainTrust %q: %#v", d.Id(), res) - return nil -} - 
-func resourceActiveDirectoryDomainTrustImport(d *resource_active_directory_domain_trust_schema.ResourceData, meta interface{}) ([]*resource_active_directory_domain_trust_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/domains/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/domains/{{domain}}/{{target_domain_name}}") - if err != nil { - return nil, resource_active_directory_domain_trust_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_active_directory_domain_trust_schema.ResourceData{d}, nil -} - -func flattenNestedActiveDirectoryDomainTrustTargetDomainName(v interface{}, d *resource_active_directory_domain_trust_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedActiveDirectoryDomainTrustTrustType(v interface{}, d *resource_active_directory_domain_trust_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedActiveDirectoryDomainTrustTrustDirection(v interface{}, d *resource_active_directory_domain_trust_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedActiveDirectoryDomainTrustSelectiveAuthentication(v interface{}, d *resource_active_directory_domain_trust_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(v interface{}, d *resource_active_directory_domain_trust_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_active_directory_domain_trust_schema.NewSet(resource_active_directory_domain_trust_schema.HashString, v.([]interface{})) -} - -func expandNestedActiveDirectoryDomainTrustTargetDomainName(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandNestedActiveDirectoryDomainTrustTrustType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedActiveDirectoryDomainTrustTrustDirection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedActiveDirectoryDomainTrustSelectiveAuthentication(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedActiveDirectoryDomainTrustTargetDnsIpAddresses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_active_directory_domain_trust_schema.Set).List() - return v, nil -} - -func expandNestedActiveDirectoryDomainTrustTrustHandshakeSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceActiveDirectoryDomainTrustEncoder(d *resource_active_directory_domain_trust_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - wrappedReq := map[string]interface{}{ - "trust": obj, - } - return wrappedReq, nil -} - -func resourceActiveDirectoryDomainTrustUpdateEncoder(d *resource_active_directory_domain_trust_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - wrappedReq := map[string]interface{}{ - "targetDomainName": obj["targetDomainName"], - "targetDnsIpAddresses": obj["targetDnsIpAddresses"], - } - return wrappedReq, nil -} - -func flattenNestedActiveDirectoryDomainTrust(d *resource_active_directory_domain_trust_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["trusts"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - 
return nil, resource_active_directory_domain_trust_fmt.Errorf("expected list or map for value trusts. Actual value: %v", v) - } - - _, item, err := resourceActiveDirectoryDomainTrustFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceActiveDirectoryDomainTrustFindNestedObjectInList(d *resource_active_directory_domain_trust_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedTargetDomainName, err := expandNestedActiveDirectoryDomainTrustTargetDomainName(d.Get("target_domain_name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedTargetDomainName := flattenNestedActiveDirectoryDomainTrustTargetDomainName(expectedTargetDomainName, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - item, err := resourceActiveDirectoryDomainTrustDecoder(d, meta, item) - if err != nil { - return -1, nil, err - } - - itemTargetDomainName := flattenNestedActiveDirectoryDomainTrustTargetDomainName(item["targetDomainName"], d, meta.(*Config)) - - if !(isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(itemTargetDomainName)) && isEmptyValue(resource_active_directory_domain_trust_reflect.ValueOf(expectedFlattenedTargetDomainName))) && !resource_active_directory_domain_trust_reflect.DeepEqual(itemTargetDomainName, expectedFlattenedTargetDomainName) { - resource_active_directory_domain_trust_log.Printf("[DEBUG] Skipping item with targetDomainName= %#v, looking for %#v)", itemTargetDomainName, expectedFlattenedTargetDomainName) - continue - } - resource_active_directory_domain_trust_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func resourceActiveDirectoryDomainTrustDecoder(d 
*resource_active_directory_domain_trust_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - v, ok := res["domainTrust"] - if !ok || v == nil { - return res, nil - } - - return v.(map[string]interface{}), nil -} - -func resourceApigeeEnvgroup() *resource_apigee_envgroup_schema.Resource { - return &resource_apigee_envgroup_schema.Resource{ - Create: resourceApigeeEnvgroupCreate, - Read: resourceApigeeEnvgroupRead, - Update: resourceApigeeEnvgroupUpdate, - Delete: resourceApigeeEnvgroupDelete, - - Importer: &resource_apigee_envgroup_schema.ResourceImporter{ - State: resourceApigeeEnvgroupImport, - }, - - Timeouts: &resource_apigee_envgroup_schema.ResourceTimeout{ - Create: resource_apigee_envgroup_schema.DefaultTimeout(30 * resource_apigee_envgroup_time.Minute), - Update: resource_apigee_envgroup_schema.DefaultTimeout(4 * resource_apigee_envgroup_time.Minute), - Delete: resource_apigee_envgroup_schema.DefaultTimeout(30 * resource_apigee_envgroup_time.Minute), - }, - - Schema: map[string]*resource_apigee_envgroup_schema.Schema{ - "name": { - Type: resource_apigee_envgroup_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource ID of the environment group.`, - }, - "org_id": { - Type: resource_apigee_envgroup_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee Organization associated with the Apigee environment group, -in the format 'organizations/{{org_name}}'.`, - }, - "hostnames": { - Type: resource_apigee_envgroup_schema.TypeList, - Optional: true, - Description: `Hostnames of the environment group.`, - Elem: &resource_apigee_envgroup_schema.Schema{ - Type: resource_apigee_envgroup_schema.TypeString, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeEnvgroupCreate(d *resource_apigee_envgroup_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != 
nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandApigeeEnvgroupName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_apigee_envgroup_reflect.ValueOf(nameProp)) && (ok || !resource_apigee_envgroup_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - hostnamesProp, err := expandApigeeEnvgroupHostnames(d.Get("hostnames"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("hostnames"); !isEmptyValue(resource_apigee_envgroup_reflect.ValueOf(hostnamesProp)) && (ok || !resource_apigee_envgroup_reflect.DeepEqual(v, hostnamesProp)) { - obj["hostnames"] = hostnamesProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/envgroups") - if err != nil { - return err - } - - resource_apigee_envgroup_log.Printf("[DEBUG] Creating new Envgroup: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_apigee_envgroup_schema.TimeoutCreate)) - if err != nil { - return resource_apigee_envgroup_fmt.Errorf("Error creating Envgroup: %s", err) - } - - id, err := replaceVars(d, config, "{{org_id}}/envgroups/{{name}}") - if err != nil { - return resource_apigee_envgroup_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = apigeeOperationWaitTimeWithResponse( - config, res, &opRes, "Creating Envgroup", userAgent, - d.Timeout(resource_apigee_envgroup_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_apigee_envgroup_fmt.Errorf("Error waiting to create Envgroup: %s", err) - } - - if err := d.Set("name", flattenApigeeEnvgroupName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{org_id}}/envgroups/{{name}}") - if err != nil { - 
return resource_apigee_envgroup_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_apigee_envgroup_log.Printf("[DEBUG] Finished creating Envgroup %q: %#v", d.Id(), res) - - return resourceApigeeEnvgroupRead(d, meta) -} - -func resourceApigeeEnvgroupRead(d *resource_apigee_envgroup_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/envgroups/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_apigee_envgroup_fmt.Sprintf("ApigeeEnvgroup %q", d.Id())) - } - - if err := d.Set("name", flattenApigeeEnvgroupName(res["name"], d, config)); err != nil { - return resource_apigee_envgroup_fmt.Errorf("Error reading Envgroup: %s", err) - } - if err := d.Set("hostnames", flattenApigeeEnvgroupHostnames(res["hostnames"], d, config)); err != nil { - return resource_apigee_envgroup_fmt.Errorf("Error reading Envgroup: %s", err) - } - - return nil -} - -func resourceApigeeEnvgroupUpdate(d *resource_apigee_envgroup_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - hostnamesProp, err := expandApigeeEnvgroupHostnames(d.Get("hostnames"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("hostnames"); !isEmptyValue(resource_apigee_envgroup_reflect.ValueOf(v)) && (ok || !resource_apigee_envgroup_reflect.DeepEqual(v, hostnamesProp)) { - obj["hostnames"] = hostnamesProp - } - - url, err := replaceVars(d, config, 
"{{ApigeeBasePath}}{{org_id}}/envgroups/{{name}}") - if err != nil { - return err - } - - resource_apigee_envgroup_log.Printf("[DEBUG] Updating Envgroup %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("hostnames") { - updateMask = append(updateMask, "hostnames") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_apigee_envgroup_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_apigee_envgroup_schema.TimeoutUpdate)) - - if err != nil { - return resource_apigee_envgroup_fmt.Errorf("Error updating Envgroup %q: %s", d.Id(), err) - } else { - resource_apigee_envgroup_log.Printf("[DEBUG] Finished updating Envgroup %q: %#v", d.Id(), res) - } - - err = apigeeOperationWaitTime( - config, res, "Updating Envgroup", userAgent, - d.Timeout(resource_apigee_envgroup_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceApigeeEnvgroupRead(d, meta) -} - -func resourceApigeeEnvgroupDelete(d *resource_apigee_envgroup_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/envgroups/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_apigee_envgroup_log.Printf("[DEBUG] Deleting Envgroup %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_apigee_envgroup_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Envgroup") - } - - err = apigeeOperationWaitTime( - config, res, 
"Deleting Envgroup", userAgent, - d.Timeout(resource_apigee_envgroup_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_apigee_envgroup_log.Printf("[DEBUG] Finished deleting Envgroup %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeEnvgroupImport(d *resource_apigee_envgroup_schema.ResourceData, meta interface{}) ([]*resource_apigee_envgroup_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - nameParts := resource_apigee_envgroup_strings.Split(d.Get("name").(string), "/") - if len(nameParts) == 4 { - - orgId := resource_apigee_envgroup_fmt.Sprintf("organizations/%s", nameParts[1]) - if err := d.Set("org_id", orgId); err != nil { - return nil, resource_apigee_envgroup_fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("name", nameParts[3]); err != nil { - return nil, resource_apigee_envgroup_fmt.Errorf("Error setting name: %s", err) - } - } else if len(nameParts) == 3 { - - orgId := resource_apigee_envgroup_fmt.Sprintf("organizations/%s", nameParts[1]) - if err := d.Set("org_id", orgId); err != nil { - return nil, resource_apigee_envgroup_fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("name", nameParts[2]); err != nil { - return nil, resource_apigee_envgroup_fmt.Errorf("Error setting name: %s", err) - } - } else { - return nil, resource_apigee_envgroup_fmt.Errorf( - "Saw %s when the name is expected to have shape %s or %s", - d.Get("name"), - "organizations/{{org_name}}/envgroups/{{name}}", - "organizations/{{org_name}}/{{name}}") - } - - id, err := replaceVars(d, config, "{{org_id}}/envgroups/{{name}}") - if err != nil { - return nil, resource_apigee_envgroup_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_apigee_envgroup_schema.ResourceData{d}, nil -} - -func flattenApigeeEnvgroupName(v interface{}, d *resource_apigee_envgroup_schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenApigeeEnvgroupHostnames(v interface{}, d *resource_apigee_envgroup_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeEnvgroupName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeEnvgroupHostnames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceApigeeEnvgroupAttachment() *resource_apigee_envgroup_attachment_schema.Resource { - return &resource_apigee_envgroup_attachment_schema.Resource{ - Create: resourceApigeeEnvgroupAttachmentCreate, - Read: resourceApigeeEnvgroupAttachmentRead, - Delete: resourceApigeeEnvgroupAttachmentDelete, - - Importer: &resource_apigee_envgroup_attachment_schema.ResourceImporter{ - State: resourceApigeeEnvgroupAttachmentImport, - }, - - Timeouts: &resource_apigee_envgroup_attachment_schema.ResourceTimeout{ - Create: resource_apigee_envgroup_attachment_schema.DefaultTimeout(30 * resource_apigee_envgroup_attachment_time.Minute), - Delete: resource_apigee_envgroup_attachment_schema.DefaultTimeout(30 * resource_apigee_envgroup_attachment_time.Minute), - }, - - Schema: map[string]*resource_apigee_envgroup_attachment_schema.Schema{ - "envgroup_id": { - Type: resource_apigee_envgroup_attachment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee environment group associated with the Apigee environment, -in the format 'organizations/{{org_name}}/envgroups/{{envgroup_name}}'.`, - }, - "environment": { - Type: resource_apigee_envgroup_attachment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource ID of the environment.`, - }, - "name": { - Type: resource_apigee_envgroup_attachment_schema.TypeString, - Computed: true, - Description: `The name of the newly created attachment (output parameter).`, - }, - }, - UseJSONNumber: true, - } -} - -func 
resourceApigeeEnvgroupAttachmentCreate(d *resource_apigee_envgroup_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - environmentProp, err := expandApigeeEnvgroupAttachmentEnvironment(d.Get("environment"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("environment"); !isEmptyValue(resource_apigee_envgroup_attachment_reflect.ValueOf(environmentProp)) && (ok || !resource_apigee_envgroup_attachment_reflect.DeepEqual(v, environmentProp)) { - obj["environment"] = environmentProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{envgroup_id}}/attachments") - if err != nil { - return err - } - - resource_apigee_envgroup_attachment_log.Printf("[DEBUG] Creating new EnvgroupAttachment: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_apigee_envgroup_attachment_schema.TimeoutCreate)) - if err != nil { - return resource_apigee_envgroup_attachment_fmt.Errorf("Error creating EnvgroupAttachment: %s", err) - } - - id, err := replaceVars(d, config, "{{envgroup_id}}/attachments/{{name}}") - if err != nil { - return resource_apigee_envgroup_attachment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = apigeeOperationWaitTimeWithResponse( - config, res, &opRes, "Creating EnvgroupAttachment", userAgent, - d.Timeout(resource_apigee_envgroup_attachment_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_apigee_envgroup_attachment_fmt.Errorf("Error waiting to create EnvgroupAttachment: %s", err) - } - - if err := d.Set("name", flattenApigeeEnvgroupAttachmentName(opRes["name"], d, config)); err != nil { - return 
err - } - - id, err = replaceVars(d, config, "{{envgroup_id}}/attachments/{{name}}") - if err != nil { - return resource_apigee_envgroup_attachment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_apigee_envgroup_attachment_log.Printf("[DEBUG] Finished creating EnvgroupAttachment %q: %#v", d.Id(), res) - - return resourceApigeeEnvgroupAttachmentRead(d, meta) -} - -func resourceApigeeEnvgroupAttachmentRead(d *resource_apigee_envgroup_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{envgroup_id}}/attachments/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_apigee_envgroup_attachment_fmt.Sprintf("ApigeeEnvgroupAttachment %q", d.Id())) - } - - if err := d.Set("environment", flattenApigeeEnvgroupAttachmentEnvironment(res["environment"], d, config)); err != nil { - return resource_apigee_envgroup_attachment_fmt.Errorf("Error reading EnvgroupAttachment: %s", err) - } - if err := d.Set("name", flattenApigeeEnvgroupAttachmentName(res["name"], d, config)); err != nil { - return resource_apigee_envgroup_attachment_fmt.Errorf("Error reading EnvgroupAttachment: %s", err) - } - - return nil -} - -func resourceApigeeEnvgroupAttachmentDelete(d *resource_apigee_envgroup_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{envgroup_id}}/attachments/{{name}}") - if err != nil { - return err - } - - var 
obj map[string]interface{} - resource_apigee_envgroup_attachment_log.Printf("[DEBUG] Deleting EnvgroupAttachment %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_apigee_envgroup_attachment_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "EnvgroupAttachment") - } - - err = apigeeOperationWaitTime( - config, res, "Deleting EnvgroupAttachment", userAgent, - d.Timeout(resource_apigee_envgroup_attachment_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_apigee_envgroup_attachment_log.Printf("[DEBUG] Finished deleting EnvgroupAttachment %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeEnvgroupAttachmentImport(d *resource_apigee_envgroup_attachment_schema.ResourceData, meta interface{}) ([]*resource_apigee_envgroup_attachment_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "(?P.+)/attachments/(?P.+)", - "(?P.+)/(?P.+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{envgroup_id}}/attachments/{{name}}") - if err != nil { - return nil, resource_apigee_envgroup_attachment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_apigee_envgroup_attachment_schema.ResourceData{d}, nil -} - -func flattenApigeeEnvgroupAttachmentEnvironment(v interface{}, d *resource_apigee_envgroup_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEnvgroupAttachmentName(v interface{}, d *resource_apigee_envgroup_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeEnvgroupAttachmentEnvironment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceApigeeEnvironment() 
*resource_apigee_environment_schema.Resource { - return &resource_apigee_environment_schema.Resource{ - Create: resourceApigeeEnvironmentCreate, - Read: resourceApigeeEnvironmentRead, - Update: resourceApigeeEnvironmentUpdate, - Delete: resourceApigeeEnvironmentDelete, - - Importer: &resource_apigee_environment_schema.ResourceImporter{ - State: resourceApigeeEnvironmentImport, - }, - - Timeouts: &resource_apigee_environment_schema.ResourceTimeout{ - Create: resource_apigee_environment_schema.DefaultTimeout(30 * resource_apigee_environment_time.Minute), - Update: resource_apigee_environment_schema.DefaultTimeout(4 * resource_apigee_environment_time.Minute), - Delete: resource_apigee_environment_schema.DefaultTimeout(30 * resource_apigee_environment_time.Minute), - }, - - Schema: map[string]*resource_apigee_environment_schema.Schema{ - "name": { - Type: resource_apigee_environment_schema.TypeString, - Required: true, - Description: `The resource ID of the environment.`, - }, - "org_id": { - Type: resource_apigee_environment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee Organization associated with the Apigee environment, -in the format 'organizations/{{org_name}}'.`, - }, - "description": { - Type: resource_apigee_environment_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Description of the environment.`, - }, - "display_name": { - Type: resource_apigee_environment_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Display name of the environment.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeEnvironmentCreate(d *resource_apigee_environment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandApigeeEnvironmentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok 
:= d.GetOkExists("name"); !isEmptyValue(resource_apigee_environment_reflect.ValueOf(nameProp)) && (ok || !resource_apigee_environment_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - displayNameProp, err := expandApigeeEnvironmentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_apigee_environment_reflect.ValueOf(displayNameProp)) && (ok || !resource_apigee_environment_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandApigeeEnvironmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_apigee_environment_reflect.ValueOf(descriptionProp)) && (ok || !resource_apigee_environment_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/environments") - if err != nil { - return err - } - - resource_apigee_environment_log.Printf("[DEBUG] Creating new Environment: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_apigee_environment_schema.TimeoutCreate)) - if err != nil { - return resource_apigee_environment_fmt.Errorf("Error creating Environment: %s", err) - } - - id, err := replaceVars(d, config, "{{org_id}}/environments/{{name}}") - if err != nil { - return resource_apigee_environment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = apigeeOperationWaitTimeWithResponse( - config, res, &opRes, "Creating Environment", userAgent, - d.Timeout(resource_apigee_environment_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return 
resource_apigee_environment_fmt.Errorf("Error waiting to create Environment: %s", err) - } - - if err := d.Set("name", flattenApigeeEnvironmentName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{org_id}}/environments/{{name}}") - if err != nil { - return resource_apigee_environment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_apigee_environment_log.Printf("[DEBUG] Finished creating Environment %q: %#v", d.Id(), res) - - return resourceApigeeEnvironmentRead(d, meta) -} - -func resourceApigeeEnvironmentRead(d *resource_apigee_environment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/environments/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_apigee_environment_fmt.Sprintf("ApigeeEnvironment %q", d.Id())) - } - - if err := d.Set("name", flattenApigeeEnvironmentName(res["name"], d, config)); err != nil { - return resource_apigee_environment_fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("display_name", flattenApigeeEnvironmentDisplayName(res["displayName"], d, config)); err != nil { - return resource_apigee_environment_fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("description", flattenApigeeEnvironmentDescription(res["description"], d, config)); err != nil { - return resource_apigee_environment_fmt.Errorf("Error reading Environment: %s", err) - } - - return nil -} - -func resourceApigeeEnvironmentUpdate(d *resource_apigee_environment_schema.ResourceData, meta interface{}) error { - config := 
meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - nameProp, err := expandApigeeEnvironmentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_apigee_environment_reflect.ValueOf(v)) && (ok || !resource_apigee_environment_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - displayNameProp, err := expandApigeeEnvironmentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_apigee_environment_reflect.ValueOf(v)) && (ok || !resource_apigee_environment_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandApigeeEnvironmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_apigee_environment_reflect.ValueOf(v)) && (ok || !resource_apigee_environment_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/environments/{{name}}") - if err != nil { - return err - } - - resource_apigee_environment_log.Printf("[DEBUG] Updating Environment %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_apigee_environment_schema.TimeoutUpdate)) - - if err != nil { - return resource_apigee_environment_fmt.Errorf("Error updating Environment %q: %s", d.Id(), err) - } else { - resource_apigee_environment_log.Printf("[DEBUG] Finished updating Environment %q: %#v", d.Id(), res) - } - - err = apigeeOperationWaitTime( - config, res, "Updating Environment", userAgent, 
- d.Timeout(resource_apigee_environment_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceApigeeEnvironmentRead(d, meta) -} - -func resourceApigeeEnvironmentDelete(d *resource_apigee_environment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/environments/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_apigee_environment_log.Printf("[DEBUG] Deleting Environment %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_apigee_environment_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Environment") - } - - err = apigeeOperationWaitTime( - config, res, "Deleting Environment", userAgent, - d.Timeout(resource_apigee_environment_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_apigee_environment_log.Printf("[DEBUG] Finished deleting Environment %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeEnvironmentImport(d *resource_apigee_environment_schema.ResourceData, meta interface{}) ([]*resource_apigee_environment_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - nameParts := resource_apigee_environment_strings.Split(d.Get("name").(string), "/") - if len(nameParts) == 4 { - - orgId := resource_apigee_environment_fmt.Sprintf("organizations/%s", nameParts[1]) - if err := d.Set("org_id", orgId); err != nil { - return nil, resource_apigee_environment_fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("name", nameParts[3]); err != nil { - return nil, 
resource_apigee_environment_fmt.Errorf("Error setting name: %s", err) - } - } else if len(nameParts) == 3 { - - orgId := resource_apigee_environment_fmt.Sprintf("organizations/%s", nameParts[1]) - if err := d.Set("org_id", orgId); err != nil { - return nil, resource_apigee_environment_fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("name", nameParts[2]); err != nil { - return nil, resource_apigee_environment_fmt.Errorf("Error setting name: %s", err) - } - } else { - return nil, resource_apigee_environment_fmt.Errorf( - "Saw %s when the name is expected to have shape %s or %s", - d.Get("name"), - "organizations/{{org_name}}/environments/{{name}}", - "organizations/{{org_name}}/{{name}}") - } - - id, err := replaceVars(d, config, "{{org_id}}/environments/{{name}}") - if err != nil { - return nil, resource_apigee_environment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_apigee_environment_schema.ResourceData{d}, nil -} - -func flattenApigeeEnvironmentName(v interface{}, d *resource_apigee_environment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEnvironmentDisplayName(v interface{}, d *resource_apigee_environment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeEnvironmentDescription(v interface{}, d *resource_apigee_environment_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeEnvironmentName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeEnvironmentDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeEnvironmentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceApigeeInstance() *resource_apigee_instance_schema.Resource { - return &resource_apigee_instance_schema.Resource{ - 
Create: resourceApigeeInstanceCreate, - Read: resourceApigeeInstanceRead, - Delete: resourceApigeeInstanceDelete, - - Importer: &resource_apigee_instance_schema.ResourceImporter{ - State: resourceApigeeInstanceImport, - }, - - Timeouts: &resource_apigee_instance_schema.ResourceTimeout{ - Create: resource_apigee_instance_schema.DefaultTimeout(60 * resource_apigee_instance_time.Minute), - Delete: resource_apigee_instance_schema.DefaultTimeout(60 * resource_apigee_instance_time.Minute), - }, - - Schema: map[string]*resource_apigee_instance_schema.Schema{ - "location": { - Type: resource_apigee_instance_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Compute Engine location where the instance resides. For trial organization -subscriptions, the location must be a Compute Engine zone. For paid organization -subscriptions, it should correspond to a Compute Engine region.`, - }, - "name": { - Type: resource_apigee_instance_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Resource ID of the instance.`, - }, - "org_id": { - Type: resource_apigee_instance_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee Organization associated with the Apigee instance, -in the format 'organizations/{{org_name}}'.`, - }, - "description": { - Type: resource_apigee_instance_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Description of the instance.`, - }, - "disk_encryption_key_name": { - Type: resource_apigee_instance_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Customer Managed Encryption Key (CMEK) used for disk and volume encryption. Required for Apigee paid subscriptions only. 
-Use the following format: 'projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)'`, - }, - "display_name": { - Type: resource_apigee_instance_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Display name of the instance.`, - }, - "peering_cidr_range": { - Type: resource_apigee_instance_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_apigee_instance_validation.StringInSlice([]string{"SLASH_16", "SLASH_20", "SLASH_22", ""}, false), - Description: `The size of the CIDR block range that will be reserved by the instance. Possible values: ["SLASH_16", "SLASH_20", "SLASH_22"]`, - }, - "host": { - Type: resource_apigee_instance_schema.TypeString, - Computed: true, - Description: `Output only. Hostname or IP address of the exposed Apigee endpoint used by clients to connect to the service.`, - }, - "port": { - Type: resource_apigee_instance_schema.TypeString, - Computed: true, - Description: `Output only. Port number of the exposed Apigee endpoint.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeInstanceCreate(d *resource_apigee_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandApigeeInstanceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_apigee_instance_reflect.ValueOf(nameProp)) && (ok || !resource_apigee_instance_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - locationProp, err := expandApigeeInstanceLocation(d.Get("location"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("location"); !isEmptyValue(resource_apigee_instance_reflect.ValueOf(locationProp)) && (ok || !resource_apigee_instance_reflect.DeepEqual(v, locationProp)) { - obj["location"] = locationProp - } - 
peeringCidrRangeProp, err := expandApigeeInstancePeeringCidrRange(d.Get("peering_cidr_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peering_cidr_range"); !isEmptyValue(resource_apigee_instance_reflect.ValueOf(peeringCidrRangeProp)) && (ok || !resource_apigee_instance_reflect.DeepEqual(v, peeringCidrRangeProp)) { - obj["peeringCidrRange"] = peeringCidrRangeProp - } - descriptionProp, err := expandApigeeInstanceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_apigee_instance_reflect.ValueOf(descriptionProp)) && (ok || !resource_apigee_instance_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandApigeeInstanceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_apigee_instance_reflect.ValueOf(displayNameProp)) && (ok || !resource_apigee_instance_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - diskEncryptionKeyNameProp, err := expandApigeeInstanceDiskEncryptionKeyName(d.Get("disk_encryption_key_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disk_encryption_key_name"); !isEmptyValue(resource_apigee_instance_reflect.ValueOf(diskEncryptionKeyNameProp)) && (ok || !resource_apigee_instance_reflect.DeepEqual(v, diskEncryptionKeyNameProp)) { - obj["diskEncryptionKeyName"] = diskEncryptionKeyNameProp - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/instances") - if err != nil { - return err - } - - resource_apigee_instance_log.Printf("[DEBUG] Creating new Instance: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, 
d.Timeout(resource_apigee_instance_schema.TimeoutCreate)) - if err != nil { - return resource_apigee_instance_fmt.Errorf("Error creating Instance: %s", err) - } - - id, err := replaceVars(d, config, "{{org_id}}/instances/{{name}}") - if err != nil { - return resource_apigee_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = apigeeOperationWaitTimeWithResponse( - config, res, &opRes, "Creating Instance", userAgent, - d.Timeout(resource_apigee_instance_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_apigee_instance_fmt.Errorf("Error waiting to create Instance: %s", err) - } - - if err := d.Set("name", flattenApigeeInstanceName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{org_id}}/instances/{{name}}") - if err != nil { - return resource_apigee_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_apigee_instance_log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) - - return resourceApigeeInstanceRead(d, meta) -} - -func resourceApigeeInstanceRead(d *resource_apigee_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/instances/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_apigee_instance_fmt.Sprintf("ApigeeInstance %q", d.Id())) - } - - if err := d.Set("name", flattenApigeeInstanceName(res["name"], d, config)); err != nil { - return resource_apigee_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := 
d.Set("location", flattenApigeeInstanceLocation(res["location"], d, config)); err != nil { - return resource_apigee_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("peering_cidr_range", flattenApigeeInstancePeeringCidrRange(res["peeringCidrRange"], d, config)); err != nil { - return resource_apigee_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("description", flattenApigeeInstanceDescription(res["description"], d, config)); err != nil { - return resource_apigee_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("display_name", flattenApigeeInstanceDisplayName(res["displayName"], d, config)); err != nil { - return resource_apigee_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("disk_encryption_key_name", flattenApigeeInstanceDiskEncryptionKeyName(res["diskEncryptionKeyName"], d, config)); err != nil { - return resource_apigee_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("host", flattenApigeeInstanceHost(res["host"], d, config)); err != nil { - return resource_apigee_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("port", flattenApigeeInstancePort(res["port"], d, config)); err != nil { - return resource_apigee_instance_fmt.Errorf("Error reading Instance: %s", err) - } - - return nil -} - -func resourceApigeeInstanceDelete(d *resource_apigee_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{org_id}}/instances/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_apigee_instance_log.Printf("[DEBUG] Deleting Instance %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_apigee_instance_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Instance") - } - - err = apigeeOperationWaitTime( - config, res, "Deleting Instance", userAgent, - d.Timeout(resource_apigee_instance_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_apigee_instance_log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeInstanceImport(d *resource_apigee_instance_schema.ResourceData, meta interface{}) ([]*resource_apigee_instance_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - nameParts := resource_apigee_instance_strings.Split(d.Get("name").(string), "/") - if len(nameParts) == 4 { - - orgId := resource_apigee_instance_fmt.Sprintf("organizations/%s", nameParts[1]) - if err := d.Set("org_id", orgId); err != nil { - return nil, resource_apigee_instance_fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("name", nameParts[3]); err != nil { - return nil, resource_apigee_instance_fmt.Errorf("Error setting name: %s", err) - } - } else if len(nameParts) == 3 { - - orgId := resource_apigee_instance_fmt.Sprintf("organizations/%s", nameParts[1]) - if err := d.Set("org_id", orgId); err != nil { - return nil, resource_apigee_instance_fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("name", nameParts[2]); err != nil { - return nil, resource_apigee_instance_fmt.Errorf("Error setting name: %s", err) - } - } else { - return nil, resource_apigee_instance_fmt.Errorf( - "Saw %s when the name is expected to have shape %s or %s", - d.Get("name"), - "organizations/{{org_name}}/instances/{{name}}", - "organizations/{{org_name}}/{{name}}") - } - - id, err := replaceVars(d, config, "{{org_id}}/instances/{{name}}") - if err != nil { - return nil, 
resource_apigee_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_apigee_instance_schema.ResourceData{d}, nil -} - -func flattenApigeeInstanceName(v interface{}, d *resource_apigee_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstanceLocation(v interface{}, d *resource_apigee_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstancePeeringCidrRange(v interface{}, d *resource_apigee_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstanceDescription(v interface{}, d *resource_apigee_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstanceDisplayName(v interface{}, d *resource_apigee_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstanceDiskEncryptionKeyName(v interface{}, d *resource_apigee_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstanceHost(v interface{}, d *resource_apigee_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstancePort(v interface{}, d *resource_apigee_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeInstanceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeInstanceLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeInstancePeeringCidrRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeInstanceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeInstanceDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} - -func expandApigeeInstanceDiskEncryptionKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceApigeeInstanceAttachment() *resource_apigee_instance_attachment_schema.Resource { - return &resource_apigee_instance_attachment_schema.Resource{ - Create: resourceApigeeInstanceAttachmentCreate, - Read: resourceApigeeInstanceAttachmentRead, - Delete: resourceApigeeInstanceAttachmentDelete, - - Importer: &resource_apigee_instance_attachment_schema.ResourceImporter{ - State: resourceApigeeInstanceAttachmentImport, - }, - - Timeouts: &resource_apigee_instance_attachment_schema.ResourceTimeout{ - Create: resource_apigee_instance_attachment_schema.DefaultTimeout(30 * resource_apigee_instance_attachment_time.Minute), - Delete: resource_apigee_instance_attachment_schema.DefaultTimeout(30 * resource_apigee_instance_attachment_time.Minute), - }, - - Schema: map[string]*resource_apigee_instance_attachment_schema.Schema{ - "environment": { - Type: resource_apigee_instance_attachment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource ID of the environment.`, - }, - "instance_id": { - Type: resource_apigee_instance_attachment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The Apigee instance associated with the Apigee environment, -in the format 'organisations/{{org_name}}/instances/{{instance_name}}'.`, - }, - "name": { - Type: resource_apigee_instance_attachment_schema.TypeString, - Computed: true, - Description: `The name of the newly created attachment (output parameter).`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeInstanceAttachmentCreate(d *resource_apigee_instance_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - environmentProp, err 
:= expandApigeeInstanceAttachmentEnvironment(d.Get("environment"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("environment"); !isEmptyValue(resource_apigee_instance_attachment_reflect.ValueOf(environmentProp)) && (ok || !resource_apigee_instance_attachment_reflect.DeepEqual(v, environmentProp)) { - obj["environment"] = environmentProp - } - - lockName, err := replaceVars(d, config, "{{instance_id}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{instance_id}}/attachments") - if err != nil { - return err - } - - resource_apigee_instance_attachment_log.Printf("[DEBUG] Creating new InstanceAttachment: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_apigee_instance_attachment_schema.TimeoutCreate)) - if err != nil { - return resource_apigee_instance_attachment_fmt.Errorf("Error creating InstanceAttachment: %s", err) - } - - id, err := replaceVars(d, config, "{{instance_id}}/attachments/{{name}}") - if err != nil { - return resource_apigee_instance_attachment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = apigeeOperationWaitTimeWithResponse( - config, res, &opRes, "Creating InstanceAttachment", userAgent, - d.Timeout(resource_apigee_instance_attachment_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_apigee_instance_attachment_fmt.Errorf("Error waiting to create InstanceAttachment: %s", err) - } - - if err := d.Set("name", flattenApigeeInstanceAttachmentName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{instance_id}}/attachments/{{name}}") - if err != nil { - return 
resource_apigee_instance_attachment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_apigee_instance_attachment_log.Printf("[DEBUG] Finished creating InstanceAttachment %q: %#v", d.Id(), res) - - return resourceApigeeInstanceAttachmentRead(d, meta) -} - -func resourceApigeeInstanceAttachmentRead(d *resource_apigee_instance_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{instance_id}}/attachments/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_apigee_instance_attachment_fmt.Sprintf("ApigeeInstanceAttachment %q", d.Id())) - } - - if err := d.Set("environment", flattenApigeeInstanceAttachmentEnvironment(res["environment"], d, config)); err != nil { - return resource_apigee_instance_attachment_fmt.Errorf("Error reading InstanceAttachment: %s", err) - } - if err := d.Set("name", flattenApigeeInstanceAttachmentName(res["name"], d, config)); err != nil { - return resource_apigee_instance_attachment_fmt.Errorf("Error reading InstanceAttachment: %s", err) - } - - return nil -} - -func resourceApigeeInstanceAttachmentDelete(d *resource_apigee_instance_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "{{instance_id}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}{{instance_id}}/attachments/{{name}}") 
- if err != nil { - return err - } - - var obj map[string]interface{} - resource_apigee_instance_attachment_log.Printf("[DEBUG] Deleting InstanceAttachment %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_apigee_instance_attachment_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "InstanceAttachment") - } - - err = apigeeOperationWaitTime( - config, res, "Deleting InstanceAttachment", userAgent, - d.Timeout(resource_apigee_instance_attachment_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_apigee_instance_attachment_log.Printf("[DEBUG] Finished deleting InstanceAttachment %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeInstanceAttachmentImport(d *resource_apigee_instance_attachment_schema.ResourceData, meta interface{}) ([]*resource_apigee_instance_attachment_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "(?P.+)/attachments/(?P.+)", - "(?P.+)/(?P.+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{instance_id}}/attachments/{{name}}") - if err != nil { - return nil, resource_apigee_instance_attachment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_apigee_instance_attachment_schema.ResourceData{d}, nil -} - -func flattenApigeeInstanceAttachmentEnvironment(v interface{}, d *resource_apigee_instance_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeInstanceAttachmentName(v interface{}, d *resource_apigee_instance_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeInstanceAttachmentEnvironment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
resourceApigeeOrganization() *resource_apigee_organization_schema.Resource { - return &resource_apigee_organization_schema.Resource{ - Create: resourceApigeeOrganizationCreate, - Read: resourceApigeeOrganizationRead, - Update: resourceApigeeOrganizationUpdate, - Delete: resourceApigeeOrganizationDelete, - - Importer: &resource_apigee_organization_schema.ResourceImporter{ - State: resourceApigeeOrganizationImport, - }, - - Timeouts: &resource_apigee_organization_schema.ResourceTimeout{ - Create: resource_apigee_organization_schema.DefaultTimeout(10 * resource_apigee_organization_time.Minute), - Update: resource_apigee_organization_schema.DefaultTimeout(4 * resource_apigee_organization_time.Minute), - Delete: resource_apigee_organization_schema.DefaultTimeout(10 * resource_apigee_organization_time.Minute), - }, - - Schema: map[string]*resource_apigee_organization_schema.Schema{ - "project_id": { - Type: resource_apigee_organization_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The project ID associated with the Apigee organization.`, - }, - "analytics_region": { - Type: resource_apigee_organization_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Primary GCP region for analytics data storage. For valid values, see [Create an Apigee organization](https://cloud.google.com/apigee/docs/api-platform/get-started/create-org).`, - }, - "authorized_network": { - Type: resource_apigee_organization_schema.TypeString, - Optional: true, - Description: `Compute Engine network used for Service Networking to be peered with Apigee runtime instances. -See [Getting started with the Service Networking API](https://cloud.google.com/service-infrastructure/docs/service-networking/getting-started). -Valid only when 'RuntimeType' is set to CLOUD. The value can be updated only when there are no runtime instances. 
For example: "default".`, - }, - "description": { - Type: resource_apigee_organization_schema.TypeString, - Optional: true, - Description: `Description of the Apigee organization.`, - }, - "display_name": { - Type: resource_apigee_organization_schema.TypeString, - Optional: true, - Description: `The display name of the Apigee organization.`, - }, - "runtime_database_encryption_key_name": { - Type: resource_apigee_organization_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Cloud KMS key name used for encrypting the data that is stored and replicated across runtime instances. -Update is not allowed after the organization is created. -If not specified, a Google-Managed encryption key will be used. -Valid only when 'RuntimeType' is CLOUD. For example: 'projects/foo/locations/us/keyRings/bar/cryptoKeys/baz'.`, - }, - "runtime_type": { - Type: resource_apigee_organization_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_apigee_organization_validation.StringInSlice([]string{"CLOUD", "HYBRID", ""}, false), - Description: `Runtime type of the Apigee organization based on the Apigee subscription purchased. Default value: "CLOUD" Possible values: ["CLOUD", "HYBRID"]`, - Default: "CLOUD", - }, - "ca_certificate": { - Type: resource_apigee_organization_schema.TypeString, - Computed: true, - Description: `Output only. Base64-encoded public certificate for the root CA of the Apigee organization. -Valid only when 'RuntimeType' is CLOUD. A base64-encoded string.`, - }, - "name": { - Type: resource_apigee_organization_schema.TypeString, - Computed: true, - Description: `Output only. Name of the Apigee organization.`, - }, - "subscription_type": { - Type: resource_apigee_organization_schema.TypeString, - Computed: true, - Description: `Output only. Subscription type of the Apigee organization. 
-Valid values include trial (free, limited, and for evaluation purposes only) or paid (full subscription has been purchased).`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceApigeeOrganizationCreate(d *resource_apigee_organization_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandApigeeOrganizationDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_apigee_organization_reflect.ValueOf(displayNameProp)) && (ok || !resource_apigee_organization_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandApigeeOrganizationDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_apigee_organization_reflect.ValueOf(descriptionProp)) && (ok || !resource_apigee_organization_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - analyticsRegionProp, err := expandApigeeOrganizationAnalyticsRegion(d.Get("analytics_region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("analytics_region"); !isEmptyValue(resource_apigee_organization_reflect.ValueOf(analyticsRegionProp)) && (ok || !resource_apigee_organization_reflect.DeepEqual(v, analyticsRegionProp)) { - obj["analyticsRegion"] = analyticsRegionProp - } - authorizedNetworkProp, err := expandApigeeOrganizationAuthorizedNetwork(d.Get("authorized_network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("authorized_network"); !isEmptyValue(resource_apigee_organization_reflect.ValueOf(authorizedNetworkProp)) && (ok || !resource_apigee_organization_reflect.DeepEqual(v, authorizedNetworkProp)) { - 
obj["authorizedNetwork"] = authorizedNetworkProp - } - runtimeTypeProp, err := expandApigeeOrganizationRuntimeType(d.Get("runtime_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_type"); !isEmptyValue(resource_apigee_organization_reflect.ValueOf(runtimeTypeProp)) && (ok || !resource_apigee_organization_reflect.DeepEqual(v, runtimeTypeProp)) { - obj["runtimeType"] = runtimeTypeProp - } - runtimeDatabaseEncryptionKeyNameProp, err := expandApigeeOrganizationRuntimeDatabaseEncryptionKeyName(d.Get("runtime_database_encryption_key_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_database_encryption_key_name"); !isEmptyValue(resource_apigee_organization_reflect.ValueOf(runtimeDatabaseEncryptionKeyNameProp)) && (ok || !resource_apigee_organization_reflect.DeepEqual(v, runtimeDatabaseEncryptionKeyNameProp)) { - obj["runtimeDatabaseEncryptionKeyName"] = runtimeDatabaseEncryptionKeyNameProp - } - - obj, err = resourceApigeeOrganizationEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations?parent=projects/{{project_id}}") - if err != nil { - return err - } - - resource_apigee_organization_log.Printf("[DEBUG] Creating new Organization: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_apigee_organization_schema.TimeoutCreate)) - if err != nil { - return resource_apigee_organization_fmt.Errorf("Error creating Organization: %s", err) - } - - id, err := replaceVars(d, config, "organizations/{{name}}") - if err != nil { - return resource_apigee_organization_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = apigeeOperationWaitTimeWithResponse( - config, res, &opRes, "Creating 
Organization", userAgent, - d.Timeout(resource_apigee_organization_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_apigee_organization_fmt.Errorf("Error waiting to create Organization: %s", err) - } - - if err := d.Set("name", flattenApigeeOrganizationName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "organizations/{{name}}") - if err != nil { - return resource_apigee_organization_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_apigee_organization_log.Printf("[DEBUG] Finished creating Organization %q: %#v", d.Id(), res) - - return resourceApigeeOrganizationRead(d, meta) -} - -func resourceApigeeOrganizationRead(d *resource_apigee_organization_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_apigee_organization_fmt.Sprintf("ApigeeOrganization %q", d.Id())) - } - - if err := d.Set("name", flattenApigeeOrganizationName(res["name"], d, config)); err != nil { - return resource_apigee_organization_fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("display_name", flattenApigeeOrganizationDisplayName(res["displayName"], d, config)); err != nil { - return resource_apigee_organization_fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("description", flattenApigeeOrganizationDescription(res["description"], d, config)); err != nil { - return resource_apigee_organization_fmt.Errorf("Error reading Organization: %s", err) - } - if err := 
d.Set("analytics_region", flattenApigeeOrganizationAnalyticsRegion(res["analyticsRegion"], d, config)); err != nil { - return resource_apigee_organization_fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("authorized_network", flattenApigeeOrganizationAuthorizedNetwork(res["authorizedNetwork"], d, config)); err != nil { - return resource_apigee_organization_fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("runtime_type", flattenApigeeOrganizationRuntimeType(res["runtimeType"], d, config)); err != nil { - return resource_apigee_organization_fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("subscription_type", flattenApigeeOrganizationSubscriptionType(res["subscriptionType"], d, config)); err != nil { - return resource_apigee_organization_fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("ca_certificate", flattenApigeeOrganizationCaCertificate(res["caCertificate"], d, config)); err != nil { - return resource_apigee_organization_fmt.Errorf("Error reading Organization: %s", err) - } - if err := d.Set("runtime_database_encryption_key_name", flattenApigeeOrganizationRuntimeDatabaseEncryptionKeyName(res["runtimeDatabaseEncryptionKeyName"], d, config)); err != nil { - return resource_apigee_organization_fmt.Errorf("Error reading Organization: %s", err) - } - - return nil -} - -func resourceApigeeOrganizationUpdate(d *resource_apigee_organization_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandApigeeOrganizationDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_apigee_organization_reflect.ValueOf(v)) && (ok || !resource_apigee_organization_reflect.DeepEqual(v, 
displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandApigeeOrganizationDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_apigee_organization_reflect.ValueOf(v)) && (ok || !resource_apigee_organization_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - analyticsRegionProp, err := expandApigeeOrganizationAnalyticsRegion(d.Get("analytics_region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("analytics_region"); !isEmptyValue(resource_apigee_organization_reflect.ValueOf(v)) && (ok || !resource_apigee_organization_reflect.DeepEqual(v, analyticsRegionProp)) { - obj["analyticsRegion"] = analyticsRegionProp - } - authorizedNetworkProp, err := expandApigeeOrganizationAuthorizedNetwork(d.Get("authorized_network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("authorized_network"); !isEmptyValue(resource_apigee_organization_reflect.ValueOf(v)) && (ok || !resource_apigee_organization_reflect.DeepEqual(v, authorizedNetworkProp)) { - obj["authorizedNetwork"] = authorizedNetworkProp - } - runtimeTypeProp, err := expandApigeeOrganizationRuntimeType(d.Get("runtime_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_type"); !isEmptyValue(resource_apigee_organization_reflect.ValueOf(v)) && (ok || !resource_apigee_organization_reflect.DeepEqual(v, runtimeTypeProp)) { - obj["runtimeType"] = runtimeTypeProp - } - runtimeDatabaseEncryptionKeyNameProp, err := expandApigeeOrganizationRuntimeDatabaseEncryptionKeyName(d.Get("runtime_database_encryption_key_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_database_encryption_key_name"); !isEmptyValue(resource_apigee_organization_reflect.ValueOf(v)) && (ok || !resource_apigee_organization_reflect.DeepEqual(v, 
runtimeDatabaseEncryptionKeyNameProp)) { - obj["runtimeDatabaseEncryptionKeyName"] = runtimeDatabaseEncryptionKeyNameProp - } - - obj, err = resourceApigeeOrganizationEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{name}}") - if err != nil { - return err - } - - resource_apigee_organization_log.Printf("[DEBUG] Updating Organization %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_apigee_organization_schema.TimeoutUpdate)) - - if err != nil { - return resource_apigee_organization_fmt.Errorf("Error updating Organization %q: %s", d.Id(), err) - } else { - resource_apigee_organization_log.Printf("[DEBUG] Finished updating Organization %q: %#v", d.Id(), res) - } - - err = apigeeOperationWaitTime( - config, res, "Updating Organization", userAgent, - d.Timeout(resource_apigee_organization_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceApigeeOrganizationRead(d, meta) -} - -func resourceApigeeOrganizationDelete(d *resource_apigee_organization_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ApigeeBasePath}}organizations/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_apigee_organization_log.Printf("[DEBUG] Deleting Organization %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_apigee_organization_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Organization") - } - - err = 
apigeeOperationWaitTime( - config, res, "Deleting Organization", userAgent, - d.Timeout(resource_apigee_organization_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_apigee_organization_log.Printf("[DEBUG] Finished deleting Organization %q: %#v", d.Id(), res) - return nil -} - -func resourceApigeeOrganizationImport(d *resource_apigee_organization_schema.ResourceData, meta interface{}) ([]*resource_apigee_organization_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - parts := resource_apigee_organization_strings.Split(d.Get("name").(string), "/") - - var projectId string - switch len(parts) { - case 1: - projectId = parts[0] - case 2: - projectId = parts[1] - default: - return nil, resource_apigee_organization_fmt.Errorf( - "Saw %s when the name is expected to have shape %s or %s", - d.Get("name"), - "{{name}}", - "organizations/{{name}}", - ) - } - - if err := d.Set("name", projectId); err != nil { - return nil, resource_apigee_organization_fmt.Errorf("Error setting organization: %s", err) - } - - if err := d.Set("project_id", projectId); err != nil { - return nil, resource_apigee_organization_fmt.Errorf("Error setting organization: %s", err) - } - - id, err := replaceVars(d, config, "organizations/{{name}}") - if err != nil { - return nil, resource_apigee_organization_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_apigee_organization_schema.ResourceData{d}, nil -} - -func flattenApigeeOrganizationName(v interface{}, d *resource_apigee_organization_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationDisplayName(v interface{}, d *resource_apigee_organization_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationDescription(v interface{}, d *resource_apigee_organization_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenApigeeOrganizationAnalyticsRegion(v interface{}, d *resource_apigee_organization_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationAuthorizedNetwork(v interface{}, d *resource_apigee_organization_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationRuntimeType(v interface{}, d *resource_apigee_organization_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationSubscriptionType(v interface{}, d *resource_apigee_organization_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationCaCertificate(v interface{}, d *resource_apigee_organization_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenApigeeOrganizationRuntimeDatabaseEncryptionKeyName(v interface{}, d *resource_apigee_organization_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandApigeeOrganizationDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeOrganizationDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeOrganizationAnalyticsRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeOrganizationAuthorizedNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeOrganizationRuntimeType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandApigeeOrganizationRuntimeDatabaseEncryptionKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceApigeeOrganizationEncoder(d *resource_apigee_organization_schema.ResourceData, meta 
interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - obj["name"] = d.Get("project_id").(string) - return obj, nil -} - -func resourceAppEngineApplication() *resource_app_engine_application_schema.Resource { - return &resource_app_engine_application_schema.Resource{ - Create: resourceAppEngineApplicationCreate, - Read: resourceAppEngineApplicationRead, - Update: resourceAppEngineApplicationUpdate, - Delete: resourceAppEngineApplicationDelete, - - Importer: &resource_app_engine_application_schema.ResourceImporter{ - State: resource_app_engine_application_schema.ImportStatePassthrough, - }, - - Timeouts: &resource_app_engine_application_schema.ResourceTimeout{ - Create: resource_app_engine_application_schema.DefaultTimeout(4 * resource_app_engine_application_time.Minute), - Update: resource_app_engine_application_schema.DefaultTimeout(4 * resource_app_engine_application_time.Minute), - }, - - CustomizeDiff: resource_app_engine_application_customdiff.All( - appEngineApplicationLocationIDCustomizeDiff, - ), - - Schema: map[string]*resource_app_engine_application_schema.Schema{ - "project": { - Type: resource_app_engine_application_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validateProjectID(), - Description: `The project ID to create the application under.`, - }, - "auth_domain": { - Type: resource_app_engine_application_schema.TypeString, - Optional: true, - Computed: true, - Description: `The domain to authenticate users with when using App Engine's User API.`, - }, - "location_id": { - Type: resource_app_engine_application_schema.TypeString, - Required: true, - Description: `The location to serve the app from.`, - }, - "serving_status": { - Type: resource_app_engine_application_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_application_validation.StringInSlice([]string{ - "UNSPECIFIED", - "SERVING", - "USER_DISABLED", - "SYSTEM_DISABLED", - }, false), - Computed: true, - 
Description: `The serving status of the app.`, - }, - "database_type": { - Type: resource_app_engine_application_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_application_validation.StringInSlice([]string{ - "CLOUD_FIRESTORE", - "CLOUD_DATASTORE_COMPATIBILITY", - - "CLOUD_DATASTORE", - }, false), - Computed: true, - }, - "feature_settings": { - Type: resource_app_engine_application_schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: `A block of optional settings to configure specific App Engine features:`, - Elem: appEngineApplicationFeatureSettingsResource(), - }, - "name": { - Type: resource_app_engine_application_schema.TypeString, - Computed: true, - Description: `Unique name of the app.`, - }, - "app_id": { - Type: resource_app_engine_application_schema.TypeString, - Computed: true, - Description: `Identifier of the app.`, - }, - "url_dispatch_rule": { - Type: resource_app_engine_application_schema.TypeList, - Computed: true, - Description: `A list of dispatch rule blocks. 
Each block has a domain, path, and service field.`, - Elem: appEngineApplicationURLDispatchRuleResource(), - }, - "code_bucket": { - Type: resource_app_engine_application_schema.TypeString, - Computed: true, - Description: `The GCS bucket code is being stored in for this app.`, - }, - "default_hostname": { - Type: resource_app_engine_application_schema.TypeString, - Computed: true, - Description: `The default hostname for this app.`, - }, - "default_bucket": { - Type: resource_app_engine_application_schema.TypeString, - Computed: true, - Description: `The GCS bucket content is being stored in for this app.`, - }, - "gcr_domain": { - Type: resource_app_engine_application_schema.TypeString, - Computed: true, - Description: `The GCR domain used for storing managed Docker images for this app.`, - }, - "iap": { - Type: resource_app_engine_application_schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: `Settings for enabling Cloud Identity Aware Proxy`, - Elem: &resource_app_engine_application_schema.Resource{ - Schema: map[string]*resource_app_engine_application_schema.Schema{ - "enabled": { - Type: resource_app_engine_application_schema.TypeBool, - Optional: true, - Default: false, - Description: `Adapted for use with the app`, - }, - "oauth2_client_id": { - Type: resource_app_engine_application_schema.TypeString, - Required: true, - Description: `OAuth2 client ID to use for the authentication flow.`, - }, - "oauth2_client_secret": { - Type: resource_app_engine_application_schema.TypeString, - Required: true, - Sensitive: true, - Description: `OAuth2 client secret to use for the authentication flow. 
The SHA-256 hash of the value is returned in the oauth2ClientSecretSha256 field.`, - }, - "oauth2_client_secret_sha256": { - Type: resource_app_engine_application_schema.TypeString, - Computed: true, - Sensitive: true, - Description: `Hex-encoded SHA-256 hash of the client secret.`, - }, - }, - }, - }, - }, - UseJSONNumber: true, - } -} - -func appEngineApplicationURLDispatchRuleResource() *resource_app_engine_application_schema.Resource { - return &resource_app_engine_application_schema.Resource{ - Schema: map[string]*resource_app_engine_application_schema.Schema{ - "domain": { - Type: resource_app_engine_application_schema.TypeString, - Computed: true, - }, - "path": { - Type: resource_app_engine_application_schema.TypeString, - Computed: true, - }, - "service": { - Type: resource_app_engine_application_schema.TypeString, - Computed: true, - }, - }, - } -} - -func appEngineApplicationFeatureSettingsResource() *resource_app_engine_application_schema.Resource { - return &resource_app_engine_application_schema.Resource{ - Schema: map[string]*resource_app_engine_application_schema.Schema{ - "split_health_checks": { - Type: resource_app_engine_application_schema.TypeBool, - Required: true, - }, - }, - } -} - -func appEngineApplicationLocationIDCustomizeDiff(_ resource_app_engine_application_context.Context, d *resource_app_engine_application_schema.ResourceDiff, meta interface{}) error { - old, new := d.GetChange("location_id") - if old != "" && old != new { - return resource_app_engine_application_fmt.Errorf("Cannot change location_id once the resource is created.") - } - return nil -} - -func resourceAppEngineApplicationCreate(d *resource_app_engine_application_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - app, err := expandAppEngineApplication(d, project) - if 
err != nil { - return err - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - resource_app_engine_application_log.Printf("[DEBUG] Creating App Engine App") - op, err := config.NewAppEngineClient(userAgent).Apps.Create(app).Do() - if err != nil { - return resource_app_engine_application_fmt.Errorf("Error creating App Engine application: %s", err.Error()) - } - - d.SetId(project) - - waitErr := appEngineOperationWaitTime(config, op, project, "App Engine app to create", userAgent, d.Timeout(resource_app_engine_application_schema.TimeoutCreate)) - if waitErr != nil { - d.SetId("") - return waitErr - } - resource_app_engine_application_log.Printf("[DEBUG] Created App Engine App") - - return resourceAppEngineApplicationRead(d, meta) -} - -func resourceAppEngineApplicationRead(d *resource_app_engine_application_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - pid := d.Id() - - app, err := config.NewAppEngineClient(userAgent).Apps.Get(pid).Do() - if err != nil { - return handleNotFoundError(err, d, resource_app_engine_application_fmt.Sprintf("App Engine Application %q", pid)) - } - if err := d.Set("auth_domain", app.AuthDomain); err != nil { - return resource_app_engine_application_fmt.Errorf("Error setting auth_domain: %s", err) - } - if err := d.Set("code_bucket", app.CodeBucket); err != nil { - return resource_app_engine_application_fmt.Errorf("Error setting code_bucket: %s", err) - } - if err := d.Set("default_bucket", app.DefaultBucket); err != nil { - return resource_app_engine_application_fmt.Errorf("Error setting default_bucket: %s", err) - } - if err := d.Set("default_hostname", app.DefaultHostname); err != nil { - return resource_app_engine_application_fmt.Errorf("Error setting default_hostname: %s", err) - } - if err := 
d.Set("location_id", app.LocationId); err != nil { - return resource_app_engine_application_fmt.Errorf("Error setting location_id: %s", err) - } - if err := d.Set("name", app.Name); err != nil { - return resource_app_engine_application_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("app_id", app.Id); err != nil { - return resource_app_engine_application_fmt.Errorf("Error setting app_id: %s", err) - } - if err := d.Set("serving_status", app.ServingStatus); err != nil { - return resource_app_engine_application_fmt.Errorf("Error setting serving_status: %s", err) - } - if err := d.Set("gcr_domain", app.GcrDomain); err != nil { - return resource_app_engine_application_fmt.Errorf("Error setting gcr_domain: %s", err) - } - if err := d.Set("database_type", app.DatabaseType); err != nil { - return resource_app_engine_application_fmt.Errorf("Error setting database_type: %s", err) - } - if err := d.Set("project", pid); err != nil { - return resource_app_engine_application_fmt.Errorf("Error setting project: %s", err) - } - dispatchRules, err := flattenAppEngineApplicationDispatchRules(app.DispatchRules) - if err != nil { - return err - } - err = d.Set("url_dispatch_rule", dispatchRules) - if err != nil { - return resource_app_engine_application_fmt.Errorf("Error setting dispatch rules in state. This is a bug, please report it at https://github.com/hashicorp/terraform-provider-google/issues. Error is:\n%s", err.Error()) - } - featureSettings, err := flattenAppEngineApplicationFeatureSettings(app.FeatureSettings) - if err != nil { - return err - } - err = d.Set("feature_settings", featureSettings) - if err != nil { - return resource_app_engine_application_fmt.Errorf("Error setting feature settings in state. This is a bug, please report it at https://github.com/hashicorp/terraform-provider-google/issues. 
Error is:\n%s", err.Error()) - } - iap, err := flattenAppEngineApplicationIap(d, app.Iap) - if err != nil { - return err - } - err = d.Set("iap", iap) - if err != nil { - return resource_app_engine_application_fmt.Errorf("Error setting iap in state. This is a bug, please report it at https://github.com/hashicorp/terraform-provider-google/issues. Error is:\n%s", err.Error()) - } - return nil -} - -func resourceAppEngineApplicationUpdate(d *resource_app_engine_application_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - pid := d.Id() - app, err := expandAppEngineApplication(d, pid) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - resource_app_engine_application_log.Printf("[DEBUG] Updating App Engine App") - op, err := config.NewAppEngineClient(userAgent).Apps.Patch(pid, app).UpdateMask("authDomain,databaseType,servingStatus,featureSettings.splitHealthChecks,iap").Do() - if err != nil { - return resource_app_engine_application_fmt.Errorf("Error updating App Engine application: %s", err.Error()) - } - - waitErr := appEngineOperationWaitTime(config, op, pid, "App Engine app to update", userAgent, d.Timeout(resource_app_engine_application_schema.TimeoutUpdate)) - if waitErr != nil { - return waitErr - } - resource_app_engine_application_log.Printf("[DEBUG] Updated App Engine App") - - return resourceAppEngineApplicationRead(d, meta) -} - -func resourceAppEngineApplicationDelete(d *resource_app_engine_application_schema.ResourceData, meta interface{}) error { - resource_app_engine_application_log.Println("[WARN] App Engine applications cannot be destroyed once created. 
The project must be deleted to delete the application.") - return nil -} - -func expandAppEngineApplication(d *resource_app_engine_application_schema.ResourceData, project string) (*resource_app_engine_application_appengineappengine.Application, error) { - result := &resource_app_engine_application_appengineappengine.Application{ - AuthDomain: d.Get("auth_domain").(string), - LocationId: d.Get("location_id").(string), - Id: project, - GcrDomain: d.Get("gcr_domain").(string), - DatabaseType: d.Get("database_type").(string), - ServingStatus: d.Get("serving_status").(string), - } - featureSettings, err := expandAppEngineApplicationFeatureSettings(d) - if err != nil { - return nil, err - } - result.FeatureSettings = featureSettings - iap, err := expandAppEngineApplicationIap(d) - if err != nil { - return nil, err - } - result.Iap = iap - return result, nil -} - -func expandAppEngineApplicationFeatureSettings(d *resource_app_engine_application_schema.ResourceData) (*resource_app_engine_application_appengineappengine.FeatureSettings, error) { - blocks := d.Get("feature_settings").([]interface{}) - if len(blocks) < 1 { - return nil, nil - } - return &resource_app_engine_application_appengineappengine.FeatureSettings{ - SplitHealthChecks: d.Get("feature_settings.0.split_health_checks").(bool), - - ForceSendFields: []string{"SplitHealthChecks"}, - }, nil -} - -func expandAppEngineApplicationIap(d *resource_app_engine_application_schema.ResourceData) (*resource_app_engine_application_appengineappengine.IdentityAwareProxy, error) { - blocks := d.Get("iap").([]interface{}) - if len(blocks) < 1 { - return nil, nil - } - return &resource_app_engine_application_appengineappengine.IdentityAwareProxy{ - Enabled: d.Get("iap.0.enabled").(bool), - Oauth2ClientId: d.Get("iap.0.oauth2_client_id").(string), - Oauth2ClientSecret: d.Get("iap.0.oauth2_client_secret").(string), - Oauth2ClientSecretSha256: d.Get("iap.0.oauth2_client_secret_sha256").(string), - }, nil -} - -func 
flattenAppEngineApplicationFeatureSettings(settings *resource_app_engine_application_appengineappengine.FeatureSettings) ([]map[string]interface{}, error) { - if settings == nil { - return []map[string]interface{}{}, nil - } - result := map[string]interface{}{ - "split_health_checks": settings.SplitHealthChecks, - } - return []map[string]interface{}{result}, nil -} - -func flattenAppEngineApplicationIap(d *resource_app_engine_application_schema.ResourceData, iap *resource_app_engine_application_appengineappengine.IdentityAwareProxy) ([]map[string]interface{}, error) { - if iap == nil { - return []map[string]interface{}{}, nil - } - result := map[string]interface{}{ - "enabled": iap.Enabled, - "oauth2_client_id": iap.Oauth2ClientId, - "oauth2_client_secret": d.Get("iap.0.oauth2_client_secret"), - "oauth2_client_secret_sha256": iap.Oauth2ClientSecretSha256, - } - return []map[string]interface{}{result}, nil -} - -func flattenAppEngineApplicationDispatchRules(rules []*resource_app_engine_application_appengineappengine.UrlDispatchRule) ([]map[string]interface{}, error) { - results := make([]map[string]interface{}, 0, len(rules)) - for _, rule := range rules { - results = append(results, map[string]interface{}{ - "domain": rule.Domain, - "path": rule.Path, - "service": rule.Service, - }) - } - return results, nil -} - -func resourceAppEngineApplicationUrlDispatchRules() *resource_app_engine_application_url_dispatch_rules_schema.Resource { - return &resource_app_engine_application_url_dispatch_rules_schema.Resource{ - Create: resourceAppEngineApplicationUrlDispatchRulesCreate, - Read: resourceAppEngineApplicationUrlDispatchRulesRead, - Update: resourceAppEngineApplicationUrlDispatchRulesUpdate, - Delete: resourceAppEngineApplicationUrlDispatchRulesDelete, - - Importer: &resource_app_engine_application_url_dispatch_rules_schema.ResourceImporter{ - State: resourceAppEngineApplicationUrlDispatchRulesImport, - }, - - Timeouts: 
&resource_app_engine_application_url_dispatch_rules_schema.ResourceTimeout{ - Create: resource_app_engine_application_url_dispatch_rules_schema.DefaultTimeout(4 * resource_app_engine_application_url_dispatch_rules_time.Minute), - Update: resource_app_engine_application_url_dispatch_rules_schema.DefaultTimeout(4 * resource_app_engine_application_url_dispatch_rules_time.Minute), - Delete: resource_app_engine_application_url_dispatch_rules_schema.DefaultTimeout(4 * resource_app_engine_application_url_dispatch_rules_time.Minute), - }, - - Schema: map[string]*resource_app_engine_application_url_dispatch_rules_schema.Schema{ - "dispatch_rules": { - Type: resource_app_engine_application_url_dispatch_rules_schema.TypeList, - Required: true, - Description: `Rules to match an HTTP request and dispatch that request to a service.`, - Elem: &resource_app_engine_application_url_dispatch_rules_schema.Resource{ - Schema: map[string]*resource_app_engine_application_url_dispatch_rules_schema.Schema{ - "path": { - Type: resource_app_engine_application_url_dispatch_rules_schema.TypeString, - Required: true, - Description: `Pathname within the host. Must start with a "/". A single "*" can be included at the end of the path. -The sum of the lengths of the domain and path may not exceed 100 characters.`, - }, - "service": { - Type: resource_app_engine_application_url_dispatch_rules_schema.TypeString, - Required: true, - Description: `Pathname within the host. Must start with a "/". A single "*" can be included at the end of the path. -The sum of the lengths of the domain and path may not exceed 100 characters.`, - }, - "domain": { - Type: resource_app_engine_application_url_dispatch_rules_schema.TypeString, - Optional: true, - Description: `Domain name to match against. The wildcard "*" is supported if specified before a period: "*.". 
-Defaults to matching all domains: "*".`, - Default: "*", - }, - }, - }, - }, - "project": { - Type: resource_app_engine_application_url_dispatch_rules_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAppEngineApplicationUrlDispatchRulesCreate(d *resource_app_engine_application_url_dispatch_rules_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - dispatchRulesProp, err := expandAppEngineApplicationUrlDispatchRulesDispatchRules(d.Get("dispatch_rules"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("dispatch_rules"); !isEmptyValue(resource_app_engine_application_url_dispatch_rules_reflect.ValueOf(dispatchRulesProp)) && (ok || !resource_app_engine_application_url_dispatch_rules_reflect.DeepEqual(v, dispatchRulesProp)) { - obj["dispatchRules"] = dispatchRulesProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}?updateMask=dispatch_rules") - if err != nil { - return err - } - - resource_app_engine_application_url_dispatch_rules_log.Printf("[DEBUG] Creating new ApplicationUrlDispatchRules: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_application_url_dispatch_rules_fmt.Errorf("Error fetching project for ApplicationUrlDispatchRules: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_app_engine_application_url_dispatch_rules_schema.TimeoutCreate), 
isAppEngineRetryableError) - if err != nil { - return resource_app_engine_application_url_dispatch_rules_fmt.Errorf("Error creating ApplicationUrlDispatchRules: %s", err) - } - - id, err := replaceVars(d, config, "{{project}}") - if err != nil { - return resource_app_engine_application_url_dispatch_rules_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = appEngineOperationWaitTime( - config, res, project, "Creating ApplicationUrlDispatchRules", userAgent, - d.Timeout(resource_app_engine_application_url_dispatch_rules_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_app_engine_application_url_dispatch_rules_fmt.Errorf("Error waiting to create ApplicationUrlDispatchRules: %s", err) - } - - resource_app_engine_application_url_dispatch_rules_log.Printf("[DEBUG] Finished creating ApplicationUrlDispatchRules %q: %#v", d.Id(), res) - - return resourceAppEngineApplicationUrlDispatchRulesRead(d, meta) -} - -func resourceAppEngineApplicationUrlDispatchRulesRead(d *resource_app_engine_application_url_dispatch_rules_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_application_url_dispatch_rules_fmt.Errorf("Error fetching project for ApplicationUrlDispatchRules: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isAppEngineRetryableError) - if err != nil { - return handleNotFoundError(err, d, resource_app_engine_application_url_dispatch_rules_fmt.Sprintf("AppEngineApplicationUrlDispatchRules %q", d.Id())) - } - - if 
err := d.Set("project", project); err != nil { - return resource_app_engine_application_url_dispatch_rules_fmt.Errorf("Error reading ApplicationUrlDispatchRules: %s", err) - } - - if err := d.Set("dispatch_rules", flattenAppEngineApplicationUrlDispatchRulesDispatchRules(res["dispatchRules"], d, config)); err != nil { - return resource_app_engine_application_url_dispatch_rules_fmt.Errorf("Error reading ApplicationUrlDispatchRules: %s", err) - } - - return nil -} - -func resourceAppEngineApplicationUrlDispatchRulesUpdate(d *resource_app_engine_application_url_dispatch_rules_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_application_url_dispatch_rules_fmt.Errorf("Error fetching project for ApplicationUrlDispatchRules: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - dispatchRulesProp, err := expandAppEngineApplicationUrlDispatchRulesDispatchRules(d.Get("dispatch_rules"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("dispatch_rules"); !isEmptyValue(resource_app_engine_application_url_dispatch_rules_reflect.ValueOf(v)) && (ok || !resource_app_engine_application_url_dispatch_rules_reflect.DeepEqual(v, dispatchRulesProp)) { - obj["dispatchRules"] = dispatchRulesProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}?updateMask=dispatch_rules") - if err != nil { - return err - } - - resource_app_engine_application_url_dispatch_rules_log.Printf("[DEBUG] Updating ApplicationUrlDispatchRules %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject 
= bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_app_engine_application_url_dispatch_rules_schema.TimeoutUpdate), isAppEngineRetryableError) - - if err != nil { - return resource_app_engine_application_url_dispatch_rules_fmt.Errorf("Error updating ApplicationUrlDispatchRules %q: %s", d.Id(), err) - } else { - resource_app_engine_application_url_dispatch_rules_log.Printf("[DEBUG] Finished updating ApplicationUrlDispatchRules %q: %#v", d.Id(), res) - } - - err = appEngineOperationWaitTime( - config, res, project, "Updating ApplicationUrlDispatchRules", userAgent, - d.Timeout(resource_app_engine_application_url_dispatch_rules_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAppEngineApplicationUrlDispatchRulesRead(d, meta) -} - -func resourceAppEngineApplicationUrlDispatchRulesDelete(d *resource_app_engine_application_url_dispatch_rules_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_application_url_dispatch_rules_fmt.Errorf("Error fetching project for ApplicationUrlDispatchRules: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}?updateMask=dispatch_rules") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_app_engine_application_url_dispatch_rules_log.Printf("[DEBUG] Deleting ApplicationUrlDispatchRules %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, 
userAgent, obj, d.Timeout(resource_app_engine_application_url_dispatch_rules_schema.TimeoutDelete), isAppEngineRetryableError) - if err != nil { - return handleNotFoundError(err, d, "ApplicationUrlDispatchRules") - } - - err = appEngineOperationWaitTime( - config, res, project, "Deleting ApplicationUrlDispatchRules", userAgent, - d.Timeout(resource_app_engine_application_url_dispatch_rules_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_app_engine_application_url_dispatch_rules_log.Printf("[DEBUG] Finished deleting ApplicationUrlDispatchRules %q: %#v", d.Id(), res) - return nil -} - -func resourceAppEngineApplicationUrlDispatchRulesImport(d *resource_app_engine_application_url_dispatch_rules_schema.ResourceData, meta interface{}) ([]*resource_app_engine_application_url_dispatch_rules_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{project}}") - if err != nil { - return nil, resource_app_engine_application_url_dispatch_rules_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_app_engine_application_url_dispatch_rules_schema.ResourceData{d}, nil -} - -func flattenAppEngineApplicationUrlDispatchRulesDispatchRules(v interface{}, d *resource_app_engine_application_url_dispatch_rules_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "domain": flattenAppEngineApplicationUrlDispatchRulesDispatchRulesDomain(original["domain"], d, config), - "path": flattenAppEngineApplicationUrlDispatchRulesDispatchRulesPath(original["path"], d, config), - "service": 
flattenAppEngineApplicationUrlDispatchRulesDispatchRulesService(original["service"], d, config), - }) - } - return transformed -} - -func flattenAppEngineApplicationUrlDispatchRulesDispatchRulesDomain(v interface{}, d *resource_app_engine_application_url_dispatch_rules_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineApplicationUrlDispatchRulesDispatchRulesPath(v interface{}, d *resource_app_engine_application_url_dispatch_rules_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineApplicationUrlDispatchRulesDispatchRulesService(v interface{}, d *resource_app_engine_application_url_dispatch_rules_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAppEngineApplicationUrlDispatchRulesDispatchRules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDomain, err := expandAppEngineApplicationUrlDispatchRulesDispatchRulesDomain(original["domain"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_application_url_dispatch_rules_reflect.ValueOf(transformedDomain); val.IsValid() && !isEmptyValue(val) { - transformed["domain"] = transformedDomain - } - - transformedPath, err := expandAppEngineApplicationUrlDispatchRulesDispatchRulesPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_application_url_dispatch_rules_reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedService, err := expandAppEngineApplicationUrlDispatchRulesDispatchRulesService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_app_engine_application_url_dispatch_rules_reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAppEngineApplicationUrlDispatchRulesDispatchRulesDomain(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineApplicationUrlDispatchRulesDispatchRulesPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineApplicationUrlDispatchRulesDispatchRulesService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func sslSettingsDiffSuppress(k, old, new string, d *resource_app_engine_domain_mapping_schema.ResourceData) bool { - - if k == "ssl_settings.#" && - old == "0" && new == "1" && - d.Get("ssl_settings.0.certificate_id") == "" && - d.Get("ssl_settings.0.ssl_management_type") == "MANUAL" { - return true - } - - return false -} - -func resourceAppEngineDomainMapping() *resource_app_engine_domain_mapping_schema.Resource { - return &resource_app_engine_domain_mapping_schema.Resource{ - Create: resourceAppEngineDomainMappingCreate, - Read: resourceAppEngineDomainMappingRead, - Update: resourceAppEngineDomainMappingUpdate, - Delete: resourceAppEngineDomainMappingDelete, - - Importer: &resource_app_engine_domain_mapping_schema.ResourceImporter{ - State: resourceAppEngineDomainMappingImport, - }, - - Timeouts: &resource_app_engine_domain_mapping_schema.ResourceTimeout{ - Create: resource_app_engine_domain_mapping_schema.DefaultTimeout(4 * resource_app_engine_domain_mapping_time.Minute), - Update: resource_app_engine_domain_mapping_schema.DefaultTimeout(4 * resource_app_engine_domain_mapping_time.Minute), - Delete: resource_app_engine_domain_mapping_schema.DefaultTimeout(4 * resource_app_engine_domain_mapping_time.Minute), - }, - - Schema: 
map[string]*resource_app_engine_domain_mapping_schema.Schema{ - "domain_name": { - Type: resource_app_engine_domain_mapping_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Relative name of the domain serving the application. Example: example.com.`, - }, - "override_strategy": { - Type: resource_app_engine_domain_mapping_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_domain_mapping_validation.StringInSlice([]string{"STRICT", "OVERRIDE", ""}, false), - Description: `Whether the domain creation should override any existing mappings for this domain. -By default, overrides are rejected. Default value: "STRICT" Possible values: ["STRICT", "OVERRIDE"]`, - Default: "STRICT", - }, - "ssl_settings": { - Type: resource_app_engine_domain_mapping_schema.TypeList, - Optional: true, - DiffSuppressFunc: sslSettingsDiffSuppress, - Description: `SSL configuration for this domain. If unconfigured, this domain will not serve with SSL.`, - MaxItems: 1, - Elem: &resource_app_engine_domain_mapping_schema.Resource{ - Schema: map[string]*resource_app_engine_domain_mapping_schema.Schema{ - "ssl_management_type": { - Type: resource_app_engine_domain_mapping_schema.TypeString, - Required: true, - ValidateFunc: resource_app_engine_domain_mapping_validation.StringInSlice([]string{"AUTOMATIC", "MANUAL"}, false), - Description: `SSL management type for this domain. If 'AUTOMATIC', a managed certificate is automatically provisioned. -If 'MANUAL', 'certificateId' must be manually specified in order to configure SSL for this domain. Possible values: ["AUTOMATIC", "MANUAL"]`, - }, - "certificate_id": { - Type: resource_app_engine_domain_mapping_schema.TypeString, - Computed: true, - Optional: true, - Description: `ID of the AuthorizedCertificate resource configuring SSL for the application. Clearing this field will -remove SSL support. -By default, a managed certificate is automatically created for every domain mapping. 
To omit SSL support -or to configure SSL manually, specify 'SslManagementType.MANUAL' on a 'CREATE' or 'UPDATE' request. You must be -authorized to administer the 'AuthorizedCertificate' resource to manually map it to a DomainMapping resource. -Example: 12345.`, - }, - "pending_managed_certificate_id": { - Type: resource_app_engine_domain_mapping_schema.TypeString, - Computed: true, - Description: `ID of the managed 'AuthorizedCertificate' resource currently being provisioned, if applicable. Until the new -managed certificate has been successfully provisioned, the previous SSL state will be preserved. Once the -provisioning process completes, the 'certificateId' field will reflect the new managed certificate and this -field will be left empty. To remove SSL support while there is still a pending managed certificate, clear the -'certificateId' field with an update request.`, - }, - }, - }, - }, - "name": { - Type: resource_app_engine_domain_mapping_schema.TypeString, - Computed: true, - Description: `Full path to the DomainMapping resource in the API. Example: apps/myapp/domainMapping/example.com.`, - }, - "resource_records": { - Type: resource_app_engine_domain_mapping_schema.TypeList, - Computed: true, - Description: `The resource records required to configure this domain mapping. These records must be added to the domain's DNS -configuration in order to serve the application via this domain mapping.`, - Elem: &resource_app_engine_domain_mapping_schema.Resource{ - Schema: map[string]*resource_app_engine_domain_mapping_schema.Schema{ - "name": { - Type: resource_app_engine_domain_mapping_schema.TypeString, - Optional: true, - Description: `Relative name of the object affected by this record. Only applicable for CNAME records. Example: 'www'.`, - }, - "rrdata": { - Type: resource_app_engine_domain_mapping_schema.TypeString, - Optional: true, - Description: `Data for this record. 
Values vary by record type, as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1).`, - }, - "type": { - Type: resource_app_engine_domain_mapping_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_domain_mapping_validation.StringInSlice([]string{"A", "AAAA", "CNAME", ""}, false), - Description: `Resource record type. Example: 'AAAA'. Possible values: ["A", "AAAA", "CNAME"]`, - }, - }, - }, - }, - "project": { - Type: resource_app_engine_domain_mapping_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAppEngineDomainMappingCreate(d *resource_app_engine_domain_mapping_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - sslSettingsProp, err := expandAppEngineDomainMappingSslSettings(d.Get("ssl_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_settings"); !isEmptyValue(resource_app_engine_domain_mapping_reflect.ValueOf(sslSettingsProp)) && (ok || !resource_app_engine_domain_mapping_reflect.DeepEqual(v, sslSettingsProp)) { - obj["sslSettings"] = sslSettingsProp - } - idProp, err := expandAppEngineDomainMappingDomainName(d.Get("domain_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("domain_name"); !isEmptyValue(resource_app_engine_domain_mapping_reflect.ValueOf(idProp)) && (ok || !resource_app_engine_domain_mapping_reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/domainMappings") - if err != nil { - return err - } - - resource_app_engine_domain_mapping_log.Printf("[DEBUG] Creating new 
DomainMapping: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_domain_mapping_fmt.Errorf("Error fetching project for DomainMapping: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_app_engine_domain_mapping_schema.TimeoutCreate)) - if err != nil { - return resource_app_engine_domain_mapping_fmt.Errorf("Error creating DomainMapping: %s", err) - } - - id, err := replaceVars(d, config, "apps/{{project}}/domainMappings/{{domain_name}}") - if err != nil { - return resource_app_engine_domain_mapping_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = appEngineOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating DomainMapping", userAgent, - d.Timeout(resource_app_engine_domain_mapping_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_app_engine_domain_mapping_fmt.Errorf("Error waiting to create DomainMapping: %s", err) - } - - if err := d.Set("name", flattenAppEngineDomainMappingName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "apps/{{project}}/domainMappings/{{domain_name}}") - if err != nil { - return resource_app_engine_domain_mapping_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_app_engine_domain_mapping_log.Printf("[DEBUG] Finished creating DomainMapping %q: %#v", d.Id(), res) - - return resourceAppEngineDomainMappingRead(d, meta) -} - -func resourceAppEngineDomainMappingRead(d *resource_app_engine_domain_mapping_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{AppEngineBasePath}}apps/{{project}}/domainMappings/{{domain_name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_domain_mapping_fmt.Errorf("Error fetching project for DomainMapping: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_app_engine_domain_mapping_fmt.Sprintf("AppEngineDomainMapping %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_app_engine_domain_mapping_fmt.Errorf("Error reading DomainMapping: %s", err) - } - - if err := d.Set("name", flattenAppEngineDomainMappingName(res["name"], d, config)); err != nil { - return resource_app_engine_domain_mapping_fmt.Errorf("Error reading DomainMapping: %s", err) - } - if err := d.Set("ssl_settings", flattenAppEngineDomainMappingSslSettings(res["sslSettings"], d, config)); err != nil { - return resource_app_engine_domain_mapping_fmt.Errorf("Error reading DomainMapping: %s", err) - } - if err := d.Set("resource_records", flattenAppEngineDomainMappingResourceRecords(res["resourceRecords"], d, config)); err != nil { - return resource_app_engine_domain_mapping_fmt.Errorf("Error reading DomainMapping: %s", err) - } - if err := d.Set("domain_name", flattenAppEngineDomainMappingDomainName(res["id"], d, config)); err != nil { - return resource_app_engine_domain_mapping_fmt.Errorf("Error reading DomainMapping: %s", err) - } - - return nil -} - -func resourceAppEngineDomainMappingUpdate(d *resource_app_engine_domain_mapping_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - 
if err != nil { - return resource_app_engine_domain_mapping_fmt.Errorf("Error fetching project for DomainMapping: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - sslSettingsProp, err := expandAppEngineDomainMappingSslSettings(d.Get("ssl_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_settings"); !isEmptyValue(resource_app_engine_domain_mapping_reflect.ValueOf(v)) && (ok || !resource_app_engine_domain_mapping_reflect.DeepEqual(v, sslSettingsProp)) { - obj["sslSettings"] = sslSettingsProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/domainMappings/{{domain_name}}") - if err != nil { - return err - } - - resource_app_engine_domain_mapping_log.Printf("[DEBUG] Updating DomainMapping %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("ssl_settings") { - updateMask = append(updateMask, "ssl_settings.certificate_id", - "ssl_settings.ssl_management_type") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_app_engine_domain_mapping_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_app_engine_domain_mapping_schema.TimeoutUpdate)) - - if err != nil { - return resource_app_engine_domain_mapping_fmt.Errorf("Error updating DomainMapping %q: %s", d.Id(), err) - } else { - resource_app_engine_domain_mapping_log.Printf("[DEBUG] Finished updating DomainMapping %q: %#v", d.Id(), res) - } - - err = appEngineOperationWaitTime( - config, res, project, "Updating DomainMapping", userAgent, - d.Timeout(resource_app_engine_domain_mapping_schema.TimeoutUpdate)) - - 
if err != nil { - return err - } - - return resourceAppEngineDomainMappingRead(d, meta) -} - -func resourceAppEngineDomainMappingDelete(d *resource_app_engine_domain_mapping_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_domain_mapping_fmt.Errorf("Error fetching project for DomainMapping: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/domainMappings/{{domain_name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_app_engine_domain_mapping_log.Printf("[DEBUG] Deleting DomainMapping %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_app_engine_domain_mapping_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DomainMapping") - } - - err = appEngineOperationWaitTime( - config, res, project, "Deleting DomainMapping", userAgent, - d.Timeout(resource_app_engine_domain_mapping_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_app_engine_domain_mapping_log.Printf("[DEBUG] Finished deleting DomainMapping %q: %#v", d.Id(), res) - return nil -} - -func resourceAppEngineDomainMappingImport(d *resource_app_engine_domain_mapping_schema.ResourceData, meta interface{}) ([]*resource_app_engine_domain_mapping_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "apps/(?P[^/]+)/domainMappings/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - 
"(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "apps/{{project}}/domainMappings/{{domain_name}}") - if err != nil { - return nil, resource_app_engine_domain_mapping_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_app_engine_domain_mapping_schema.ResourceData{d}, nil -} - -func flattenAppEngineDomainMappingName(v interface{}, d *resource_app_engine_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineDomainMappingSslSettings(v interface{}, d *resource_app_engine_domain_mapping_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["certificate_id"] = - flattenAppEngineDomainMappingSslSettingsCertificateId(original["certificateId"], d, config) - transformed["ssl_management_type"] = - flattenAppEngineDomainMappingSslSettingsSslManagementType(original["sslManagementType"], d, config) - transformed["pending_managed_certificate_id"] = - flattenAppEngineDomainMappingSslSettingsPendingManagedCertificateId(original["pendingManagedCertificateId"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineDomainMappingSslSettingsCertificateId(v interface{}, d *resource_app_engine_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineDomainMappingSslSettingsSslManagementType(v interface{}, d *resource_app_engine_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineDomainMappingSslSettingsPendingManagedCertificateId(v interface{}, d *resource_app_engine_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineDomainMappingResourceRecords(v interface{}, d 
*resource_app_engine_domain_mapping_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenAppEngineDomainMappingResourceRecordsName(original["name"], d, config), - "rrdata": flattenAppEngineDomainMappingResourceRecordsRrdata(original["rrdata"], d, config), - "type": flattenAppEngineDomainMappingResourceRecordsType(original["type"], d, config), - }) - } - return transformed -} - -func flattenAppEngineDomainMappingResourceRecordsName(v interface{}, d *resource_app_engine_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineDomainMappingResourceRecordsRrdata(v interface{}, d *resource_app_engine_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineDomainMappingResourceRecordsType(v interface{}, d *resource_app_engine_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineDomainMappingDomainName(v interface{}, d *resource_app_engine_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAppEngineDomainMappingSslSettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCertificateId, err := expandAppEngineDomainMappingSslSettingsCertificateId(original["certificate_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_domain_mapping_reflect.ValueOf(transformedCertificateId); val.IsValid() && !isEmptyValue(val) { - 
transformed["certificateId"] = transformedCertificateId - } - - transformedSslManagementType, err := expandAppEngineDomainMappingSslSettingsSslManagementType(original["ssl_management_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_domain_mapping_reflect.ValueOf(transformedSslManagementType); val.IsValid() && !isEmptyValue(val) { - transformed["sslManagementType"] = transformedSslManagementType - } - - transformedPendingManagedCertificateId, err := expandAppEngineDomainMappingSslSettingsPendingManagedCertificateId(original["pending_managed_certificate_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_domain_mapping_reflect.ValueOf(transformedPendingManagedCertificateId); val.IsValid() && !isEmptyValue(val) { - transformed["pendingManagedCertificateId"] = transformedPendingManagedCertificateId - } - - return transformed, nil -} - -func expandAppEngineDomainMappingSslSettingsCertificateId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineDomainMappingSslSettingsSslManagementType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineDomainMappingSslSettingsPendingManagedCertificateId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineDomainMappingDomainName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceAppEngineFirewallRule() *resource_app_engine_firewall_rule_schema.Resource { - return &resource_app_engine_firewall_rule_schema.Resource{ - Create: resourceAppEngineFirewallRuleCreate, - Read: resourceAppEngineFirewallRuleRead, - Update: resourceAppEngineFirewallRuleUpdate, - Delete: resourceAppEngineFirewallRuleDelete, - - Importer: &resource_app_engine_firewall_rule_schema.ResourceImporter{ - State: 
resourceAppEngineFirewallRuleImport, - }, - - Timeouts: &resource_app_engine_firewall_rule_schema.ResourceTimeout{ - Create: resource_app_engine_firewall_rule_schema.DefaultTimeout(4 * resource_app_engine_firewall_rule_time.Minute), - Update: resource_app_engine_firewall_rule_schema.DefaultTimeout(4 * resource_app_engine_firewall_rule_time.Minute), - Delete: resource_app_engine_firewall_rule_schema.DefaultTimeout(4 * resource_app_engine_firewall_rule_time.Minute), - }, - - Schema: map[string]*resource_app_engine_firewall_rule_schema.Schema{ - "action": { - Type: resource_app_engine_firewall_rule_schema.TypeString, - Required: true, - ValidateFunc: resource_app_engine_firewall_rule_validation.StringInSlice([]string{"UNSPECIFIED_ACTION", "ALLOW", "DENY"}, false), - Description: `The action to take if this rule matches. Possible values: ["UNSPECIFIED_ACTION", "ALLOW", "DENY"]`, - }, - "source_range": { - Type: resource_app_engine_firewall_rule_schema.TypeString, - Required: true, - Description: `IP address or range, defined using CIDR notation, of requests that this rule applies to.`, - }, - "description": { - Type: resource_app_engine_firewall_rule_schema.TypeString, - Optional: true, - Description: `An optional string description of this rule.`, - }, - "priority": { - Type: resource_app_engine_firewall_rule_schema.TypeInt, - Optional: true, - Description: `A positive integer that defines the order of rule evaluation. -Rules with the lowest priority are evaluated first. - -A default rule at priority Int32.MaxValue matches all IPv4 and -IPv6 traffic when no previous rule matches. 
Only the action of -this rule can be modified by the user.`, - }, - "project": { - Type: resource_app_engine_firewall_rule_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAppEngineFirewallRuleCreate(d *resource_app_engine_firewall_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandAppEngineFirewallRuleDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_app_engine_firewall_rule_reflect.ValueOf(descriptionProp)) && (ok || !resource_app_engine_firewall_rule_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - sourceRangeProp, err := expandAppEngineFirewallRuleSourceRange(d.Get("source_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_range"); !isEmptyValue(resource_app_engine_firewall_rule_reflect.ValueOf(sourceRangeProp)) && (ok || !resource_app_engine_firewall_rule_reflect.DeepEqual(v, sourceRangeProp)) { - obj["sourceRange"] = sourceRangeProp - } - actionProp, err := expandAppEngineFirewallRuleAction(d.Get("action"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("action"); !isEmptyValue(resource_app_engine_firewall_rule_reflect.ValueOf(actionProp)) && (ok || !resource_app_engine_firewall_rule_reflect.DeepEqual(v, actionProp)) { - obj["action"] = actionProp - } - priorityProp, err := expandAppEngineFirewallRulePriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); !isEmptyValue(resource_app_engine_firewall_rule_reflect.ValueOf(priorityProp)) && (ok || !resource_app_engine_firewall_rule_reflect.DeepEqual(v, priorityProp)) { - 
obj["priority"] = priorityProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/firewall/ingressRules") - if err != nil { - return err - } - - resource_app_engine_firewall_rule_log.Printf("[DEBUG] Creating new FirewallRule: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_firewall_rule_fmt.Errorf("Error fetching project for FirewallRule: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_app_engine_firewall_rule_schema.TimeoutCreate)) - if err != nil { - return resource_app_engine_firewall_rule_fmt.Errorf("Error creating FirewallRule: %s", err) - } - - id, err := replaceVars(d, config, "apps/{{project}}/firewall/ingressRules/{{priority}}") - if err != nil { - return resource_app_engine_firewall_rule_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = PollingWaitTime(resourceAppEngineFirewallRulePollRead(d, meta), PollCheckForExistence, "Creating FirewallRule", d.Timeout(resource_app_engine_firewall_rule_schema.TimeoutCreate), 1) - if err != nil { - return resource_app_engine_firewall_rule_fmt.Errorf("Error waiting to create FirewallRule: %s", err) - } - - resource_app_engine_firewall_rule_log.Printf("[DEBUG] Finished creating FirewallRule %q: %#v", d.Id(), res) - - return resourceAppEngineFirewallRuleRead(d, meta) -} - -func resourceAppEngineFirewallRulePollRead(d *resource_app_engine_firewall_rule_schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, 
"{{AppEngineBasePath}}apps/{{project}}/firewall/ingressRules/{{priority}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, resource_app_engine_firewall_rule_fmt.Errorf("Error fetching project for FirewallRule: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return res, err - } - return res, nil - } -} - -func resourceAppEngineFirewallRuleRead(d *resource_app_engine_firewall_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/firewall/ingressRules/{{priority}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_firewall_rule_fmt.Errorf("Error fetching project for FirewallRule: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_app_engine_firewall_rule_fmt.Sprintf("AppEngineFirewallRule %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_app_engine_firewall_rule_fmt.Errorf("Error reading FirewallRule: %s", err) - } - - if err := d.Set("description", flattenAppEngineFirewallRuleDescription(res["description"], d, config)); err != nil { - return resource_app_engine_firewall_rule_fmt.Errorf("Error reading FirewallRule: %s", err) - } - if err := 
d.Set("source_range", flattenAppEngineFirewallRuleSourceRange(res["sourceRange"], d, config)); err != nil { - return resource_app_engine_firewall_rule_fmt.Errorf("Error reading FirewallRule: %s", err) - } - if err := d.Set("action", flattenAppEngineFirewallRuleAction(res["action"], d, config)); err != nil { - return resource_app_engine_firewall_rule_fmt.Errorf("Error reading FirewallRule: %s", err) - } - if err := d.Set("priority", flattenAppEngineFirewallRulePriority(res["priority"], d, config)); err != nil { - return resource_app_engine_firewall_rule_fmt.Errorf("Error reading FirewallRule: %s", err) - } - - return nil -} - -func resourceAppEngineFirewallRuleUpdate(d *resource_app_engine_firewall_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_firewall_rule_fmt.Errorf("Error fetching project for FirewallRule: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandAppEngineFirewallRuleDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_app_engine_firewall_rule_reflect.ValueOf(v)) && (ok || !resource_app_engine_firewall_rule_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - sourceRangeProp, err := expandAppEngineFirewallRuleSourceRange(d.Get("source_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_range"); !isEmptyValue(resource_app_engine_firewall_rule_reflect.ValueOf(v)) && (ok || !resource_app_engine_firewall_rule_reflect.DeepEqual(v, sourceRangeProp)) { - obj["sourceRange"] = sourceRangeProp - } - actionProp, err := expandAppEngineFirewallRuleAction(d.Get("action"), d, config) - if err != 
nil { - return err - } else if v, ok := d.GetOkExists("action"); !isEmptyValue(resource_app_engine_firewall_rule_reflect.ValueOf(v)) && (ok || !resource_app_engine_firewall_rule_reflect.DeepEqual(v, actionProp)) { - obj["action"] = actionProp - } - priorityProp, err := expandAppEngineFirewallRulePriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); !isEmptyValue(resource_app_engine_firewall_rule_reflect.ValueOf(v)) && (ok || !resource_app_engine_firewall_rule_reflect.DeepEqual(v, priorityProp)) { - obj["priority"] = priorityProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/firewall/ingressRules/{{priority}}") - if err != nil { - return err - } - - resource_app_engine_firewall_rule_log.Printf("[DEBUG] Updating FirewallRule %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("source_range") { - updateMask = append(updateMask, "sourceRange") - } - - if d.HasChange("action") { - updateMask = append(updateMask, "action") - } - - if d.HasChange("priority") { - updateMask = append(updateMask, "priority") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_app_engine_firewall_rule_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_app_engine_firewall_rule_schema.TimeoutUpdate)) - - if err != nil { - return resource_app_engine_firewall_rule_fmt.Errorf("Error updating FirewallRule %q: %s", d.Id(), err) - } else { - resource_app_engine_firewall_rule_log.Printf("[DEBUG] Finished 
updating FirewallRule %q: %#v", d.Id(), res) - } - - return resourceAppEngineFirewallRuleRead(d, meta) -} - -func resourceAppEngineFirewallRuleDelete(d *resource_app_engine_firewall_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_firewall_rule_fmt.Errorf("Error fetching project for FirewallRule: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/firewall/ingressRules/{{priority}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_app_engine_firewall_rule_log.Printf("[DEBUG] Deleting FirewallRule %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_app_engine_firewall_rule_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "FirewallRule") - } - - resource_app_engine_firewall_rule_log.Printf("[DEBUG] Finished deleting FirewallRule %q: %#v", d.Id(), res) - return nil -} - -func resourceAppEngineFirewallRuleImport(d *resource_app_engine_firewall_rule_schema.ResourceData, meta interface{}) ([]*resource_app_engine_firewall_rule_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "apps/(?P[^/]+)/firewall/ingressRules/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "apps/{{project}}/firewall/ingressRules/{{priority}}") - if err != nil { - return nil, 
resource_app_engine_firewall_rule_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_app_engine_firewall_rule_schema.ResourceData{d}, nil -} - -func flattenAppEngineFirewallRuleDescription(v interface{}, d *resource_app_engine_firewall_rule_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFirewallRuleSourceRange(v interface{}, d *resource_app_engine_firewall_rule_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFirewallRuleAction(v interface{}, d *resource_app_engine_firewall_rule_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFirewallRulePriority(v interface{}, d *resource_app_engine_firewall_rule_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_firewall_rule_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func expandAppEngineFirewallRuleDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFirewallRuleSourceRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFirewallRuleAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFirewallRulePriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceAppEngineFlexibleAppVersion() *resource_app_engine_flexible_app_version_schema.Resource { - return &resource_app_engine_flexible_app_version_schema.Resource{ - Create: resourceAppEngineFlexibleAppVersionCreate, - Read: resourceAppEngineFlexibleAppVersionRead, - Update: resourceAppEngineFlexibleAppVersionUpdate, - Delete: 
resourceAppEngineFlexibleAppVersionDelete, - - Importer: &resource_app_engine_flexible_app_version_schema.ResourceImporter{ - State: resourceAppEngineFlexibleAppVersionImport, - }, - - Timeouts: &resource_app_engine_flexible_app_version_schema.ResourceTimeout{ - Create: resource_app_engine_flexible_app_version_schema.DefaultTimeout(10 * resource_app_engine_flexible_app_version_time.Minute), - Update: resource_app_engine_flexible_app_version_schema.DefaultTimeout(10 * resource_app_engine_flexible_app_version_time.Minute), - Delete: resource_app_engine_flexible_app_version_schema.DefaultTimeout(10 * resource_app_engine_flexible_app_version_time.Minute), - }, - - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "liveness_check": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Required: true, - Description: `Health checking configuration for VM instances. Unhealthy instances are killed and replaced with new instances.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "path": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - Description: `The request path.`, - }, - "check_interval": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Interval between health checks.`, - Default: "30s", - }, - "failure_threshold": { - Type: resource_app_engine_flexible_app_version_schema.TypeFloat, - Optional: true, - Description: `Number of consecutive failed checks required before considering the VM unhealthy. Default: 4.`, - Default: 4.0, - }, - "host": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Host header to send when performing a HTTP Readiness check. 
Example: "myapp.appspot.com"`, - }, - "initial_delay": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `The initial delay before starting to execute the checks. Default: "300s"`, - Default: "300s", - }, - "success_threshold": { - Type: resource_app_engine_flexible_app_version_schema.TypeFloat, - Optional: true, - Description: `Number of consecutive successful checks required before considering the VM healthy. Default: 2.`, - Default: 2.0, - }, - "timeout": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Time before the check is considered failed. Default: "4s"`, - Default: "4s", - }, - }, - }, - }, - "readiness_check": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Required: true, - Description: `Configures readiness health checking for instances. Unhealthy instances are not put into the backend traffic rotation.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "path": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - Description: `The request path.`, - }, - "app_start_timeout": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `A maximum time limit on application initialization, measured from moment the application successfully -replies to a healthcheck until it is ready to serve traffic. Default: "300s"`, - Default: "300s", - }, - "check_interval": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Interval between health checks. Default: "5s".`, - Default: "5s", - }, - "failure_threshold": { - Type: resource_app_engine_flexible_app_version_schema.TypeFloat, - Optional: true, - Description: `Number of consecutive failed checks required before removing traffic. 
Default: 2.`, - Default: 2.0, - }, - "host": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Host header to send when performing a HTTP Readiness check. Example: "myapp.appspot.com"`, - }, - "success_threshold": { - Type: resource_app_engine_flexible_app_version_schema.TypeFloat, - Optional: true, - Description: `Number of consecutive successful checks required before receiving traffic. Default: 2.`, - Default: 2.0, - }, - "timeout": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Time before the check is considered failed. Default: "4s"`, - Default: "4s", - }, - }, - }, - }, - "runtime": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - Description: `Desired runtime. Example python27.`, - }, - "service": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `AppEngine service resource. Can contain numbers, letters, and hyphens.`, - }, - "api_config": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `Serving configuration for Google Cloud Endpoints.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "script": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - Description: `Path to the script from the application root directory.`, - }, - "auth_fail_action": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_flexible_app_version_validation.StringInSlice([]string{"AUTH_FAIL_ACTION_REDIRECT", "AUTH_FAIL_ACTION_UNAUTHORIZED", ""}, false), - Description: `Action to take when users access resources that require authentication. 
Default value: "AUTH_FAIL_ACTION_REDIRECT" Possible values: ["AUTH_FAIL_ACTION_REDIRECT", "AUTH_FAIL_ACTION_UNAUTHORIZED"]`, - Default: "AUTH_FAIL_ACTION_REDIRECT", - }, - "login": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_flexible_app_version_validation.StringInSlice([]string{"LOGIN_OPTIONAL", "LOGIN_ADMIN", "LOGIN_REQUIRED", ""}, false), - Description: `Level of login required to access this resource. Default value: "LOGIN_OPTIONAL" Possible values: ["LOGIN_OPTIONAL", "LOGIN_ADMIN", "LOGIN_REQUIRED"]`, - Default: "LOGIN_OPTIONAL", - }, - "security_level": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_flexible_app_version_validation.StringInSlice([]string{"SECURE_DEFAULT", "SECURE_NEVER", "SECURE_OPTIONAL", "SECURE_ALWAYS", ""}, false), - Description: `Security (HTTPS) enforcement for this URL. Possible values: ["SECURE_DEFAULT", "SECURE_NEVER", "SECURE_OPTIONAL", "SECURE_ALWAYS"]`, - }, - "url": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `URL to serve the endpoint at.`, - }, - }, - }, - }, - "automatic_scaling": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `Automatic scaling is based on request rate, response latencies, and other application metrics.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "cpu_utilization": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Required: true, - Description: `Target scaling by CPU usage.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "target_utilization": { - Type: 
resource_app_engine_flexible_app_version_schema.TypeFloat, - Required: true, - Description: `Target CPU utilization ratio to maintain when scaling. Must be between 0 and 1.`, - }, - "aggregation_window_length": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Period of time over which CPU utilization is calculated.`, - }, - }, - }, - }, - "cool_down_period": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `The time period that the Autoscaler should wait before it starts collecting information from a new instance. -This prevents the autoscaler from collecting information when the instance is initializing, -during which the collected usage would not be reliable. Default: 120s`, - Default: "120s", - }, - "disk_utilization": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `Target scaling by disk usage.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "target_read_bytes_per_second": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Optional: true, - Description: `Target bytes read per second.`, - AtLeastOneOf: []string{"automatic_scaling.0.disk_utilization.0.target_write_bytes_per_second", "automatic_scaling.0.disk_utilization.0.target_write_ops_per_second", "automatic_scaling.0.disk_utilization.0.target_read_bytes_per_second", "automatic_scaling.0.disk_utilization.0.target_read_ops_per_second"}, - }, - "target_read_ops_per_second": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Optional: true, - Description: `Target ops read per seconds.`, - AtLeastOneOf: []string{"automatic_scaling.0.disk_utilization.0.target_write_bytes_per_second", "automatic_scaling.0.disk_utilization.0.target_write_ops_per_second", 
"automatic_scaling.0.disk_utilization.0.target_read_bytes_per_second", "automatic_scaling.0.disk_utilization.0.target_read_ops_per_second"}, - }, - "target_write_bytes_per_second": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Optional: true, - Description: `Target bytes written per second.`, - AtLeastOneOf: []string{"automatic_scaling.0.disk_utilization.0.target_write_bytes_per_second", "automatic_scaling.0.disk_utilization.0.target_write_ops_per_second", "automatic_scaling.0.disk_utilization.0.target_read_bytes_per_second", "automatic_scaling.0.disk_utilization.0.target_read_ops_per_second"}, - }, - "target_write_ops_per_second": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Optional: true, - Description: `Target ops written per second.`, - AtLeastOneOf: []string{"automatic_scaling.0.disk_utilization.0.target_write_bytes_per_second", "automatic_scaling.0.disk_utilization.0.target_write_ops_per_second", "automatic_scaling.0.disk_utilization.0.target_read_bytes_per_second", "automatic_scaling.0.disk_utilization.0.target_read_ops_per_second"}, - }, - }, - }, - }, - "max_concurrent_requests": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Computed: true, - Optional: true, - Description: `Number of concurrent requests an automatic scaling instance can accept before the scheduler spawns a new instance. 
- -Defaults to a runtime-specific value.`, - }, - "max_idle_instances": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Optional: true, - Description: `Maximum number of idle instances that should be maintained for this version.`, - }, - "max_pending_latency": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Maximum amount of time that a request should wait in the pending queue before starting a new instance to handle it.`, - }, - "max_total_instances": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Optional: true, - Description: `Maximum number of instances that should be started to handle requests for this version. Default: 20`, - Default: 20, - }, - "min_idle_instances": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Optional: true, - Description: `Minimum number of idle instances that should be maintained for this version. Only applicable for the default version of a service.`, - }, - "min_pending_latency": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Minimum amount of time a request should wait in the pending queue before starting a new instance to handle it.`, - }, - "min_total_instances": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Optional: true, - Description: `Minimum number of running instances that should be maintained for this version. 
Default: 2`, - Default: 2, - }, - "network_utilization": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `Target scaling by network usage.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "target_received_bytes_per_second": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Optional: true, - Description: `Target bytes received per second.`, - AtLeastOneOf: []string{"automatic_scaling.0.network_utilization.0.target_sent_bytes_per_second", "automatic_scaling.0.network_utilization.0.target_sent_packets_per_second", "automatic_scaling.0.network_utilization.0.target_received_bytes_per_second", "automatic_scaling.0.network_utilization.0.target_received_packets_per_second"}, - }, - "target_received_packets_per_second": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Optional: true, - Description: `Target packets received per second.`, - AtLeastOneOf: []string{"automatic_scaling.0.network_utilization.0.target_sent_bytes_per_second", "automatic_scaling.0.network_utilization.0.target_sent_packets_per_second", "automatic_scaling.0.network_utilization.0.target_received_bytes_per_second", "automatic_scaling.0.network_utilization.0.target_received_packets_per_second"}, - }, - "target_sent_bytes_per_second": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Optional: true, - Description: `Target bytes sent per second.`, - AtLeastOneOf: []string{"automatic_scaling.0.network_utilization.0.target_sent_bytes_per_second", "automatic_scaling.0.network_utilization.0.target_sent_packets_per_second", "automatic_scaling.0.network_utilization.0.target_received_bytes_per_second", "automatic_scaling.0.network_utilization.0.target_received_packets_per_second"}, - }, - "target_sent_packets_per_second": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - 
Optional: true, - Description: `Target packets sent per second.`, - AtLeastOneOf: []string{"automatic_scaling.0.network_utilization.0.target_sent_bytes_per_second", "automatic_scaling.0.network_utilization.0.target_sent_packets_per_second", "automatic_scaling.0.network_utilization.0.target_received_bytes_per_second", "automatic_scaling.0.network_utilization.0.target_received_packets_per_second"}, - }, - }, - }, - }, - "request_utilization": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `Target scaling by request utilization.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "target_concurrent_requests": { - Type: resource_app_engine_flexible_app_version_schema.TypeFloat, - Optional: true, - Description: `Target number of concurrent requests.`, - AtLeastOneOf: []string{"automatic_scaling.0.request_utilization.0.target_request_count_per_second", "automatic_scaling.0.request_utilization.0.target_concurrent_requests"}, - }, - "target_request_count_per_second": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Target requests per second.`, - AtLeastOneOf: []string{"automatic_scaling.0.request_utilization.0.target_request_count_per_second", "automatic_scaling.0.request_utilization.0.target_concurrent_requests"}, - }, - }, - }, - }, - }, - }, - ExactlyOneOf: []string{"automatic_scaling", "manual_scaling"}, - }, - "beta_settings": { - Type: resource_app_engine_flexible_app_version_schema.TypeMap, - Optional: true, - Description: `Metadata settings that are supplied to this version to enable beta runtime features.`, - Elem: &resource_app_engine_flexible_app_version_schema.Schema{Type: resource_app_engine_flexible_app_version_schema.TypeString}, - }, - "default_expiration": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: 
true, - Description: `Duration that static files should be cached by web proxies and browsers. -Only applicable if the corresponding StaticFilesHandler does not specify its own expiration time.`, - }, - "deployment": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `Code and application artifacts that make up this version.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "cloud_build_options": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `Options for the build operations performed as a part of the version deployment. Only applicable when creating a version using source code directly.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "app_yaml_path": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - Description: `Path to the yaml file used in deployment, used to determine runtime configuration details.`, - }, - "cloud_build_timeout": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `The Cloud Build timeout used as part of any dependent builds performed by version creation. Defaults to 10 minutes. - -A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, - }, - }, - }, - AtLeastOneOf: []string{"deployment.0.zip", "deployment.0.files", "deployment.0.container"}, - }, - "container": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Computed: true, - Optional: true, - Description: `The Docker image for the container that runs the version.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "image": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - Description: `URI to the hosted container image in Google Container Registry. The URI must be fully qualified and include a tag or digest. -Examples: "gcr.io/my-project/image:tag" or "gcr.io/my-project/image@digest"`, - }, - }, - }, - AtLeastOneOf: []string{"deployment.0.zip", "deployment.0.files", "deployment.0.container"}, - }, - "files": { - Type: resource_app_engine_flexible_app_version_schema.TypeSet, - Optional: true, - Description: `Manifest of the files stored in Google Cloud Storage that are included as part of this version. 
-All files must be readable using the credentials supplied with this call.`, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "name": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - }, - "source_url": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - Description: `Source URL`, - }, - "sha1_sum": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `SHA1 checksum of the file`, - }, - }, - }, - AtLeastOneOf: []string{"deployment.0.zip", "deployment.0.files", "deployment.0.container"}, - }, - "zip": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `Zip File`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "source_url": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - Description: `Source URL`, - }, - "files_count": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Optional: true, - Description: `files count`, - }, - }, - }, - AtLeastOneOf: []string{"deployment.0.zip", "deployment.0.files", "deployment.0.container"}, - }, - }, - }, - }, - "endpoints_api_service": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `Code and application artifacts that make up this version.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "name": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - Description: `Endpoints service name which is the name of the "service" resource in the Service Management API. 
-For example "myapi.endpoints.myproject.cloud.goog"`, - }, - "config_id": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Endpoints service configuration ID as specified by the Service Management API. For example "2016-09-19r1". - -By default, the rollout strategy for Endpoints is "FIXED". This means that Endpoints starts up with a particular configuration ID. -When a new configuration is rolled out, Endpoints must be given the new configuration ID. The configId field is used to give the configuration ID -and is required in this case. - -Endpoints also has a rollout strategy called "MANAGED". When using this, Endpoints fetches the latest configuration and does not need -the configuration ID. In this case, configId must be omitted.`, - }, - "disable_trace_sampling": { - Type: resource_app_engine_flexible_app_version_schema.TypeBool, - Optional: true, - Description: `Enable or disable trace sampling. By default, this is set to false for enabled.`, - Default: false, - }, - "rollout_strategy": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_flexible_app_version_validation.StringInSlice([]string{"FIXED", "MANAGED", ""}, false), - Description: `Endpoints rollout strategy. If FIXED, configId must be specified. If MANAGED, configId must be omitted. 
Default value: "FIXED" Possible values: ["FIXED", "MANAGED"]`, - Default: "FIXED", - }, - }, - }, - }, - "entrypoint": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `The entrypoint for the application.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "shell": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - Description: `The format should be a shell command that can be fed to bash -c.`, - }, - }, - }, - }, - "env_variables": { - Type: resource_app_engine_flexible_app_version_schema.TypeMap, - Optional: true, - Description: `Environment variables available to the application. As these are not returned in the API request, Terraform will not detect any changes made outside of the Terraform config.`, - Elem: &resource_app_engine_flexible_app_version_schema.Schema{Type: resource_app_engine_flexible_app_version_schema.TypeString}, - }, - "handlers": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Computed: true, - Optional: true, - Description: `An ordered list of URL-matching patterns that should be applied to incoming requests. -The first matching URL handles the request and other request handlers are not attempted.`, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "auth_fail_action": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_flexible_app_version_validation.StringInSlice([]string{"AUTH_FAIL_ACTION_REDIRECT", "AUTH_FAIL_ACTION_UNAUTHORIZED", ""}, false), - Description: `Actions to take when the user is not logged in. 
Possible values: ["AUTH_FAIL_ACTION_REDIRECT", "AUTH_FAIL_ACTION_UNAUTHORIZED"]`, - }, - "login": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_flexible_app_version_validation.StringInSlice([]string{"LOGIN_OPTIONAL", "LOGIN_ADMIN", "LOGIN_REQUIRED", ""}, false), - Description: `Methods to restrict access to a URL based on login status. Possible values: ["LOGIN_OPTIONAL", "LOGIN_ADMIN", "LOGIN_REQUIRED"]`, - }, - "redirect_http_response_code": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_flexible_app_version_validation.StringInSlice([]string{"REDIRECT_HTTP_RESPONSE_CODE_301", "REDIRECT_HTTP_RESPONSE_CODE_302", "REDIRECT_HTTP_RESPONSE_CODE_303", "REDIRECT_HTTP_RESPONSE_CODE_307", ""}, false), - Description: `30x code to use when performing redirects for the secure field. Possible values: ["REDIRECT_HTTP_RESPONSE_CODE_301", "REDIRECT_HTTP_RESPONSE_CODE_302", "REDIRECT_HTTP_RESPONSE_CODE_303", "REDIRECT_HTTP_RESPONSE_CODE_307"]`, - }, - "script": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `Executes a script to handle the requests that match this URL pattern. 
-Only the auto value is supported for Node.js in the App Engine standard environment, for example "script:" "auto".`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "script_path": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - Description: `Path to the script from the application root directory.`, - }, - }, - }, - }, - "security_level": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_flexible_app_version_validation.StringInSlice([]string{"SECURE_DEFAULT", "SECURE_NEVER", "SECURE_OPTIONAL", "SECURE_ALWAYS", ""}, false), - Description: `Security (HTTPS) enforcement for this URL. Possible values: ["SECURE_DEFAULT", "SECURE_NEVER", "SECURE_OPTIONAL", "SECURE_ALWAYS"]`, - }, - "static_files": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `Files served directly to the user for a given URL, such as images, CSS stylesheets, or JavaScript source files. -Static file handlers describe which files in the application directory are static files, and which URLs serve them.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "application_readable": { - Type: resource_app_engine_flexible_app_version_schema.TypeBool, - Optional: true, - Description: `Whether files should also be uploaded as code data. By default, files declared in static file handlers are -uploaded as static data and are only served to end users; they cannot be read by the application. 
If enabled, -uploads are charged against both your code and static data storage resource quotas.`, - }, - "expiration": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Time a static file served by this handler should be cached by web proxies and browsers. -A duration in seconds with up to nine fractional digits, terminated by 's'. Example "3.5s". -Default is '0s'`, - Default: "0s", - }, - "http_headers": { - Type: resource_app_engine_flexible_app_version_schema.TypeMap, - Optional: true, - Description: `HTTP headers to use for all responses from these URLs. -An object containing a list of "key:value" value pairs.".`, - Elem: &resource_app_engine_flexible_app_version_schema.Schema{Type: resource_app_engine_flexible_app_version_schema.TypeString}, - }, - "mime_type": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `MIME type used to serve all files served by this handler. -Defaults to file-specific MIME types, which are derived from each file's filename extension.`, - }, - "path": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Path to the static files matched by the URL pattern, from the application root directory. -The path can refer to text matched in groupings in the URL pattern.`, - }, - "require_matching_file": { - Type: resource_app_engine_flexible_app_version_schema.TypeBool, - Optional: true, - Description: `Whether this handler should match the request if the file referenced by the handler does not exist.`, - }, - "upload_path_regex": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Regular expression that matches the file paths for all files that should be referenced by this handler.`, - }, - }, - }, - }, - "url_regex": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `URL prefix. 
Uses regular expression syntax, which means regexp special characters must be escaped, but should not contain groupings. -All URLs that begin with this prefix are handled by this handler, using the portion of the URL after the prefix as part of the file path.`, - }, - }, - }, - }, - "inbound_services": { - Type: resource_app_engine_flexible_app_version_schema.TypeSet, - Optional: true, - Description: `A list of the types of messages that this application is able to receive. Possible values: ["INBOUND_SERVICE_MAIL", "INBOUND_SERVICE_MAIL_BOUNCE", "INBOUND_SERVICE_XMPP_ERROR", "INBOUND_SERVICE_XMPP_MESSAGE", "INBOUND_SERVICE_XMPP_SUBSCRIBE", "INBOUND_SERVICE_XMPP_PRESENCE", "INBOUND_SERVICE_CHANNEL_PRESENCE", "INBOUND_SERVICE_WARMUP"]`, - Elem: &resource_app_engine_flexible_app_version_schema.Schema{ - Type: resource_app_engine_flexible_app_version_schema.TypeString, - ValidateFunc: resource_app_engine_flexible_app_version_validation.StringInSlice([]string{"INBOUND_SERVICE_MAIL", "INBOUND_SERVICE_MAIL_BOUNCE", "INBOUND_SERVICE_XMPP_ERROR", "INBOUND_SERVICE_XMPP_MESSAGE", "INBOUND_SERVICE_XMPP_SUBSCRIBE", "INBOUND_SERVICE_XMPP_PRESENCE", "INBOUND_SERVICE_CHANNEL_PRESENCE", "INBOUND_SERVICE_WARMUP"}, false), - }, - Set: resource_app_engine_flexible_app_version_schema.HashString, - }, - "instance_class": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Instance class that is used to run this version. 
Valid values are -AutomaticScaling: F1, F2, F4, F4_1G -ManualScaling: B1, B2, B4, B8, B4_1G -Defaults to F1 for AutomaticScaling and B1 for ManualScaling.`, - }, - "manual_scaling": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `A service with manual scaling runs continuously, allowing you to perform complex initialization and rely on the state of its memory over time.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "instances": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Required: true, - Description: `Number of instances to assign to the service at the start. - -**Note:** When managing the number of instances at runtime through the App Engine Admin API or the (now deprecated) Python 2 -Modules API set_num_instances() you must use 'lifecycle.ignore_changes = ["manual_scaling"[0].instances]' to prevent drift detection.`, - }, - }, - }, - ExactlyOneOf: []string{"automatic_scaling", "manual_scaling"}, - }, - "network": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `Extra network settings`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "name": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - Description: `Google Compute Engine network where the virtual machines are created. 
Specify the short name, not the resource path.`, - }, - "forwarded_ports": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `List of ports, or port pairs, to forward from the virtual machine to the application container.`, - Elem: &resource_app_engine_flexible_app_version_schema.Schema{ - Type: resource_app_engine_flexible_app_version_schema.TypeString, - }, - }, - "instance_tag": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Tag to apply to the instance during creation.`, - }, - "session_affinity": { - Type: resource_app_engine_flexible_app_version_schema.TypeBool, - Optional: true, - Description: `Enable session affinity.`, - }, - "subnetwork": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Google Cloud Platform sub-network where the virtual machines are created. Specify the short name, not the resource path. - -If the network that the instance is being created in is a Legacy network, then the IP address is allocated from the IPv4Range. -If the network that the instance is being created in is an auto Subnet Mode Network, then only network name should be specified (not the subnetworkName) and the IP address is created from the IPCidrRange of the subnetwork that exists in that zone for that network. -If the network that the instance is being created in is a custom Subnet Mode Network, then the subnetworkName must be specified and the IP address is created from the IPCidrRange of the subnetwork. -If specified, the subnetwork must exist in the same region as the App Engine flexible environment application.`, - }, - }, - }, - }, - "nobuild_files_regex": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `Files that match this pattern will not be built into this version. 
Only applicable for Go runtimes.`, - }, - "resources": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `Machine resources for a version.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "cpu": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Optional: true, - Description: `Number of CPU cores needed.`, - AtLeastOneOf: []string{"resources.0.cpu", "resources.0.disk_gb", "resources.0.memory_gb", "resources.0.volumes"}, - }, - "disk_gb": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Optional: true, - Description: `Disk size (GB) needed.`, - AtLeastOneOf: []string{"resources.0.cpu", "resources.0.disk_gb", "resources.0.memory_gb", "resources.0.volumes"}, - }, - "memory_gb": { - Type: resource_app_engine_flexible_app_version_schema.TypeFloat, - Optional: true, - Description: `Memory (GB) needed.`, - AtLeastOneOf: []string{"resources.0.cpu", "resources.0.disk_gb", "resources.0.memory_gb", "resources.0.volumes"}, - }, - "volumes": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `List of ports, or port pairs, to forward from the virtual machine to the application container.`, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "name": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - Description: `Unique name for the volume.`, - }, - "size_gb": { - Type: resource_app_engine_flexible_app_version_schema.TypeInt, - Required: true, - Description: `Volume size in gigabytes.`, - }, - "volume_type": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - Description: `Underlying volume type, e.g. 
'tmpfs'.`, - }, - }, - }, - AtLeastOneOf: []string{"resources.0.cpu", "resources.0.disk_gb", "resources.0.memory_gb", "resources.0.volumes"}, - }, - }, - }, - }, - "runtime_api_version": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Computed: true, - Optional: true, - Description: `The version of the API in the given runtime environment. -Please see the app.yaml reference for valid values at https://cloud.google.com/appengine/docs/standard//config/appref`, - }, - "runtime_channel": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `The channel of the runtime to use. Only available for some runtimes.`, - }, - "runtime_main_executable_path": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Description: `The path or name of the app's main executable.`, - }, - "serving_status": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_flexible_app_version_validation.StringInSlice([]string{"SERVING", "STOPPED", ""}, false), - Description: `Current serving status of this version. Only the versions with a SERVING status create instances and can be billed. Default value: "SERVING" Possible values: ["SERVING", "STOPPED"]`, - Default: "SERVING", - }, - "version_id": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Relative name of the version within the service. For example, 'v1'. Version names can contain only lowercase letters, numbers, or hyphens. 
-Reserved names,"default", "latest", and any name with the prefix "ah-".`, - }, - "vpc_access_connector": { - Type: resource_app_engine_flexible_app_version_schema.TypeList, - Optional: true, - Description: `Enables VPC connectivity for standard apps.`, - MaxItems: 1, - Elem: &resource_app_engine_flexible_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_flexible_app_version_schema.Schema{ - "name": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Required: true, - Description: `Full Serverless VPC Access Connector name e.g. /projects/my-project/locations/us-central1/connectors/c1.`, - }, - }, - }, - }, - "name": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Computed: true, - Description: `Full path to the Version resource in the API. Example, "v1".`, - }, - "noop_on_destroy": { - Type: resource_app_engine_flexible_app_version_schema.TypeBool, - Optional: true, - Default: false, - }, - "delete_service_on_destroy": { - Type: resource_app_engine_flexible_app_version_schema.TypeBool, - Optional: true, - Default: false, - }, - "project": { - Type: resource_app_engine_flexible_app_version_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAppEngineFlexibleAppVersionCreate(d *resource_app_engine_flexible_app_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - idProp, err := expandAppEngineFlexibleAppVersionVersionId(d.Get("version_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version_id"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(idProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - inboundServicesProp, err := 
expandAppEngineFlexibleAppVersionInboundServices(d.Get("inbound_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("inbound_services"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(inboundServicesProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, inboundServicesProp)) { - obj["inboundServices"] = inboundServicesProp - } - instanceClassProp, err := expandAppEngineFlexibleAppVersionInstanceClass(d.Get("instance_class"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance_class"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(instanceClassProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, instanceClassProp)) { - obj["instanceClass"] = instanceClassProp - } - networkProp, err := expandAppEngineFlexibleAppVersionNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(networkProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - resourcesProp, err := expandAppEngineFlexibleAppVersionResources(d.Get("resources"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("resources"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(resourcesProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, resourcesProp)) { - obj["resources"] = resourcesProp - } - runtimeProp, err := expandAppEngineFlexibleAppVersionRuntime(d.Get("runtime"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(runtimeProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, runtimeProp)) { - obj["runtime"] = runtimeProp - } - 
runtimeChannelProp, err := expandAppEngineFlexibleAppVersionRuntimeChannel(d.Get("runtime_channel"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_channel"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(runtimeChannelProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, runtimeChannelProp)) { - obj["runtimeChannel"] = runtimeChannelProp - } - betaSettingsProp, err := expandAppEngineFlexibleAppVersionBetaSettings(d.Get("beta_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("beta_settings"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(betaSettingsProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, betaSettingsProp)) { - obj["betaSettings"] = betaSettingsProp - } - servingStatusProp, err := expandAppEngineFlexibleAppVersionServingStatus(d.Get("serving_status"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("serving_status"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(servingStatusProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, servingStatusProp)) { - obj["servingStatus"] = servingStatusProp - } - runtimeApiVersionProp, err := expandAppEngineFlexibleAppVersionRuntimeApiVersion(d.Get("runtime_api_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_api_version"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(runtimeApiVersionProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, runtimeApiVersionProp)) { - obj["runtimeApiVersion"] = runtimeApiVersionProp - } - handlersProp, err := expandAppEngineFlexibleAppVersionHandlers(d.Get("handlers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("handlers"); 
!isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(handlersProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, handlersProp)) { - obj["handlers"] = handlersProp - } - runtimeMainExecutablePathProp, err := expandAppEngineFlexibleAppVersionRuntimeMainExecutablePath(d.Get("runtime_main_executable_path"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_main_executable_path"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(runtimeMainExecutablePathProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, runtimeMainExecutablePathProp)) { - obj["runtimeMainExecutablePath"] = runtimeMainExecutablePathProp - } - apiConfigProp, err := expandAppEngineFlexibleAppVersionApiConfig(d.Get("api_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("api_config"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(apiConfigProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, apiConfigProp)) { - obj["apiConfig"] = apiConfigProp - } - envVariablesProp, err := expandAppEngineFlexibleAppVersionEnvVariables(d.Get("env_variables"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("env_variables"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(envVariablesProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, envVariablesProp)) { - obj["envVariables"] = envVariablesProp - } - defaultExpirationProp, err := expandAppEngineFlexibleAppVersionDefaultExpiration(d.Get("default_expiration"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_expiration"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(defaultExpirationProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, defaultExpirationProp)) { - obj["defaultExpiration"] = 
defaultExpirationProp - } - readinessCheckProp, err := expandAppEngineFlexibleAppVersionReadinessCheck(d.Get("readiness_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("readiness_check"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(readinessCheckProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, readinessCheckProp)) { - obj["readinessCheck"] = readinessCheckProp - } - livenessCheckProp, err := expandAppEngineFlexibleAppVersionLivenessCheck(d.Get("liveness_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("liveness_check"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(livenessCheckProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, livenessCheckProp)) { - obj["livenessCheck"] = livenessCheckProp - } - nobuildFilesRegexProp, err := expandAppEngineFlexibleAppVersionNobuildFilesRegex(d.Get("nobuild_files_regex"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("nobuild_files_regex"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(nobuildFilesRegexProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, nobuildFilesRegexProp)) { - obj["nobuildFilesRegex"] = nobuildFilesRegexProp - } - deploymentProp, err := expandAppEngineFlexibleAppVersionDeployment(d.Get("deployment"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deployment"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(deploymentProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, deploymentProp)) { - obj["deployment"] = deploymentProp - } - endpointsApiServiceProp, err := expandAppEngineFlexibleAppVersionEndpointsApiService(d.Get("endpoints_api_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("endpoints_api_service"); 
!isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(endpointsApiServiceProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, endpointsApiServiceProp)) { - obj["endpointsApiService"] = endpointsApiServiceProp - } - entrypointProp, err := expandAppEngineFlexibleAppVersionEntrypoint(d.Get("entrypoint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entrypoint"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(entrypointProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, entrypointProp)) { - obj["entrypoint"] = entrypointProp - } - vpcAccessConnectorProp, err := expandAppEngineFlexibleAppVersionVPCAccessConnector(d.Get("vpc_access_connector"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vpc_access_connector"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(vpcAccessConnectorProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, vpcAccessConnectorProp)) { - obj["vpcAccessConnector"] = vpcAccessConnectorProp - } - automaticScalingProp, err := expandAppEngineFlexibleAppVersionAutomaticScaling(d.Get("automatic_scaling"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("automatic_scaling"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(automaticScalingProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, automaticScalingProp)) { - obj["automaticScaling"] = automaticScalingProp - } - manualScalingProp, err := expandAppEngineFlexibleAppVersionManualScaling(d.Get("manual_scaling"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("manual_scaling"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(manualScalingProp)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, manualScalingProp)) { - obj["manualScaling"] = 
manualScalingProp - } - - obj, err = resourceAppEngineFlexibleAppVersionEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions") - if err != nil { - return err - } - - resource_app_engine_flexible_app_version_log.Printf("[DEBUG] Creating new FlexibleAppVersion: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error fetching project for FlexibleAppVersion: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_app_engine_flexible_app_version_schema.TimeoutCreate), isAppEngineRetryableError) - if err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error creating FlexibleAppVersion: %s", err) - } - - id, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}/versions/{{version_id}}") - if err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = appEngineOperationWaitTime( - config, res, project, "Creating FlexibleAppVersion", userAgent, - d.Timeout(resource_app_engine_flexible_app_version_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_app_engine_flexible_app_version_fmt.Errorf("Error waiting to create FlexibleAppVersion: %s", err) - } - - resource_app_engine_flexible_app_version_log.Printf("[DEBUG] Finished creating FlexibleAppVersion %q: %#v", d.Id(), res) - - return resourceAppEngineFlexibleAppVersionRead(d, meta) -} - -func resourceAppEngineFlexibleAppVersionRead(d 
*resource_app_engine_flexible_app_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions/{{version_id}}?view=FULL") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error fetching project for FlexibleAppVersion: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isAppEngineRetryableError) - if err != nil { - return handleNotFoundError(err, d, resource_app_engine_flexible_app_version_fmt.Sprintf("AppEngineFlexibleAppVersion %q", d.Id())) - } - - if _, ok := d.GetOkExists("noop_on_destroy"); !ok { - if err := d.Set("noop_on_destroy", false); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error setting noop_on_destroy: %s", err) - } - } - if _, ok := d.GetOkExists("delete_service_on_destroy"); !ok { - if err := d.Set("delete_service_on_destroy", false); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error setting delete_service_on_destroy: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - - if err := d.Set("name", flattenAppEngineFlexibleAppVersionName(res["name"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("version_id", flattenAppEngineFlexibleAppVersionVersionId(res["id"], d, config)); err != nil { - return 
resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("inbound_services", flattenAppEngineFlexibleAppVersionInboundServices(res["inboundServices"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("instance_class", flattenAppEngineFlexibleAppVersionInstanceClass(res["instanceClass"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("network", flattenAppEngineFlexibleAppVersionNetwork(res["network"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("resources", flattenAppEngineFlexibleAppVersionResources(res["resources"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("runtime", flattenAppEngineFlexibleAppVersionRuntime(res["runtime"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("runtime_channel", flattenAppEngineFlexibleAppVersionRuntimeChannel(res["runtimeChannel"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("serving_status", flattenAppEngineFlexibleAppVersionServingStatus(res["servingStatus"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("runtime_api_version", flattenAppEngineFlexibleAppVersionRuntimeApiVersion(res["runtimeApiVersion"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", 
err) - } - if err := d.Set("handlers", flattenAppEngineFlexibleAppVersionHandlers(res["handlers"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("runtime_main_executable_path", flattenAppEngineFlexibleAppVersionRuntimeMainExecutablePath(res["runtimeMainExecutablePath"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("api_config", flattenAppEngineFlexibleAppVersionApiConfig(res["apiConfig"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("default_expiration", flattenAppEngineFlexibleAppVersionDefaultExpiration(res["defaultExpiration"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("readiness_check", flattenAppEngineFlexibleAppVersionReadinessCheck(res["readinessCheck"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("liveness_check", flattenAppEngineFlexibleAppVersionLivenessCheck(res["livenessCheck"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("nobuild_files_regex", flattenAppEngineFlexibleAppVersionNobuildFilesRegex(res["nobuildFilesRegex"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("deployment", flattenAppEngineFlexibleAppVersionDeployment(res["deployment"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := 
d.Set("endpoints_api_service", flattenAppEngineFlexibleAppVersionEndpointsApiService(res["endpointsApiService"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("vpc_access_connector", flattenAppEngineFlexibleAppVersionVPCAccessConnector(res["vpcAccessConnector"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("automatic_scaling", flattenAppEngineFlexibleAppVersionAutomaticScaling(res["automaticScaling"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - if err := d.Set("manual_scaling", flattenAppEngineFlexibleAppVersionManualScaling(res["manualScaling"], d, config)); err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error reading FlexibleAppVersion: %s", err) - } - - return nil -} - -func resourceAppEngineFlexibleAppVersionUpdate(d *resource_app_engine_flexible_app_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error fetching project for FlexibleAppVersion: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - idProp, err := expandAppEngineFlexibleAppVersionVersionId(d.Get("version_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version_id"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - inboundServicesProp, err := 
expandAppEngineFlexibleAppVersionInboundServices(d.Get("inbound_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("inbound_services"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, inboundServicesProp)) { - obj["inboundServices"] = inboundServicesProp - } - instanceClassProp, err := expandAppEngineFlexibleAppVersionInstanceClass(d.Get("instance_class"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance_class"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, instanceClassProp)) { - obj["instanceClass"] = instanceClassProp - } - networkProp, err := expandAppEngineFlexibleAppVersionNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - resourcesProp, err := expandAppEngineFlexibleAppVersionResources(d.Get("resources"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("resources"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, resourcesProp)) { - obj["resources"] = resourcesProp - } - runtimeProp, err := expandAppEngineFlexibleAppVersionRuntime(d.Get("runtime"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, runtimeProp)) { - obj["runtime"] = runtimeProp - } - runtimeChannelProp, err := 
expandAppEngineFlexibleAppVersionRuntimeChannel(d.Get("runtime_channel"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_channel"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, runtimeChannelProp)) { - obj["runtimeChannel"] = runtimeChannelProp - } - betaSettingsProp, err := expandAppEngineFlexibleAppVersionBetaSettings(d.Get("beta_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("beta_settings"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, betaSettingsProp)) { - obj["betaSettings"] = betaSettingsProp - } - servingStatusProp, err := expandAppEngineFlexibleAppVersionServingStatus(d.Get("serving_status"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("serving_status"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, servingStatusProp)) { - obj["servingStatus"] = servingStatusProp - } - runtimeApiVersionProp, err := expandAppEngineFlexibleAppVersionRuntimeApiVersion(d.Get("runtime_api_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_api_version"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, runtimeApiVersionProp)) { - obj["runtimeApiVersion"] = runtimeApiVersionProp - } - handlersProp, err := expandAppEngineFlexibleAppVersionHandlers(d.Get("handlers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("handlers"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, handlersProp)) { - 
obj["handlers"] = handlersProp - } - runtimeMainExecutablePathProp, err := expandAppEngineFlexibleAppVersionRuntimeMainExecutablePath(d.Get("runtime_main_executable_path"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_main_executable_path"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, runtimeMainExecutablePathProp)) { - obj["runtimeMainExecutablePath"] = runtimeMainExecutablePathProp - } - apiConfigProp, err := expandAppEngineFlexibleAppVersionApiConfig(d.Get("api_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("api_config"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, apiConfigProp)) { - obj["apiConfig"] = apiConfigProp - } - envVariablesProp, err := expandAppEngineFlexibleAppVersionEnvVariables(d.Get("env_variables"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("env_variables"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, envVariablesProp)) { - obj["envVariables"] = envVariablesProp - } - defaultExpirationProp, err := expandAppEngineFlexibleAppVersionDefaultExpiration(d.Get("default_expiration"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_expiration"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, defaultExpirationProp)) { - obj["defaultExpiration"] = defaultExpirationProp - } - readinessCheckProp, err := expandAppEngineFlexibleAppVersionReadinessCheck(d.Get("readiness_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("readiness_check"); 
!isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, readinessCheckProp)) { - obj["readinessCheck"] = readinessCheckProp - } - livenessCheckProp, err := expandAppEngineFlexibleAppVersionLivenessCheck(d.Get("liveness_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("liveness_check"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, livenessCheckProp)) { - obj["livenessCheck"] = livenessCheckProp - } - nobuildFilesRegexProp, err := expandAppEngineFlexibleAppVersionNobuildFilesRegex(d.Get("nobuild_files_regex"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("nobuild_files_regex"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, nobuildFilesRegexProp)) { - obj["nobuildFilesRegex"] = nobuildFilesRegexProp - } - deploymentProp, err := expandAppEngineFlexibleAppVersionDeployment(d.Get("deployment"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deployment"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, deploymentProp)) { - obj["deployment"] = deploymentProp - } - endpointsApiServiceProp, err := expandAppEngineFlexibleAppVersionEndpointsApiService(d.Get("endpoints_api_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("endpoints_api_service"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, endpointsApiServiceProp)) { - obj["endpointsApiService"] = endpointsApiServiceProp - } - entrypointProp, err := 
expandAppEngineFlexibleAppVersionEntrypoint(d.Get("entrypoint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entrypoint"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, entrypointProp)) { - obj["entrypoint"] = entrypointProp - } - vpcAccessConnectorProp, err := expandAppEngineFlexibleAppVersionVPCAccessConnector(d.Get("vpc_access_connector"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vpc_access_connector"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, vpcAccessConnectorProp)) { - obj["vpcAccessConnector"] = vpcAccessConnectorProp - } - automaticScalingProp, err := expandAppEngineFlexibleAppVersionAutomaticScaling(d.Get("automatic_scaling"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("automatic_scaling"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, automaticScalingProp)) { - obj["automaticScaling"] = automaticScalingProp - } - manualScalingProp, err := expandAppEngineFlexibleAppVersionManualScaling(d.Get("manual_scaling"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("manual_scaling"); !isEmptyValue(resource_app_engine_flexible_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_flexible_app_version_reflect.DeepEqual(v, manualScalingProp)) { - obj["manualScaling"] = manualScalingProp - } - - obj, err = resourceAppEngineFlexibleAppVersionEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, 
"{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions") - if err != nil { - return err - } - - resource_app_engine_flexible_app_version_log.Printf("[DEBUG] Updating FlexibleAppVersion %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_app_engine_flexible_app_version_schema.TimeoutUpdate), isAppEngineRetryableError) - - if err != nil { - return resource_app_engine_flexible_app_version_fmt.Errorf("Error updating FlexibleAppVersion %q: %s", d.Id(), err) - } else { - resource_app_engine_flexible_app_version_log.Printf("[DEBUG] Finished updating FlexibleAppVersion %q: %#v", d.Id(), res) - } - - err = appEngineOperationWaitTime( - config, res, project, "Updating FlexibleAppVersion", userAgent, - d.Timeout(resource_app_engine_flexible_app_version_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAppEngineFlexibleAppVersionRead(d, meta) -} - -func resourceAppEngineFlexibleAppVersionDelete(d *resource_app_engine_flexible_app_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - if d.Get("noop_on_destroy") == true { - resource_app_engine_flexible_app_version_log.Printf("[DEBUG] Keeping the AppVersion %q", d.Id()) - return nil - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - if d.Get("delete_service_on_destroy") == true { - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}") - if err != nil { - return err - } - var obj map[string]interface{} - 
resource_app_engine_flexible_app_version_log.Printf("[DEBUG] Deleting Service %q", d.Id()) - res, err := sendRequestWithTimeout(config, "DELETE", project, url, userAgent, obj, d.Timeout(resource_app_engine_flexible_app_version_schema.TimeoutDelete), isAppEngineRetryableError) - if err != nil { - return handleNotFoundError(err, d, "Service") - } - err = appEngineOperationWaitTime( - config, res, project, "Deleting Service", userAgent, - d.Timeout(resource_app_engine_flexible_app_version_schema.TimeoutDelete)) - - if err != nil { - return err - } - resource_app_engine_flexible_app_version_log.Printf("[DEBUG] Finished deleting Service %q: %#v", d.Id(), res) - return nil - } else { - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions/{{version_id}}") - if err != nil { - return err - } - var obj map[string]interface{} - resource_app_engine_flexible_app_version_log.Printf("[DEBUG] Deleting AppVersion %q", d.Id()) - res, err := sendRequestWithTimeout(config, "DELETE", project, url, userAgent, obj, d.Timeout(resource_app_engine_flexible_app_version_schema.TimeoutDelete), isAppEngineRetryableError) - if err != nil { - return handleNotFoundError(err, d, "AppVersion") - } - err = appEngineOperationWaitTime( - config, res, project, "Deleting AppVersion", userAgent, - d.Timeout(resource_app_engine_flexible_app_version_schema.TimeoutDelete)) - - if err != nil { - return err - } - resource_app_engine_flexible_app_version_log.Printf("[DEBUG] Finished deleting AppVersion %q: %#v", d.Id(), res) - return nil - - } -} - -func resourceAppEngineFlexibleAppVersionImport(d *resource_app_engine_flexible_app_version_schema.ResourceData, meta interface{}) ([]*resource_app_engine_flexible_app_version_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "apps/(?P[^/]+)/services/(?P[^/]+)/versions/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - 
return nil, err - } - - id, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}/versions/{{version_id}}") - if err != nil { - return nil, resource_app_engine_flexible_app_version_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if err := d.Set("noop_on_destroy", false); err != nil { - return nil, resource_app_engine_flexible_app_version_fmt.Errorf("Error setting noop_on_destroy: %s", err) - } - if err := d.Set("delete_service_on_destroy", false); err != nil { - return nil, resource_app_engine_flexible_app_version_fmt.Errorf("Error setting delete_service_on_destroy: %s", err) - } - - return []*resource_app_engine_flexible_app_version_schema.ResourceData{d}, nil -} - -func flattenAppEngineFlexibleAppVersionName(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionVersionId(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionInboundServices(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_app_engine_flexible_app_version_schema.NewSet(resource_app_engine_flexible_app_version_schema.HashString, v.([]interface{})) -} - -func flattenAppEngineFlexibleAppVersionInstanceClass(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionNetwork(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["forwarded_ports"] = - 
flattenAppEngineFlexibleAppVersionNetworkForwardedPorts(original["forwardedPorts"], d, config) - transformed["instance_tag"] = - flattenAppEngineFlexibleAppVersionNetworkInstanceTag(original["instanceTag"], d, config) - transformed["name"] = - flattenAppEngineFlexibleAppVersionNetworkName(original["name"], d, config) - transformed["subnetwork"] = - flattenAppEngineFlexibleAppVersionNetworkSubnetwork(original["subnetworkName"], d, config) - transformed["session_affinity"] = - flattenAppEngineFlexibleAppVersionNetworkSessionAffinity(original["sessionAffinity"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionNetworkForwardedPorts(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionNetworkInstanceTag(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionNetworkName(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionNetworkSubnetwork(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionNetworkSessionAffinity(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionResources(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cpu"] = - flattenAppEngineFlexibleAppVersionResourcesCpu(original["cpu"], d, config) - transformed["disk_gb"] = - 
flattenAppEngineFlexibleAppVersionResourcesDiskGb(original["diskGb"], d, config) - transformed["memory_gb"] = - flattenAppEngineFlexibleAppVersionResourcesMemoryGb(original["memoryGb"], d, config) - transformed["volumes"] = - flattenAppEngineFlexibleAppVersionResourcesVolumes(original["volumes"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionResourcesCpu(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineFlexibleAppVersionResourcesDiskGb(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineFlexibleAppVersionResourcesMemoryGb(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionResourcesVolumes(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenAppEngineFlexibleAppVersionResourcesVolumesName(original["name"], d, config), - "volume_type": 
flattenAppEngineFlexibleAppVersionResourcesVolumesVolumeType(original["volumeType"], d, config), - "size_gb": flattenAppEngineFlexibleAppVersionResourcesVolumesSizeGb(original["sizeGb"], d, config), - }) - } - return transformed -} - -func flattenAppEngineFlexibleAppVersionResourcesVolumesName(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionResourcesVolumesVolumeType(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionResourcesVolumesSizeGb(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineFlexibleAppVersionRuntime(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionRuntimeChannel(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionServingStatus(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionRuntimeApiVersion(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionHandlers(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - 
transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "url_regex": flattenAppEngineFlexibleAppVersionHandlersUrlRegex(original["urlRegex"], d, config), - "security_level": flattenAppEngineFlexibleAppVersionHandlersSecurityLevel(original["securityLevel"], d, config), - "login": flattenAppEngineFlexibleAppVersionHandlersLogin(original["login"], d, config), - "auth_fail_action": flattenAppEngineFlexibleAppVersionHandlersAuthFailAction(original["authFailAction"], d, config), - "redirect_http_response_code": flattenAppEngineFlexibleAppVersionHandlersRedirectHttpResponseCode(original["redirectHttpResponseCode"], d, config), - "script": flattenAppEngineFlexibleAppVersionHandlersScript(original["script"], d, config), - "static_files": flattenAppEngineFlexibleAppVersionHandlersStaticFiles(original["staticFiles"], d, config), - }) - } - return transformed -} - -func flattenAppEngineFlexibleAppVersionHandlersUrlRegex(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionHandlersSecurityLevel(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionHandlersLogin(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionHandlersAuthFailAction(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionHandlersRedirectHttpResponseCode(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenAppEngineFlexibleAppVersionHandlersScript(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["script_path"] = - flattenAppEngineFlexibleAppVersionHandlersScriptScriptPath(original["scriptPath"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionHandlersScriptScriptPath(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionHandlersStaticFiles(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["path"] = - flattenAppEngineFlexibleAppVersionHandlersStaticFilesPath(original["path"], d, config) - transformed["upload_path_regex"] = - flattenAppEngineFlexibleAppVersionHandlersStaticFilesUploadPathRegex(original["uploadPathRegex"], d, config) - transformed["http_headers"] = - flattenAppEngineFlexibleAppVersionHandlersStaticFilesHttpHeaders(original["httpHeaders"], d, config) - transformed["mime_type"] = - flattenAppEngineFlexibleAppVersionHandlersStaticFilesMimeType(original["mimeType"], d, config) - transformed["expiration"] = - flattenAppEngineFlexibleAppVersionHandlersStaticFilesExpiration(original["expiration"], d, config) - transformed["require_matching_file"] = - flattenAppEngineFlexibleAppVersionHandlersStaticFilesRequireMatchingFile(original["requireMatchingFile"], d, config) - transformed["application_readable"] = - flattenAppEngineFlexibleAppVersionHandlersStaticFilesApplicationReadable(original["applicationReadable"], d, config) - return 
[]interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionHandlersStaticFilesPath(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionHandlersStaticFilesUploadPathRegex(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionHandlersStaticFilesHttpHeaders(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionHandlersStaticFilesMimeType(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionHandlersStaticFilesExpiration(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionHandlersStaticFilesRequireMatchingFile(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionHandlersStaticFilesApplicationReadable(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionRuntimeMainExecutablePath(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionApiConfig(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["auth_fail_action"] = - 
flattenAppEngineFlexibleAppVersionApiConfigAuthFailAction(original["authFailAction"], d, config) - transformed["login"] = - flattenAppEngineFlexibleAppVersionApiConfigLogin(original["login"], d, config) - transformed["script"] = - flattenAppEngineFlexibleAppVersionApiConfigScript(original["script"], d, config) - transformed["security_level"] = - flattenAppEngineFlexibleAppVersionApiConfigSecurityLevel(original["securityLevel"], d, config) - transformed["url"] = - flattenAppEngineFlexibleAppVersionApiConfigUrl(original["url"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionApiConfigAuthFailAction(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionApiConfigLogin(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionApiConfigScript(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionApiConfigSecurityLevel(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionApiConfigUrl(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionDefaultExpiration(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionReadinessCheck(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["path"] = - flattenAppEngineFlexibleAppVersionReadinessCheckPath(original["path"], d, config) - transformed["host"] = - flattenAppEngineFlexibleAppVersionReadinessCheckHost(original["host"], d, config) - transformed["failure_threshold"] = - flattenAppEngineFlexibleAppVersionReadinessCheckFailureThreshold(original["failureThreshold"], d, config) - transformed["success_threshold"] = - flattenAppEngineFlexibleAppVersionReadinessCheckSuccessThreshold(original["successThreshold"], d, config) - transformed["check_interval"] = - flattenAppEngineFlexibleAppVersionReadinessCheckCheckInterval(original["checkInterval"], d, config) - transformed["timeout"] = - flattenAppEngineFlexibleAppVersionReadinessCheckTimeout(original["timeout"], d, config) - transformed["app_start_timeout"] = - flattenAppEngineFlexibleAppVersionReadinessCheckAppStartTimeout(original["appStartTimeout"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionReadinessCheckPath(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionReadinessCheckHost(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionReadinessCheckFailureThreshold(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionReadinessCheckSuccessThreshold(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionReadinessCheckCheckInterval(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionReadinessCheckTimeout(v 
interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionReadinessCheckAppStartTimeout(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionLivenessCheck(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["path"] = - flattenAppEngineFlexibleAppVersionLivenessCheckPath(original["path"], d, config) - transformed["host"] = - flattenAppEngineFlexibleAppVersionLivenessCheckHost(original["host"], d, config) - transformed["failure_threshold"] = - flattenAppEngineFlexibleAppVersionLivenessCheckFailureThreshold(original["failureThreshold"], d, config) - transformed["success_threshold"] = - flattenAppEngineFlexibleAppVersionLivenessCheckSuccessThreshold(original["successThreshold"], d, config) - transformed["check_interval"] = - flattenAppEngineFlexibleAppVersionLivenessCheckCheckInterval(original["checkInterval"], d, config) - transformed["timeout"] = - flattenAppEngineFlexibleAppVersionLivenessCheckTimeout(original["timeout"], d, config) - transformed["initial_delay"] = - flattenAppEngineFlexibleAppVersionLivenessCheckInitialDelay(original["initialDelay"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionLivenessCheckPath(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionLivenessCheckHost(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenAppEngineFlexibleAppVersionLivenessCheckFailureThreshold(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionLivenessCheckSuccessThreshold(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionLivenessCheckCheckInterval(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionLivenessCheckTimeout(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionLivenessCheckInitialDelay(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionNobuildFilesRegex(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionDeployment(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["zip"] = d.Get("deployment.0.zip") - transformed["files"] = d.Get("deployment.0.files") - transformed["container"] = - flattenAppEngineFlexibleAppVersionDeploymentContainer(original["container"], d, config) - transformed["cloud_build_options"] = - flattenAppEngineFlexibleAppVersionDeploymentCloudBuildOptions(original["cloudBuildOptions"], d, config) - - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionDeploymentContainer(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - 
} - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["image"] = - flattenAppEngineFlexibleAppVersionDeploymentContainerImage(original["image"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionDeploymentContainerImage(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionDeploymentCloudBuildOptions(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["app_yaml_path"] = - flattenAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsAppYamlPath(original["appYamlPath"], d, config) - transformed["cloud_build_timeout"] = - flattenAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsCloudBuildTimeout(original["cloudBuildTimeout"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsAppYamlPath(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsCloudBuildTimeout(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionEndpointsApiService(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - 
flattenAppEngineFlexibleAppVersionEndpointsApiServiceName(original["name"], d, config) - transformed["config_id"] = - flattenAppEngineFlexibleAppVersionEndpointsApiServiceConfigId(original["configId"], d, config) - transformed["rollout_strategy"] = - flattenAppEngineFlexibleAppVersionEndpointsApiServiceRolloutStrategy(original["rolloutStrategy"], d, config) - transformed["disable_trace_sampling"] = - flattenAppEngineFlexibleAppVersionEndpointsApiServiceDisableTraceSampling(original["disableTraceSampling"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionEndpointsApiServiceName(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionEndpointsApiServiceConfigId(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionEndpointsApiServiceRolloutStrategy(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionEndpointsApiServiceDisableTraceSampling(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionVPCAccessConnector(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenAppEngineFlexibleAppVersionVPCAccessConnectorName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionVPCAccessConnectorName(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScaling(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cool_down_period"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingCoolDownPeriod(original["coolDownPeriod"], d, config) - transformed["cpu_utilization"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingCpuUtilization(original["cpuUtilization"], d, config) - transformed["max_concurrent_requests"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingMaxConcurrentRequests(original["maxConcurrentRequests"], d, config) - transformed["max_idle_instances"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingMaxIdleInstances(original["maxIdleInstances"], d, config) - transformed["max_total_instances"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingMaxTotalInstances(original["maxTotalInstances"], d, config) - transformed["max_pending_latency"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingMaxPendingLatency(original["maxPendingLatency"], d, config) - transformed["min_idle_instances"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingMinIdleInstances(original["minIdleInstances"], d, config) - transformed["min_total_instances"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingMinTotalInstances(original["minTotalInstances"], d, config) - transformed["min_pending_latency"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingMinPendingLatency(original["minPendingLatency"], d, config) - transformed["request_utilization"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingRequestUtilization(original["requestUtilization"], d, config) - transformed["disk_utilization"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilization(original["diskUtilization"], d, 
config) - transformed["network_utilization"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilization(original["networkUtilization"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingCoolDownPeriod(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingCpuUtilization(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["aggregation_window_length"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationAggregationWindowLength(original["aggregationWindowLength"], d, config) - transformed["target_utilization"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationTargetUtilization(original["targetUtilization"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationAggregationWindowLength(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationTargetUtilization(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingMaxConcurrentRequests(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return 
intVal - } - - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingMaxIdleInstances(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingMaxTotalInstances(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingMaxPendingLatency(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingMinIdleInstances(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingMinTotalInstances(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { 
- intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingMinPendingLatency(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingRequestUtilization(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["target_request_count_per_second"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetRequestCountPerSecond(original["targetRequestCountPerSecond"], d, config) - transformed["target_concurrent_requests"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetConcurrentRequests(original["targetConcurrentRequests"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetRequestCountPerSecond(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetConcurrentRequests(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilization(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["target_write_bytes_per_second"] = - 
flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteBytesPerSecond(original["targetWriteBytesPerSecond"], d, config) - transformed["target_write_ops_per_second"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteOpsPerSecond(original["targetWriteOpsPerSecond"], d, config) - transformed["target_read_bytes_per_second"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadBytesPerSecond(original["targetReadBytesPerSecond"], d, config) - transformed["target_read_ops_per_second"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadOpsPerSecond(original["targetReadOpsPerSecond"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteBytesPerSecond(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteOpsPerSecond(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadBytesPerSecond(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := 
resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadOpsPerSecond(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilization(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["target_sent_bytes_per_second"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentBytesPerSecond(original["targetSentBytesPerSecond"], d, config) - transformed["target_sent_packets_per_second"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentPacketsPerSecond(original["targetSentPacketsPerSecond"], d, config) - transformed["target_received_bytes_per_second"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedBytesPerSecond(original["targetReceivedBytesPerSecond"], d, config) - transformed["target_received_packets_per_second"] = - flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedPacketsPerSecond(original["targetReceivedPacketsPerSecond"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentBytesPerSecond(v 
interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentPacketsPerSecond(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedBytesPerSecond(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedPacketsPerSecond(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineFlexibleAppVersionManualScaling(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) 
interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["instances"] = - flattenAppEngineFlexibleAppVersionManualScalingInstances(original["instances"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineFlexibleAppVersionManualScalingInstances(v interface{}, d *resource_app_engine_flexible_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_flexible_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func expandAppEngineFlexibleAppVersionVersionId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionInboundServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_app_engine_flexible_app_version_schema.Set).List() - return v, nil -} - -func expandAppEngineFlexibleAppVersionInstanceClass(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedForwardedPorts, err := expandAppEngineFlexibleAppVersionNetworkForwardedPorts(original["forwarded_ports"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedForwardedPorts); val.IsValid() && !isEmptyValue(val) { - transformed["forwardedPorts"] = transformedForwardedPorts - } 
- - transformedInstanceTag, err := expandAppEngineFlexibleAppVersionNetworkInstanceTag(original["instance_tag"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedInstanceTag); val.IsValid() && !isEmptyValue(val) { - transformed["instanceTag"] = transformedInstanceTag - } - - transformedName, err := expandAppEngineFlexibleAppVersionNetworkName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedSubnetwork, err := expandAppEngineFlexibleAppVersionNetworkSubnetwork(original["subnetwork"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedSubnetwork); val.IsValid() && !isEmptyValue(val) { - transformed["subnetworkName"] = transformedSubnetwork - } - - transformedSessionAffinity, err := expandAppEngineFlexibleAppVersionNetworkSessionAffinity(original["session_affinity"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedSessionAffinity); val.IsValid() && !isEmptyValue(val) { - transformed["sessionAffinity"] = transformedSessionAffinity - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionNetworkForwardedPorts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionNetworkInstanceTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionNetworkName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionNetworkSubnetwork(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionNetworkSessionAffinity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCpu, err := expandAppEngineFlexibleAppVersionResourcesCpu(original["cpu"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedCpu); val.IsValid() && !isEmptyValue(val) { - transformed["cpu"] = transformedCpu - } - - transformedDiskGb, err := expandAppEngineFlexibleAppVersionResourcesDiskGb(original["disk_gb"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedDiskGb); val.IsValid() && !isEmptyValue(val) { - transformed["diskGb"] = transformedDiskGb - } - - transformedMemoryGb, err := expandAppEngineFlexibleAppVersionResourcesMemoryGb(original["memory_gb"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedMemoryGb); val.IsValid() && !isEmptyValue(val) { - transformed["memoryGb"] = transformedMemoryGb - } - - transformedVolumes, err := expandAppEngineFlexibleAppVersionResourcesVolumes(original["volumes"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedVolumes); val.IsValid() && !isEmptyValue(val) { - transformed["volumes"] = transformedVolumes - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionResourcesCpu(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionResourcesDiskGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionResourcesMemoryGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionResourcesVolumes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandAppEngineFlexibleAppVersionResourcesVolumesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedVolumeType, err := expandAppEngineFlexibleAppVersionResourcesVolumesVolumeType(original["volume_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedVolumeType); val.IsValid() && !isEmptyValue(val) { - transformed["volumeType"] = transformedVolumeType - } - - transformedSizeGb, err := expandAppEngineFlexibleAppVersionResourcesVolumesSizeGb(original["size_gb"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedSizeGb); val.IsValid() && !isEmptyValue(val) { - transformed["sizeGb"] = transformedSizeGb - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAppEngineFlexibleAppVersionResourcesVolumesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandAppEngineFlexibleAppVersionResourcesVolumesVolumeType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionResourcesVolumesSizeGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionRuntime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionRuntimeChannel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionBetaSettings(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandAppEngineFlexibleAppVersionServingStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionRuntimeApiVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionHandlers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrlRegex, err := expandAppEngineFlexibleAppVersionHandlersUrlRegex(original["url_regex"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedUrlRegex); val.IsValid() && !isEmptyValue(val) { - transformed["urlRegex"] = transformedUrlRegex - } - - transformedSecurityLevel, err := 
expandAppEngineFlexibleAppVersionHandlersSecurityLevel(original["security_level"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedSecurityLevel); val.IsValid() && !isEmptyValue(val) { - transformed["securityLevel"] = transformedSecurityLevel - } - - transformedLogin, err := expandAppEngineFlexibleAppVersionHandlersLogin(original["login"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedLogin); val.IsValid() && !isEmptyValue(val) { - transformed["login"] = transformedLogin - } - - transformedAuthFailAction, err := expandAppEngineFlexibleAppVersionHandlersAuthFailAction(original["auth_fail_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedAuthFailAction); val.IsValid() && !isEmptyValue(val) { - transformed["authFailAction"] = transformedAuthFailAction - } - - transformedRedirectHttpResponseCode, err := expandAppEngineFlexibleAppVersionHandlersRedirectHttpResponseCode(original["redirect_http_response_code"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedRedirectHttpResponseCode); val.IsValid() && !isEmptyValue(val) { - transformed["redirectHttpResponseCode"] = transformedRedirectHttpResponseCode - } - - transformedScript, err := expandAppEngineFlexibleAppVersionHandlersScript(original["script"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedScript); val.IsValid() && !isEmptyValue(val) { - transformed["script"] = transformedScript - } - - transformedStaticFiles, err := expandAppEngineFlexibleAppVersionHandlersStaticFiles(original["static_files"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_app_engine_flexible_app_version_reflect.ValueOf(transformedStaticFiles); val.IsValid() && !isEmptyValue(val) { - transformed["staticFiles"] = transformedStaticFiles - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAppEngineFlexibleAppVersionHandlersUrlRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionHandlersSecurityLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionHandlersLogin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionHandlersAuthFailAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionHandlersRedirectHttpResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionHandlersScript(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedScriptPath, err := expandAppEngineFlexibleAppVersionHandlersScriptScriptPath(original["script_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedScriptPath); val.IsValid() && !isEmptyValue(val) { - transformed["scriptPath"] = transformedScriptPath - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionHandlersScriptScriptPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionHandlersStaticFiles(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandAppEngineFlexibleAppVersionHandlersStaticFilesPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedUploadPathRegex, err := expandAppEngineFlexibleAppVersionHandlersStaticFilesUploadPathRegex(original["upload_path_regex"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedUploadPathRegex); val.IsValid() && !isEmptyValue(val) { - transformed["uploadPathRegex"] = transformedUploadPathRegex - } - - transformedHttpHeaders, err := expandAppEngineFlexibleAppVersionHandlersStaticFilesHttpHeaders(original["http_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["httpHeaders"] = transformedHttpHeaders - } - - transformedMimeType, err := expandAppEngineFlexibleAppVersionHandlersStaticFilesMimeType(original["mime_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedMimeType); val.IsValid() && !isEmptyValue(val) { - transformed["mimeType"] = transformedMimeType - } - - transformedExpiration, err := expandAppEngineFlexibleAppVersionHandlersStaticFilesExpiration(original["expiration"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedExpiration); val.IsValid() && !isEmptyValue(val) { - 
transformed["expiration"] = transformedExpiration - } - - transformedRequireMatchingFile, err := expandAppEngineFlexibleAppVersionHandlersStaticFilesRequireMatchingFile(original["require_matching_file"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedRequireMatchingFile); val.IsValid() && !isEmptyValue(val) { - transformed["requireMatchingFile"] = transformedRequireMatchingFile - } - - transformedApplicationReadable, err := expandAppEngineFlexibleAppVersionHandlersStaticFilesApplicationReadable(original["application_readable"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedApplicationReadable); val.IsValid() && !isEmptyValue(val) { - transformed["applicationReadable"] = transformedApplicationReadable - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionHandlersStaticFilesPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionHandlersStaticFilesUploadPathRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionHandlersStaticFilesHttpHeaders(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandAppEngineFlexibleAppVersionHandlersStaticFilesMimeType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionHandlersStaticFilesExpiration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandAppEngineFlexibleAppVersionHandlersStaticFilesRequireMatchingFile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionHandlersStaticFilesApplicationReadable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionRuntimeMainExecutablePath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionApiConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAuthFailAction, err := expandAppEngineFlexibleAppVersionApiConfigAuthFailAction(original["auth_fail_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedAuthFailAction); val.IsValid() && !isEmptyValue(val) { - transformed["authFailAction"] = transformedAuthFailAction - } - - transformedLogin, err := expandAppEngineFlexibleAppVersionApiConfigLogin(original["login"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedLogin); val.IsValid() && !isEmptyValue(val) { - transformed["login"] = transformedLogin - } - - transformedScript, err := expandAppEngineFlexibleAppVersionApiConfigScript(original["script"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedScript); val.IsValid() && !isEmptyValue(val) { - transformed["script"] = transformedScript - } - - transformedSecurityLevel, err := expandAppEngineFlexibleAppVersionApiConfigSecurityLevel(original["security_level"], d, config) - if err != 
nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedSecurityLevel); val.IsValid() && !isEmptyValue(val) { - transformed["securityLevel"] = transformedSecurityLevel - } - - transformedUrl, err := expandAppEngineFlexibleAppVersionApiConfigUrl(original["url"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { - transformed["url"] = transformedUrl - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionApiConfigAuthFailAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionApiConfigLogin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionApiConfigScript(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionApiConfigSecurityLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionApiConfigUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionEnvVariables(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandAppEngineFlexibleAppVersionDefaultExpiration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionReadinessCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if 
len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandAppEngineFlexibleAppVersionReadinessCheckPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedHost, err := expandAppEngineFlexibleAppVersionReadinessCheckHost(original["host"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { - transformed["host"] = transformedHost - } - - transformedFailureThreshold, err := expandAppEngineFlexibleAppVersionReadinessCheckFailureThreshold(original["failure_threshold"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !isEmptyValue(val) { - transformed["failureThreshold"] = transformedFailureThreshold - } - - transformedSuccessThreshold, err := expandAppEngineFlexibleAppVersionReadinessCheckSuccessThreshold(original["success_threshold"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedSuccessThreshold); val.IsValid() && !isEmptyValue(val) { - transformed["successThreshold"] = transformedSuccessThreshold - } - - transformedCheckInterval, err := expandAppEngineFlexibleAppVersionReadinessCheckCheckInterval(original["check_interval"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedCheckInterval); val.IsValid() && !isEmptyValue(val) { - transformed["checkInterval"] = transformedCheckInterval - } - - transformedTimeout, err := 
expandAppEngineFlexibleAppVersionReadinessCheckTimeout(original["timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["timeout"] = transformedTimeout - } - - transformedAppStartTimeout, err := expandAppEngineFlexibleAppVersionReadinessCheckAppStartTimeout(original["app_start_timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedAppStartTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["appStartTimeout"] = transformedAppStartTimeout - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionReadinessCheckPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionReadinessCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionReadinessCheckFailureThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionReadinessCheckSuccessThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionReadinessCheckCheckInterval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionReadinessCheckTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionReadinessCheckAppStartTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionLivenessCheck(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandAppEngineFlexibleAppVersionLivenessCheckPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedHost, err := expandAppEngineFlexibleAppVersionLivenessCheckHost(original["host"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { - transformed["host"] = transformedHost - } - - transformedFailureThreshold, err := expandAppEngineFlexibleAppVersionLivenessCheckFailureThreshold(original["failure_threshold"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !isEmptyValue(val) { - transformed["failureThreshold"] = transformedFailureThreshold - } - - transformedSuccessThreshold, err := expandAppEngineFlexibleAppVersionLivenessCheckSuccessThreshold(original["success_threshold"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedSuccessThreshold); val.IsValid() && !isEmptyValue(val) { - transformed["successThreshold"] = transformedSuccessThreshold - } - - transformedCheckInterval, err := expandAppEngineFlexibleAppVersionLivenessCheckCheckInterval(original["check_interval"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedCheckInterval); val.IsValid() && !isEmptyValue(val) { - transformed["checkInterval"] = 
transformedCheckInterval - } - - transformedTimeout, err := expandAppEngineFlexibleAppVersionLivenessCheckTimeout(original["timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["timeout"] = transformedTimeout - } - - transformedInitialDelay, err := expandAppEngineFlexibleAppVersionLivenessCheckInitialDelay(original["initial_delay"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedInitialDelay); val.IsValid() && !isEmptyValue(val) { - transformed["initialDelay"] = transformedInitialDelay - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionLivenessCheckPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionLivenessCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionLivenessCheckFailureThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionLivenessCheckSuccessThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionLivenessCheckCheckInterval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionLivenessCheckTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionLivenessCheckInitialDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionNobuildFilesRegex(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionDeployment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedZip, err := expandAppEngineFlexibleAppVersionDeploymentZip(original["zip"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedZip); val.IsValid() && !isEmptyValue(val) { - transformed["zip"] = transformedZip - } - - transformedFiles, err := expandAppEngineFlexibleAppVersionDeploymentFiles(original["files"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedFiles); val.IsValid() && !isEmptyValue(val) { - transformed["files"] = transformedFiles - } - - transformedContainer, err := expandAppEngineFlexibleAppVersionDeploymentContainer(original["container"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedContainer); val.IsValid() && !isEmptyValue(val) { - transformed["container"] = transformedContainer - } - - transformedCloudBuildOptions, err := expandAppEngineFlexibleAppVersionDeploymentCloudBuildOptions(original["cloud_build_options"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedCloudBuildOptions); val.IsValid() && !isEmptyValue(val) { - transformed["cloudBuildOptions"] = transformedCloudBuildOptions - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionDeploymentZip(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - 
return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSourceUrl, err := expandAppEngineFlexibleAppVersionDeploymentZipSourceUrl(original["source_url"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedSourceUrl); val.IsValid() && !isEmptyValue(val) { - transformed["sourceUrl"] = transformedSourceUrl - } - - transformedFilesCount, err := expandAppEngineFlexibleAppVersionDeploymentZipFilesCount(original["files_count"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedFilesCount); val.IsValid() && !isEmptyValue(val) { - transformed["filesCount"] = transformedFilesCount - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionDeploymentZipSourceUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionDeploymentZipFilesCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionDeploymentFiles(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - m := make(map[string]interface{}) - for _, raw := range v.(*resource_app_engine_flexible_app_version_schema.Set).List() { - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSha1Sum, err := expandAppEngineFlexibleAppVersionDeploymentFilesSha1Sum(original["sha1_sum"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedSha1Sum); val.IsValid() && !isEmptyValue(val) { - transformed["sha1Sum"] = transformedSha1Sum - } - - transformedSourceUrl, err := 
expandAppEngineFlexibleAppVersionDeploymentFilesSourceUrl(original["source_url"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedSourceUrl); val.IsValid() && !isEmptyValue(val) { - transformed["sourceUrl"] = transformedSourceUrl - } - - transformedName, err := expandString(original["name"], d, config) - if err != nil { - return nil, err - } - m[transformedName] = transformed - } - return m, nil -} - -func expandAppEngineFlexibleAppVersionDeploymentFilesSha1Sum(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionDeploymentFilesSourceUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionDeploymentContainer(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedImage, err := expandAppEngineFlexibleAppVersionDeploymentContainerImage(original["image"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedImage); val.IsValid() && !isEmptyValue(val) { - transformed["image"] = transformedImage - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionDeploymentContainerImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionDeploymentCloudBuildOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - 
- transformedAppYamlPath, err := expandAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsAppYamlPath(original["app_yaml_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedAppYamlPath); val.IsValid() && !isEmptyValue(val) { - transformed["appYamlPath"] = transformedAppYamlPath - } - - transformedCloudBuildTimeout, err := expandAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsCloudBuildTimeout(original["cloud_build_timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedCloudBuildTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["cloudBuildTimeout"] = transformedCloudBuildTimeout - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsAppYamlPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionDeploymentCloudBuildOptionsCloudBuildTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionEndpointsApiService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandAppEngineFlexibleAppVersionEndpointsApiServiceName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedConfigId, err := expandAppEngineFlexibleAppVersionEndpointsApiServiceConfigId(original["config_id"], d, config) - if err != nil { - return nil, err - } 
else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedConfigId); val.IsValid() && !isEmptyValue(val) { - transformed["configId"] = transformedConfigId - } - - transformedRolloutStrategy, err := expandAppEngineFlexibleAppVersionEndpointsApiServiceRolloutStrategy(original["rollout_strategy"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedRolloutStrategy); val.IsValid() && !isEmptyValue(val) { - transformed["rolloutStrategy"] = transformedRolloutStrategy - } - - transformedDisableTraceSampling, err := expandAppEngineFlexibleAppVersionEndpointsApiServiceDisableTraceSampling(original["disable_trace_sampling"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedDisableTraceSampling); val.IsValid() && !isEmptyValue(val) { - transformed["disableTraceSampling"] = transformedDisableTraceSampling - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionEndpointsApiServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionEndpointsApiServiceConfigId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionEndpointsApiServiceRolloutStrategy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionEndpointsApiServiceDisableTraceSampling(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionEntrypoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed 
:= make(map[string]interface{}) - - transformedShell, err := expandAppEngineFlexibleAppVersionEntrypointShell(original["shell"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedShell); val.IsValid() && !isEmptyValue(val) { - transformed["shell"] = transformedShell - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionEntrypointShell(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionVPCAccessConnector(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandAppEngineFlexibleAppVersionVPCAccessConnectorName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionVPCAccessConnectorName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScaling(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCoolDownPeriod, err := expandAppEngineFlexibleAppVersionAutomaticScalingCoolDownPeriod(original["cool_down_period"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedCoolDownPeriod); val.IsValid() 
&& !isEmptyValue(val) { - transformed["coolDownPeriod"] = transformedCoolDownPeriod - } - - transformedCpuUtilization, err := expandAppEngineFlexibleAppVersionAutomaticScalingCpuUtilization(original["cpu_utilization"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedCpuUtilization); val.IsValid() && !isEmptyValue(val) { - transformed["cpuUtilization"] = transformedCpuUtilization - } - - transformedMaxConcurrentRequests, err := expandAppEngineFlexibleAppVersionAutomaticScalingMaxConcurrentRequests(original["max_concurrent_requests"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedMaxConcurrentRequests); val.IsValid() && !isEmptyValue(val) { - transformed["maxConcurrentRequests"] = transformedMaxConcurrentRequests - } - - transformedMaxIdleInstances, err := expandAppEngineFlexibleAppVersionAutomaticScalingMaxIdleInstances(original["max_idle_instances"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedMaxIdleInstances); val.IsValid() && !isEmptyValue(val) { - transformed["maxIdleInstances"] = transformedMaxIdleInstances - } - - transformedMaxTotalInstances, err := expandAppEngineFlexibleAppVersionAutomaticScalingMaxTotalInstances(original["max_total_instances"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedMaxTotalInstances); val.IsValid() && !isEmptyValue(val) { - transformed["maxTotalInstances"] = transformedMaxTotalInstances - } - - transformedMaxPendingLatency, err := expandAppEngineFlexibleAppVersionAutomaticScalingMaxPendingLatency(original["max_pending_latency"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedMaxPendingLatency); 
val.IsValid() && !isEmptyValue(val) { - transformed["maxPendingLatency"] = transformedMaxPendingLatency - } - - transformedMinIdleInstances, err := expandAppEngineFlexibleAppVersionAutomaticScalingMinIdleInstances(original["min_idle_instances"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedMinIdleInstances); val.IsValid() && !isEmptyValue(val) { - transformed["minIdleInstances"] = transformedMinIdleInstances - } - - transformedMinTotalInstances, err := expandAppEngineFlexibleAppVersionAutomaticScalingMinTotalInstances(original["min_total_instances"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedMinTotalInstances); val.IsValid() && !isEmptyValue(val) { - transformed["minTotalInstances"] = transformedMinTotalInstances - } - - transformedMinPendingLatency, err := expandAppEngineFlexibleAppVersionAutomaticScalingMinPendingLatency(original["min_pending_latency"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedMinPendingLatency); val.IsValid() && !isEmptyValue(val) { - transformed["minPendingLatency"] = transformedMinPendingLatency - } - - transformedRequestUtilization, err := expandAppEngineFlexibleAppVersionAutomaticScalingRequestUtilization(original["request_utilization"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedRequestUtilization); val.IsValid() && !isEmptyValue(val) { - transformed["requestUtilization"] = transformedRequestUtilization - } - - transformedDiskUtilization, err := expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilization(original["disk_utilization"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_app_engine_flexible_app_version_reflect.ValueOf(transformedDiskUtilization); val.IsValid() && !isEmptyValue(val) { - transformed["diskUtilization"] = transformedDiskUtilization - } - - transformedNetworkUtilization, err := expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilization(original["network_utilization"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedNetworkUtilization); val.IsValid() && !isEmptyValue(val) { - transformed["networkUtilization"] = transformedNetworkUtilization - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingCoolDownPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingCpuUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAggregationWindowLength, err := expandAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationAggregationWindowLength(original["aggregation_window_length"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedAggregationWindowLength); val.IsValid() && !isEmptyValue(val) { - transformed["aggregationWindowLength"] = transformedAggregationWindowLength - } - - transformedTargetUtilization, err := expandAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationTargetUtilization(original["target_utilization"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedTargetUtilization); val.IsValid() && !isEmptyValue(val) { - transformed["targetUtilization"] = transformedTargetUtilization 
- } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationAggregationWindowLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingCpuUtilizationTargetUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingMaxConcurrentRequests(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingMaxIdleInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingMaxTotalInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingMaxPendingLatency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingMinIdleInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingMinTotalInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingMinPendingLatency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingRequestUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - 
transformedTargetRequestCountPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetRequestCountPerSecond(original["target_request_count_per_second"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedTargetRequestCountPerSecond); val.IsValid() && !isEmptyValue(val) { - transformed["targetRequestCountPerSecond"] = transformedTargetRequestCountPerSecond - } - - transformedTargetConcurrentRequests, err := expandAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetConcurrentRequests(original["target_concurrent_requests"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedTargetConcurrentRequests); val.IsValid() && !isEmptyValue(val) { - transformed["targetConcurrentRequests"] = transformedTargetConcurrentRequests - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetRequestCountPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingRequestUtilizationTargetConcurrentRequests(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTargetWriteBytesPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteBytesPerSecond(original["target_write_bytes_per_second"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_app_engine_flexible_app_version_reflect.ValueOf(transformedTargetWriteBytesPerSecond); val.IsValid() && !isEmptyValue(val) { - transformed["targetWriteBytesPerSecond"] = transformedTargetWriteBytesPerSecond - } - - transformedTargetWriteOpsPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteOpsPerSecond(original["target_write_ops_per_second"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedTargetWriteOpsPerSecond); val.IsValid() && !isEmptyValue(val) { - transformed["targetWriteOpsPerSecond"] = transformedTargetWriteOpsPerSecond - } - - transformedTargetReadBytesPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadBytesPerSecond(original["target_read_bytes_per_second"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedTargetReadBytesPerSecond); val.IsValid() && !isEmptyValue(val) { - transformed["targetReadBytesPerSecond"] = transformedTargetReadBytesPerSecond - } - - transformedTargetReadOpsPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadOpsPerSecond(original["target_read_ops_per_second"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedTargetReadOpsPerSecond); val.IsValid() && !isEmptyValue(val) { - transformed["targetReadOpsPerSecond"] = transformedTargetReadOpsPerSecond - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteBytesPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetWriteOpsPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return 
v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadBytesPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingDiskUtilizationTargetReadOpsPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTargetSentBytesPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentBytesPerSecond(original["target_sent_bytes_per_second"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedTargetSentBytesPerSecond); val.IsValid() && !isEmptyValue(val) { - transformed["targetSentBytesPerSecond"] = transformedTargetSentBytesPerSecond - } - - transformedTargetSentPacketsPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentPacketsPerSecond(original["target_sent_packets_per_second"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedTargetSentPacketsPerSecond); val.IsValid() && !isEmptyValue(val) { - transformed["targetSentPacketsPerSecond"] = transformedTargetSentPacketsPerSecond - } - - transformedTargetReceivedBytesPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedBytesPerSecond(original["target_received_bytes_per_second"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_app_engine_flexible_app_version_reflect.ValueOf(transformedTargetReceivedBytesPerSecond); val.IsValid() && !isEmptyValue(val) { - transformed["targetReceivedBytesPerSecond"] = transformedTargetReceivedBytesPerSecond - } - - transformedTargetReceivedPacketsPerSecond, err := expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedPacketsPerSecond(original["target_received_packets_per_second"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedTargetReceivedPacketsPerSecond); val.IsValid() && !isEmptyValue(val) { - transformed["targetReceivedPacketsPerSecond"] = transformedTargetReceivedPacketsPerSecond - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentBytesPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetSentPacketsPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedBytesPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionAutomaticScalingNetworkUtilizationTargetReceivedPacketsPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineFlexibleAppVersionManualScaling(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInstances, err := expandAppEngineFlexibleAppVersionManualScalingInstances(original["instances"], d, 
config) - if err != nil { - return nil, err - } else if val := resource_app_engine_flexible_app_version_reflect.ValueOf(transformedInstances); val.IsValid() && !isEmptyValue(val) { - transformed["instances"] = transformedInstances - } - - return transformed, nil -} - -func expandAppEngineFlexibleAppVersionManualScalingInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceAppEngineFlexibleAppVersionEncoder(d *resource_app_engine_flexible_app_version_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - obj["env"] = "flex" - return obj, nil -} - -func resourceAppEngineServiceNetworkSettings() *resource_app_engine_service_network_settings_schema.Resource { - return &resource_app_engine_service_network_settings_schema.Resource{ - Create: resourceAppEngineServiceNetworkSettingsCreate, - Read: resourceAppEngineServiceNetworkSettingsRead, - Update: resourceAppEngineServiceNetworkSettingsUpdate, - Delete: resourceAppEngineServiceNetworkSettingsDelete, - - Importer: &resource_app_engine_service_network_settings_schema.ResourceImporter{ - State: resourceAppEngineServiceNetworkSettingsImport, - }, - - Timeouts: &resource_app_engine_service_network_settings_schema.ResourceTimeout{ - Create: resource_app_engine_service_network_settings_schema.DefaultTimeout(4 * resource_app_engine_service_network_settings_time.Minute), - Update: resource_app_engine_service_network_settings_schema.DefaultTimeout(4 * resource_app_engine_service_network_settings_time.Minute), - Delete: resource_app_engine_service_network_settings_schema.DefaultTimeout(4 * resource_app_engine_service_network_settings_time.Minute), - }, - - Schema: map[string]*resource_app_engine_service_network_settings_schema.Schema{ - "network_settings": { - Type: resource_app_engine_service_network_settings_schema.TypeList, - Required: true, - Description: `Ingress settings for this service. 
Will apply to all versions.`, - MaxItems: 1, - Elem: &resource_app_engine_service_network_settings_schema.Resource{ - Schema: map[string]*resource_app_engine_service_network_settings_schema.Schema{ - "ingress_traffic_allowed": { - Type: resource_app_engine_service_network_settings_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_service_network_settings_validation.StringInSlice([]string{"INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED", "INGRESS_TRAFFIC_ALLOWED_ALL", "INGRESS_TRAFFIC_ALLOWED_INTERNAL_ONLY", "INGRESS_TRAFFIC_ALLOWED_INTERNAL_AND_LB", ""}, false), - Description: `The ingress settings for version or service. Default value: "INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED" Possible values: ["INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED", "INGRESS_TRAFFIC_ALLOWED_ALL", "INGRESS_TRAFFIC_ALLOWED_INTERNAL_ONLY", "INGRESS_TRAFFIC_ALLOWED_INTERNAL_AND_LB"]`, - Default: "INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED", - }, - }, - }, - }, - "service": { - Type: resource_app_engine_service_network_settings_schema.TypeString, - Required: true, - Description: `The name of the service these settings apply to.`, - }, - "project": { - Type: resource_app_engine_service_network_settings_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAppEngineServiceNetworkSettingsCreate(d *resource_app_engine_service_network_settings_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - idProp, err := expandAppEngineServiceNetworkSettingsService(d.Get("service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service"); !isEmptyValue(resource_app_engine_service_network_settings_reflect.ValueOf(idProp)) && (ok || !resource_app_engine_service_network_settings_reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - 
networkSettingsProp, err := expandAppEngineServiceNetworkSettingsNetworkSettings(d.Get("network_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network_settings"); !isEmptyValue(resource_app_engine_service_network_settings_reflect.ValueOf(networkSettingsProp)) && (ok || !resource_app_engine_service_network_settings_reflect.DeepEqual(v, networkSettingsProp)) { - obj["networkSettings"] = networkSettingsProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}?updateMask=networkSettings") - if err != nil { - return err - } - - resource_app_engine_service_network_settings_log.Printf("[DEBUG] Creating new ServiceNetworkSettings: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_service_network_settings_fmt.Errorf("Error fetching project for ServiceNetworkSettings: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_app_engine_service_network_settings_schema.TimeoutCreate)) - if err != nil { - return resource_app_engine_service_network_settings_fmt.Errorf("Error creating ServiceNetworkSettings: %s", err) - } - - id, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}") - if err != nil { - return resource_app_engine_service_network_settings_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = appEngineOperationWaitTime( - config, res, project, "Creating ServiceNetworkSettings", userAgent, - d.Timeout(resource_app_engine_service_network_settings_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return 
resource_app_engine_service_network_settings_fmt.Errorf("Error waiting to create ServiceNetworkSettings: %s", err) - } - - resource_app_engine_service_network_settings_log.Printf("[DEBUG] Finished creating ServiceNetworkSettings %q: %#v", d.Id(), res) - - return resourceAppEngineServiceNetworkSettingsRead(d, meta) -} - -func resourceAppEngineServiceNetworkSettingsRead(d *resource_app_engine_service_network_settings_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_service_network_settings_fmt.Errorf("Error fetching project for ServiceNetworkSettings: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_app_engine_service_network_settings_fmt.Sprintf("AppEngineServiceNetworkSettings %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_app_engine_service_network_settings_fmt.Errorf("Error reading ServiceNetworkSettings: %s", err) - } - - if err := d.Set("service", flattenAppEngineServiceNetworkSettingsService(res["id"], d, config)); err != nil { - return resource_app_engine_service_network_settings_fmt.Errorf("Error reading ServiceNetworkSettings: %s", err) - } - if err := d.Set("network_settings", flattenAppEngineServiceNetworkSettingsNetworkSettings(res["networkSettings"], d, config)); err != nil { - return resource_app_engine_service_network_settings_fmt.Errorf("Error reading ServiceNetworkSettings: %s", err) - } - - return nil -} - -func 
resourceAppEngineServiceNetworkSettingsUpdate(d *resource_app_engine_service_network_settings_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_service_network_settings_fmt.Errorf("Error fetching project for ServiceNetworkSettings: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - idProp, err := expandAppEngineServiceNetworkSettingsService(d.Get("service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service"); !isEmptyValue(resource_app_engine_service_network_settings_reflect.ValueOf(v)) && (ok || !resource_app_engine_service_network_settings_reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - networkSettingsProp, err := expandAppEngineServiceNetworkSettingsNetworkSettings(d.Get("network_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network_settings"); !isEmptyValue(resource_app_engine_service_network_settings_reflect.ValueOf(v)) && (ok || !resource_app_engine_service_network_settings_reflect.DeepEqual(v, networkSettingsProp)) { - obj["networkSettings"] = networkSettingsProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}") - if err != nil { - return err - } - - resource_app_engine_service_network_settings_log.Printf("[DEBUG] Updating ServiceNetworkSettings %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("service") { - updateMask = append(updateMask, "id") - } - - if d.HasChange("network_settings") { - updateMask = append(updateMask, "networkSettings") - } - - url, err = 
addQueryParams(url, map[string]string{"updateMask": resource_app_engine_service_network_settings_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_app_engine_service_network_settings_schema.TimeoutUpdate)) - - if err != nil { - return resource_app_engine_service_network_settings_fmt.Errorf("Error updating ServiceNetworkSettings %q: %s", d.Id(), err) - } else { - resource_app_engine_service_network_settings_log.Printf("[DEBUG] Finished updating ServiceNetworkSettings %q: %#v", d.Id(), res) - } - - err = appEngineOperationWaitTime( - config, res, project, "Updating ServiceNetworkSettings", userAgent, - d.Timeout(resource_app_engine_service_network_settings_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAppEngineServiceNetworkSettingsRead(d, meta) -} - -func resourceAppEngineServiceNetworkSettingsDelete(d *resource_app_engine_service_network_settings_schema.ResourceData, meta interface{}) error { - resource_app_engine_service_network_settings_log.Printf("[WARNING] AppEngine ServiceNetworkSettings resources"+ - " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceAppEngineServiceNetworkSettingsImport(d *resource_app_engine_service_network_settings_schema.ResourceData, meta interface{}) ([]*resource_app_engine_service_network_settings_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "apps/(?P[^/]+)/services/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}") - if err != nil { - return nil, resource_app_engine_service_network_settings_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_app_engine_service_network_settings_schema.ResourceData{d}, nil -} - -func flattenAppEngineServiceNetworkSettingsService(v interface{}, d *resource_app_engine_service_network_settings_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineServiceNetworkSettingsNetworkSettings(v interface{}, d *resource_app_engine_service_network_settings_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["ingress_traffic_allowed"] = - flattenAppEngineServiceNetworkSettingsNetworkSettingsIngressTrafficAllowed(original["ingressTrafficAllowed"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineServiceNetworkSettingsNetworkSettingsIngressTrafficAllowed(v interface{}, d *resource_app_engine_service_network_settings_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAppEngineServiceNetworkSettingsService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandAppEngineServiceNetworkSettingsNetworkSettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIngressTrafficAllowed, err := expandAppEngineServiceNetworkSettingsNetworkSettingsIngressTrafficAllowed(original["ingress_traffic_allowed"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_service_network_settings_reflect.ValueOf(transformedIngressTrafficAllowed); val.IsValid() && !isEmptyValue(val) { - transformed["ingressTrafficAllowed"] = transformedIngressTrafficAllowed - } - - return transformed, nil -} - -func expandAppEngineServiceNetworkSettingsNetworkSettingsIngressTrafficAllowed(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceAppEngineServiceSplitTraffic() *resource_app_engine_service_split_traffic_schema.Resource { - return &resource_app_engine_service_split_traffic_schema.Resource{ - Create: resourceAppEngineServiceSplitTrafficCreate, - Read: resourceAppEngineServiceSplitTrafficRead, - Update: resourceAppEngineServiceSplitTrafficUpdate, - Delete: resourceAppEngineServiceSplitTrafficDelete, - - Importer: &resource_app_engine_service_split_traffic_schema.ResourceImporter{ - State: resourceAppEngineServiceSplitTrafficImport, - }, - - Timeouts: &resource_app_engine_service_split_traffic_schema.ResourceTimeout{ - Create: resource_app_engine_service_split_traffic_schema.DefaultTimeout(4 * resource_app_engine_service_split_traffic_time.Minute), - Update: resource_app_engine_service_split_traffic_schema.DefaultTimeout(4 * resource_app_engine_service_split_traffic_time.Minute), - Delete: resource_app_engine_service_split_traffic_schema.DefaultTimeout(4 * resource_app_engine_service_split_traffic_time.Minute), - }, - - Schema: 
map[string]*resource_app_engine_service_split_traffic_schema.Schema{ - "service": { - Type: resource_app_engine_service_split_traffic_schema.TypeString, - Required: true, - Description: `The name of the service these settings apply to.`, - }, - "split": { - Type: resource_app_engine_service_split_traffic_schema.TypeList, - Required: true, - Description: `Mapping that defines fractional HTTP traffic diversion to different versions within the service.`, - MaxItems: 1, - Elem: &resource_app_engine_service_split_traffic_schema.Resource{ - Schema: map[string]*resource_app_engine_service_split_traffic_schema.Schema{ - "allocations": { - Type: resource_app_engine_service_split_traffic_schema.TypeMap, - Required: true, - Description: `Mapping from version IDs within the service to fractional (0.000, 1] allocations of traffic for that version. Each version can be specified only once, but some versions in the service may not have any traffic allocation. Services that have traffic allocated cannot be deleted until either the service is deleted or their traffic allocation is removed. Allocations must sum to 1. Up to two decimal place precision is supported for IP-based splits and up to three decimal places is supported for cookie-based splits.`, - Elem: &resource_app_engine_service_split_traffic_schema.Schema{Type: resource_app_engine_service_split_traffic_schema.TypeString}, - }, - "shard_by": { - Type: resource_app_engine_service_split_traffic_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_service_split_traffic_validation.StringInSlice([]string{"UNSPECIFIED", "COOKIE", "IP", "RANDOM", ""}, false), - Description: `Mechanism used to determine which version a request is sent to. The traffic selection algorithm will be stable for either type until allocations are changed. 
Possible values: ["UNSPECIFIED", "COOKIE", "IP", "RANDOM"]`, - }, - }, - }, - }, - "migrate_traffic": { - Type: resource_app_engine_service_split_traffic_schema.TypeBool, - Optional: true, - Description: `If set to true traffic will be migrated to this version.`, - }, - "project": { - Type: resource_app_engine_service_split_traffic_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAppEngineServiceSplitTrafficCreate(d *resource_app_engine_service_split_traffic_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - idProp, err := expandAppEngineServiceSplitTrafficService(d.Get("service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service"); !isEmptyValue(resource_app_engine_service_split_traffic_reflect.ValueOf(idProp)) && (ok || !resource_app_engine_service_split_traffic_reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - splitProp, err := expandAppEngineServiceSplitTrafficSplit(d.Get("split"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("split"); !isEmptyValue(resource_app_engine_service_split_traffic_reflect.ValueOf(splitProp)) && (ok || !resource_app_engine_service_split_traffic_reflect.DeepEqual(v, splitProp)) { - obj["split"] = splitProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}?migrateTraffic={{migrate_traffic}}&updateMask=split") - if err != nil { - return err - } - - resource_app_engine_service_split_traffic_log.Printf("[DEBUG] Creating new ServiceSplitTraffic: %#v", obj) - billingProject := "" - - project, err := getProject(d, 
config) - if err != nil { - return resource_app_engine_service_split_traffic_fmt.Errorf("Error fetching project for ServiceSplitTraffic: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_app_engine_service_split_traffic_schema.TimeoutCreate)) - if err != nil { - return resource_app_engine_service_split_traffic_fmt.Errorf("Error creating ServiceSplitTraffic: %s", err) - } - - id, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}") - if err != nil { - return resource_app_engine_service_split_traffic_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = appEngineOperationWaitTime( - config, res, project, "Creating ServiceSplitTraffic", userAgent, - d.Timeout(resource_app_engine_service_split_traffic_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_app_engine_service_split_traffic_fmt.Errorf("Error waiting to create ServiceSplitTraffic: %s", err) - } - - resource_app_engine_service_split_traffic_log.Printf("[DEBUG] Finished creating ServiceSplitTraffic %q: %#v", d.Id(), res) - - return resourceAppEngineServiceSplitTrafficRead(d, meta) -} - -func resourceAppEngineServiceSplitTrafficRead(d *resource_app_engine_service_split_traffic_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_service_split_traffic_fmt.Errorf("Error fetching project for ServiceSplitTraffic: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err 
== nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_app_engine_service_split_traffic_fmt.Sprintf("AppEngineServiceSplitTraffic %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_app_engine_service_split_traffic_fmt.Errorf("Error reading ServiceSplitTraffic: %s", err) - } - - if err := d.Set("service", flattenAppEngineServiceSplitTrafficService(res["id"], d, config)); err != nil { - return resource_app_engine_service_split_traffic_fmt.Errorf("Error reading ServiceSplitTraffic: %s", err) - } - - return nil -} - -func resourceAppEngineServiceSplitTrafficUpdate(d *resource_app_engine_service_split_traffic_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_service_split_traffic_fmt.Errorf("Error fetching project for ServiceSplitTraffic: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - idProp, err := expandAppEngineServiceSplitTrafficService(d.Get("service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service"); !isEmptyValue(resource_app_engine_service_split_traffic_reflect.ValueOf(v)) && (ok || !resource_app_engine_service_split_traffic_reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - splitProp, err := expandAppEngineServiceSplitTrafficSplit(d.Get("split"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("split"); !isEmptyValue(resource_app_engine_service_split_traffic_reflect.ValueOf(v)) && (ok || !resource_app_engine_service_split_traffic_reflect.DeepEqual(v, splitProp)) { - obj["split"] = splitProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") 
- if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}?migrateTraffic={{migrate_traffic}}") - if err != nil { - return err - } - - resource_app_engine_service_split_traffic_log.Printf("[DEBUG] Updating ServiceSplitTraffic %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("service") { - updateMask = append(updateMask, "id") - } - - if d.HasChange("split") { - updateMask = append(updateMask, "split") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_app_engine_service_split_traffic_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_app_engine_service_split_traffic_schema.TimeoutUpdate)) - - if err != nil { - return resource_app_engine_service_split_traffic_fmt.Errorf("Error updating ServiceSplitTraffic %q: %s", d.Id(), err) - } else { - resource_app_engine_service_split_traffic_log.Printf("[DEBUG] Finished updating ServiceSplitTraffic %q: %#v", d.Id(), res) - } - - err = appEngineOperationWaitTime( - config, res, project, "Updating ServiceSplitTraffic", userAgent, - d.Timeout(resource_app_engine_service_split_traffic_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAppEngineServiceSplitTrafficRead(d, meta) -} - -func resourceAppEngineServiceSplitTrafficDelete(d *resource_app_engine_service_split_traffic_schema.ResourceData, meta interface{}) error { - resource_app_engine_service_split_traffic_log.Printf("[WARNING] AppEngine ServiceSplitTraffic resources"+ - " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceAppEngineServiceSplitTrafficImport(d *resource_app_engine_service_split_traffic_schema.ResourceData, meta interface{}) ([]*resource_app_engine_service_split_traffic_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "apps/(?P[^/]+)/services/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}") - if err != nil { - return nil, resource_app_engine_service_split_traffic_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_app_engine_service_split_traffic_schema.ResourceData{d}, nil -} - -func flattenAppEngineServiceSplitTrafficService(v interface{}, d *resource_app_engine_service_split_traffic_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAppEngineServiceSplitTrafficService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineServiceSplitTrafficSplit(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedShardBy, err := expandAppEngineServiceSplitTrafficSplitShardBy(original["shard_by"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_service_split_traffic_reflect.ValueOf(transformedShardBy); val.IsValid() && !isEmptyValue(val) { - transformed["shardBy"] = transformedShardBy - } - - transformedAllocations, err := expandAppEngineServiceSplitTrafficSplitAllocations(original["allocations"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_app_engine_service_split_traffic_reflect.ValueOf(transformedAllocations); val.IsValid() && !isEmptyValue(val) { - transformed["allocations"] = transformedAllocations - } - - return transformed, nil -} - -func expandAppEngineServiceSplitTrafficSplitShardBy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineServiceSplitTrafficSplitAllocations(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceAppEngineStandardAppVersion() *resource_app_engine_standard_app_version_schema.Resource { - return &resource_app_engine_standard_app_version_schema.Resource{ - Create: resourceAppEngineStandardAppVersionCreate, - Read: resourceAppEngineStandardAppVersionRead, - Update: resourceAppEngineStandardAppVersionUpdate, - Delete: resourceAppEngineStandardAppVersionDelete, - - Importer: &resource_app_engine_standard_app_version_schema.ResourceImporter{ - State: resourceAppEngineStandardAppVersionImport, - }, - - Timeouts: &resource_app_engine_standard_app_version_schema.ResourceTimeout{ - Create: resource_app_engine_standard_app_version_schema.DefaultTimeout(4 * resource_app_engine_standard_app_version_time.Minute), - Update: resource_app_engine_standard_app_version_schema.DefaultTimeout(4 * resource_app_engine_standard_app_version_time.Minute), - Delete: resource_app_engine_standard_app_version_schema.DefaultTimeout(4 * resource_app_engine_standard_app_version_time.Minute), - }, - - Schema: map[string]*resource_app_engine_standard_app_version_schema.Schema{ - "deployment": { - Type: resource_app_engine_standard_app_version_schema.TypeList, - Required: true, - Description: `Code and application artifacts that make up this version.`, - MaxItems: 1, - Elem: 
&resource_app_engine_standard_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_standard_app_version_schema.Schema{ - "files": { - Type: resource_app_engine_standard_app_version_schema.TypeSet, - Optional: true, - Description: `Manifest of the files stored in Google Cloud Storage that are included as part of this version. -All files must be readable using the credentials supplied with this call.`, - Elem: &resource_app_engine_standard_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_standard_app_version_schema.Schema{ - "name": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Required: true, - }, - "source_url": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Required: true, - Description: `Source URL`, - }, - "sha1_sum": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - Description: `SHA1 checksum of the file`, - }, - }, - }, - AtLeastOneOf: []string{"deployment.0.zip", "deployment.0.files"}, - }, - "zip": { - Type: resource_app_engine_standard_app_version_schema.TypeList, - Optional: true, - Description: `Zip File`, - MaxItems: 1, - Elem: &resource_app_engine_standard_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_standard_app_version_schema.Schema{ - "source_url": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Required: true, - Description: `Source URL`, - }, - "files_count": { - Type: resource_app_engine_standard_app_version_schema.TypeInt, - Optional: true, - Description: `files count`, - }, - }, - }, - AtLeastOneOf: []string{"deployment.0.zip", "deployment.0.files"}, - }, - }, - }, - }, - "entrypoint": { - Type: resource_app_engine_standard_app_version_schema.TypeList, - Required: true, - Description: `The entrypoint for the application.`, - MaxItems: 1, - Elem: &resource_app_engine_standard_app_version_schema.Resource{ - Schema: 
map[string]*resource_app_engine_standard_app_version_schema.Schema{ - "shell": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Required: true, - Description: `The format should be a shell command that can be fed to bash -c.`, - }, - }, - }, - }, - "runtime": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Required: true, - Description: `Desired runtime. Example python27.`, - }, - "service": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `AppEngine service resource`, - }, - "automatic_scaling": { - Type: resource_app_engine_standard_app_version_schema.TypeList, - Optional: true, - Description: `Automatic scaling is based on request rate, response latencies, and other application metrics.`, - MaxItems: 1, - Elem: &resource_app_engine_standard_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_standard_app_version_schema.Schema{ - "max_concurrent_requests": { - Type: resource_app_engine_standard_app_version_schema.TypeInt, - Optional: true, - Description: `Number of concurrent requests an automatic scaling instance can accept before the scheduler spawns a new instance. - -Defaults to a runtime-specific value.`, - }, - "max_idle_instances": { - Type: resource_app_engine_standard_app_version_schema.TypeInt, - Optional: true, - Description: `Maximum number of idle instances that should be maintained for this version.`, - }, - "max_pending_latency": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - Description: `Maximum amount of time that a request should wait in the pending queue before starting a new instance to handle it. -A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, - }, - "min_idle_instances": { - Type: resource_app_engine_standard_app_version_schema.TypeInt, - Optional: true, - Description: `Minimum number of idle instances that should be maintained for this version. Only applicable for the default version of a service.`, - }, - "min_pending_latency": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - Description: `Minimum amount of time a request should wait in the pending queue before starting a new instance to handle it. -A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, - }, - "standard_scheduler_settings": { - Type: resource_app_engine_standard_app_version_schema.TypeList, - Optional: true, - Description: `Scheduler settings for standard environment.`, - MaxItems: 1, - Elem: &resource_app_engine_standard_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_standard_app_version_schema.Schema{ - "max_instances": { - Type: resource_app_engine_standard_app_version_schema.TypeInt, - Optional: true, - Description: `Maximum number of instances to run for this version. Set to zero to disable maxInstances configuration.`, - }, - "min_instances": { - Type: resource_app_engine_standard_app_version_schema.TypeInt, - Optional: true, - Description: `Minimum number of instances to run for this version. Set to zero to disable minInstances configuration.`, - }, - "target_cpu_utilization": { - Type: resource_app_engine_standard_app_version_schema.TypeFloat, - Optional: true, - Description: `Target CPU utilization ratio to maintain when scaling. Should be a value in the range [0.50, 0.95], zero, or a negative value.`, - }, - "target_throughput_utilization": { - Type: resource_app_engine_standard_app_version_schema.TypeFloat, - Optional: true, - Description: `Target throughput utilization ratio to maintain when scaling. 
Should be a value in the range [0.50, 0.95], zero, or a negative value.`, - }, - }, - }, - }, - }, - }, - ConflictsWith: []string{"basic_scaling", "manual_scaling"}, - }, - "basic_scaling": { - Type: resource_app_engine_standard_app_version_schema.TypeList, - Optional: true, - Description: `Basic scaling creates instances when your application receives requests. Each instance will be shut down when the application becomes idle. Basic scaling is ideal for work that is intermittent or driven by user activity.`, - MaxItems: 1, - Elem: &resource_app_engine_standard_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_standard_app_version_schema.Schema{ - "max_instances": { - Type: resource_app_engine_standard_app_version_schema.TypeInt, - Required: true, - Description: `Maximum number of instances to create for this version. Must be in the range [1.0, 200.0].`, - }, - "idle_timeout": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - Description: `Duration of time after the last request that an instance must wait before the instance is shut down. -A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". Defaults to 900s.`, - Default: "900s", - }, - }, - }, - ConflictsWith: []string{"automatic_scaling", "manual_scaling"}, - }, - "env_variables": { - Type: resource_app_engine_standard_app_version_schema.TypeMap, - Optional: true, - Description: `Environment variables available to the application.`, - Elem: &resource_app_engine_standard_app_version_schema.Schema{Type: resource_app_engine_standard_app_version_schema.TypeString}, - }, - "handlers": { - Type: resource_app_engine_standard_app_version_schema.TypeList, - Computed: true, - Optional: true, - Description: `An ordered list of URL-matching patterns that should be applied to incoming requests. 
-The first matching URL handles the request and other request handlers are not attempted.`, - Elem: &resource_app_engine_standard_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_standard_app_version_schema.Schema{ - "auth_fail_action": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_standard_app_version_validation.StringInSlice([]string{"AUTH_FAIL_ACTION_REDIRECT", "AUTH_FAIL_ACTION_UNAUTHORIZED", ""}, false), - Description: `Actions to take when the user is not logged in. Possible values: ["AUTH_FAIL_ACTION_REDIRECT", "AUTH_FAIL_ACTION_UNAUTHORIZED"]`, - }, - "login": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_standard_app_version_validation.StringInSlice([]string{"LOGIN_OPTIONAL", "LOGIN_ADMIN", "LOGIN_REQUIRED", ""}, false), - Description: `Methods to restrict access to a URL based on login status. Possible values: ["LOGIN_OPTIONAL", "LOGIN_ADMIN", "LOGIN_REQUIRED"]`, - }, - "redirect_http_response_code": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_standard_app_version_validation.StringInSlice([]string{"REDIRECT_HTTP_RESPONSE_CODE_301", "REDIRECT_HTTP_RESPONSE_CODE_302", "REDIRECT_HTTP_RESPONSE_CODE_303", "REDIRECT_HTTP_RESPONSE_CODE_307", ""}, false), - Description: `30x code to use when performing redirects for the secure field. Possible values: ["REDIRECT_HTTP_RESPONSE_CODE_301", "REDIRECT_HTTP_RESPONSE_CODE_302", "REDIRECT_HTTP_RESPONSE_CODE_303", "REDIRECT_HTTP_RESPONSE_CODE_307"]`, - }, - "script": { - Type: resource_app_engine_standard_app_version_schema.TypeList, - Optional: true, - Description: `Executes a script to handle the requests that match this URL pattern. 
-Only the auto value is supported for Node.js in the App Engine standard environment, for example "script:" "auto".`, - MaxItems: 1, - Elem: &resource_app_engine_standard_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_standard_app_version_schema.Schema{ - "script_path": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Required: true, - Description: `Path to the script from the application root directory.`, - }, - }, - }, - }, - "security_level": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - ValidateFunc: resource_app_engine_standard_app_version_validation.StringInSlice([]string{"SECURE_DEFAULT", "SECURE_NEVER", "SECURE_OPTIONAL", "SECURE_ALWAYS", ""}, false), - Description: `Security (HTTPS) enforcement for this URL. Possible values: ["SECURE_DEFAULT", "SECURE_NEVER", "SECURE_OPTIONAL", "SECURE_ALWAYS"]`, - }, - "static_files": { - Type: resource_app_engine_standard_app_version_schema.TypeList, - Optional: true, - Description: `Files served directly to the user for a given URL, such as images, CSS stylesheets, or JavaScript source files. Static file handlers describe which files in the application directory are static files, and which URLs serve them.`, - MaxItems: 1, - Elem: &resource_app_engine_standard_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_standard_app_version_schema.Schema{ - "application_readable": { - Type: resource_app_engine_standard_app_version_schema.TypeBool, - Optional: true, - Description: `Whether files should also be uploaded as code data. By default, files declared in static file handlers are uploaded as -static data and are only served to end users; they cannot be read by the application. 
If enabled, uploads are charged -against both your code and static data storage resource quotas.`, - }, - "expiration": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - Description: `Time a static file served by this handler should be cached by web proxies and browsers. -A duration in seconds with up to nine fractional digits, terminated by 's'. Example "3.5s".`, - }, - "http_headers": { - Type: resource_app_engine_standard_app_version_schema.TypeMap, - Optional: true, - Description: `HTTP headers to use for all responses from these URLs. -An object containing a list of "key:value" value pairs.".`, - Elem: &resource_app_engine_standard_app_version_schema.Schema{Type: resource_app_engine_standard_app_version_schema.TypeString}, - }, - "mime_type": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - Description: `MIME type used to serve all files served by this handler. -Defaults to file-specific MIME types, which are derived from each file's filename extension.`, - }, - "path": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - Description: `Path to the static files matched by the URL pattern, from the application root directory. The path can refer to text matched in groupings in the URL pattern.`, - }, - "require_matching_file": { - Type: resource_app_engine_standard_app_version_schema.TypeBool, - Optional: true, - Description: `Whether this handler should match the request if the file referenced by the handler does not exist.`, - }, - "upload_path_regex": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - Description: `Regular expression that matches the file paths for all files that should be referenced by this handler.`, - }, - }, - }, - }, - "url_regex": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - Description: `URL prefix. 
Uses regular expression syntax, which means regexp special characters must be escaped, but should not contain groupings. -All URLs that begin with this prefix are handled by this handler, using the portion of the URL after the prefix as part of the file path.`, - }, - }, - }, - }, - "inbound_services": { - Type: resource_app_engine_standard_app_version_schema.TypeSet, - Optional: true, - Description: `A list of the types of messages that this application is able to receive. Possible values: ["INBOUND_SERVICE_MAIL", "INBOUND_SERVICE_MAIL_BOUNCE", "INBOUND_SERVICE_XMPP_ERROR", "INBOUND_SERVICE_XMPP_MESSAGE", "INBOUND_SERVICE_XMPP_SUBSCRIBE", "INBOUND_SERVICE_XMPP_PRESENCE", "INBOUND_SERVICE_CHANNEL_PRESENCE", "INBOUND_SERVICE_WARMUP"]`, - Elem: &resource_app_engine_standard_app_version_schema.Schema{ - Type: resource_app_engine_standard_app_version_schema.TypeString, - ValidateFunc: resource_app_engine_standard_app_version_validation.StringInSlice([]string{"INBOUND_SERVICE_MAIL", "INBOUND_SERVICE_MAIL_BOUNCE", "INBOUND_SERVICE_XMPP_ERROR", "INBOUND_SERVICE_XMPP_MESSAGE", "INBOUND_SERVICE_XMPP_SUBSCRIBE", "INBOUND_SERVICE_XMPP_PRESENCE", "INBOUND_SERVICE_CHANNEL_PRESENCE", "INBOUND_SERVICE_WARMUP"}, false), - }, - Set: resource_app_engine_standard_app_version_schema.HashString, - }, - "instance_class": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Computed: true, - Optional: true, - Description: `Instance class that is used to run this version. Valid values are -AutomaticScaling: F1, F2, F4, F4_1G -BasicScaling or ManualScaling: B1, B2, B4, B4_1G, B8 -Defaults to F1 for AutomaticScaling and B2 for ManualScaling and BasicScaling. 
If no scaling is specified, AutomaticScaling is chosen.`, - }, - "libraries": { - Type: resource_app_engine_standard_app_version_schema.TypeList, - Optional: true, - Description: `Configuration for third-party Python runtime libraries that are required by the application.`, - Elem: &resource_app_engine_standard_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_standard_app_version_schema.Schema{ - "name": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - Description: `Name of the library. Example "django".`, - }, - "version": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - Description: `Version of the library to select, or "latest".`, - }, - }, - }, - }, - "manual_scaling": { - Type: resource_app_engine_standard_app_version_schema.TypeList, - Optional: true, - Description: `A service with manual scaling runs continuously, allowing you to perform complex initialization and rely on the state of its memory over time.`, - MaxItems: 1, - Elem: &resource_app_engine_standard_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_standard_app_version_schema.Schema{ - "instances": { - Type: resource_app_engine_standard_app_version_schema.TypeInt, - Required: true, - Description: `Number of instances to assign to the service at the start. - -**Note:** When managing the number of instances at runtime through the App Engine Admin API or the (now deprecated) Python 2 -Modules API set_num_instances() you must use 'lifecycle.ignore_changes = ["manual_scaling"[0].instances]' to prevent drift detection.`, - }, - }, - }, - ConflictsWith: []string{"automatic_scaling", "basic_scaling"}, - }, - "runtime_api_version": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - Description: `The version of the API in the given runtime environment. 
-Please see the app.yaml reference for valid values at https://cloud.google.com/appengine/docs/standard//config/appref`, - }, - "threadsafe": { - Type: resource_app_engine_standard_app_version_schema.TypeBool, - Optional: true, - Description: `Whether multiple requests can be dispatched to this version at once.`, - }, - "version_id": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Relative name of the version within the service. For example, 'v1'. Version names can contain only lowercase letters, numbers, or hyphens. Reserved names,"default", "latest", and any name with the prefix "ah-".`, - }, - "vpc_access_connector": { - Type: resource_app_engine_standard_app_version_schema.TypeList, - Optional: true, - Description: `Enables VPC connectivity for standard apps.`, - MaxItems: 1, - Elem: &resource_app_engine_standard_app_version_schema.Resource{ - Schema: map[string]*resource_app_engine_standard_app_version_schema.Schema{ - "name": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Required: true, - Description: `Full Serverless VPC Access Connector name e.g. /projects/my-project/locations/us-central1/connectors/c1.`, - }, - }, - }, - }, - "name": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Computed: true, - Description: `Full path to the Version resource in the API. 
Example, "v1".`, - }, - "noop_on_destroy": { - Type: resource_app_engine_standard_app_version_schema.TypeBool, - Optional: true, - Default: false, - }, - "delete_service_on_destroy": { - Type: resource_app_engine_standard_app_version_schema.TypeBool, - Optional: true, - Default: false, - }, - "project": { - Type: resource_app_engine_standard_app_version_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceAppEngineStandardAppVersionCreate(d *resource_app_engine_standard_app_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - idProp, err := expandAppEngineStandardAppVersionVersionId(d.Get("version_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version_id"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(idProp)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - runtimeProp, err := expandAppEngineStandardAppVersionRuntime(d.Get("runtime"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(runtimeProp)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, runtimeProp)) { - obj["runtime"] = runtimeProp - } - threadsafeProp, err := expandAppEngineStandardAppVersionThreadsafe(d.Get("threadsafe"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("threadsafe"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(threadsafeProp)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, threadsafeProp)) { - obj["threadsafe"] = threadsafeProp - } - runtimeApiVersionProp, err := 
expandAppEngineStandardAppVersionRuntimeApiVersion(d.Get("runtime_api_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_api_version"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(runtimeApiVersionProp)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, runtimeApiVersionProp)) { - obj["runtimeApiVersion"] = runtimeApiVersionProp - } - handlersProp, err := expandAppEngineStandardAppVersionHandlers(d.Get("handlers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("handlers"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(handlersProp)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, handlersProp)) { - obj["handlers"] = handlersProp - } - librariesProp, err := expandAppEngineStandardAppVersionLibraries(d.Get("libraries"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("libraries"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(librariesProp)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, librariesProp)) { - obj["libraries"] = librariesProp - } - envVariablesProp, err := expandAppEngineStandardAppVersionEnvVariables(d.Get("env_variables"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("env_variables"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(envVariablesProp)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, envVariablesProp)) { - obj["envVariables"] = envVariablesProp - } - deploymentProp, err := expandAppEngineStandardAppVersionDeployment(d.Get("deployment"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deployment"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(deploymentProp)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, deploymentProp)) { - 
obj["deployment"] = deploymentProp - } - entrypointProp, err := expandAppEngineStandardAppVersionEntrypoint(d.Get("entrypoint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entrypoint"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(entrypointProp)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, entrypointProp)) { - obj["entrypoint"] = entrypointProp - } - vpcAccessConnectorProp, err := expandAppEngineStandardAppVersionVPCAccessConnector(d.Get("vpc_access_connector"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vpc_access_connector"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(vpcAccessConnectorProp)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, vpcAccessConnectorProp)) { - obj["vpcAccessConnector"] = vpcAccessConnectorProp - } - inboundServicesProp, err := expandAppEngineStandardAppVersionInboundServices(d.Get("inbound_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("inbound_services"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(inboundServicesProp)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, inboundServicesProp)) { - obj["inboundServices"] = inboundServicesProp - } - instanceClassProp, err := expandAppEngineStandardAppVersionInstanceClass(d.Get("instance_class"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance_class"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(instanceClassProp)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, instanceClassProp)) { - obj["instanceClass"] = instanceClassProp - } - automaticScalingProp, err := expandAppEngineStandardAppVersionAutomaticScaling(d.Get("automatic_scaling"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("automatic_scaling"); 
!isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(automaticScalingProp)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, automaticScalingProp)) { - obj["automaticScaling"] = automaticScalingProp - } - basicScalingProp, err := expandAppEngineStandardAppVersionBasicScaling(d.Get("basic_scaling"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("basic_scaling"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(basicScalingProp)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, basicScalingProp)) { - obj["basicScaling"] = basicScalingProp - } - manualScalingProp, err := expandAppEngineStandardAppVersionManualScaling(d.Get("manual_scaling"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("manual_scaling"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(manualScalingProp)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, manualScalingProp)) { - obj["manualScaling"] = manualScalingProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions") - if err != nil { - return err - } - - resource_app_engine_standard_app_version_log.Printf("[DEBUG] Creating new StandardAppVersion: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error fetching project for StandardAppVersion: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_app_engine_standard_app_version_schema.TimeoutCreate), 
isAppEngineRetryableError) - if err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error creating StandardAppVersion: %s", err) - } - - id, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}/versions/{{version_id}}") - if err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = appEngineOperationWaitTime( - config, res, project, "Creating StandardAppVersion", userAgent, - d.Timeout(resource_app_engine_standard_app_version_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_app_engine_standard_app_version_fmt.Errorf("Error waiting to create StandardAppVersion: %s", err) - } - - resource_app_engine_standard_app_version_log.Printf("[DEBUG] Finished creating StandardAppVersion %q: %#v", d.Id(), res) - - return resourceAppEngineStandardAppVersionRead(d, meta) -} - -func resourceAppEngineStandardAppVersionRead(d *resource_app_engine_standard_app_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions/{{version_id}}?view=FULL") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error fetching project for StandardAppVersion: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isAppEngineRetryableError) - if err != nil { - return handleNotFoundError(err, d, resource_app_engine_standard_app_version_fmt.Sprintf("AppEngineStandardAppVersion %q", d.Id())) - } - - if _, ok := d.GetOkExists("noop_on_destroy"); !ok { - if err 
:= d.Set("noop_on_destroy", false); err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error setting noop_on_destroy: %s", err) - } - } - if _, ok := d.GetOkExists("delete_service_on_destroy"); !ok { - if err := d.Set("delete_service_on_destroy", false); err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error setting delete_service_on_destroy: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - - if err := d.Set("name", flattenAppEngineStandardAppVersionName(res["name"], d, config)); err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("version_id", flattenAppEngineStandardAppVersionVersionId(res["id"], d, config)); err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("runtime", flattenAppEngineStandardAppVersionRuntime(res["runtime"], d, config)); err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("runtime_api_version", flattenAppEngineStandardAppVersionRuntimeApiVersion(res["runtimeApiVersion"], d, config)); err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("handlers", flattenAppEngineStandardAppVersionHandlers(res["handlers"], d, config)); err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("libraries", flattenAppEngineStandardAppVersionLibraries(res["libraries"], d, config)); err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("vpc_access_connector", 
flattenAppEngineStandardAppVersionVPCAccessConnector(res["vpcAccessConnector"], d, config)); err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("inbound_services", flattenAppEngineStandardAppVersionInboundServices(res["inboundServices"], d, config)); err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("instance_class", flattenAppEngineStandardAppVersionInstanceClass(res["instanceClass"], d, config)); err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("automatic_scaling", flattenAppEngineStandardAppVersionAutomaticScaling(res["automaticScaling"], d, config)); err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("basic_scaling", flattenAppEngineStandardAppVersionBasicScaling(res["basicScaling"], d, config)); err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - if err := d.Set("manual_scaling", flattenAppEngineStandardAppVersionManualScaling(res["manualScaling"], d, config)); err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error reading StandardAppVersion: %s", err) - } - - return nil -} - -func resourceAppEngineStandardAppVersionUpdate(d *resource_app_engine_standard_app_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error fetching project for StandardAppVersion: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) 
- idProp, err := expandAppEngineStandardAppVersionVersionId(d.Get("version_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version_id"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - runtimeProp, err := expandAppEngineStandardAppVersionRuntime(d.Get("runtime"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, runtimeProp)) { - obj["runtime"] = runtimeProp - } - threadsafeProp, err := expandAppEngineStandardAppVersionThreadsafe(d.Get("threadsafe"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("threadsafe"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, threadsafeProp)) { - obj["threadsafe"] = threadsafeProp - } - runtimeApiVersionProp, err := expandAppEngineStandardAppVersionRuntimeApiVersion(d.Get("runtime_api_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("runtime_api_version"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, runtimeApiVersionProp)) { - obj["runtimeApiVersion"] = runtimeApiVersionProp - } - handlersProp, err := expandAppEngineStandardAppVersionHandlers(d.Get("handlers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("handlers"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, handlersProp)) { - obj["handlers"] = handlersProp - } - librariesProp, err := 
expandAppEngineStandardAppVersionLibraries(d.Get("libraries"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("libraries"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, librariesProp)) { - obj["libraries"] = librariesProp - } - envVariablesProp, err := expandAppEngineStandardAppVersionEnvVariables(d.Get("env_variables"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("env_variables"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, envVariablesProp)) { - obj["envVariables"] = envVariablesProp - } - deploymentProp, err := expandAppEngineStandardAppVersionDeployment(d.Get("deployment"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deployment"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, deploymentProp)) { - obj["deployment"] = deploymentProp - } - entrypointProp, err := expandAppEngineStandardAppVersionEntrypoint(d.Get("entrypoint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entrypoint"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, entrypointProp)) { - obj["entrypoint"] = entrypointProp - } - vpcAccessConnectorProp, err := expandAppEngineStandardAppVersionVPCAccessConnector(d.Get("vpc_access_connector"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vpc_access_connector"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, vpcAccessConnectorProp)) { - obj["vpcAccessConnector"] = vpcAccessConnectorProp - } - 
inboundServicesProp, err := expandAppEngineStandardAppVersionInboundServices(d.Get("inbound_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("inbound_services"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, inboundServicesProp)) { - obj["inboundServices"] = inboundServicesProp - } - instanceClassProp, err := expandAppEngineStandardAppVersionInstanceClass(d.Get("instance_class"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance_class"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, instanceClassProp)) { - obj["instanceClass"] = instanceClassProp - } - automaticScalingProp, err := expandAppEngineStandardAppVersionAutomaticScaling(d.Get("automatic_scaling"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("automatic_scaling"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, automaticScalingProp)) { - obj["automaticScaling"] = automaticScalingProp - } - basicScalingProp, err := expandAppEngineStandardAppVersionBasicScaling(d.Get("basic_scaling"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("basic_scaling"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(v)) && (ok || !resource_app_engine_standard_app_version_reflect.DeepEqual(v, basicScalingProp)) { - obj["basicScaling"] = basicScalingProp - } - manualScalingProp, err := expandAppEngineStandardAppVersionManualScaling(d.Get("manual_scaling"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("manual_scaling"); !isEmptyValue(resource_app_engine_standard_app_version_reflect.ValueOf(v)) && (ok || 
!resource_app_engine_standard_app_version_reflect.DeepEqual(v, manualScalingProp)) { - obj["manualScaling"] = manualScalingProp - } - - lockName, err := replaceVars(d, config, "apps/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions") - if err != nil { - return err - } - - resource_app_engine_standard_app_version_log.Printf("[DEBUG] Updating StandardAppVersion %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_app_engine_standard_app_version_schema.TimeoutUpdate), isAppEngineRetryableError) - - if err != nil { - return resource_app_engine_standard_app_version_fmt.Errorf("Error updating StandardAppVersion %q: %s", d.Id(), err) - } else { - resource_app_engine_standard_app_version_log.Printf("[DEBUG] Finished updating StandardAppVersion %q: %#v", d.Id(), res) - } - - err = appEngineOperationWaitTime( - config, res, project, "Updating StandardAppVersion", userAgent, - d.Timeout(resource_app_engine_standard_app_version_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceAppEngineStandardAppVersionRead(d, meta) -} - -func resourceAppEngineStandardAppVersionDelete(d *resource_app_engine_standard_app_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - if d.Get("noop_on_destroy") == true { - resource_app_engine_standard_app_version_log.Printf("[DEBUG] Keeping the AppVersion %q", d.Id()) - return nil - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}") - if err != nil { - 
return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - if d.Get("delete_service_on_destroy") == true { - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}") - if err != nil { - return err - } - var obj map[string]interface{} - resource_app_engine_standard_app_version_log.Printf("[DEBUG] Deleting Service %q", d.Id()) - res, err := sendRequestWithTimeout(config, "DELETE", project, url, userAgent, obj, d.Timeout(resource_app_engine_standard_app_version_schema.TimeoutDelete), isAppEngineRetryableError) - if err != nil { - return handleNotFoundError(err, d, "Service") - } - err = appEngineOperationWaitTime( - config, res, project, "Deleting Service", userAgent, - d.Timeout(resource_app_engine_standard_app_version_schema.TimeoutDelete)) - - if err != nil { - return err - } - resource_app_engine_standard_app_version_log.Printf("[DEBUG] Finished deleting Service %q: %#v", d.Id(), res) - return nil - } else { - url, err := replaceVars(d, config, "{{AppEngineBasePath}}apps/{{project}}/services/{{service}}/versions/{{version_id}}") - if err != nil { - return err - } - var obj map[string]interface{} - resource_app_engine_standard_app_version_log.Printf("[DEBUG] Deleting AppVersion %q", d.Id()) - res, err := sendRequestWithTimeout(config, "DELETE", project, url, userAgent, obj, d.Timeout(resource_app_engine_standard_app_version_schema.TimeoutDelete), isAppEngineRetryableError) - if err != nil { - return handleNotFoundError(err, d, "AppVersion") - } - err = appEngineOperationWaitTime( - config, res, project, "Deleting AppVersion", userAgent, - d.Timeout(resource_app_engine_standard_app_version_schema.TimeoutDelete)) - - if err != nil { - return err - } - resource_app_engine_standard_app_version_log.Printf("[DEBUG] Finished deleting AppVersion %q: %#v", d.Id(), res) - return nil - - } -} - -func resourceAppEngineStandardAppVersionImport(d *resource_app_engine_standard_app_version_schema.ResourceData, meta 
interface{}) ([]*resource_app_engine_standard_app_version_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "apps/(?P[^/]+)/services/(?P[^/]+)/versions/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "apps/{{project}}/services/{{service}}/versions/{{version_id}}") - if err != nil { - return nil, resource_app_engine_standard_app_version_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if err := d.Set("noop_on_destroy", false); err != nil { - return nil, resource_app_engine_standard_app_version_fmt.Errorf("Error setting noop_on_destroy: %s", err) - } - if err := d.Set("delete_service_on_destroy", false); err != nil { - return nil, resource_app_engine_standard_app_version_fmt.Errorf("Error setting delete_service_on_destroy: %s", err) - } - - return []*resource_app_engine_standard_app_version_schema.ResourceData{d}, nil -} - -func flattenAppEngineStandardAppVersionName(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionVersionId(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionRuntime(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionRuntimeApiVersion(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlers(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range 
l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "url_regex": flattenAppEngineStandardAppVersionHandlersUrlRegex(original["urlRegex"], d, config), - "security_level": flattenAppEngineStandardAppVersionHandlersSecurityLevel(original["securityLevel"], d, config), - "login": flattenAppEngineStandardAppVersionHandlersLogin(original["login"], d, config), - "auth_fail_action": flattenAppEngineStandardAppVersionHandlersAuthFailAction(original["authFailAction"], d, config), - "redirect_http_response_code": flattenAppEngineStandardAppVersionHandlersRedirectHttpResponseCode(original["redirectHttpResponseCode"], d, config), - "script": flattenAppEngineStandardAppVersionHandlersScript(original["script"], d, config), - "static_files": flattenAppEngineStandardAppVersionHandlersStaticFiles(original["staticFiles"], d, config), - }) - } - return transformed -} - -func flattenAppEngineStandardAppVersionHandlersUrlRegex(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersSecurityLevel(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersLogin(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersAuthFailAction(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersRedirectHttpResponseCode(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersScript(v interface{}, d 
*resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["script_path"] = - flattenAppEngineStandardAppVersionHandlersScriptScriptPath(original["scriptPath"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineStandardAppVersionHandlersScriptScriptPath(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersStaticFiles(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["path"] = - flattenAppEngineStandardAppVersionHandlersStaticFilesPath(original["path"], d, config) - transformed["upload_path_regex"] = - flattenAppEngineStandardAppVersionHandlersStaticFilesUploadPathRegex(original["uploadPathRegex"], d, config) - transformed["http_headers"] = - flattenAppEngineStandardAppVersionHandlersStaticFilesHttpHeaders(original["httpHeaders"], d, config) - transformed["mime_type"] = - flattenAppEngineStandardAppVersionHandlersStaticFilesMimeType(original["mimeType"], d, config) - transformed["expiration"] = - flattenAppEngineStandardAppVersionHandlersStaticFilesExpiration(original["expiration"], d, config) - transformed["require_matching_file"] = - flattenAppEngineStandardAppVersionHandlersStaticFilesRequireMatchingFile(original["requireMatchingFile"], d, config) - transformed["application_readable"] = - flattenAppEngineStandardAppVersionHandlersStaticFilesApplicationReadable(original["applicationReadable"], d, config) - return []interface{}{transformed} -} - -func 
flattenAppEngineStandardAppVersionHandlersStaticFilesPath(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersStaticFilesUploadPathRegex(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersStaticFilesHttpHeaders(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersStaticFilesMimeType(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersStaticFilesExpiration(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersStaticFilesRequireMatchingFile(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionHandlersStaticFilesApplicationReadable(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionLibraries(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenAppEngineStandardAppVersionLibrariesName(original["name"], d, config), - "version": flattenAppEngineStandardAppVersionLibrariesVersion(original["version"], d, 
config), - }) - } - return transformed -} - -func flattenAppEngineStandardAppVersionLibrariesName(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionLibrariesVersion(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionVPCAccessConnector(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenAppEngineStandardAppVersionVPCAccessConnectorName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineStandardAppVersionVPCAccessConnectorName(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionInboundServices(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_app_engine_standard_app_version_schema.NewSet(resource_app_engine_standard_app_version_schema.HashString, v.([]interface{})) -} - -func flattenAppEngineStandardAppVersionInstanceClass(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionAutomaticScaling(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["max_concurrent_requests"] = - 
flattenAppEngineStandardAppVersionAutomaticScalingMaxConcurrentRequests(original["maxConcurrentRequests"], d, config) - transformed["max_idle_instances"] = - flattenAppEngineStandardAppVersionAutomaticScalingMaxIdleInstances(original["maxIdleInstances"], d, config) - transformed["max_pending_latency"] = - flattenAppEngineStandardAppVersionAutomaticScalingMaxPendingLatency(original["maxPendingLatency"], d, config) - transformed["min_idle_instances"] = - flattenAppEngineStandardAppVersionAutomaticScalingMinIdleInstances(original["minIdleInstances"], d, config) - transformed["min_pending_latency"] = - flattenAppEngineStandardAppVersionAutomaticScalingMinPendingLatency(original["minPendingLatency"], d, config) - transformed["standard_scheduler_settings"] = - flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettings(original["standardSchedulerSettings"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineStandardAppVersionAutomaticScalingMaxConcurrentRequests(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_standard_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineStandardAppVersionAutomaticScalingMaxIdleInstances(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_standard_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineStandardAppVersionAutomaticScalingMaxPendingLatency(v interface{}, d 
*resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionAutomaticScalingMinIdleInstances(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_standard_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineStandardAppVersionAutomaticScalingMinPendingLatency(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettings(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["target_cpu_utilization"] = - flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetCpuUtilization(original["targetCpuUtilization"], d, config) - transformed["target_throughput_utilization"] = - flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetThroughputUtilization(original["targetThroughputUtilization"], d, config) - transformed["min_instances"] = - flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMinInstances(original["minInstances"], d, config) - transformed["max_instances"] = - flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMaxInstances(original["maxInstances"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetCpuUtilization(v interface{}, d 
*resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetThroughputUtilization(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMinInstances(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_standard_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMaxInstances(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_standard_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineStandardAppVersionBasicScaling(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["idle_timeout"] = - flattenAppEngineStandardAppVersionBasicScalingIdleTimeout(original["idleTimeout"], d, config) - transformed["max_instances"] = - flattenAppEngineStandardAppVersionBasicScalingMaxInstances(original["maxInstances"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineStandardAppVersionBasicScalingIdleTimeout(v 
interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAppEngineStandardAppVersionBasicScalingMaxInstances(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_standard_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenAppEngineStandardAppVersionManualScaling(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["instances"] = - flattenAppEngineStandardAppVersionManualScalingInstances(original["instances"], d, config) - return []interface{}{transformed} -} - -func flattenAppEngineStandardAppVersionManualScalingInstances(v interface{}, d *resource_app_engine_standard_app_version_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_app_engine_standard_app_version_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func expandAppEngineStandardAppVersionVersionId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionRuntime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionThreadsafe(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandAppEngineStandardAppVersionRuntimeApiVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrlRegex, err := expandAppEngineStandardAppVersionHandlersUrlRegex(original["url_regex"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedUrlRegex); val.IsValid() && !isEmptyValue(val) { - transformed["urlRegex"] = transformedUrlRegex - } - - transformedSecurityLevel, err := expandAppEngineStandardAppVersionHandlersSecurityLevel(original["security_level"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedSecurityLevel); val.IsValid() && !isEmptyValue(val) { - transformed["securityLevel"] = transformedSecurityLevel - } - - transformedLogin, err := expandAppEngineStandardAppVersionHandlersLogin(original["login"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedLogin); val.IsValid() && !isEmptyValue(val) { - transformed["login"] = transformedLogin - } - - transformedAuthFailAction, err := expandAppEngineStandardAppVersionHandlersAuthFailAction(original["auth_fail_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedAuthFailAction); val.IsValid() && !isEmptyValue(val) { - transformed["authFailAction"] = transformedAuthFailAction - } - - transformedRedirectHttpResponseCode, err := 
expandAppEngineStandardAppVersionHandlersRedirectHttpResponseCode(original["redirect_http_response_code"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedRedirectHttpResponseCode); val.IsValid() && !isEmptyValue(val) { - transformed["redirectHttpResponseCode"] = transformedRedirectHttpResponseCode - } - - transformedScript, err := expandAppEngineStandardAppVersionHandlersScript(original["script"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedScript); val.IsValid() && !isEmptyValue(val) { - transformed["script"] = transformedScript - } - - transformedStaticFiles, err := expandAppEngineStandardAppVersionHandlersStaticFiles(original["static_files"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedStaticFiles); val.IsValid() && !isEmptyValue(val) { - transformed["staticFiles"] = transformedStaticFiles - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAppEngineStandardAppVersionHandlersUrlRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersSecurityLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersLogin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersAuthFailAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersRedirectHttpResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersScript(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedScriptPath, err := expandAppEngineStandardAppVersionHandlersScriptScriptPath(original["script_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedScriptPath); val.IsValid() && !isEmptyValue(val) { - transformed["scriptPath"] = transformedScriptPath - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionHandlersScriptScriptPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersStaticFiles(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandAppEngineStandardAppVersionHandlersStaticFilesPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedUploadPathRegex, err := expandAppEngineStandardAppVersionHandlersStaticFilesUploadPathRegex(original["upload_path_regex"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedUploadPathRegex); val.IsValid() && !isEmptyValue(val) { - transformed["uploadPathRegex"] = transformedUploadPathRegex - } - - transformedHttpHeaders, err := expandAppEngineStandardAppVersionHandlersStaticFilesHttpHeaders(original["http_headers"], d, config) - if 
err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["httpHeaders"] = transformedHttpHeaders - } - - transformedMimeType, err := expandAppEngineStandardAppVersionHandlersStaticFilesMimeType(original["mime_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedMimeType); val.IsValid() && !isEmptyValue(val) { - transformed["mimeType"] = transformedMimeType - } - - transformedExpiration, err := expandAppEngineStandardAppVersionHandlersStaticFilesExpiration(original["expiration"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedExpiration); val.IsValid() && !isEmptyValue(val) { - transformed["expiration"] = transformedExpiration - } - - transformedRequireMatchingFile, err := expandAppEngineStandardAppVersionHandlersStaticFilesRequireMatchingFile(original["require_matching_file"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedRequireMatchingFile); val.IsValid() && !isEmptyValue(val) { - transformed["requireMatchingFile"] = transformedRequireMatchingFile - } - - transformedApplicationReadable, err := expandAppEngineStandardAppVersionHandlersStaticFilesApplicationReadable(original["application_readable"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedApplicationReadable); val.IsValid() && !isEmptyValue(val) { - transformed["applicationReadable"] = transformedApplicationReadable - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionHandlersStaticFilesPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandAppEngineStandardAppVersionHandlersStaticFilesUploadPathRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersStaticFilesHttpHeaders(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandAppEngineStandardAppVersionHandlersStaticFilesMimeType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersStaticFilesExpiration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersStaticFilesRequireMatchingFile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionHandlersStaticFilesApplicationReadable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionLibraries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandAppEngineStandardAppVersionLibrariesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedVersion, err := expandAppEngineStandardAppVersionLibrariesVersion(original["version"], d, config) - if err != nil { - return nil, 
err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAppEngineStandardAppVersionLibrariesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionLibrariesVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionEnvVariables(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandAppEngineStandardAppVersionDeployment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedZip, err := expandAppEngineStandardAppVersionDeploymentZip(original["zip"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedZip); val.IsValid() && !isEmptyValue(val) { - transformed["zip"] = transformedZip - } - - transformedFiles, err := expandAppEngineStandardAppVersionDeploymentFiles(original["files"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedFiles); val.IsValid() && !isEmptyValue(val) { - transformed["files"] = transformedFiles - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionDeploymentZip(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := 
v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSourceUrl, err := expandAppEngineStandardAppVersionDeploymentZipSourceUrl(original["source_url"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedSourceUrl); val.IsValid() && !isEmptyValue(val) { - transformed["sourceUrl"] = transformedSourceUrl - } - - transformedFilesCount, err := expandAppEngineStandardAppVersionDeploymentZipFilesCount(original["files_count"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedFilesCount); val.IsValid() && !isEmptyValue(val) { - transformed["filesCount"] = transformedFilesCount - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionDeploymentZipSourceUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionDeploymentZipFilesCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionDeploymentFiles(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - m := make(map[string]interface{}) - for _, raw := range v.(*resource_app_engine_standard_app_version_schema.Set).List() { - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSha1Sum, err := expandAppEngineStandardAppVersionDeploymentFilesSha1Sum(original["sha1_sum"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedSha1Sum); val.IsValid() && !isEmptyValue(val) { - transformed["sha1Sum"] = transformedSha1Sum - 
} - - transformedSourceUrl, err := expandAppEngineStandardAppVersionDeploymentFilesSourceUrl(original["source_url"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedSourceUrl); val.IsValid() && !isEmptyValue(val) { - transformed["sourceUrl"] = transformedSourceUrl - } - - transformedName, err := expandString(original["name"], d, config) - if err != nil { - return nil, err - } - m[transformedName] = transformed - } - return m, nil -} - -func expandAppEngineStandardAppVersionDeploymentFilesSha1Sum(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionDeploymentFilesSourceUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionEntrypoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedShell, err := expandAppEngineStandardAppVersionEntrypointShell(original["shell"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedShell); val.IsValid() && !isEmptyValue(val) { - transformed["shell"] = transformedShell - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionEntrypointShell(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionVPCAccessConnector(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - 
transformedName, err := expandAppEngineStandardAppVersionVPCAccessConnectorName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionVPCAccessConnectorName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionInboundServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_app_engine_standard_app_version_schema.Set).List() - return v, nil -} - -func expandAppEngineStandardAppVersionInstanceClass(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionAutomaticScaling(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaxConcurrentRequests, err := expandAppEngineStandardAppVersionAutomaticScalingMaxConcurrentRequests(original["max_concurrent_requests"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedMaxConcurrentRequests); val.IsValid() && !isEmptyValue(val) { - transformed["maxConcurrentRequests"] = transformedMaxConcurrentRequests - } - - transformedMaxIdleInstances, err := expandAppEngineStandardAppVersionAutomaticScalingMaxIdleInstances(original["max_idle_instances"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedMaxIdleInstances); val.IsValid() && !isEmptyValue(val) { - 
transformed["maxIdleInstances"] = transformedMaxIdleInstances - } - - transformedMaxPendingLatency, err := expandAppEngineStandardAppVersionAutomaticScalingMaxPendingLatency(original["max_pending_latency"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedMaxPendingLatency); val.IsValid() && !isEmptyValue(val) { - transformed["maxPendingLatency"] = transformedMaxPendingLatency - } - - transformedMinIdleInstances, err := expandAppEngineStandardAppVersionAutomaticScalingMinIdleInstances(original["min_idle_instances"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedMinIdleInstances); val.IsValid() && !isEmptyValue(val) { - transformed["minIdleInstances"] = transformedMinIdleInstances - } - - transformedMinPendingLatency, err := expandAppEngineStandardAppVersionAutomaticScalingMinPendingLatency(original["min_pending_latency"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedMinPendingLatency); val.IsValid() && !isEmptyValue(val) { - transformed["minPendingLatency"] = transformedMinPendingLatency - } - - transformedStandardSchedulerSettings, err := expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettings(original["standard_scheduler_settings"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedStandardSchedulerSettings); val.IsValid() && !isEmptyValue(val) { - transformed["standardSchedulerSettings"] = transformedStandardSchedulerSettings - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingMaxConcurrentRequests(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandAppEngineStandardAppVersionAutomaticScalingMaxIdleInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingMaxPendingLatency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingMinIdleInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingMinPendingLatency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTargetCpuUtilization, err := expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetCpuUtilization(original["target_cpu_utilization"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedTargetCpuUtilization); val.IsValid() && !isEmptyValue(val) { - transformed["targetCpuUtilization"] = transformedTargetCpuUtilization - } - - transformedTargetThroughputUtilization, err := expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetThroughputUtilization(original["target_throughput_utilization"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedTargetThroughputUtilization); val.IsValid() && !isEmptyValue(val) { - transformed["targetThroughputUtilization"] = transformedTargetThroughputUtilization - } - - transformedMinInstances, err := 
expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMinInstances(original["min_instances"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedMinInstances); val.IsValid() && !isEmptyValue(val) { - transformed["minInstances"] = transformedMinInstances - } - - transformedMaxInstances, err := expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMaxInstances(original["max_instances"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedMaxInstances); val.IsValid() && !isEmptyValue(val) { - transformed["maxInstances"] = transformedMaxInstances - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetCpuUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsTargetThroughputUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMinInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionAutomaticScalingStandardSchedulerSettingsMaxInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionBasicScaling(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIdleTimeout, err := 
expandAppEngineStandardAppVersionBasicScalingIdleTimeout(original["idle_timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedIdleTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["idleTimeout"] = transformedIdleTimeout - } - - transformedMaxInstances, err := expandAppEngineStandardAppVersionBasicScalingMaxInstances(original["max_instances"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedMaxInstances); val.IsValid() && !isEmptyValue(val) { - transformed["maxInstances"] = transformedMaxInstances - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionBasicScalingIdleTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionBasicScalingMaxInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAppEngineStandardAppVersionManualScaling(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInstances, err := expandAppEngineStandardAppVersionManualScalingInstances(original["instances"], d, config) - if err != nil { - return nil, err - } else if val := resource_app_engine_standard_app_version_reflect.ValueOf(transformedInstances); val.IsValid() && !isEmptyValue(val) { - transformed["instances"] = transformedInstances - } - - return transformed, nil -} - -func expandAppEngineStandardAppVersionManualScalingInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceAssuredWorkloadsWorkload() 
*resource_assured_workloads_workload_schema.Resource { - return &resource_assured_workloads_workload_schema.Resource{ - Create: resourceAssuredWorkloadsWorkloadCreate, - Read: resourceAssuredWorkloadsWorkloadRead, - Update: resourceAssuredWorkloadsWorkloadUpdate, - Delete: resourceAssuredWorkloadsWorkloadDelete, - - Importer: &resource_assured_workloads_workload_schema.ResourceImporter{ - State: resourceAssuredWorkloadsWorkloadImport, - }, - - Timeouts: &resource_assured_workloads_workload_schema.ResourceTimeout{ - Create: resource_assured_workloads_workload_schema.DefaultTimeout(10 * resource_assured_workloads_workload_time.Minute), - Update: resource_assured_workloads_workload_schema.DefaultTimeout(10 * resource_assured_workloads_workload_time.Minute), - Delete: resource_assured_workloads_workload_schema.DefaultTimeout(10 * resource_assured_workloads_workload_time.Minute), - }, - - Schema: map[string]*resource_assured_workloads_workload_schema.Schema{ - "billing_account": { - Type: resource_assured_workloads_workload_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "Required. Input only. The billing account used for the resources which are direct children of workload. This billing account is initially associated with the resources created as part of Workload creation. After the initial creation of these resources, the customer can change the assigned billing account. The resource name has the form `billingAccounts/{billing_account_id}`. For example, 'billingAccounts/012345-567890-ABCDEF`.", - }, - - "compliance_regime": { - Type: resource_assured_workloads_workload_schema.TypeString, - Required: true, - ForceNew: true, - Description: "Required. Immutable. Compliance Regime associated with this workload. 
Possible values: COMPLIANCE_REGIME_UNSPECIFIED, IL4, CJIS, FEDRAMP_HIGH, FEDRAMP_MODERATE, US_REGIONAL_ACCESS", - }, - - "display_name": { - Type: resource_assured_workloads_workload_schema.TypeString, - Required: true, - Description: "Required. The user-assigned display name of the Workload. When present it must be between 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, and spaces. Example: My Workload", - }, - - "location": { - Type: resource_assured_workloads_workload_schema.TypeString, - Required: true, - ForceNew: true, - Description: "The location for the resource", - }, - - "organization": { - Type: resource_assured_workloads_workload_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The organization for the resource", - }, - - "kms_settings": { - Type: resource_assured_workloads_workload_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Input only. Settings used to create a CMEK crypto key. When set a project with a KMS CMEK key is provisioned. This field is mandatory for a subset of Compliance Regimes.", - MaxItems: 1, - Elem: AssuredWorkloadsWorkloadKmsSettingsSchema(), - }, - - "labels": { - Type: resource_assured_workloads_workload_schema.TypeMap, - Optional: true, - Description: "Optional. Labels applied to the workload.", - Elem: &resource_assured_workloads_workload_schema.Schema{Type: resource_assured_workloads_workload_schema.TypeString}, - }, - - "provisioned_resources_parent": { - Type: resource_assured_workloads_workload_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Input only. The parent resource for the resources managed by this Assured Workload. May be either an organization or a folder. Must be the same or a child of the Workload parent. If not specified all resources are created under the Workload parent. 
Formats: folders/{folder_id}, organizations/{organization_id}", - }, - - "resource_settings": { - Type: resource_assured_workloads_workload_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Input only. Resource properties that are used to customize workload resources. These properties (such as custom project id) will be used to create workload resources if possible. This field is optional.", - Elem: AssuredWorkloadsWorkloadResourceSettingsSchema(), - }, - - "create_time": { - Type: resource_assured_workloads_workload_schema.TypeString, - Computed: true, - Description: "Output only. Immutable. The Workload creation timestamp.", - }, - - "name": { - Type: resource_assured_workloads_workload_schema.TypeString, - Computed: true, - Description: "Output only. The resource name of the workload.", - }, - - "resources": { - Type: resource_assured_workloads_workload_schema.TypeList, - Computed: true, - Description: "Output only. The resources associated with this workload. These resources will be created when creating the workload. If any of the projects already exist, the workload creation will fail. Always read only.", - Elem: AssuredWorkloadsWorkloadResourcesSchema(), - }, - }, - } -} - -func AssuredWorkloadsWorkloadKmsSettingsSchema() *resource_assured_workloads_workload_schema.Resource { - return &resource_assured_workloads_workload_schema.Resource{ - Schema: map[string]*resource_assured_workloads_workload_schema.Schema{ - "next_rotation_time": { - Type: resource_assured_workloads_workload_schema.TypeString, - Required: true, - ForceNew: true, - Description: "Required. Input only. Immutable. The time at which the Key Management Service will automatically create a new version of the crypto key and mark it as the primary.", - }, - - "rotation_period": { - Type: resource_assured_workloads_workload_schema.TypeString, - Required: true, - ForceNew: true, - Description: "Required. Input only. Immutable. 
will be advanced by this period when the Key Management Service automatically rotates a key. Must be at least 24 hours and at most 876,000 hours.", - }, - }, - } -} - -func AssuredWorkloadsWorkloadResourceSettingsSchema() *resource_assured_workloads_workload_schema.Resource { - return &resource_assured_workloads_workload_schema.Resource{ - Schema: map[string]*resource_assured_workloads_workload_schema.Schema{ - "resource_id": { - Type: resource_assured_workloads_workload_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Resource identifier. For a project this represents project_number. If the project is already taken, the workload creation will fail.", - }, - - "resource_type": { - Type: resource_assured_workloads_workload_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Indicates the type of resource. This field should be specified to correspond the id to the right project type (CONSUMER_PROJECT or ENCRYPTION_KEYS_PROJECT) Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER", - }, - }, - } -} - -func AssuredWorkloadsWorkloadResourcesSchema() *resource_assured_workloads_workload_schema.Resource { - return &resource_assured_workloads_workload_schema.Resource{ - Schema: map[string]*resource_assured_workloads_workload_schema.Schema{ - "resource_id": { - Type: resource_assured_workloads_workload_schema.TypeInt, - Computed: true, - Description: "Resource identifier. For a project this represents project_number.", - }, - - "resource_type": { - Type: resource_assured_workloads_workload_schema.TypeString, - Computed: true, - Description: "Indicates the type of resource. 
Possible values: RESOURCE_TYPE_UNSPECIFIED, CONSUMER_PROJECT, ENCRYPTION_KEYS_PROJECT, KEYRING, CONSUMER_FOLDER", - }, - }, - } -} - -func resourceAssuredWorkloadsWorkloadCreate(d *resource_assured_workloads_workload_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_assured_workloads_workload_assuredworkloadsassuredworkloads.Workload{ - BillingAccount: resource_assured_workloads_workload_dcldcl.String(d.Get("billing_account").(string)), - ComplianceRegime: resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadComplianceRegimeEnumRef(d.Get("compliance_regime").(string)), - DisplayName: resource_assured_workloads_workload_dcldcl.String(d.Get("display_name").(string)), - Location: resource_assured_workloads_workload_dcldcl.String(d.Get("location").(string)), - Organization: resource_assured_workloads_workload_dcldcl.String(d.Get("organization").(string)), - KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), - Labels: checkStringMap(d.Get("labels")), - ProvisionedResourcesParent: resource_assured_workloads_workload_dcldcl.String(d.Get("provisioned_resources_parent").(string)), - ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), - } - - id, err := replaceVarsForId(d, config, "organizations/{{organization}}/locations/{{location}}/workloads/{{name}}") - if err != nil { - return resource_assured_workloads_workload_fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - createDirective := CreateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(resource_assured_workloads_workload_schema.TimeoutCreate)) - res, err := 
client.ApplyWorkload(resource_assured_workloads_workload_context.Background(), obj, createDirective...) - - if _, ok := err.(resource_assured_workloads_workload_dcldcl.DiffAfterApplyError); ok { - resource_assured_workloads_workload_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_assured_workloads_workload_fmt.Errorf("Error creating Workload: %s", err) - } - - resource_assured_workloads_workload_log.Printf("[DEBUG] Finished creating Workload %q: %#v", d.Id(), res) - - if err = d.Set("name", res.Name); err != nil { - return resource_assured_workloads_workload_fmt.Errorf("error setting name in state: %s", err) - } - - id, err = replaceVarsForId(d, config, "organizations/{{organization}}/locations/{{location}}/workloads/{{name}}") - if err != nil { - return resource_assured_workloads_workload_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return resourceAssuredWorkloadsWorkloadRead(d, meta) -} - -func resourceAssuredWorkloadsWorkloadRead(d *resource_assured_workloads_workload_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_assured_workloads_workload_assuredworkloadsassuredworkloads.Workload{ - BillingAccount: resource_assured_workloads_workload_dcldcl.String(d.Get("billing_account").(string)), - ComplianceRegime: resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadComplianceRegimeEnumRef(d.Get("compliance_regime").(string)), - DisplayName: resource_assured_workloads_workload_dcldcl.String(d.Get("display_name").(string)), - Location: resource_assured_workloads_workload_dcldcl.String(d.Get("location").(string)), - Organization: resource_assured_workloads_workload_dcldcl.String(d.Get("organization").(string)), - KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), - Labels: checkStringMap(d.Get("labels")), - ProvisionedResourcesParent: 
resource_assured_workloads_workload_dcldcl.String(d.Get("provisioned_resources_parent").(string)), - ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), - Name: resource_assured_workloads_workload_dcldcl.StringOrNil(d.Get("name").(string)), - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(resource_assured_workloads_workload_schema.TimeoutRead)) - res, err := client.GetWorkload(resource_assured_workloads_workload_context.Background(), obj) - if err != nil { - resourceName := resource_assured_workloads_workload_fmt.Sprintf("AssuredWorkloadsWorkload %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("billing_account", res.BillingAccount); err != nil { - return resource_assured_workloads_workload_fmt.Errorf("error setting billing_account in state: %s", err) - } - if err = d.Set("compliance_regime", res.ComplianceRegime); err != nil { - return resource_assured_workloads_workload_fmt.Errorf("error setting compliance_regime in state: %s", err) - } - if err = d.Set("display_name", res.DisplayName); err != nil { - return resource_assured_workloads_workload_fmt.Errorf("error setting display_name in state: %s", err) - } - if err = d.Set("location", res.Location); err != nil { - return resource_assured_workloads_workload_fmt.Errorf("error setting location in state: %s", err) - } - if err = d.Set("organization", res.Organization); err != nil { - return resource_assured_workloads_workload_fmt.Errorf("error setting organization in state: %s", err) - } - if err = d.Set("kms_settings", flattenAssuredWorkloadsWorkloadKmsSettings(res.KmsSettings)); err != nil { - return resource_assured_workloads_workload_fmt.Errorf("error setting kms_settings in 
state: %s", err) - } - if err = d.Set("labels", res.Labels); err != nil { - return resource_assured_workloads_workload_fmt.Errorf("error setting labels in state: %s", err) - } - if err = d.Set("provisioned_resources_parent", res.ProvisionedResourcesParent); err != nil { - return resource_assured_workloads_workload_fmt.Errorf("error setting provisioned_resources_parent in state: %s", err) - } - if err = d.Set("resource_settings", flattenAssuredWorkloadsWorkloadResourceSettingsArray(res.ResourceSettings)); err != nil { - return resource_assured_workloads_workload_fmt.Errorf("error setting resource_settings in state: %s", err) - } - if err = d.Set("create_time", res.CreateTime); err != nil { - return resource_assured_workloads_workload_fmt.Errorf("error setting create_time in state: %s", err) - } - if err = d.Set("name", res.Name); err != nil { - return resource_assured_workloads_workload_fmt.Errorf("error setting name in state: %s", err) - } - if err = d.Set("resources", flattenAssuredWorkloadsWorkloadResourcesArray(res.Resources)); err != nil { - return resource_assured_workloads_workload_fmt.Errorf("error setting resources in state: %s", err) - } - - return nil -} - -func resourceAssuredWorkloadsWorkloadUpdate(d *resource_assured_workloads_workload_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_assured_workloads_workload_assuredworkloadsassuredworkloads.Workload{ - BillingAccount: resource_assured_workloads_workload_dcldcl.String(d.Get("billing_account").(string)), - ComplianceRegime: resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadComplianceRegimeEnumRef(d.Get("compliance_regime").(string)), - DisplayName: resource_assured_workloads_workload_dcldcl.String(d.Get("display_name").(string)), - Location: resource_assured_workloads_workload_dcldcl.String(d.Get("location").(string)), - Organization: resource_assured_workloads_workload_dcldcl.String(d.Get("organization").(string)), - KmsSettings: 
expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), - Labels: checkStringMap(d.Get("labels")), - ProvisionedResourcesParent: resource_assured_workloads_workload_dcldcl.String(d.Get("provisioned_resources_parent").(string)), - ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), - Name: resource_assured_workloads_workload_dcldcl.StringOrNil(d.Get("name").(string)), - } - - old := &resource_assured_workloads_workload_assuredworkloadsassuredworkloads.Workload{ - BillingAccount: resource_assured_workloads_workload_dcldcl.String(oldValue(d.GetChange("billing_account")).(string)), - ComplianceRegime: resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadComplianceRegimeEnumRef(oldValue(d.GetChange("compliance_regime")).(string)), - DisplayName: resource_assured_workloads_workload_dcldcl.String(oldValue(d.GetChange("display_name")).(string)), - Location: resource_assured_workloads_workload_dcldcl.String(oldValue(d.GetChange("location")).(string)), - Organization: resource_assured_workloads_workload_dcldcl.String(oldValue(d.GetChange("organization")).(string)), - KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(oldValue(d.GetChange("kms_settings"))), - Labels: checkStringMap(oldValue(d.GetChange("labels"))), - ProvisionedResourcesParent: resource_assured_workloads_workload_dcldcl.String(oldValue(d.GetChange("provisioned_resources_parent")).(string)), - ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(oldValue(d.GetChange("resource_settings"))), - Name: resource_assured_workloads_workload_dcldcl.StringOrNil(oldValue(d.GetChange("name")).(string)), - } - directive := UpdateDirective - directive = append(directive, resource_assured_workloads_workload_dcldcl.WithStateHint(old)) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - 
billingProject = bp - } - client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(resource_assured_workloads_workload_schema.TimeoutUpdate)) - res, err := client.ApplyWorkload(resource_assured_workloads_workload_context.Background(), obj, directive...) - - if _, ok := err.(resource_assured_workloads_workload_dcldcl.DiffAfterApplyError); ok { - resource_assured_workloads_workload_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_assured_workloads_workload_fmt.Errorf("Error updating Workload: %s", err) - } - - resource_assured_workloads_workload_log.Printf("[DEBUG] Finished creating Workload %q: %#v", d.Id(), res) - - return resourceAssuredWorkloadsWorkloadRead(d, meta) -} - -func resourceAssuredWorkloadsWorkloadDelete(d *resource_assured_workloads_workload_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_assured_workloads_workload_assuredworkloadsassuredworkloads.Workload{ - BillingAccount: resource_assured_workloads_workload_dcldcl.String(d.Get("billing_account").(string)), - ComplianceRegime: resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadComplianceRegimeEnumRef(d.Get("compliance_regime").(string)), - DisplayName: resource_assured_workloads_workload_dcldcl.String(d.Get("display_name").(string)), - Location: resource_assured_workloads_workload_dcldcl.String(d.Get("location").(string)), - Organization: resource_assured_workloads_workload_dcldcl.String(d.Get("organization").(string)), - KmsSettings: expandAssuredWorkloadsWorkloadKmsSettings(d.Get("kms_settings")), - Labels: checkStringMap(d.Get("labels")), - ProvisionedResourcesParent: resource_assured_workloads_workload_dcldcl.String(d.Get("provisioned_resources_parent").(string)), - ResourceSettings: expandAssuredWorkloadsWorkloadResourceSettingsArray(d.Get("resource_settings")), - Name: 
resource_assured_workloads_workload_dcldcl.StringOrNil(d.Get("name").(string)), - } - - resource_assured_workloads_workload_log.Printf("[DEBUG] Deleting Workload %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLAssuredWorkloadsClient(config, userAgent, billingProject, d.Timeout(resource_assured_workloads_workload_schema.TimeoutDelete)) - if err := client.DeleteWorkload(resource_assured_workloads_workload_context.Background(), obj); err != nil { - return resource_assured_workloads_workload_fmt.Errorf("Error deleting Workload: %s", err) - } - - resource_assured_workloads_workload_log.Printf("[DEBUG] Finished deleting Workload %q", d.Id()) - return nil -} - -func resourceAssuredWorkloadsWorkloadImport(d *resource_assured_workloads_workload_schema.ResourceData, meta interface{}) ([]*resource_assured_workloads_workload_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "organizations/(?P[^/]+)/locations/(?P[^/]+)/workloads/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVarsForId(d, config, "organizations/{{organization}}/locations/{{location}}/workloads/{{name}}") - if err != nil { - return nil, resource_assured_workloads_workload_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_assured_workloads_workload_schema.ResourceData{d}, nil -} - -func expandAssuredWorkloadsWorkloadKmsSettings(o interface{}) *resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadKmsSettings { - if o == nil { - return resource_assured_workloads_workload_assuredworkloadsassuredworkloads.EmptyWorkloadKmsSettings - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return 
resource_assured_workloads_workload_assuredworkloadsassuredworkloads.EmptyWorkloadKmsSettings - } - obj := objArr[0].(map[string]interface{}) - return &resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadKmsSettings{ - NextRotationTime: resource_assured_workloads_workload_dcldcl.String(obj["next_rotation_time"].(string)), - RotationPeriod: resource_assured_workloads_workload_dcldcl.String(obj["rotation_period"].(string)), - } -} - -func flattenAssuredWorkloadsWorkloadKmsSettings(obj *resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadKmsSettings) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "next_rotation_time": obj.NextRotationTime, - "rotation_period": obj.RotationPeriod, - } - - return []interface{}{transformed} - -} - -func expandAssuredWorkloadsWorkloadResourceSettingsArray(o interface{}) []resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadResourceSettings { - if o == nil { - return make([]resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadResourceSettings, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 { - return make([]resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadResourceSettings, 0) - } - - items := make([]resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadResourceSettings, 0, len(objs)) - for _, item := range objs { - i := expandAssuredWorkloadsWorkloadResourceSettings(item) - items = append(items, *i) - } - - return items -} - -func expandAssuredWorkloadsWorkloadResourceSettings(o interface{}) *resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadResourceSettings { - if o == nil { - return resource_assured_workloads_workload_assuredworkloadsassuredworkloads.EmptyWorkloadResourceSettings - } - - obj := o.(map[string]interface{}) - return 
&resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadResourceSettings{ - ResourceId: resource_assured_workloads_workload_dcldcl.String(obj["resource_id"].(string)), - ResourceType: resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadResourceSettingsResourceTypeEnumRef(obj["resource_type"].(string)), - } -} - -func flattenAssuredWorkloadsWorkloadResourceSettingsArray(objs []resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadResourceSettings) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenAssuredWorkloadsWorkloadResourceSettings(&item) - items = append(items, i) - } - - return items -} - -func flattenAssuredWorkloadsWorkloadResourceSettings(obj *resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadResourceSettings) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "resource_id": obj.ResourceId, - "resource_type": obj.ResourceType, - } - - return transformed - -} - -func flattenAssuredWorkloadsWorkloadResourcesArray(objs []resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadResources) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenAssuredWorkloadsWorkloadResources(&item) - items = append(items, i) - } - - return items -} - -func flattenAssuredWorkloadsWorkloadResources(obj *resource_assured_workloads_workload_assuredworkloadsassuredworkloads.WorkloadResources) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "resource_id": obj.ResourceId, - "resource_type": obj.ResourceType, - } - - return transformed - -} - -var sensitiveParams = []string{"secret_access_key"} - -func sensitiveParamCustomizeDiff(_ resource_bigquery_data_transfer_config_context.Context, diff 
*resource_bigquery_data_transfer_config_schema.ResourceDiff, v interface{}) error { - for _, sp := range sensitiveParams { - mapLabel := diff.Get("params." + sp).(string) - authLabel := diff.Get("sensitive_params.0." + sp).(string) - if mapLabel != "" && authLabel != "" { - return resource_bigquery_data_transfer_config_fmt.Errorf("Sensitive param [%s] cannot be set in both `params` and the `sensitive_params` block.", sp) - } - } - return nil -} - -func resourceBigqueryDataTransferConfig() *resource_bigquery_data_transfer_config_schema.Resource { - return &resource_bigquery_data_transfer_config_schema.Resource{ - Create: resourceBigqueryDataTransferConfigCreate, - Read: resourceBigqueryDataTransferConfigRead, - Update: resourceBigqueryDataTransferConfigUpdate, - Delete: resourceBigqueryDataTransferConfigDelete, - - Importer: &resource_bigquery_data_transfer_config_schema.ResourceImporter{ - State: resourceBigqueryDataTransferConfigImport, - }, - - Timeouts: &resource_bigquery_data_transfer_config_schema.ResourceTimeout{ - Create: resource_bigquery_data_transfer_config_schema.DefaultTimeout(4 * resource_bigquery_data_transfer_config_time.Minute), - Update: resource_bigquery_data_transfer_config_schema.DefaultTimeout(4 * resource_bigquery_data_transfer_config_time.Minute), - Delete: resource_bigquery_data_transfer_config_schema.DefaultTimeout(4 * resource_bigquery_data_transfer_config_time.Minute), - }, - - CustomizeDiff: sensitiveParamCustomizeDiff, - - Schema: map[string]*resource_bigquery_data_transfer_config_schema.Schema{ - "data_source_id": { - Type: resource_bigquery_data_transfer_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The data source id. 
Cannot be changed once the transfer config is created.`, - }, - "display_name": { - Type: resource_bigquery_data_transfer_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The user specified display name for the transfer config.`, - }, - "params": { - Type: resource_bigquery_data_transfer_config_schema.TypeMap, - Required: true, - Description: `These parameters are specific to each data source.`, - Elem: &resource_bigquery_data_transfer_config_schema.Schema{Type: resource_bigquery_data_transfer_config_schema.TypeString}, - }, - "data_refresh_window_days": { - Type: resource_bigquery_data_transfer_config_schema.TypeInt, - Optional: true, - Description: `The number of days to look back to automatically refresh the data. -For example, if dataRefreshWindowDays = 10, then every day BigQuery -reingests data for [today-10, today-1], rather than ingesting data for -just [today-1]. Only valid if the data source supports the feature. -Set the value to 0 to use the default value.`, - }, - "destination_dataset_id": { - Type: resource_bigquery_data_transfer_config_schema.TypeString, - Optional: true, - Description: `The BigQuery target dataset id.`, - }, - "disabled": { - Type: resource_bigquery_data_transfer_config_schema.TypeBool, - Optional: true, - Description: `When set to true, no runs are scheduled for a given transfer.`, - }, - "email_preferences": { - Type: resource_bigquery_data_transfer_config_schema.TypeList, - Optional: true, - Description: `Email notifications will be sent according to these preferences to the -email address of the user who owns this transfer config.`, - MaxItems: 1, - Elem: &resource_bigquery_data_transfer_config_schema.Resource{ - Schema: map[string]*resource_bigquery_data_transfer_config_schema.Schema{ - "enable_failure_email": { - Type: resource_bigquery_data_transfer_config_schema.TypeBool, - Required: true, - Description: `If true, email notifications will be sent on transfer run failures.`, - }, - }, - }, - }, - 
"location": { - Type: resource_bigquery_data_transfer_config_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The geographic location where the transfer config should reside. -Examples: US, EU, asia-northeast1. The default value is US.`, - Default: "US", - }, - "notification_pubsub_topic": { - Type: resource_bigquery_data_transfer_config_schema.TypeString, - Optional: true, - Description: `Pub/Sub topic where notifications will be sent after transfer runs -associated with this transfer config finish.`, - }, - "schedule": { - Type: resource_bigquery_data_transfer_config_schema.TypeString, - Optional: true, - Description: `Data transfer schedule. If the data source does not support a custom -schedule, this should be empty. If it is empty, the default value for -the data source will be used. The specified times are in UTC. Examples -of valid format: 1st,3rd monday of month 15:30, every wed,fri of jan, -jun 13:15, and first sunday of quarter 00:00. See more explanation -about the format here: -https://cloud.google.com/appengine/docs/flexible/python/scheduling-jobs-with-cron-yaml#the_schedule_format -NOTE: the granularity should be at least 8 hours, or less frequent.`, - }, - "schedule_options": { - Type: resource_bigquery_data_transfer_config_schema.TypeList, - Optional: true, - Description: `Options customizing the data transfer schedule.`, - MaxItems: 1, - Elem: &resource_bigquery_data_transfer_config_schema.Resource{ - Schema: map[string]*resource_bigquery_data_transfer_config_schema.Schema{ - "disable_auto_scheduling": { - Type: resource_bigquery_data_transfer_config_schema.TypeBool, - Optional: true, - Description: `If true, automatic scheduling of data transfer runs for this -configuration will be disabled. The runs can be started on ad-hoc -basis using transferConfigs.startManualRuns API. 
When automatic -scheduling is disabled, the TransferConfig.schedule field will -be ignored.`, - AtLeastOneOf: []string{"schedule_options.0.disable_auto_scheduling", "schedule_options.0.start_time", "schedule_options.0.end_time"}, - }, - "end_time": { - Type: resource_bigquery_data_transfer_config_schema.TypeString, - Optional: true, - Description: `Defines time to stop scheduling transfer runs. A transfer run cannot be -scheduled at or after the end time. The end time can be changed at any -moment. The time when a data transfer can be triggered manually is not -limited by this option.`, - AtLeastOneOf: []string{"schedule_options.0.disable_auto_scheduling", "schedule_options.0.start_time", "schedule_options.0.end_time"}, - }, - "start_time": { - Type: resource_bigquery_data_transfer_config_schema.TypeString, - Optional: true, - Description: `Specifies time to start scheduling transfer runs. The first run will be -scheduled at or after the start time according to a recurrence pattern -defined in the schedule string. The start time can be changed at any -moment. The time when a data transfer can be triggered manually is not -limited by this option.`, - AtLeastOneOf: []string{"schedule_options.0.disable_auto_scheduling", "schedule_options.0.start_time", "schedule_options.0.end_time"}, - }, - }, - }, - }, - "sensitive_params": { - Type: resource_bigquery_data_transfer_config_schema.TypeList, - Optional: true, - Description: `Different parameters are configured primarily using the the 'params' field on this -resource. This block contains the parameters which contain secrets or passwords so that they can be marked -sensitive and hidden from plan output. The name of the field, eg: secret_access_key, will be the key -in the 'params' map in the api request. - -Credentials may not be specified in both locations and will cause an error. 
Changing from one location -to a different credential configuration in the config will require an apply to update state.`, - MaxItems: 1, - Elem: &resource_bigquery_data_transfer_config_schema.Resource{ - Schema: map[string]*resource_bigquery_data_transfer_config_schema.Schema{ - "secret_access_key": { - Type: resource_bigquery_data_transfer_config_schema.TypeString, - Required: true, - Description: `The Secret Access Key of the AWS account transferring data from.`, - Sensitive: true, - }, - }, - }, - }, - "service_account_name": { - Type: resource_bigquery_data_transfer_config_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Optional service account name. If this field is set, transfer config will -be created with this service account credentials. It requires that -requesting user calling this API has permissions to act as this service account.`, - Default: "", - }, - "name": { - Type: resource_bigquery_data_transfer_config_schema.TypeString, - Computed: true, - Description: `The resource name of the transfer config. Transfer config names have the -form projects/{projectId}/locations/{location}/transferConfigs/{configId}. -Where configId is usually a uuid, but this is not required. 
-The name is ignored when creating a transfer config.`, - }, - "project": { - Type: resource_bigquery_data_transfer_config_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigqueryDataTransferConfigCreate(d *resource_bigquery_data_transfer_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandBigqueryDataTransferConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(displayNameProp)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - destinationDatasetIdProp, err := expandBigqueryDataTransferConfigDestinationDatasetId(d.Get("destination_dataset_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("destination_dataset_id"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(destinationDatasetIdProp)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, destinationDatasetIdProp)) { - obj["destinationDatasetId"] = destinationDatasetIdProp - } - dataSourceIdProp, err := expandBigqueryDataTransferConfigDataSourceId(d.Get("data_source_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("data_source_id"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(dataSourceIdProp)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, dataSourceIdProp)) { - obj["dataSourceId"] = dataSourceIdProp - } - scheduleProp, err := expandBigqueryDataTransferConfigSchedule(d.Get("schedule"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("schedule"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(scheduleProp)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, scheduleProp)) { - obj["schedule"] = scheduleProp - } - scheduleOptionsProp, err := expandBigqueryDataTransferConfigScheduleOptions(d.Get("schedule_options"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("schedule_options"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(scheduleOptionsProp)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, scheduleOptionsProp)) { - obj["scheduleOptions"] = scheduleOptionsProp - } - emailPreferencesProp, err := expandBigqueryDataTransferConfigEmailPreferences(d.Get("email_preferences"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("email_preferences"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(emailPreferencesProp)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, emailPreferencesProp)) { - obj["emailPreferences"] = emailPreferencesProp - } - notificationPubsubTopicProp, err := expandBigqueryDataTransferConfigNotificationPubsubTopic(d.Get("notification_pubsub_topic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_pubsub_topic"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(notificationPubsubTopicProp)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, notificationPubsubTopicProp)) { - obj["notificationPubsubTopic"] = notificationPubsubTopicProp - } - dataRefreshWindowDaysProp, err := expandBigqueryDataTransferConfigDataRefreshWindowDays(d.Get("data_refresh_window_days"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("data_refresh_window_days"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(dataRefreshWindowDaysProp)) && (ok || 
!resource_bigquery_data_transfer_config_reflect.DeepEqual(v, dataRefreshWindowDaysProp)) { - obj["dataRefreshWindowDays"] = dataRefreshWindowDaysProp - } - disabledProp, err := expandBigqueryDataTransferConfigDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(disabledProp)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - paramsProp, err := expandBigqueryDataTransferConfigParams(d.Get("params"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("params"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(paramsProp)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, paramsProp)) { - obj["params"] = paramsProp - } - - obj, err = resourceBigqueryDataTransferConfigEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigqueryDataTransferBasePath}}projects/{{project}}/locations/{{location}}/transferConfigs?serviceAccountName={{service_account_name}}") - if err != nil { - return err - } - - resource_bigquery_data_transfer_config_log.Printf("[DEBUG] Creating new Config: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error fetching project for Config: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_bigquery_data_transfer_config_schema.TimeoutCreate), iamMemberMissing) - if err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error creating Config: %s", err) - } - if err := d.Set("name", flattenBigqueryDataTransferConfigName(res["name"], d, 
config)); err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return resource_bigquery_data_transfer_config_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return resource_bigquery_data_transfer_config_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - resource_bigquery_data_transfer_config_log.Printf("[DEBUG] Finished creating Config %q: %#v", d.Id(), res) - - return resourceBigqueryDataTransferConfigRead(d, meta) -} - -func resourceBigqueryDataTransferConfigRead(d *resource_bigquery_data_transfer_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigqueryDataTransferBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error fetching project for Config: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, iamMemberMissing) - if err != nil { - return handleNotFoundError(err, d, 
resource_bigquery_data_transfer_config_fmt.Sprintf("BigqueryDataTransferConfig %q", d.Id())) - } - - res, err = resourceBigqueryDataTransferConfigDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_bigquery_data_transfer_config_log.Printf("[DEBUG] Removing BigqueryDataTransferConfig because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error reading Config: %s", err) - } - - if err := d.Set("display_name", flattenBigqueryDataTransferConfigDisplayName(res["displayName"], d, config)); err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("name", flattenBigqueryDataTransferConfigName(res["name"], d, config)); err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("destination_dataset_id", flattenBigqueryDataTransferConfigDestinationDatasetId(res["destinationDatasetId"], d, config)); err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("data_source_id", flattenBigqueryDataTransferConfigDataSourceId(res["dataSourceId"], d, config)); err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("schedule", flattenBigqueryDataTransferConfigSchedule(res["schedule"], d, config)); err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("schedule_options", flattenBigqueryDataTransferConfigScheduleOptions(res["scheduleOptions"], d, config)); err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("email_preferences", flattenBigqueryDataTransferConfigEmailPreferences(res["emailPreferences"], d, 
config)); err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("notification_pubsub_topic", flattenBigqueryDataTransferConfigNotificationPubsubTopic(res["notificationPubsubTopic"], d, config)); err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("data_refresh_window_days", flattenBigqueryDataTransferConfigDataRefreshWindowDays(res["dataRefreshWindowDays"], d, config)); err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("disabled", flattenBigqueryDataTransferConfigDisabled(res["disabled"], d, config)); err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error reading Config: %s", err) - } - if err := d.Set("params", flattenBigqueryDataTransferConfigParams(res["params"], d, config)); err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error reading Config: %s", err) - } - - return nil -} - -func resourceBigqueryDataTransferConfigUpdate(d *resource_bigquery_data_transfer_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error fetching project for Config: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - destinationDatasetIdProp, err := expandBigqueryDataTransferConfigDestinationDatasetId(d.Get("destination_dataset_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("destination_dataset_id"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(v)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, destinationDatasetIdProp)) { - 
obj["destinationDatasetId"] = destinationDatasetIdProp - } - scheduleProp, err := expandBigqueryDataTransferConfigSchedule(d.Get("schedule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("schedule"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(v)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, scheduleProp)) { - obj["schedule"] = scheduleProp - } - scheduleOptionsProp, err := expandBigqueryDataTransferConfigScheduleOptions(d.Get("schedule_options"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("schedule_options"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(v)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, scheduleOptionsProp)) { - obj["scheduleOptions"] = scheduleOptionsProp - } - emailPreferencesProp, err := expandBigqueryDataTransferConfigEmailPreferences(d.Get("email_preferences"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("email_preferences"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(v)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, emailPreferencesProp)) { - obj["emailPreferences"] = emailPreferencesProp - } - notificationPubsubTopicProp, err := expandBigqueryDataTransferConfigNotificationPubsubTopic(d.Get("notification_pubsub_topic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_pubsub_topic"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(v)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, notificationPubsubTopicProp)) { - obj["notificationPubsubTopic"] = notificationPubsubTopicProp - } - dataRefreshWindowDaysProp, err := expandBigqueryDataTransferConfigDataRefreshWindowDays(d.Get("data_refresh_window_days"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("data_refresh_window_days"); 
!isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(v)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, dataRefreshWindowDaysProp)) { - obj["dataRefreshWindowDays"] = dataRefreshWindowDaysProp - } - disabledProp, err := expandBigqueryDataTransferConfigDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(v)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - paramsProp, err := expandBigqueryDataTransferConfigParams(d.Get("params"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("params"); !isEmptyValue(resource_bigquery_data_transfer_config_reflect.ValueOf(v)) && (ok || !resource_bigquery_data_transfer_config_reflect.DeepEqual(v, paramsProp)) { - obj["params"] = paramsProp - } - - obj, err = resourceBigqueryDataTransferConfigEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigqueryDataTransferBasePath}}{{name}}") - if err != nil { - return err - } - - resource_bigquery_data_transfer_config_log.Printf("[DEBUG] Updating Config %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("destination_dataset_id") { - updateMask = append(updateMask, "destinationDatasetId") - } - - if d.HasChange("schedule") { - updateMask = append(updateMask, "schedule") - } - - if d.HasChange("schedule_options") { - updateMask = append(updateMask, "scheduleOptions") - } - - if d.HasChange("email_preferences") { - updateMask = append(updateMask, "emailPreferences") - } - - if d.HasChange("notification_pubsub_topic") { - updateMask = append(updateMask, "notificationPubsubTopic") - } - - if d.HasChange("data_refresh_window_days") { - updateMask = append(updateMask, "dataRefreshWindowDays") - } - - if d.HasChange("disabled") { - updateMask = 
append(updateMask, "disabled") - } - - if d.HasChange("params") { - updateMask = append(updateMask, "params") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_bigquery_data_transfer_config_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_bigquery_data_transfer_config_schema.TimeoutUpdate), iamMemberMissing) - - if err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error updating Config %q: %s", d.Id(), err) - } else { - resource_bigquery_data_transfer_config_log.Printf("[DEBUG] Finished updating Config %q: %#v", d.Id(), res) - } - - return resourceBigqueryDataTransferConfigRead(d, meta) -} - -func resourceBigqueryDataTransferConfigDelete(d *resource_bigquery_data_transfer_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_data_transfer_config_fmt.Errorf("Error fetching project for Config: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BigqueryDataTransferBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_bigquery_data_transfer_config_log.Printf("[DEBUG] Deleting Config %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_bigquery_data_transfer_config_schema.TimeoutDelete), iamMemberMissing) - if err != nil { - return handleNotFoundError(err, d, "Config") - } - - resource_bigquery_data_transfer_config_log.Printf("[DEBUG] 
Finished deleting Config %q: %#v", d.Id(), res) - return nil -} - -func resourceBigqueryDataTransferConfigImport(d *resource_bigquery_data_transfer_config_schema.ResourceData, meta interface{}) ([]*resource_bigquery_data_transfer_config_schema.ResourceData, error) { - - config := meta.(*Config) - - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*resource_bigquery_data_transfer_config_schema.ResourceData{d}, nil -} - -func flattenBigqueryDataTransferConfigDisplayName(v interface{}, d *resource_bigquery_data_transfer_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigName(v interface{}, d *resource_bigquery_data_transfer_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigDestinationDatasetId(v interface{}, d *resource_bigquery_data_transfer_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigDataSourceId(v interface{}, d *resource_bigquery_data_transfer_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigSchedule(v interface{}, d *resource_bigquery_data_transfer_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigScheduleOptions(v interface{}, d *resource_bigquery_data_transfer_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["disable_auto_scheduling"] = - flattenBigqueryDataTransferConfigScheduleOptionsDisableAutoScheduling(original["disableAutoScheduling"], d, config) - transformed["start_time"] = - flattenBigqueryDataTransferConfigScheduleOptionsStartTime(original["startTime"], d, config) - 
transformed["end_time"] = - flattenBigqueryDataTransferConfigScheduleOptionsEndTime(original["endTime"], d, config) - return []interface{}{transformed} -} - -func flattenBigqueryDataTransferConfigScheduleOptionsDisableAutoScheduling(v interface{}, d *resource_bigquery_data_transfer_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigScheduleOptionsStartTime(v interface{}, d *resource_bigquery_data_transfer_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigScheduleOptionsEndTime(v interface{}, d *resource_bigquery_data_transfer_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigEmailPreferences(v interface{}, d *resource_bigquery_data_transfer_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enable_failure_email"] = - flattenBigqueryDataTransferConfigEmailPreferencesEnableFailureEmail(original["enableFailureEmail"], d, config) - return []interface{}{transformed} -} - -func flattenBigqueryDataTransferConfigEmailPreferencesEnableFailureEmail(v interface{}, d *resource_bigquery_data_transfer_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigNotificationPubsubTopic(v interface{}, d *resource_bigquery_data_transfer_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigDataRefreshWindowDays(v interface{}, d *resource_bigquery_data_transfer_config_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_bigquery_data_transfer_config_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := 
v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenBigqueryDataTransferConfigDisabled(v interface{}, d *resource_bigquery_data_transfer_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigqueryDataTransferConfigParams(v interface{}, d *resource_bigquery_data_transfer_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - - kv := v.(map[string]interface{}) - - res := make(map[string]string) - for key, value := range kv { - res[key] = resource_bigquery_data_transfer_config_fmt.Sprintf("%v", value) - } - return res -} - -func expandBigqueryDataTransferConfigDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigDestinationDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigDataSourceId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigScheduleOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDisableAutoScheduling, err := expandBigqueryDataTransferConfigScheduleOptionsDisableAutoScheduling(original["disable_auto_scheduling"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_data_transfer_config_reflect.ValueOf(transformedDisableAutoScheduling); val.IsValid() && !isEmptyValue(val) { - transformed["disableAutoScheduling"] = transformedDisableAutoScheduling - } - - 
transformedStartTime, err := expandBigqueryDataTransferConfigScheduleOptionsStartTime(original["start_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_data_transfer_config_reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { - transformed["startTime"] = transformedStartTime - } - - transformedEndTime, err := expandBigqueryDataTransferConfigScheduleOptionsEndTime(original["end_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_data_transfer_config_reflect.ValueOf(transformedEndTime); val.IsValid() && !isEmptyValue(val) { - transformed["endTime"] = transformedEndTime - } - - return transformed, nil -} - -func expandBigqueryDataTransferConfigScheduleOptionsDisableAutoScheduling(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigScheduleOptionsStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigScheduleOptionsEndTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigEmailPreferences(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnableFailureEmail, err := expandBigqueryDataTransferConfigEmailPreferencesEnableFailureEmail(original["enable_failure_email"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_data_transfer_config_reflect.ValueOf(transformedEnableFailureEmail); val.IsValid() && !isEmptyValue(val) { - transformed["enableFailureEmail"] = transformedEnableFailureEmail - } - - return transformed, nil -} - -func 
expandBigqueryDataTransferConfigEmailPreferencesEnableFailureEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigNotificationPubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigDataRefreshWindowDays(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryDataTransferConfigParams(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceBigqueryDataTransferConfigEncoder(d *resource_bigquery_data_transfer_config_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - paramMap, ok := obj["params"] - if !ok { - paramMap = make(map[string]string) - } - - var params map[string]string - params = paramMap.(map[string]string) - - for _, sp := range sensitiveParams { - if auth, _ := d.GetOkExists("sensitive_params.0." + sp); auth != "" { - params[sp] = auth.(string) - } - } - - obj["params"] = params - - return obj, nil -} - -func resourceBigqueryDataTransferConfigDecoder(d *resource_bigquery_data_transfer_config_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if paramMap, ok := res["params"]; ok { - params := paramMap.(map[string]interface{}) - for _, sp := range sensitiveParams { - if _, apiOk := params[sp]; apiOk { - if _, exists := d.GetOkExists("sensitive_params.0." + sp); exists { - delete(params, sp) - } else { - params[sp] = d.Get("params." 
+ sp) - } - } - } - } - - return res, nil -} - -const datasetIdRegexp = `[0-9A-Za-z_]+` - -func validateDatasetId(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !resource_bigquery_dataset_regexp.MustCompile(datasetIdRegexp).MatchString(value) { - errors = append(errors, resource_bigquery_dataset_fmt.Errorf( - "%q must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_)", k)) - } - - if len(value) > 1024 { - errors = append(errors, resource_bigquery_dataset_fmt.Errorf( - "%q cannot be greater than 1,024 characters", k)) - } - - return -} - -func validateDefaultTableExpirationMs(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 3600000 { - errors = append(errors, resource_bigquery_dataset_fmt.Errorf("%q cannot be shorter than 3600000 milliseconds (one hour)", k)) - } - - return -} - -func resourceBigQueryDataset() *resource_bigquery_dataset_schema.Resource { - return &resource_bigquery_dataset_schema.Resource{ - Create: resourceBigQueryDatasetCreate, - Read: resourceBigQueryDatasetRead, - Update: resourceBigQueryDatasetUpdate, - Delete: resourceBigQueryDatasetDelete, - - Importer: &resource_bigquery_dataset_schema.ResourceImporter{ - State: resourceBigQueryDatasetImport, - }, - - Timeouts: &resource_bigquery_dataset_schema.ResourceTimeout{ - Create: resource_bigquery_dataset_schema.DefaultTimeout(4 * resource_bigquery_dataset_time.Minute), - Update: resource_bigquery_dataset_schema.DefaultTimeout(4 * resource_bigquery_dataset_time.Minute), - Delete: resource_bigquery_dataset_schema.DefaultTimeout(4 * resource_bigquery_dataset_time.Minute), - }, - - Schema: map[string]*resource_bigquery_dataset_schema.Schema{ - "dataset_id": { - Type: resource_bigquery_dataset_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateDatasetId, - Description: `A unique ID for this dataset, without the project name. 
The ID -must contain only letters (a-z, A-Z), numbers (0-9), or -underscores (_). The maximum length is 1,024 characters.`, - }, - - "access": { - Type: resource_bigquery_dataset_schema.TypeSet, - Computed: true, - Optional: true, - Description: `An array of objects that define dataset access for one or more entities.`, - Elem: bigqueryDatasetAccessSchema(), - }, - "default_encryption_configuration": { - Type: resource_bigquery_dataset_schema.TypeList, - Optional: true, - Description: `The default encryption key for all tables in the dataset. Once this property is set, -all newly-created partitioned tables in the dataset will have encryption key set to -this value, unless table creation request (or query) overrides the key.`, - MaxItems: 1, - Elem: &resource_bigquery_dataset_schema.Resource{ - Schema: map[string]*resource_bigquery_dataset_schema.Schema{ - "kms_key_name": { - Type: resource_bigquery_dataset_schema.TypeString, - Required: true, - Description: `Describes the Cloud KMS encryption key that will be used to protect destination -BigQuery table. The BigQuery Service Account associated with your project requires -access to this encryption key.`, - }, - }, - }, - }, - "default_partition_expiration_ms": { - Type: resource_bigquery_dataset_schema.TypeInt, - Optional: true, - Description: `The default partition expiration for all partitioned tables in -the dataset, in milliseconds. - - -Once this property is set, all newly-created partitioned tables in -the dataset will have an 'expirationMs' property in the 'timePartitioning' -settings set to this value, and changing the value will only -affect new tables, not existing ones. The storage in a partition will -have an expiration time of its partition time plus this value. -Setting this property overrides the use of 'defaultTableExpirationMs' -for partitioned tables: only one of 'defaultTableExpirationMs' and -'defaultPartitionExpirationMs' will be used for any new partitioned -table. 
If you provide an explicit 'timePartitioning.expirationMs' when -creating or updating a partitioned table, that value takes precedence -over the default partition expiration time indicated by this property.`, - }, - "default_table_expiration_ms": { - Type: resource_bigquery_dataset_schema.TypeInt, - Optional: true, - ValidateFunc: validateDefaultTableExpirationMs, - Description: `The default lifetime of all tables in the dataset, in milliseconds. -The minimum value is 3600000 milliseconds (one hour). - - -Once this property is set, all newly-created tables in the dataset -will have an 'expirationTime' property set to the creation time plus -the value in this property, and changing the value will only affect -new tables, not existing ones. When the 'expirationTime' for a given -table is reached, that table will be deleted automatically. -If a table's 'expirationTime' is modified or removed before the -table expires, or if you provide an explicit 'expirationTime' when -creating a table, that value takes precedence over the default -expiration time indicated by this property.`, - }, - "description": { - Type: resource_bigquery_dataset_schema.TypeString, - Optional: true, - Description: `A user-friendly description of the dataset`, - }, - "friendly_name": { - Type: resource_bigquery_dataset_schema.TypeString, - Optional: true, - Description: `A descriptive name for the dataset`, - }, - "labels": { - Type: resource_bigquery_dataset_schema.TypeMap, - Optional: true, - Description: `The labels associated with this dataset. You can use these to -organize and group your datasets`, - Elem: &resource_bigquery_dataset_schema.Schema{Type: resource_bigquery_dataset_schema.TypeString}, - }, - "location": { - Type: resource_bigquery_dataset_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: caseDiffSuppress, - Description: `The geographic location where the dataset should reside. 
-See [official docs](https://cloud.google.com/bigquery/docs/dataset-locations). - - -There are two types of locations, regional or multi-regional. A regional -location is a specific geographic place, such as Tokyo, and a multi-regional -location is a large geographic area, such as the United States, that -contains at least two geographic places. - - -The default value is multi-regional location 'US'. -Changing this forces a new resource to be created.`, - Default: "US", - }, - "creation_time": { - Type: resource_bigquery_dataset_schema.TypeInt, - Computed: true, - Description: `The time when this dataset was created, in milliseconds since the -epoch.`, - }, - "etag": { - Type: resource_bigquery_dataset_schema.TypeString, - Computed: true, - Description: `A hash of the resource.`, - }, - "last_modified_time": { - Type: resource_bigquery_dataset_schema.TypeInt, - Computed: true, - Description: `The date when this dataset or any of its tables was last modified, in -milliseconds since the epoch.`, - }, - "delete_contents_on_destroy": { - Type: resource_bigquery_dataset_schema.TypeBool, - Optional: true, - Default: false, - }, - "project": { - Type: resource_bigquery_dataset_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_bigquery_dataset_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func bigqueryDatasetAccessSchema() *resource_bigquery_dataset_schema.Resource { - return &resource_bigquery_dataset_schema.Resource{ - Schema: map[string]*resource_bigquery_dataset_schema.Schema{ - "domain": { - Type: resource_bigquery_dataset_schema.TypeString, - Optional: true, - Description: `A domain to grant access to. 
Any users signed in with the -domain specified will be granted the specified access`, - }, - "group_by_email": { - Type: resource_bigquery_dataset_schema.TypeString, - Optional: true, - Description: `An email address of a Google Group to grant access to.`, - }, - "role": { - Type: resource_bigquery_dataset_schema.TypeString, - Optional: true, - Description: `Describes the rights granted to the user specified by the other -member of the access object. Basic, predefined, and custom roles -are supported. Predefined roles that have equivalent basic roles -are swapped by the API to their basic counterparts. See -[official docs](https://cloud.google.com/bigquery/docs/access-control).`, - }, - "special_group": { - Type: resource_bigquery_dataset_schema.TypeString, - Optional: true, - Description: `A special group to grant access to. Possible values include: - - -* 'projectOwners': Owners of the enclosing project. - - -* 'projectReaders': Readers of the enclosing project. - - -* 'projectWriters': Writers of the enclosing project. - - -* 'allAuthenticatedUsers': All authenticated BigQuery users.`, - }, - "user_by_email": { - Type: resource_bigquery_dataset_schema.TypeString, - Optional: true, - Description: `An email address of a user to grant access to. For example: -fred@example.com`, - }, - "view": { - Type: resource_bigquery_dataset_schema.TypeList, - Optional: true, - Description: `A view from a different dataset to grant access to. Queries -executed against that view will have read access to tables in -this dataset. The role field is not required when this field is -set. 
If that view is updated by any user, access to the view -needs to be granted again via an update operation.`, - MaxItems: 1, - Elem: &resource_bigquery_dataset_schema.Resource{ - Schema: map[string]*resource_bigquery_dataset_schema.Schema{ - "dataset_id": { - Type: resource_bigquery_dataset_schema.TypeString, - Required: true, - Description: `The ID of the dataset containing this table.`, - }, - "project_id": { - Type: resource_bigquery_dataset_schema.TypeString, - Required: true, - Description: `The ID of the project containing this table.`, - }, - "table_id": { - Type: resource_bigquery_dataset_schema.TypeString, - Required: true, - Description: `The ID of the table. The ID must contain only letters (a-z, -A-Z), numbers (0-9), or underscores (_). The maximum length -is 1,024 characters.`, - }, - }, - }, - }, - }, - } -} - -func resourceBigQueryDatasetCreate(d *resource_bigquery_dataset_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - accessProp, err := expandBigQueryDatasetAccess(d.Get("access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("access"); !isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(accessProp)) && (ok || !resource_bigquery_dataset_reflect.DeepEqual(v, accessProp)) { - obj["access"] = accessProp - } - datasetReferenceProp, err := expandBigQueryDatasetDatasetReference(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(datasetReferenceProp)) { - obj["datasetReference"] = datasetReferenceProp - } - defaultTableExpirationMsProp, err := expandBigQueryDatasetDefaultTableExpirationMs(d.Get("default_table_expiration_ms"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_table_expiration_ms"); 
!isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(defaultTableExpirationMsProp)) && (ok || !resource_bigquery_dataset_reflect.DeepEqual(v, defaultTableExpirationMsProp)) { - obj["defaultTableExpirationMs"] = defaultTableExpirationMsProp - } - defaultPartitionExpirationMsProp, err := expandBigQueryDatasetDefaultPartitionExpirationMs(d.Get("default_partition_expiration_ms"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_partition_expiration_ms"); !isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(defaultPartitionExpirationMsProp)) && (ok || !resource_bigquery_dataset_reflect.DeepEqual(v, defaultPartitionExpirationMsProp)) { - obj["defaultPartitionExpirationMs"] = defaultPartitionExpirationMsProp - } - descriptionProp, err := expandBigQueryDatasetDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(descriptionProp)) && (ok || !resource_bigquery_dataset_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - friendlyNameProp, err := expandBigQueryDatasetFriendlyName(d.Get("friendly_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("friendly_name"); !isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(friendlyNameProp)) && (ok || !resource_bigquery_dataset_reflect.DeepEqual(v, friendlyNameProp)) { - obj["friendlyName"] = friendlyNameProp - } - labelsProp, err := expandBigQueryDatasetLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(labelsProp)) && (ok || !resource_bigquery_dataset_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - locationProp, err := expandBigQueryDatasetLocation(d.Get("location"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("location"); 
!isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(locationProp)) && (ok || !resource_bigquery_dataset_reflect.DeepEqual(v, locationProp)) { - obj["location"] = locationProp - } - defaultEncryptionConfigurationProp, err := expandBigQueryDatasetDefaultEncryptionConfiguration(d.Get("default_encryption_configuration"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_encryption_configuration"); !isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(defaultEncryptionConfigurationProp)) && (ok || !resource_bigquery_dataset_reflect.DeepEqual(v, defaultEncryptionConfigurationProp)) { - obj["defaultEncryptionConfiguration"] = defaultEncryptionConfigurationProp - } - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets") - if err != nil { - return err - } - - resource_bigquery_dataset_log.Printf("[DEBUG] Creating new Dataset: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_bigquery_dataset_schema.TimeoutCreate)) - if err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error creating Dataset: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_bigquery_dataset_log.Printf("[DEBUG] Finished creating Dataset %q: %#v", d.Id(), res) - - return resourceBigQueryDatasetRead(d, meta) -} - -func resourceBigQueryDatasetRead(d *resource_bigquery_dataset_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_bigquery_dataset_fmt.Sprintf("BigQueryDataset %q", d.Id())) - } - - if _, ok := d.GetOkExists("delete_contents_on_destroy"); !ok { - if err := d.Set("delete_contents_on_destroy", false); err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error setting delete_contents_on_destroy: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - - if err := d.Set("access", flattenBigQueryDatasetAccess(res["access"], d, config)); err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("creation_time", flattenBigQueryDatasetCreationTime(res["creationTime"], d, config)); err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - - if flattenedProp := flattenBigQueryDatasetDatasetReference(res["datasetReference"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*resource_bigquery_dataset_googleapi.Error); ok { - return resource_bigquery_dataset_fmt.Errorf("Error reading Dataset: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error setting 
%s: %s", k, err) - } - } - } - } - if err := d.Set("default_table_expiration_ms", flattenBigQueryDatasetDefaultTableExpirationMs(res["defaultTableExpirationMs"], d, config)); err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("default_partition_expiration_ms", flattenBigQueryDatasetDefaultPartitionExpirationMs(res["defaultPartitionExpirationMs"], d, config)); err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("description", flattenBigQueryDatasetDescription(res["description"], d, config)); err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("etag", flattenBigQueryDatasetEtag(res["etag"], d, config)); err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("friendly_name", flattenBigQueryDatasetFriendlyName(res["friendlyName"], d, config)); err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("labels", flattenBigQueryDatasetLabels(res["labels"], d, config)); err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("last_modified_time", flattenBigQueryDatasetLastModifiedTime(res["lastModifiedTime"], d, config)); err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("location", flattenBigQueryDatasetLocation(res["location"], d, config)); err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("default_encryption_configuration", flattenBigQueryDatasetDefaultEncryptionConfiguration(res["defaultEncryptionConfiguration"], d, config)); err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("self_link", 
ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - - return nil -} - -func resourceBigQueryDatasetUpdate(d *resource_bigquery_dataset_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - accessProp, err := expandBigQueryDatasetAccess(d.Get("access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("access"); !isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(v)) && (ok || !resource_bigquery_dataset_reflect.DeepEqual(v, accessProp)) { - obj["access"] = accessProp - } - datasetReferenceProp, err := expandBigQueryDatasetDatasetReference(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(datasetReferenceProp)) { - obj["datasetReference"] = datasetReferenceProp - } - defaultTableExpirationMsProp, err := expandBigQueryDatasetDefaultTableExpirationMs(d.Get("default_table_expiration_ms"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_table_expiration_ms"); !isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(v)) && (ok || !resource_bigquery_dataset_reflect.DeepEqual(v, defaultTableExpirationMsProp)) { - obj["defaultTableExpirationMs"] = defaultTableExpirationMsProp - } - defaultPartitionExpirationMsProp, err := expandBigQueryDatasetDefaultPartitionExpirationMs(d.Get("default_partition_expiration_ms"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_partition_expiration_ms"); !isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(v)) && 
(ok || !resource_bigquery_dataset_reflect.DeepEqual(v, defaultPartitionExpirationMsProp)) { - obj["defaultPartitionExpirationMs"] = defaultPartitionExpirationMsProp - } - descriptionProp, err := expandBigQueryDatasetDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(v)) && (ok || !resource_bigquery_dataset_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - friendlyNameProp, err := expandBigQueryDatasetFriendlyName(d.Get("friendly_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("friendly_name"); !isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(v)) && (ok || !resource_bigquery_dataset_reflect.DeepEqual(v, friendlyNameProp)) { - obj["friendlyName"] = friendlyNameProp - } - labelsProp, err := expandBigQueryDatasetLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(v)) && (ok || !resource_bigquery_dataset_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - locationProp, err := expandBigQueryDatasetLocation(d.Get("location"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("location"); !isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(v)) && (ok || !resource_bigquery_dataset_reflect.DeepEqual(v, locationProp)) { - obj["location"] = locationProp - } - defaultEncryptionConfigurationProp, err := expandBigQueryDatasetDefaultEncryptionConfiguration(d.Get("default_encryption_configuration"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_encryption_configuration"); !isEmptyValue(resource_bigquery_dataset_reflect.ValueOf(v)) && (ok || !resource_bigquery_dataset_reflect.DeepEqual(v, defaultEncryptionConfigurationProp)) { - obj["defaultEncryptionConfiguration"] = 
defaultEncryptionConfigurationProp - } - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return err - } - - resource_bigquery_dataset_log.Printf("[DEBUG] Updating Dataset %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_bigquery_dataset_schema.TimeoutUpdate)) - - if err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error updating Dataset %q: %s", d.Id(), err) - } else { - resource_bigquery_dataset_log.Printf("[DEBUG] Finished updating Dataset %q: %#v", d.Id(), res) - } - - return resourceBigQueryDatasetRead(d, meta) -} - -func resourceBigQueryDatasetDelete(d *resource_bigquery_dataset_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_dataset_fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}?deleteContents={{delete_contents_on_destroy}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_bigquery_dataset_log.Printf("[DEBUG] Deleting Dataset %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_bigquery_dataset_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Dataset") - } - - resource_bigquery_dataset_log.Printf("[DEBUG] Finished deleting Dataset %q: %#v", d.Id(), res) - return nil -} - -func resourceBigQueryDatasetImport(d 
*resource_bigquery_dataset_schema.ResourceData, meta interface{}) ([]*resource_bigquery_dataset_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/datasets/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return nil, resource_bigquery_dataset_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if err := d.Set("delete_contents_on_destroy", false); err != nil { - return nil, resource_bigquery_dataset_fmt.Errorf("Error setting delete_contents_on_destroy: %s", err) - } - - return []*resource_bigquery_dataset_schema.ResourceData{d}, nil -} - -func flattenBigQueryDatasetAccess(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_bigquery_dataset_schema.NewSet(resource_bigquery_dataset_schema.HashResource(bigqueryDatasetAccessSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "domain": flattenBigQueryDatasetAccessDomain(original["domain"], d, config), - "group_by_email": flattenBigQueryDatasetAccessGroupByEmail(original["groupByEmail"], d, config), - "role": flattenBigQueryDatasetAccessRole(original["role"], d, config), - "special_group": flattenBigQueryDatasetAccessSpecialGroup(original["specialGroup"], d, config), - "user_by_email": flattenBigQueryDatasetAccessUserByEmail(original["userByEmail"], d, config), - "view": flattenBigQueryDatasetAccessView(original["view"], d, config), - }) - } - return transformed -} - -func flattenBigQueryDatasetAccessDomain(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenBigQueryDatasetAccessGroupByEmail(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessRole(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessSpecialGroup(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessUserByEmail(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessView(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset_id"] = - flattenBigQueryDatasetAccessViewDatasetId(original["datasetId"], d, config) - transformed["project_id"] = - flattenBigQueryDatasetAccessViewProjectId(original["projectId"], d, config) - transformed["table_id"] = - flattenBigQueryDatasetAccessViewTableId(original["tableId"], d, config) - return []interface{}{transformed} -} - -func flattenBigQueryDatasetAccessViewDatasetId(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessViewProjectId(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetAccessViewTableId(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetCreationTime(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_bigquery_dataset_strconv.ParseInt(strVal, 10, 
64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenBigQueryDatasetDatasetReference(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset_id"] = - flattenBigQueryDatasetDatasetReferenceDatasetId(original["datasetId"], d, config) - return []interface{}{transformed} -} - -func flattenBigQueryDatasetDatasetReferenceDatasetId(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetDefaultTableExpirationMs(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_bigquery_dataset_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenBigQueryDatasetDefaultPartitionExpirationMs(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_bigquery_dataset_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenBigQueryDatasetDescription(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetEtag(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetFriendlyName(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenBigQueryDatasetLabels(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryDatasetLastModifiedTime(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_bigquery_dataset_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenBigQueryDatasetLocation(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - if v == nil { - return "US" - } - return v -} - -func flattenBigQueryDatasetDefaultEncryptionConfiguration(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["kms_key_name"] = - flattenBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName(original["kmsKeyName"], d, config) - return []interface{}{transformed} -} - -func flattenBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName(v interface{}, d *resource_bigquery_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBigQueryDatasetAccess(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_bigquery_dataset_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDomain, err := expandBigQueryDatasetAccessDomain(original["domain"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_bigquery_dataset_reflect.ValueOf(transformedDomain); val.IsValid() && !isEmptyValue(val) { - transformed["domain"] = transformedDomain - } - - transformedGroupByEmail, err := expandBigQueryDatasetAccessGroupByEmail(original["group_by_email"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_dataset_reflect.ValueOf(transformedGroupByEmail); val.IsValid() && !isEmptyValue(val) { - transformed["groupByEmail"] = transformedGroupByEmail - } - - transformedRole, err := expandBigQueryDatasetAccessRole(original["role"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_dataset_reflect.ValueOf(transformedRole); val.IsValid() && !isEmptyValue(val) { - transformed["role"] = transformedRole - } - - transformedSpecialGroup, err := expandBigQueryDatasetAccessSpecialGroup(original["special_group"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_dataset_reflect.ValueOf(transformedSpecialGroup); val.IsValid() && !isEmptyValue(val) { - transformed["specialGroup"] = transformedSpecialGroup - } - - transformedUserByEmail, err := expandBigQueryDatasetAccessUserByEmail(original["user_by_email"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_dataset_reflect.ValueOf(transformedUserByEmail); val.IsValid() && !isEmptyValue(val) { - transformed["userByEmail"] = transformedUserByEmail - } - - transformedView, err := expandBigQueryDatasetAccessView(original["view"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_dataset_reflect.ValueOf(transformedView); val.IsValid() && !isEmptyValue(val) { - transformed["view"] = transformedView - } - - req = append(req, transformed) - } - return req, nil -} - -func expandBigQueryDatasetAccessDomain(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessGroupByEmail(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessRole(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessSpecialGroup(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessUserByEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessView(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDatasetId, err := expandBigQueryDatasetAccessViewDatasetId(original["dataset_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_dataset_reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedProjectId, err := expandBigQueryDatasetAccessViewProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_dataset_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedTableId, err := expandBigQueryDatasetAccessViewTableId(original["table_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_dataset_reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { - transformed["tableId"] = transformedTableId - } - - return transformed, nil -} - -func expandBigQueryDatasetAccessViewDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessViewProjectId(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetAccessViewTableId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetDatasetReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - transformed := make(map[string]interface{}) - transformedDatasetId, err := expandBigQueryDatasetDatasetReferenceDatasetId(d.Get("dataset_id"), d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_dataset_reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - return transformed, nil -} - -func expandBigQueryDatasetDatasetReferenceDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetDefaultTableExpirationMs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetDefaultPartitionExpirationMs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetFriendlyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryDatasetLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandBigQueryDatasetLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandBigQueryDatasetDefaultEncryptionConfiguration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKeyName, err := expandBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_dataset_reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName - } - - return transformed, nil -} - -func expandBigQueryDatasetDefaultEncryptionConfigurationKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -var bigqueryAccessRoleToPrimitiveMap = map[string]string{ - "roles/bigquery.dataOwner": "OWNER", - "roles/bigquery.dataEditor": "WRITER", - "roles/bigquery.dataViewer": "READER", -} - -func resourceBigQueryDatasetAccessRoleDiffSuppress(k, old, new string, d *resource_bigquery_dataset_access_schema.ResourceData) bool { - if primitiveRole, ok := bigqueryAccessRoleToPrimitiveMap[new]; ok { - return primitiveRole == old - } - return false -} - -func resourceBigQueryDatasetAccessIamMemberDiffSuppress(k, old, new string, d *resource_bigquery_dataset_access_schema.ResourceData) bool { - if primitiveRole, ok := bigqueryAccessRoleToPrimitiveMap[new]; ok { - return primitiveRole == old - } - - if d.Get("api_updated_member") == true { - expectedIamMember := d.Get("iam_member").(string) - parts := resource_bigquery_dataset_access_strings.SplitAfter(expectedIamMember, ":") - - strippedIamMember := parts[0] - if len(parts) > 1 { - strippedIamMember = parts[1] - } - - if memberInState := d.Get("user_by_email").(string); memberInState != "" { - return memberInState == strippedIamMember - } - - if memberInState := 
d.Get("group_by_email").(string); memberInState != "" { - return memberInState == strippedIamMember - } - - if memberInState := d.Get("domain").(string); memberInState != "" { - return memberInState == strippedIamMember - } - - if memberInState := d.Get("special_group").(string); memberInState != "" { - return memberInState == strippedIamMember - } - } - - return false -} - -func resourceBigQueryDatasetAccessReassignIamMemberInNestedObjectList(d *resource_bigquery_dataset_access_schema.ResourceData, meta interface{}, items []interface{}) (member_type string, member interface{}, err error) { - expectedRole, err := expandNestedBigQueryDatasetAccessRole(d.Get("role"), d, meta.(*Config)) - if err != nil { - return "", nil, err - } - expectedFlattenedRole := flattenNestedBigQueryDatasetAccessRole(expectedRole, d, meta.(*Config)) - - expectedIamMember, err := expandNestedBigQueryDatasetAccessIamMember(d.Get("iam_member"), d, meta.(*Config)) - if err != nil { - return "", nil, err - } - expectedFlattenedIamMember := flattenNestedBigQueryDatasetAccessIamMember(expectedIamMember, d, meta.(*Config)) - - parts := resource_bigquery_dataset_access_strings.SplitAfter(expectedFlattenedIamMember.(string), ":") - - expectedStrippedIamMember := parts[0] - if len(parts) > 1 { - expectedStrippedIamMember = parts[1] - } - - for _, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemRole := flattenNestedBigQueryDatasetAccessRole(item["role"], d, meta.(*Config)) - - if !(isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(itemRole)) && isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(expectedFlattenedRole))) && !resource_bigquery_dataset_access_reflect.DeepEqual(itemRole, expectedFlattenedRole) { - resource_bigquery_dataset_access_log.Printf("[DEBUG] Skipping item with role= %#v, looking for %#v)", itemRole, expectedFlattenedRole) - continue - } - - itemUserByEmail := 
flattenNestedBigQueryDatasetAccessUserByEmail(item["userByEmail"], d, meta.(*Config)) - if resource_bigquery_dataset_access_reflect.DeepEqual(itemUserByEmail, expectedStrippedIamMember) { - resource_bigquery_dataset_access_log.Printf("[DEBUG] Iam Member changed to userByEmail= %#v)", itemUserByEmail) - return "user_by_email", itemUserByEmail, nil - } - itemGroupByEmail := flattenNestedBigQueryDatasetAccessGroupByEmail(item["groupByEmail"], d, meta.(*Config)) - if resource_bigquery_dataset_access_reflect.DeepEqual(itemGroupByEmail, expectedStrippedIamMember) { - resource_bigquery_dataset_access_log.Printf("[DEBUG] Iam Member changed to groupByEmail= %#v)", itemGroupByEmail) - return "group_by_email", itemGroupByEmail, nil - } - itemDomain := flattenNestedBigQueryDatasetAccessDomain(item["domain"], d, meta.(*Config)) - if resource_bigquery_dataset_access_reflect.DeepEqual(itemDomain, expectedStrippedIamMember) { - resource_bigquery_dataset_access_log.Printf("[DEBUG] Iam Member changed to domain= %#v)", itemDomain) - return "domain", itemDomain, nil - } - itemSpecialGroup := flattenNestedBigQueryDatasetAccessSpecialGroup(item["specialGroup"], d, meta.(*Config)) - if resource_bigquery_dataset_access_reflect.DeepEqual(itemSpecialGroup, expectedStrippedIamMember) { - resource_bigquery_dataset_access_log.Printf("[DEBUG] Iam Member changed to specialGroup= %#v)", itemSpecialGroup) - return "special_group", itemSpecialGroup, nil - } - itemIamMember := flattenNestedBigQueryDatasetAccessIamMember(item["iamMember"], d, meta.(*Config)) - if resource_bigquery_dataset_access_reflect.DeepEqual(itemIamMember, expectedFlattenedIamMember) { - resource_bigquery_dataset_access_log.Printf("[DEBUG] Iam Member stayed as iamMember= %#v)", itemIamMember) - return "", nil, nil - } - continue - } - resource_bigquery_dataset_access_log.Printf("[DEBUG] Did not find item for resource %q)", d.Id()) - return "", nil, nil -} - -func resourceBigQueryDatasetAccess() 
*resource_bigquery_dataset_access_schema.Resource { - return &resource_bigquery_dataset_access_schema.Resource{ - Create: resourceBigQueryDatasetAccessCreate, - Read: resourceBigQueryDatasetAccessRead, - Delete: resourceBigQueryDatasetAccessDelete, - - Timeouts: &resource_bigquery_dataset_access_schema.ResourceTimeout{ - Create: resource_bigquery_dataset_access_schema.DefaultTimeout(4 * resource_bigquery_dataset_access_time.Minute), - Delete: resource_bigquery_dataset_access_schema.DefaultTimeout(4 * resource_bigquery_dataset_access_time.Minute), - }, - - Schema: map[string]*resource_bigquery_dataset_access_schema.Schema{ - "dataset_id": { - Type: resource_bigquery_dataset_access_schema.TypeString, - Required: true, - ForceNew: true, - Description: `A unique ID for this dataset, without the project name. The ID -must contain only letters (a-z, A-Z), numbers (0-9), or -underscores (_). The maximum length is 1,024 characters.`, - }, - "domain": { - Type: resource_bigquery_dataset_access_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: resourceBigQueryDatasetAccessIamMemberDiffSuppress, - Description: `A domain to grant access to. 
Any users signed in with the -domain specified will be granted the specified access`, - ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view"}, - }, - "group_by_email": { - Type: resource_bigquery_dataset_access_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: resourceBigQueryDatasetAccessIamMemberDiffSuppress, - Description: `An email address of a Google Group to grant access to.`, - ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view"}, - }, - "iam_member": { - Type: resource_bigquery_dataset_access_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: resourceBigQueryDatasetAccessIamMemberDiffSuppress, - Description: `Some other type of member that appears in the IAM Policy but isn't a user, -group, domain, or special group. For example: 'allUsers'`, - ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view"}, - }, - "role": { - Type: resource_bigquery_dataset_access_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: resourceBigQueryDatasetAccessRoleDiffSuppress, - Description: `Describes the rights granted to the user specified by the other -member of the access object. Basic, predefined, and custom roles are -supported. Predefined roles that have equivalent basic roles are -swapped by the API to their basic counterparts, and will show a diff -post-create. See -[official docs](https://cloud.google.com/bigquery/docs/access-control).`, - }, - "special_group": { - Type: resource_bigquery_dataset_access_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: resourceBigQueryDatasetAccessIamMemberDiffSuppress, - Description: `A special group to grant access to. Possible values include: - - -* 'projectOwners': Owners of the enclosing project. - - -* 'projectReaders': Readers of the enclosing project. 
- - -* 'projectWriters': Writers of the enclosing project. - - -* 'allAuthenticatedUsers': All authenticated BigQuery users.`, - ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view"}, - }, - "user_by_email": { - Type: resource_bigquery_dataset_access_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: resourceBigQueryDatasetAccessIamMemberDiffSuppress, - Description: `An email address of a user to grant access to. For example: -fred@example.com`, - ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view"}, - }, - "view": { - Type: resource_bigquery_dataset_access_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A view from a different dataset to grant access to. Queries -executed against that view will have read access to tables in -this dataset. The role field is not required when this field is -set. If that view is updated by any user, access to the view -needs to be granted again via an update operation.`, - MaxItems: 1, - Elem: &resource_bigquery_dataset_access_schema.Resource{ - Schema: map[string]*resource_bigquery_dataset_access_schema.Schema{ - "dataset_id": { - Type: resource_bigquery_dataset_access_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the dataset containing this table.`, - }, - "project_id": { - Type: resource_bigquery_dataset_access_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the project containing this table.`, - }, - "table_id": { - Type: resource_bigquery_dataset_access_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the table. The ID must contain only letters (a-z, -A-Z), numbers (0-9), or underscores (_). 
The maximum length -is 1,024 characters.`, - }, - }, - }, - ExactlyOneOf: []string{"user_by_email", "group_by_email", "domain", "special_group", "iam_member", "view"}, - }, - "api_updated_member": { - Type: resource_bigquery_dataset_access_schema.TypeBool, - Computed: true, - Description: "If true, represents that that the iam_member in the config was translated to a different member type by the API, and is stored in state as a different member type", - }, - "project": { - Type: resource_bigquery_dataset_access_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigQueryDatasetAccessCreate(d *resource_bigquery_dataset_access_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - datasetIdProp, err := expandNestedBigQueryDatasetAccessDatasetId(d.Get("dataset_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("dataset_id"); !isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(datasetIdProp)) && (ok || !resource_bigquery_dataset_access_reflect.DeepEqual(v, datasetIdProp)) { - obj["datasetId"] = datasetIdProp - } - roleProp, err := expandNestedBigQueryDatasetAccessRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("role"); !isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(roleProp)) && (ok || !resource_bigquery_dataset_access_reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp - } - userByEmailProp, err := expandNestedBigQueryDatasetAccessUserByEmail(d.Get("user_by_email"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_by_email"); !isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(userByEmailProp)) && (ok || !resource_bigquery_dataset_access_reflect.DeepEqual(v, userByEmailProp)) { 
- obj["userByEmail"] = userByEmailProp - } - groupByEmailProp, err := expandNestedBigQueryDatasetAccessGroupByEmail(d.Get("group_by_email"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("group_by_email"); !isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(groupByEmailProp)) && (ok || !resource_bigquery_dataset_access_reflect.DeepEqual(v, groupByEmailProp)) { - obj["groupByEmail"] = groupByEmailProp - } - domainProp, err := expandNestedBigQueryDatasetAccessDomain(d.Get("domain"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("domain"); !isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(domainProp)) && (ok || !resource_bigquery_dataset_access_reflect.DeepEqual(v, domainProp)) { - obj["domain"] = domainProp - } - specialGroupProp, err := expandNestedBigQueryDatasetAccessSpecialGroup(d.Get("special_group"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("special_group"); !isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(specialGroupProp)) && (ok || !resource_bigquery_dataset_access_reflect.DeepEqual(v, specialGroupProp)) { - obj["specialGroup"] = specialGroupProp - } - iamMemberProp, err := expandNestedBigQueryDatasetAccessIamMember(d.Get("iam_member"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("iam_member"); !isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(iamMemberProp)) && (ok || !resource_bigquery_dataset_access_reflect.DeepEqual(v, iamMemberProp)) { - obj["iamMember"] = iamMemberProp - } - viewProp, err := expandNestedBigQueryDatasetAccessView(d.Get("view"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("view"); !isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(viewProp)) && (ok || !resource_bigquery_dataset_access_reflect.DeepEqual(v, viewProp)) { - obj["view"] = viewProp - } - - lockName, err := replaceVars(d, config, "{{dataset_id}}") - if 
err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return err - } - - resource_bigquery_dataset_access_log.Printf("[DEBUG] Creating new DatasetAccess: %#v", obj) - - obj, err = resourceBigQueryDatasetAccessPatchCreateEncoder(d, meta, obj) - if err != nil { - return err - } - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error fetching project for DatasetAccess: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_bigquery_dataset_access_schema.TimeoutCreate), isBigqueryIAMQuotaError) - if err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error creating DatasetAccess: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if err := d.Set("api_updated_member", false); err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error setting api_updated_member: %s", err) - } - - if iamMemberProp != "" { - member_type, member, err := resourceBigQueryDatasetAccessReassignIamMemberInNestedObjectList(d, meta, res["access"].([]interface{})) - if err != nil { - resource_bigquery_dataset_access_fmt.Println(err) - } - - if member_type != "" { - if err := d.Set(member_type, member.(string)); err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error setting member_type: %s", err) - } - if err := d.Set("iam_member", ""); err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error setting iam_member: %s", err) - } - if err := 
d.Set("api_updated_member", true); err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error setting api_updated_member: %s", err) - } - } - } - - resource_bigquery_dataset_access_log.Printf("[DEBUG] Finished creating DatasetAccess %q: %#v", d.Id(), res) - - return resourceBigQueryDatasetAccessRead(d, meta) -} - -func resourceBigQueryDatasetAccessRead(d *resource_bigquery_dataset_access_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error fetching project for DatasetAccess: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isBigqueryIAMQuotaError) - if err != nil { - return handleNotFoundError(err, d, resource_bigquery_dataset_access_fmt.Sprintf("BigQueryDatasetAccess %q", d.Id())) - } - - res, err = flattenNestedBigQueryDatasetAccess(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_bigquery_dataset_access_log.Printf("[DEBUG] Removing BigQueryDatasetAccess because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error reading DatasetAccess: %s", err) - } - - if err := d.Set("role", flattenNestedBigQueryDatasetAccessRole(res["role"], d, config)); err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error reading DatasetAccess: %s", err) - } - if err := d.Set("user_by_email", 
flattenNestedBigQueryDatasetAccessUserByEmail(res["userByEmail"], d, config)); err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error reading DatasetAccess: %s", err) - } - if err := d.Set("group_by_email", flattenNestedBigQueryDatasetAccessGroupByEmail(res["groupByEmail"], d, config)); err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error reading DatasetAccess: %s", err) - } - if err := d.Set("domain", flattenNestedBigQueryDatasetAccessDomain(res["domain"], d, config)); err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error reading DatasetAccess: %s", err) - } - if err := d.Set("special_group", flattenNestedBigQueryDatasetAccessSpecialGroup(res["specialGroup"], d, config)); err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error reading DatasetAccess: %s", err) - } - if err := d.Set("iam_member", flattenNestedBigQueryDatasetAccessIamMember(res["iamMember"], d, config)); err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error reading DatasetAccess: %s", err) - } - if err := d.Set("view", flattenNestedBigQueryDatasetAccessView(res["view"], d, config)); err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error reading DatasetAccess: %s", err) - } - - return nil -} - -func resourceBigQueryDatasetAccessDelete(d *resource_bigquery_dataset_access_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_dataset_access_fmt.Errorf("Error fetching project for DatasetAccess: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "{{dataset_id}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, 
"{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - obj, err = resourceBigQueryDatasetAccessPatchDeleteEncoder(d, meta, obj) - if err != nil { - return handleNotFoundError(err, d, "DatasetAccess") - } - resource_bigquery_dataset_access_log.Printf("[DEBUG] Deleting DatasetAccess %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_bigquery_dataset_access_schema.TimeoutDelete), isBigqueryIAMQuotaError) - if err != nil { - return handleNotFoundError(err, d, "DatasetAccess") - } - - resource_bigquery_dataset_access_log.Printf("[DEBUG] Finished deleting DatasetAccess %q: %#v", d.Id(), res) - return nil -} - -func flattenNestedBigQueryDatasetAccessRole(v interface{}, d *resource_bigquery_dataset_access_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessUserByEmail(v interface{}, d *resource_bigquery_dataset_access_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessGroupByEmail(v interface{}, d *resource_bigquery_dataset_access_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessDomain(v interface{}, d *resource_bigquery_dataset_access_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessSpecialGroup(v interface{}, d *resource_bigquery_dataset_access_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessIamMember(v interface{}, d *resource_bigquery_dataset_access_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessView(v interface{}, d *resource_bigquery_dataset_access_schema.ResourceData, config *Config) 
interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset_id"] = - flattenNestedBigQueryDatasetAccessViewDatasetId(original["datasetId"], d, config) - transformed["project_id"] = - flattenNestedBigQueryDatasetAccessViewProjectId(original["projectId"], d, config) - transformed["table_id"] = - flattenNestedBigQueryDatasetAccessViewTableId(original["tableId"], d, config) - return []interface{}{transformed} -} - -func flattenNestedBigQueryDatasetAccessViewDatasetId(v interface{}, d *resource_bigquery_dataset_access_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessViewProjectId(v interface{}, d *resource_bigquery_dataset_access_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedBigQueryDatasetAccessViewTableId(v interface{}, d *resource_bigquery_dataset_access_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedBigQueryDatasetAccessDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessRole(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - return nil, nil - } - - if primitiveRole, ok := bigqueryAccessRoleToPrimitiveMap[v.(string)]; ok { - return primitiveRole, nil - } - return v, nil -} - -func expandNestedBigQueryDatasetAccessUserByEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessGroupByEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessDomain(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandNestedBigQueryDatasetAccessSpecialGroup(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessIamMember(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessView(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDatasetId, err := expandNestedBigQueryDatasetAccessViewDatasetId(original["dataset_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_dataset_access_reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedProjectId, err := expandNestedBigQueryDatasetAccessViewProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_dataset_access_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedTableId, err := expandNestedBigQueryDatasetAccessViewTableId(original["table_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_dataset_access_reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { - transformed["tableId"] = transformedTableId - } - - return transformed, nil -} - -func expandNestedBigQueryDatasetAccessViewDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessViewProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedBigQueryDatasetAccessViewTableId(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func flattenNestedBigQueryDatasetAccess(d *resource_bigquery_dataset_access_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["access"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_bigquery_dataset_access_fmt.Errorf("expected list or map for value access. Actual value: %v", v) - } - - _, item, err := resourceBigQueryDatasetAccessFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceBigQueryDatasetAccessFindNestedObjectInList(d *resource_bigquery_dataset_access_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedRole, err := expandNestedBigQueryDatasetAccessRole(d.Get("role"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedRole := flattenNestedBigQueryDatasetAccessRole(expectedRole, d, meta.(*Config)) - expectedUserByEmail, err := expandNestedBigQueryDatasetAccessUserByEmail(d.Get("user_by_email"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedUserByEmail := flattenNestedBigQueryDatasetAccessUserByEmail(expectedUserByEmail, d, meta.(*Config)) - expectedGroupByEmail, err := expandNestedBigQueryDatasetAccessGroupByEmail(d.Get("group_by_email"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedGroupByEmail := flattenNestedBigQueryDatasetAccessGroupByEmail(expectedGroupByEmail, d, meta.(*Config)) - expectedDomain, err := expandNestedBigQueryDatasetAccessDomain(d.Get("domain"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedDomain := 
flattenNestedBigQueryDatasetAccessDomain(expectedDomain, d, meta.(*Config)) - expectedSpecialGroup, err := expandNestedBigQueryDatasetAccessSpecialGroup(d.Get("special_group"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedSpecialGroup := flattenNestedBigQueryDatasetAccessSpecialGroup(expectedSpecialGroup, d, meta.(*Config)) - expectedIamMember, err := expandNestedBigQueryDatasetAccessIamMember(d.Get("iam_member"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedIamMember := flattenNestedBigQueryDatasetAccessIamMember(expectedIamMember, d, meta.(*Config)) - expectedView, err := expandNestedBigQueryDatasetAccessView(d.Get("view"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedView := flattenNestedBigQueryDatasetAccessView(expectedView, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemRole := flattenNestedBigQueryDatasetAccessRole(item["role"], d, meta.(*Config)) - - if !(isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(itemRole)) && isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(expectedFlattenedRole))) && !resource_bigquery_dataset_access_reflect.DeepEqual(itemRole, expectedFlattenedRole) { - resource_bigquery_dataset_access_log.Printf("[DEBUG] Skipping item with role= %#v, looking for %#v)", itemRole, expectedFlattenedRole) - continue - } - itemUserByEmail := flattenNestedBigQueryDatasetAccessUserByEmail(item["userByEmail"], d, meta.(*Config)) - - if !(isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(itemUserByEmail)) && isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(expectedFlattenedUserByEmail))) && !resource_bigquery_dataset_access_reflect.DeepEqual(itemUserByEmail, expectedFlattenedUserByEmail) { - resource_bigquery_dataset_access_log.Printf("[DEBUG] Skipping item with userByEmail= %#v, looking for %#v)", 
itemUserByEmail, expectedFlattenedUserByEmail) - continue - } - itemGroupByEmail := flattenNestedBigQueryDatasetAccessGroupByEmail(item["groupByEmail"], d, meta.(*Config)) - - if !(isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(itemGroupByEmail)) && isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(expectedFlattenedGroupByEmail))) && !resource_bigquery_dataset_access_reflect.DeepEqual(itemGroupByEmail, expectedFlattenedGroupByEmail) { - resource_bigquery_dataset_access_log.Printf("[DEBUG] Skipping item with groupByEmail= %#v, looking for %#v)", itemGroupByEmail, expectedFlattenedGroupByEmail) - continue - } - itemDomain := flattenNestedBigQueryDatasetAccessDomain(item["domain"], d, meta.(*Config)) - - if !(isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(itemDomain)) && isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(expectedFlattenedDomain))) && !resource_bigquery_dataset_access_reflect.DeepEqual(itemDomain, expectedFlattenedDomain) { - resource_bigquery_dataset_access_log.Printf("[DEBUG] Skipping item with domain= %#v, looking for %#v)", itemDomain, expectedFlattenedDomain) - continue - } - itemSpecialGroup := flattenNestedBigQueryDatasetAccessSpecialGroup(item["specialGroup"], d, meta.(*Config)) - - if !(isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(itemSpecialGroup)) && isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(expectedFlattenedSpecialGroup))) && !resource_bigquery_dataset_access_reflect.DeepEqual(itemSpecialGroup, expectedFlattenedSpecialGroup) { - resource_bigquery_dataset_access_log.Printf("[DEBUG] Skipping item with specialGroup= %#v, looking for %#v)", itemSpecialGroup, expectedFlattenedSpecialGroup) - continue - } - itemIamMember := flattenNestedBigQueryDatasetAccessIamMember(item["iamMember"], d, meta.(*Config)) - - if !(isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(itemIamMember)) && 
isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(expectedFlattenedIamMember))) && !resource_bigquery_dataset_access_reflect.DeepEqual(itemIamMember, expectedFlattenedIamMember) { - resource_bigquery_dataset_access_log.Printf("[DEBUG] Skipping item with iamMember= %#v, looking for %#v)", itemIamMember, expectedFlattenedIamMember) - continue - } - itemView := flattenNestedBigQueryDatasetAccessView(item["view"], d, meta.(*Config)) - - if !(isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(itemView)) && isEmptyValue(resource_bigquery_dataset_access_reflect.ValueOf(expectedFlattenedView))) && !resource_bigquery_dataset_access_reflect.DeepEqual(itemView, expectedFlattenedView) { - resource_bigquery_dataset_access_log.Printf("[DEBUG] Skipping item with view= %#v, looking for %#v)", itemView, expectedFlattenedView) - continue - } - resource_bigquery_dataset_access_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func resourceBigQueryDatasetAccessPatchCreateEncoder(d *resource_bigquery_dataset_access_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceBigQueryDatasetAccessListForPatch(d, meta) - if err != nil { - return nil, err - } - - _, found, err := resourceBigQueryDatasetAccessFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - - if found != nil { - return nil, resource_bigquery_dataset_access_fmt.Errorf("Unable to create DatasetAccess, existing object already found: %+v", found) - } - - res := map[string]interface{}{ - "access": append(currItems, obj), - } - - return res, nil -} - -func resourceBigQueryDatasetAccessPatchDeleteEncoder(d *resource_bigquery_dataset_access_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceBigQueryDatasetAccessListForPatch(d, meta) - if err != nil { - 
return nil, err - } - - idx, item, err := resourceBigQueryDatasetAccessFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - if item == nil { - - return nil, &resource_bigquery_dataset_access_googleapi.Error{ - Code: 404, - Message: "DatasetAccess not found in list", - } - } - - updatedItems := append(currItems[:idx], currItems[idx+1:]...) - res := map[string]interface{}{ - "access": updatedItems, - } - - return res, nil -} - -func resourceBigQueryDatasetAccessListForPatch(d *resource_bigquery_dataset_access_schema.ResourceData, meta interface{}) ([]interface{}, error) { - config := meta.(*Config) - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}") - if err != nil { - return nil, err - } - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", project, url, userAgent, nil, isBigqueryIAMQuotaError) - if err != nil { - return nil, err - } - - var v interface{} - var ok bool - - v, ok = res["access"] - if ok && v != nil { - ls, lsOk := v.([]interface{}) - if !lsOk { - return nil, resource_bigquery_dataset_access_fmt.Errorf(`expected list for nested field "access"`) - } - return ls, nil - } - return nil, nil -} - -var ( - bigqueryDatasetRegexp = resource_bigquery_job_regexp.MustCompile("projects/(.+)/datasets/(.+)") - bigqueryTableRegexp = resource_bigquery_job_regexp.MustCompile("projects/(.+)/datasets/(.+)/tables/(.+)") -) - -func resourceBigQueryJob() *resource_bigquery_job_schema.Resource { - return &resource_bigquery_job_schema.Resource{ - Create: resourceBigQueryJobCreate, - Read: resourceBigQueryJobRead, - Delete: resourceBigQueryJobDelete, - - Importer: &resource_bigquery_job_schema.ResourceImporter{ - State: resourceBigQueryJobImport, - }, - - Timeouts: &resource_bigquery_job_schema.ResourceTimeout{ - 
Create: resource_bigquery_job_schema.DefaultTimeout(4 * resource_bigquery_job_time.Minute), - Delete: resource_bigquery_job_schema.DefaultTimeout(4 * resource_bigquery_job_time.Minute), - }, - - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "copy": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Copies a table.`, - MaxItems: 1, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "source_tables": { - Type: resource_bigquery_job_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Source tables to copy.`, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "table_id": { - Type: resource_bigquery_job_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The table. Can be specified '{{table_id}}' if 'project_id' and 'dataset_id' are also set, -or of the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}' if not.`, - }, - "dataset_id": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The ID of the dataset containing this table.`, - }, - "project_id": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The ID of the project containing this table.`, - }, - }, - }, - }, - "create_disposition": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_bigquery_job_validation.StringInSlice([]string{"CREATE_IF_NEEDED", "CREATE_NEVER", ""}, false), - Description: `Specifies whether the job is allowed to create new tables. The following values are supported: -CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. -CREATE_NEVER: The table must already exist. 
If it does not, a 'notFound' error is returned in the job result. -Creation, truncation and append actions occur as one atomic update upon job completion Default value: "CREATE_IF_NEEDED" Possible values: ["CREATE_IF_NEEDED", "CREATE_NEVER"]`, - Default: "CREATE_IF_NEEDED", - }, - "destination_encryption_configuration": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Custom encryption configuration (e.g., Cloud KMS keys)`, - MaxItems: 1, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "kms_key_name": { - Type: resource_bigquery_job_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. -The BigQuery Service Account associated with your project requires access to this encryption key.`, - }, - "kms_key_version": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Description: `Describes the Cloud KMS encryption key version used to protect destination BigQuery table.`, - }, - }, - }, - }, - "destination_table": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The destination table.`, - MaxItems: 1, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "table_id": { - Type: resource_bigquery_job_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The table. 
Can be specified '{{table_id}}' if 'project_id' and 'dataset_id' are also set, -or of the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}' if not.`, - }, - "dataset_id": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The ID of the dataset containing this table.`, - }, - "project_id": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The ID of the project containing this table.`, - }, - }, - }, - }, - "write_disposition": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_bigquery_job_validation.StringInSlice([]string{"WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY", ""}, false), - Description: `Specifies the action that occurs if the destination table already exists. The following values are supported: -WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. -WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. -WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. -Each action is atomic and only occurs if BigQuery is able to complete the job successfully. -Creation, truncation and append actions occur as one atomic update upon job completion. 
Default value: "WRITE_EMPTY" Possible values: ["WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY"]`, - Default: "WRITE_EMPTY", - }, - }, - }, - ExactlyOneOf: []string{"query", "load", "copy", "extract"}, - }, - "extract": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Configures an extract job.`, - MaxItems: 1, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "destination_uris": { - Type: resource_bigquery_job_schema.TypeList, - Required: true, - ForceNew: true, - Description: `A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written.`, - Elem: &resource_bigquery_job_schema.Schema{ - Type: resource_bigquery_job_schema.TypeString, - }, - }, - "compression": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. -The default value is NONE. DEFLATE and SNAPPY are only supported for Avro.`, - Default: "NONE", - }, - "destination_format": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. -The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. -The default value for models is SAVED_MODEL.`, - }, - "field_delimiter": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. -Default is ','`, - }, - "print_header": { - Type: resource_bigquery_job_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether to print out a header row in the results. 
Default is true.`, - Default: true, - }, - "source_model": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A reference to the model being exported.`, - MaxItems: 1, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "dataset_id": { - Type: resource_bigquery_job_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the dataset containing this model.`, - }, - "model_id": { - Type: resource_bigquery_job_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the model.`, - }, - "project_id": { - Type: resource_bigquery_job_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the project containing this model.`, - }, - }, - }, - ExactlyOneOf: []string{"extract.0.source_table", "extract.0.source_model"}, - }, - "source_table": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A reference to the table being exported.`, - MaxItems: 1, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "table_id": { - Type: resource_bigquery_job_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The table. 
Can be specified '{{table_id}}' if 'project_id' and 'dataset_id' are also set, -or of the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}' if not.`, - }, - "dataset_id": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The ID of the dataset containing this table.`, - }, - "project_id": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The ID of the project containing this table.`, - }, - }, - }, - ExactlyOneOf: []string{"extract.0.source_table", "extract.0.source_model"}, - }, - "use_avro_logical_types": { - Type: resource_bigquery_job_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether to use logical types when extracting to AVRO format.`, - }, - }, - }, - ExactlyOneOf: []string{"query", "load", "copy", "extract"}, - }, - "job_timeout_ms": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job.`, - }, - "labels": { - Type: resource_bigquery_job_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `The labels associated with this job. 
You can use these to organize and group your jobs.`, - Elem: &resource_bigquery_job_schema.Schema{Type: resource_bigquery_job_schema.TypeString}, - }, - "load": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Configures a load job.`, - MaxItems: 1, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "destination_table": { - Type: resource_bigquery_job_schema.TypeList, - Required: true, - ForceNew: true, - Description: `The destination table to load the data into.`, - MaxItems: 1, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "table_id": { - Type: resource_bigquery_job_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The table. Can be specified '{{table_id}}' if 'project_id' and 'dataset_id' are also set, -or of the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}' if not.`, - }, - "dataset_id": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The ID of the dataset containing this table.`, - }, - "project_id": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The ID of the project containing this table.`, - }, - }, - }, - }, - "source_uris": { - Type: resource_bigquery_job_schema.TypeList, - Required: true, - ForceNew: true, - Description: `The fully-qualified URIs that point to your data in Google Cloud. -For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character -and it must come after the 'bucket' name. Size limits related to load jobs apply -to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be -specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. 
-For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '*' wildcard character is not allowed.`, - Elem: &resource_bigquery_job_schema.Schema{ - Type: resource_bigquery_job_schema.TypeString, - }, - }, - "allow_jagged_rows": { - Type: resource_bigquery_job_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Accept rows that are missing trailing optional columns. The missing values are treated as nulls. -If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, -an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats.`, - Default: false, - }, - "allow_quoted_newlines": { - Type: resource_bigquery_job_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. -The default value is false.`, - Default: false, - }, - "autodetect": { - Type: resource_bigquery_job_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Indicates if we should automatically infer the options and schema for CSV and JSON sources.`, - }, - "create_disposition": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_bigquery_job_validation.StringInSlice([]string{"CREATE_IF_NEEDED", "CREATE_NEVER", ""}, false), - Description: `Specifies whether the job is allowed to create new tables. The following values are supported: -CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. -CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. 
-Creation, truncation and append actions occur as one atomic update upon job completion Default value: "CREATE_IF_NEEDED" Possible values: ["CREATE_IF_NEEDED", "CREATE_NEVER"]`, - Default: "CREATE_IF_NEEDED", - }, - "destination_encryption_configuration": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Custom encryption configuration (e.g., Cloud KMS keys)`, - MaxItems: 1, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "kms_key_name": { - Type: resource_bigquery_job_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. -The BigQuery Service Account associated with your project requires access to this encryption key.`, - }, - "kms_key_version": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Description: `Describes the Cloud KMS encryption key version used to protect destination BigQuery table.`, - }, - }, - }, - }, - "encoding": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. -The default value is UTF-8. BigQuery decodes the data after the raw, binary data -has been split using the values of the quote and fieldDelimiter properties.`, - Default: "UTF-8", - }, - "field_delimiter": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. -To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts -the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the -data in its raw, binary state. 
BigQuery also supports the escape sequence "\t" to specify a tab separator. -The default value is a comma (',').`, - }, - "ignore_unknown_values": { - Type: resource_bigquery_job_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Indicates if BigQuery should allow extra values that are not represented in the table schema. -If true, the extra values are ignored. If false, records with extra columns are treated as bad records, -and if there are too many bad records, an invalid error is returned in the job result. -The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: -CSV: Trailing columns -JSON: Named values that don't match any column names`, - Default: false, - }, - "max_bad_records": { - Type: resource_bigquery_job_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, -an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.`, - Default: 0, - }, - "null_marker": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a string that represents a null value in a CSV file. For example, if you specify "\N", BigQuery interprets "\N" as a null value -when loading a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an -empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as -an empty value.`, - Default: "", - }, - "projection_fields": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. 
-Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. -If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result.`, - Elem: &resource_bigquery_job_schema.Schema{ - Type: resource_bigquery_job_schema.TypeString, - }, - }, - "quote": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, -and then uses the first byte of the encoded string to split the data in its raw, binary state. -The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. -If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true.`, - }, - "schema_update_options": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or -supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; -when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. -For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: -ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. 
-ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.`, - Elem: &resource_bigquery_job_schema.Schema{ - Type: resource_bigquery_job_schema.TypeString, - }, - }, - "skip_leading_rows": { - Type: resource_bigquery_job_schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: resource_bigquery_job_validation.IntAtLeast(0), - Description: `The number of rows at the top of a CSV file that BigQuery will skip when loading the data. -The default value is 0. This property is useful if you have header rows in the file that should be skipped. -When autodetect is on, the behavior is the following: -skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, -the row is read as data. Otherwise data is read starting from the second row. -skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. -skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, -row N is just skipped. Otherwise row N is used to extract column names for the detected schema.`, - Default: 0, - }, - "source_format": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". -For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". -For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". 
-The default value is CSV.`, - Default: "CSV", - }, - "time_partitioning": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Time-based partitioning specification for the destination table.`, - MaxItems: 1, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "type": { - Type: resource_bigquery_job_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, -but in OnePlatform the field will be treated as unset.`, - }, - "expiration_ms": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value.`, - }, - "field": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. -The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. -A wrapper is used here because an empty string is an invalid value.`, - }, - }, - }, - }, - "write_disposition": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_bigquery_job_validation.StringInSlice([]string{"WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY", ""}, false), - Description: `Specifies the action that occurs if the destination table already exists. The following values are supported: -WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. -WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. 
-WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. -Each action is atomic and only occurs if BigQuery is able to complete the job successfully. -Creation, truncation and append actions occur as one atomic update upon job completion. Default value: "WRITE_EMPTY" Possible values: ["WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY"]`, - Default: "WRITE_EMPTY", - }, - }, - }, - ExactlyOneOf: []string{"query", "load", "copy", "extract"}, - }, - "query": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Configures a query job.`, - MaxItems: 1, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "query": { - Type: resource_bigquery_job_schema.TypeString, - Required: true, - ForceNew: true, - Description: `SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. -*NOTE*: queries containing [DML language](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) -('DELETE', 'UPDATE', 'MERGE', 'INSERT') must specify 'create_disposition = ""' and 'write_disposition = ""'.`, - }, - "allow_large_results": { - Type: resource_bigquery_job_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. -Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. 
-However, you must still set destinationTable when result size exceeds the allowed maximum response size.`, - }, - "create_disposition": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_bigquery_job_validation.StringInSlice([]string{"CREATE_IF_NEEDED", "CREATE_NEVER", ""}, false), - Description: `Specifies whether the job is allowed to create new tables. The following values are supported: -CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. -CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. -Creation, truncation and append actions occur as one atomic update upon job completion Default value: "CREATE_IF_NEEDED" Possible values: ["CREATE_IF_NEEDED", "CREATE_NEVER"]`, - Default: "CREATE_IF_NEEDED", - }, - "default_dataset": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names.`, - MaxItems: 1, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "dataset_id": { - Type: resource_bigquery_job_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The dataset. 
Can be specified '{{dataset_id}}' if 'project_id' is also set, -or of the form 'projects/{{project}}/datasets/{{dataset_id}}' if not.`, - }, - "project_id": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The ID of the project containing this table.`, - }, - }, - }, - }, - "destination_encryption_configuration": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Custom encryption configuration (e.g., Cloud KMS keys)`, - MaxItems: 1, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "kms_key_name": { - Type: resource_bigquery_job_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. -The BigQuery Service Account associated with your project requires access to this encryption key.`, - }, - "kms_key_version": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Description: `Describes the Cloud KMS encryption key version used to protect destination BigQuery table.`, - }, - }, - }, - }, - "destination_table": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Describes the table where the query results should be stored. -This property must be set for large results that exceed the maximum response size. -For queries that produce anonymous (cached) results, this field will be populated by BigQuery.`, - MaxItems: 1, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "table_id": { - Type: resource_bigquery_job_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The table. 
Can be specified '{{table_id}}' if 'project_id' and 'dataset_id' are also set, -or of the form 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}' if not.`, - }, - "dataset_id": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The ID of the dataset containing this table.`, - }, - "project_id": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The ID of the project containing this table.`, - }, - }, - }, - }, - "flatten_results": { - Type: resource_bigquery_job_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. -allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened.`, - }, - "maximum_billing_tier": { - Type: resource_bigquery_job_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). -If unspecified, this will be set to your project default.`, - }, - "maximum_bytes_billed": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). -If unspecified, this will be set to your project default.`, - }, - "parameter_mode": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Standard SQL only. Set to POSITIONAL to use positional (?) 
query parameters or to NAMED to use named (@myparam) query parameters in this query.`, - }, - "priority": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_bigquery_job_validation.StringInSlice([]string{"INTERACTIVE", "BATCH", ""}, false), - Description: `Specifies a priority for the query. Default value: "INTERACTIVE" Possible values: ["INTERACTIVE", "BATCH"]`, - Default: "INTERACTIVE", - }, - "schema_update_options": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Allows the schema of the destination table to be updated as a side effect of the query job. -Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; -when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, -specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. -One or more of the following values are specified: -ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. -ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.`, - Elem: &resource_bigquery_job_schema.Schema{ - Type: resource_bigquery_job_schema.TypeString, - }, - }, - "script_options": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Options controlling the execution of scripts.`, - MaxItems: 1, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "key_result_statement": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_bigquery_job_validation.StringInSlice([]string{"LAST", "FIRST_SELECT", ""}, false), - Description: `Determines which statement in the script represents the "key result", -used to populate the schema and query results of the script job. 
Possible values: ["LAST", "FIRST_SELECT"]`, - AtLeastOneOf: []string{"query.0.script_options.0.statement_timeout_ms", "query.0.script_options.0.statement_byte_budget", "query.0.script_options.0.key_result_statement"}, - }, - "statement_byte_budget": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Limit on the number of bytes billed per statement. Exceeding this budget results in an error.`, - AtLeastOneOf: []string{"query.0.script_options.0.statement_timeout_ms", "query.0.script_options.0.statement_byte_budget", "query.0.script_options.0.key_result_statement"}, - }, - "statement_timeout_ms": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Timeout period for each statement in a script.`, - AtLeastOneOf: []string{"query.0.script_options.0.statement_timeout_ms", "query.0.script_options.0.statement_byte_budget", "query.0.script_options.0.key_result_statement"}, - }, - }, - }, - }, - "use_legacy_sql": { - Type: resource_bigquery_job_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. -If set to false, the query will use BigQuery's standard SQL.`, - }, - "use_query_cache": { - Type: resource_bigquery_job_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever -tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. 
-The default value is true.`, - Default: true, - }, - "user_defined_function_resources": { - Type: resource_bigquery_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Describes user-defined function resources used in the query.`, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "inline_code": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An inline resource that contains code for a user-defined function (UDF). -Providing a inline code resource is equivalent to providing a URI for a file containing the same code.`, - }, - "resource_uri": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A code resource to load from a Google Cloud Storage URI (gs://bucket/path).`, - }, - }, - }, - }, - "write_disposition": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_bigquery_job_validation.StringInSlice([]string{"WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY", ""}, false), - Description: `Specifies the action that occurs if the destination table already exists. The following values are supported: -WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. -WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. -WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. -Each action is atomic and only occurs if BigQuery is able to complete the job successfully. -Creation, truncation and append actions occur as one atomic update upon job completion. 
Default value: "WRITE_EMPTY" Possible values: ["WRITE_TRUNCATE", "WRITE_APPEND", "WRITE_EMPTY"]`, - Default: "WRITE_EMPTY", - }, - }, - }, - ExactlyOneOf: []string{"query", "load", "copy", "extract"}, - }, - "job_type": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Description: `The type of the job.`, - }, - - "job_id": { - Type: resource_bigquery_job_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters.`, - }, - "location": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The geographic location of the job. The default value is US.`, - Default: "US", - }, - - "status": { - Type: resource_bigquery_job_schema.TypeList, - Computed: true, - Description: `The status of this job. Examine this value when polling an asynchronous job to see if the job is complete.`, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "error_result": { - Type: resource_bigquery_job_schema.TypeList, - Computed: true, - Description: `Final error result of the job. 
If present, indicates that the job has completed and was unsuccessful.`, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "location": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - Description: `Specifies where the error occurred, if present.`, - }, - "message": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - Description: `A human-readable description of the error.`, - }, - "reason": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - Description: `A short error code that summarizes the error.`, - }, - }, - }, - }, - "errors": { - Type: resource_bigquery_job_schema.TypeList, - Computed: true, - Description: `The first errors encountered during the running of the job. The final message -includes the number of errors that caused the process to stop. Errors here do -not necessarily mean that the job has not completed or was unsuccessful.`, - Elem: &resource_bigquery_job_schema.Resource{ - Schema: map[string]*resource_bigquery_job_schema.Schema{ - "location": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - Description: `Specifies where the error occurred, if present.`, - }, - "message": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - Description: `A human-readable description of the error.`, - }, - "reason": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - Description: `A short error code that summarizes the error.`, - }, - }, - }, - }, - "state": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Description: `Running state of the job. 
Valid states include 'PENDING', 'RUNNING', and 'DONE'.`, - }, - }, - }, - }, - "user_email": { - Type: resource_bigquery_job_schema.TypeString, - Computed: true, - Description: `Email address of the user who ran the job.`, - }, - "project": { - Type: resource_bigquery_job_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigQueryJobCreate(d *resource_bigquery_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - configurationProp, err := expandBigQueryJobConfiguration(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(resource_bigquery_job_reflect.ValueOf(configurationProp)) { - obj["configuration"] = configurationProp - } - jobReferenceProp, err := expandBigQueryJobJobReference(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(resource_bigquery_job_reflect.ValueOf(jobReferenceProp)) { - obj["jobReference"] = jobReferenceProp - } - - obj, err = resourceBigQueryJobEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/jobs") - if err != nil { - return err - } - - resource_bigquery_job_log.Printf("[DEBUG] Creating new Job: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_job_fmt.Errorf("Error fetching project for Job: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_bigquery_job_schema.TimeoutCreate)) - if err != nil { - return resource_bigquery_job_fmt.Errorf("Error creating Job: %s", err) - } - - id, err := replaceVars(d, config, 
"projects/{{project}}/jobs/{{job_id}}") - if err != nil { - return resource_bigquery_job_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = PollingWaitTime(resourceBigQueryJobPollRead(d, meta), PollCheckForExistence, "Creating Job", d.Timeout(resource_bigquery_job_schema.TimeoutCreate), 1) - if err != nil { - return resource_bigquery_job_fmt.Errorf("Error waiting to create Job: %s", err) - } - - resource_bigquery_job_log.Printf("[DEBUG] Finished creating Job %q: %#v", d.Id(), res) - - return resourceBigQueryJobRead(d, meta) -} - -func resourceBigQueryJobPollRead(d *resource_bigquery_job_schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/jobs/{{job_id}}?location={{location}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, resource_bigquery_job_fmt.Errorf("Error fetching project for Job: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return res, err - } - return res, nil - } -} - -func resourceBigQueryJobRead(d *resource_bigquery_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/jobs/{{job_id}}?location={{location}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_job_fmt.Errorf("Error fetching project for Job: 
%s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_bigquery_job_fmt.Sprintf("BigQueryJob %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_bigquery_job_fmt.Errorf("Error reading Job: %s", err) - } - - if err := d.Set("user_email", flattenBigQueryJobUserEmail(res["user_email"], d, config)); err != nil { - return resource_bigquery_job_fmt.Errorf("Error reading Job: %s", err) - } - - if flattenedProp := flattenBigQueryJobConfiguration(res["configuration"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*resource_bigquery_job_googleapi.Error); ok { - return resource_bigquery_job_fmt.Errorf("Error reading Job: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return resource_bigquery_job_fmt.Errorf("Error setting %s: %s", k, err) - } - } - } - } - - if flattenedProp := flattenBigQueryJobJobReference(res["jobReference"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*resource_bigquery_job_googleapi.Error); ok { - return resource_bigquery_job_fmt.Errorf("Error reading Job: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return resource_bigquery_job_fmt.Errorf("Error setting %s: %s", k, err) - } - } - } - } - if err := d.Set("status", flattenBigQueryJobStatus(res["status"], d, config)); err != nil { - return resource_bigquery_job_fmt.Errorf("Error reading Job: %s", err) - } - - return nil -} - -func resourceBigQueryJobDelete(d *resource_bigquery_job_schema.ResourceData, meta interface{}) error { - 
resource_bigquery_job_log.Printf("[WARNING] BigQuery Job resources"+ - " cannot be deleted from Google Cloud. The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceBigQueryJobImport(d *resource_bigquery_job_schema.ResourceData, meta interface{}) ([]*resource_bigquery_job_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/jobs/(?P[^/]+)/location/(?P[^/]+)", - "projects/(?P[^/]+)/jobs/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/jobs/{{job_id}}") - if err != nil { - return nil, resource_bigquery_job_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_bigquery_job_schema.ResourceData{d}, nil -} - -func flattenBigQueryJobUserEmail(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfiguration(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["job_type"] = - flattenBigQueryJobConfigurationJobType(original["jobType"], d, config) - transformed["job_timeout_ms"] = - flattenBigQueryJobConfigurationJobTimeoutMs(original["jobTimeoutMs"], d, config) - transformed["labels"] = - flattenBigQueryJobConfigurationLabels(original["labels"], d, config) - transformed["query"] = - flattenBigQueryJobConfigurationQuery(original["query"], d, config) - transformed["load"] = - flattenBigQueryJobConfigurationLoad(original["load"], d, config) - transformed["copy"] = - flattenBigQueryJobConfigurationCopy(original["copy"], 
d, config) - transformed["extract"] = - flattenBigQueryJobConfigurationExtract(original["extract"], d, config) - return []interface{}{transformed} -} - -func flattenBigQueryJobConfigurationJobType(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationJobTimeoutMs(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLabels(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationQuery(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["query"] = - flattenBigQueryJobConfigurationQueryQuery(original["query"], d, config) - transformed["destination_table"] = - flattenBigQueryJobConfigurationQueryDestinationTable(original["destinationTable"], d, config) - transformed["user_defined_function_resources"] = - flattenBigQueryJobConfigurationQueryUserDefinedFunctionResources(original["userDefinedFunctionResources"], d, config) - transformed["create_disposition"] = - flattenBigQueryJobConfigurationQueryCreateDisposition(original["createDisposition"], d, config) - transformed["write_disposition"] = - flattenBigQueryJobConfigurationQueryWriteDisposition(original["writeDisposition"], d, config) - transformed["default_dataset"] = - flattenBigQueryJobConfigurationQueryDefaultDataset(original["defaultDataset"], d, config) - transformed["priority"] = - flattenBigQueryJobConfigurationQueryPriority(original["priority"], d, config) - transformed["allow_large_results"] = - flattenBigQueryJobConfigurationQueryAllowLargeResults(original["allowLargeResults"], d, config) - transformed["use_query_cache"] = - 
flattenBigQueryJobConfigurationQueryUseQueryCache(original["useQueryCache"], d, config) - transformed["flatten_results"] = - flattenBigQueryJobConfigurationQueryFlattenResults(original["flattenResults"], d, config) - transformed["maximum_billing_tier"] = - flattenBigQueryJobConfigurationQueryMaximumBillingTier(original["maximumBillingTier"], d, config) - transformed["maximum_bytes_billed"] = - flattenBigQueryJobConfigurationQueryMaximumBytesBilled(original["maximumBytesBilled"], d, config) - transformed["use_legacy_sql"] = - flattenBigQueryJobConfigurationQueryUseLegacySql(original["useLegacySql"], d, config) - transformed["parameter_mode"] = - flattenBigQueryJobConfigurationQueryParameterMode(original["parameterMode"], d, config) - transformed["schema_update_options"] = - flattenBigQueryJobConfigurationQuerySchemaUpdateOptions(original["schemaUpdateOptions"], d, config) - transformed["destination_encryption_configuration"] = - flattenBigQueryJobConfigurationQueryDestinationEncryptionConfiguration(original["destinationEncryptionConfiguration"], d, config) - transformed["script_options"] = - flattenBigQueryJobConfigurationQueryScriptOptions(original["scriptOptions"], d, config) - return []interface{}{transformed} -} - -func flattenBigQueryJobConfigurationQueryQuery(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationQueryDestinationTable(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = original["projectId"] - transformed["dataset_id"] = original["datasetId"] - transformed["table_id"] = original["tableId"] - - if bigqueryTableRegexp.MatchString(d.Get("query.0.destination_table.0.table_id").(string)) { - - transformed["table_id"] = 
resource_bigquery_job_fmt.Sprintf("projects/%s/datasets/%s/tables/%s", transformed["project_id"], transformed["dataset_id"], transformed["table_id"]) - } - return []interface{}{transformed} -} - -func flattenBigQueryJobConfigurationQueryUserDefinedFunctionResources(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "resource_uri": flattenBigQueryJobConfigurationQueryUserDefinedFunctionResourcesResourceUri(original["resourceUri"], d, config), - "inline_code": flattenBigQueryJobConfigurationQueryUserDefinedFunctionResourcesInlineCode(original["inlineCode"], d, config), - }) - } - return transformed -} - -func flattenBigQueryJobConfigurationQueryUserDefinedFunctionResourcesResourceUri(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationQueryUserDefinedFunctionResourcesInlineCode(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationQueryCreateDisposition(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationQueryWriteDisposition(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationQueryDefaultDataset(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = original["projectId"] 
- transformed["dataset_id"] = original["datasetId"] - - if bigqueryDatasetRegexp.MatchString(d.Get("query.0.default_dataset.0.dataset_id").(string)) { - - transformed["dataset_id"] = resource_bigquery_job_fmt.Sprintf("projects/%s/datasets/%s", transformed["project_id"], transformed["dataset_id"]) - } - return []interface{}{transformed} -} - -func flattenBigQueryJobConfigurationQueryPriority(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationQueryAllowLargeResults(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationQueryUseQueryCache(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationQueryFlattenResults(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationQueryMaximumBillingTier(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_bigquery_job_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenBigQueryJobConfigurationQueryMaximumBytesBilled(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationQueryUseLegacySql(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationQueryParameterMode(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationQuerySchemaUpdateOptions(v interface{}, d 
*resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationQueryDestinationEncryptionConfiguration(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return []map[string]interface{}{} - } - - kmsKeyName := v.(map[string]interface{})["kmsKeyName"].(string) - re := resource_bigquery_job_regexp.MustCompile(`(projects/.*/locations/.*/keyRings/.*/cryptoKeys/.*)/cryptoKeyVersions/.*`) - paths := re.FindStringSubmatch(kmsKeyName) - - if len(paths) > 0 { - return []map[string]interface{}{ - { - "kms_key_name": paths[1], - "kms_key_version": kmsKeyName, - }, - } - } - - return []map[string]interface{}{{"kms_key_name": kmsKeyName, "kms_key_version": ""}} - -} - -func flattenBigQueryJobConfigurationQueryScriptOptions(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["statement_timeout_ms"] = - flattenBigQueryJobConfigurationQueryScriptOptionsStatementTimeoutMs(original["statementTimeoutMs"], d, config) - transformed["statement_byte_budget"] = - flattenBigQueryJobConfigurationQueryScriptOptionsStatementByteBudget(original["statementByteBudget"], d, config) - transformed["key_result_statement"] = - flattenBigQueryJobConfigurationQueryScriptOptionsKeyResultStatement(original["keyResultStatement"], d, config) - return []interface{}{transformed} -} - -func flattenBigQueryJobConfigurationQueryScriptOptionsStatementTimeoutMs(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationQueryScriptOptionsStatementByteBudget(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenBigQueryJobConfigurationQueryScriptOptionsKeyResultStatement(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoad(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["source_uris"] = - flattenBigQueryJobConfigurationLoadSourceUris(original["sourceUris"], d, config) - transformed["destination_table"] = - flattenBigQueryJobConfigurationLoadDestinationTable(original["destinationTable"], d, config) - transformed["create_disposition"] = - flattenBigQueryJobConfigurationLoadCreateDisposition(original["createDisposition"], d, config) - transformed["write_disposition"] = - flattenBigQueryJobConfigurationLoadWriteDisposition(original["writeDisposition"], d, config) - transformed["null_marker"] = - flattenBigQueryJobConfigurationLoadNullMarker(original["nullMarker"], d, config) - transformed["field_delimiter"] = - flattenBigQueryJobConfigurationLoadFieldDelimiter(original["fieldDelimiter"], d, config) - transformed["skip_leading_rows"] = - flattenBigQueryJobConfigurationLoadSkipLeadingRows(original["skipLeadingRows"], d, config) - transformed["encoding"] = - flattenBigQueryJobConfigurationLoadEncoding(original["encoding"], d, config) - transformed["quote"] = - flattenBigQueryJobConfigurationLoadQuote(original["quote"], d, config) - transformed["max_bad_records"] = - flattenBigQueryJobConfigurationLoadMaxBadRecords(original["maxBadRecords"], d, config) - transformed["allow_quoted_newlines"] = - flattenBigQueryJobConfigurationLoadAllowQuotedNewlines(original["allowQuotedNewlines"], d, config) - transformed["source_format"] = - flattenBigQueryJobConfigurationLoadSourceFormat(original["sourceFormat"], d, config) - transformed["allow_jagged_rows"] = - 
flattenBigQueryJobConfigurationLoadAllowJaggedRows(original["allowJaggedRows"], d, config) - transformed["ignore_unknown_values"] = - flattenBigQueryJobConfigurationLoadIgnoreUnknownValues(original["ignoreUnknownValues"], d, config) - transformed["projection_fields"] = - flattenBigQueryJobConfigurationLoadProjectionFields(original["projectionFields"], d, config) - transformed["autodetect"] = - flattenBigQueryJobConfigurationLoadAutodetect(original["autodetect"], d, config) - transformed["schema_update_options"] = - flattenBigQueryJobConfigurationLoadSchemaUpdateOptions(original["schemaUpdateOptions"], d, config) - transformed["time_partitioning"] = - flattenBigQueryJobConfigurationLoadTimePartitioning(original["timePartitioning"], d, config) - transformed["destination_encryption_configuration"] = - flattenBigQueryJobConfigurationLoadDestinationEncryptionConfiguration(original["destinationEncryptionConfiguration"], d, config) - return []interface{}{transformed} -} - -func flattenBigQueryJobConfigurationLoadSourceUris(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadDestinationTable(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = original["projectId"] - transformed["dataset_id"] = original["datasetId"] - transformed["table_id"] = original["tableId"] - - if bigqueryTableRegexp.MatchString(d.Get("load.0.destination_table.0.table_id").(string)) { - - transformed["table_id"] = resource_bigquery_job_fmt.Sprintf("projects/%s/datasets/%s/tables/%s", transformed["project_id"], transformed["dataset_id"], transformed["table_id"]) - } - return []interface{}{transformed} -} - -func flattenBigQueryJobConfigurationLoadCreateDisposition(v interface{}, d 
*resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadWriteDisposition(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadNullMarker(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadFieldDelimiter(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadSkipLeadingRows(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_bigquery_job_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenBigQueryJobConfigurationLoadEncoding(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadQuote(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadMaxBadRecords(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_bigquery_job_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenBigQueryJobConfigurationLoadAllowQuotedNewlines(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadSourceFormat(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadAllowJaggedRows(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadIgnoreUnknownValues(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadProjectionFields(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadAutodetect(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadSchemaUpdateOptions(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadTimePartitioning(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["type"] = - flattenBigQueryJobConfigurationLoadTimePartitioningType(original["type"], d, config) - transformed["expiration_ms"] = - flattenBigQueryJobConfigurationLoadTimePartitioningExpirationMs(original["expirationMs"], d, config) - transformed["field"] = - flattenBigQueryJobConfigurationLoadTimePartitioningField(original["field"], d, config) - return []interface{}{transformed} -} - -func flattenBigQueryJobConfigurationLoadTimePartitioningType(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadTimePartitioningExpirationMs(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadTimePartitioningField(v interface{}, d 
*resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationLoadDestinationEncryptionConfiguration(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return []map[string]interface{}{} - } - - kmsKeyName := v.(map[string]interface{})["kmsKeyName"].(string) - re := resource_bigquery_job_regexp.MustCompile(`(projects/.*/locations/.*/keyRings/.*/cryptoKeys/.*)/cryptoKeyVersions/.*`) - paths := re.FindStringSubmatch(kmsKeyName) - - if len(paths) > 0 { - return []map[string]interface{}{ - { - "kms_key_name": paths[1], - "kms_key_version": kmsKeyName, - }, - } - } - - return []map[string]interface{}{{"kms_key_name": kmsKeyName, "kms_key_version": ""}} - -} - -func flattenBigQueryJobConfigurationCopy(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["source_tables"] = - flattenBigQueryJobConfigurationCopySourceTables(original["sourceTables"], d, config) - transformed["destination_table"] = - flattenBigQueryJobConfigurationCopyDestinationTable(original["destinationTable"], d, config) - transformed["create_disposition"] = - flattenBigQueryJobConfigurationCopyCreateDisposition(original["createDisposition"], d, config) - transformed["write_disposition"] = - flattenBigQueryJobConfigurationCopyWriteDisposition(original["writeDisposition"], d, config) - transformed["destination_encryption_configuration"] = - flattenBigQueryJobConfigurationCopyDestinationEncryptionConfiguration(original["destinationEncryptionConfiguration"], d, config) - return []interface{}{transformed} -} - -func flattenBigQueryJobConfigurationCopySourceTables(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v 
- } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for i, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - t := map[string]interface{}{ - "project_id": original["projectId"], - "dataset_id": original["datasetId"], - "table_id": original["tableId"], - } - - if bigqueryTableRegexp.MatchString(d.Get(resource_bigquery_job_fmt.Sprintf("copy.0.source_tables.%d.table_id", i)).(string)) { - - t["table_id"] = resource_bigquery_job_fmt.Sprintf("projects/%s/datasets/%s/tables/%s", t["project_id"], t["dataset_id"], t["table_id"]) - } - transformed = append(transformed, t) - } - - return transformed -} - -func flattenBigQueryJobConfigurationCopyDestinationTable(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = original["projectId"] - transformed["dataset_id"] = original["datasetId"] - transformed["table_id"] = original["tableId"] - - if bigqueryTableRegexp.MatchString(d.Get("copy.0.destination_table.0.table_id").(string)) { - - transformed["table_id"] = resource_bigquery_job_fmt.Sprintf("projects/%s/datasets/%s/tables/%s", transformed["project_id"], transformed["dataset_id"], transformed["table_id"]) - } - return []interface{}{transformed} -} - -func flattenBigQueryJobConfigurationCopyCreateDisposition(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationCopyWriteDisposition(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationCopyDestinationEncryptionConfiguration(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return 
[]map[string]interface{}{} - } - - kmsKeyName := v.(map[string]interface{})["kmsKeyName"].(string) - re := resource_bigquery_job_regexp.MustCompile(`(projects/.*/locations/.*/keyRings/.*/cryptoKeys/.*)/cryptoKeyVersions/.*`) - paths := re.FindStringSubmatch(kmsKeyName) - - if len(paths) > 0 { - return []map[string]interface{}{ - { - "kms_key_name": paths[1], - "kms_key_version": kmsKeyName, - }, - } - } - - return []map[string]interface{}{{"kms_key_name": kmsKeyName, "kms_key_version": ""}} - -} - -func flattenBigQueryJobConfigurationExtract(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["destination_uris"] = - flattenBigQueryJobConfigurationExtractDestinationUris(original["destinationUris"], d, config) - transformed["print_header"] = - flattenBigQueryJobConfigurationExtractPrintHeader(original["printHeader"], d, config) - transformed["field_delimiter"] = - flattenBigQueryJobConfigurationExtractFieldDelimiter(original["fieldDelimiter"], d, config) - transformed["destination_format"] = - flattenBigQueryJobConfigurationExtractDestinationFormat(original["destinationFormat"], d, config) - transformed["compression"] = - flattenBigQueryJobConfigurationExtractCompression(original["compression"], d, config) - transformed["use_avro_logical_types"] = - flattenBigQueryJobConfigurationExtractUseAvroLogicalTypes(original["useAvroLogicalTypes"], d, config) - transformed["source_table"] = - flattenBigQueryJobConfigurationExtractSourceTable(original["sourceTable"], d, config) - transformed["source_model"] = - flattenBigQueryJobConfigurationExtractSourceModel(original["sourceModel"], d, config) - return []interface{}{transformed} -} - -func flattenBigQueryJobConfigurationExtractDestinationUris(v interface{}, d *resource_bigquery_job_schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationExtractPrintHeader(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationExtractFieldDelimiter(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationExtractDestinationFormat(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationExtractCompression(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationExtractUseAvroLogicalTypes(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationExtractSourceTable(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = original["projectId"] - transformed["dataset_id"] = original["datasetId"] - transformed["table_id"] = original["tableId"] - - if bigqueryTableRegexp.MatchString(d.Get("extract.0.source_table.0.table_id").(string)) { - - transformed["table_id"] = resource_bigquery_job_fmt.Sprintf("projects/%s/datasets/%s/tables/%s", transformed["project_id"], transformed["dataset_id"], transformed["table_id"]) - } - return []interface{}{transformed} -} - -func flattenBigQueryJobConfigurationExtractSourceModel(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = - 
flattenBigQueryJobConfigurationExtractSourceModelProjectId(original["projectId"], d, config) - transformed["dataset_id"] = - flattenBigQueryJobConfigurationExtractSourceModelDatasetId(original["datasetId"], d, config) - transformed["model_id"] = - flattenBigQueryJobConfigurationExtractSourceModelModelId(original["modelId"], d, config) - return []interface{}{transformed} -} - -func flattenBigQueryJobConfigurationExtractSourceModelProjectId(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationExtractSourceModelDatasetId(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobConfigurationExtractSourceModelModelId(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobJobReference(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["job_id"] = - flattenBigQueryJobJobReferenceJobId(original["jobId"], d, config) - transformed["location"] = - flattenBigQueryJobJobReferenceLocation(original["location"], d, config) - return []interface{}{transformed} -} - -func flattenBigQueryJobJobReferenceJobId(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobJobReferenceLocation(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobStatus(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["error_result"] = - flattenBigQueryJobStatusErrorResult(original["errorResult"], d, config) - transformed["errors"] = - flattenBigQueryJobStatusErrors(original["errors"], d, config) - transformed["state"] = - flattenBigQueryJobStatusState(original["state"], d, config) - return []interface{}{transformed} -} - -func flattenBigQueryJobStatusErrorResult(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["reason"] = - flattenBigQueryJobStatusErrorResultReason(original["reason"], d, config) - transformed["location"] = - flattenBigQueryJobStatusErrorResultLocation(original["location"], d, config) - transformed["message"] = - flattenBigQueryJobStatusErrorResultMessage(original["message"], d, config) - return []interface{}{transformed} -} - -func flattenBigQueryJobStatusErrorResultReason(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobStatusErrorResultLocation(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobStatusErrorResultMessage(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobStatusErrors(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "reason": flattenBigQueryJobStatusErrorsReason(original["reason"], d, config), - "location": 
flattenBigQueryJobStatusErrorsLocation(original["location"], d, config), - "message": flattenBigQueryJobStatusErrorsMessage(original["message"], d, config), - }) - } - return transformed -} - -func flattenBigQueryJobStatusErrorsReason(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobStatusErrorsLocation(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobStatusErrorsMessage(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryJobStatusState(v interface{}, d *resource_bigquery_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBigQueryJobConfiguration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - transformed := make(map[string]interface{}) - transformedJobType, err := expandBigQueryJobConfigurationJobType(d.Get("job_type"), d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedJobType); val.IsValid() && !isEmptyValue(val) { - transformed["jobType"] = transformedJobType - } - - transformedJobTimeoutMs, err := expandBigQueryJobConfigurationJobTimeoutMs(d.Get("job_timeout_ms"), d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedJobTimeoutMs); val.IsValid() && !isEmptyValue(val) { - transformed["jobTimeoutMs"] = transformedJobTimeoutMs - } - - transformedLabels, err := expandBigQueryJobConfigurationLabels(d.Get("labels"), d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - transformedQuery, err := expandBigQueryJobConfigurationQuery(d.Get("query"), d, config) - if err != nil { - return nil, 
err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedQuery); val.IsValid() && !isEmptyValue(val) { - transformed["query"] = transformedQuery - } - - transformedLoad, err := expandBigQueryJobConfigurationLoad(d.Get("load"), d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedLoad); val.IsValid() && !isEmptyValue(val) { - transformed["load"] = transformedLoad - } - - transformedCopy, err := expandBigQueryJobConfigurationCopy(d.Get("copy"), d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedCopy); val.IsValid() && !isEmptyValue(val) { - transformed["copy"] = transformedCopy - } - - transformedExtract, err := expandBigQueryJobConfigurationExtract(d.Get("extract"), d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedExtract); val.IsValid() && !isEmptyValue(val) { - transformed["extract"] = transformedExtract - } - - return transformed, nil -} - -func expandBigQueryJobConfigurationJobType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationJobTimeoutMs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandBigQueryJobConfigurationQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedQuery, err := 
expandBigQueryJobConfigurationQueryQuery(original["query"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedQuery); val.IsValid() && !isEmptyValue(val) { - transformed["query"] = transformedQuery - } - - transformedDestinationTable, err := expandBigQueryJobConfigurationQueryDestinationTable(original["destination_table"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedDestinationTable); val.IsValid() && !isEmptyValue(val) { - transformed["destinationTable"] = transformedDestinationTable - } - - transformedUserDefinedFunctionResources, err := expandBigQueryJobConfigurationQueryUserDefinedFunctionResources(original["user_defined_function_resources"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedUserDefinedFunctionResources); val.IsValid() && !isEmptyValue(val) { - transformed["userDefinedFunctionResources"] = transformedUserDefinedFunctionResources - } - - transformedCreateDisposition, err := expandBigQueryJobConfigurationQueryCreateDisposition(original["create_disposition"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedCreateDisposition); val.IsValid() && !isEmptyValue(val) { - transformed["createDisposition"] = transformedCreateDisposition - } - - transformedWriteDisposition, err := expandBigQueryJobConfigurationQueryWriteDisposition(original["write_disposition"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedWriteDisposition); val.IsValid() && !isEmptyValue(val) { - transformed["writeDisposition"] = transformedWriteDisposition - } - - transformedDefaultDataset, err := expandBigQueryJobConfigurationQueryDefaultDataset(original["default_dataset"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_bigquery_job_reflect.ValueOf(transformedDefaultDataset); val.IsValid() && !isEmptyValue(val) { - transformed["defaultDataset"] = transformedDefaultDataset - } - - transformedPriority, err := expandBigQueryJobConfigurationQueryPriority(original["priority"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedPriority); val.IsValid() && !isEmptyValue(val) { - transformed["priority"] = transformedPriority - } - - transformedAllowLargeResults, err := expandBigQueryJobConfigurationQueryAllowLargeResults(original["allow_large_results"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedAllowLargeResults); val.IsValid() && !isEmptyValue(val) { - transformed["allowLargeResults"] = transformedAllowLargeResults - } - - transformedUseQueryCache, err := expandBigQueryJobConfigurationQueryUseQueryCache(original["use_query_cache"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedUseQueryCache); val.IsValid() && !isEmptyValue(val) { - transformed["useQueryCache"] = transformedUseQueryCache - } - - transformedFlattenResults, err := expandBigQueryJobConfigurationQueryFlattenResults(original["flatten_results"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedFlattenResults); val.IsValid() && !isEmptyValue(val) { - transformed["flattenResults"] = transformedFlattenResults - } - - transformedMaximumBillingTier, err := expandBigQueryJobConfigurationQueryMaximumBillingTier(original["maximum_billing_tier"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedMaximumBillingTier); val.IsValid() && !isEmptyValue(val) { - transformed["maximumBillingTier"] = transformedMaximumBillingTier - } - - transformedMaximumBytesBilled, err := 
expandBigQueryJobConfigurationQueryMaximumBytesBilled(original["maximum_bytes_billed"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedMaximumBytesBilled); val.IsValid() && !isEmptyValue(val) { - transformed["maximumBytesBilled"] = transformedMaximumBytesBilled - } - - transformedUseLegacySql, err := expandBigQueryJobConfigurationQueryUseLegacySql(original["use_legacy_sql"], d, config) - if err != nil { - return nil, err - } else { - transformed["useLegacySql"] = transformedUseLegacySql - } - - transformedParameterMode, err := expandBigQueryJobConfigurationQueryParameterMode(original["parameter_mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedParameterMode); val.IsValid() && !isEmptyValue(val) { - transformed["parameterMode"] = transformedParameterMode - } - - transformedSchemaUpdateOptions, err := expandBigQueryJobConfigurationQuerySchemaUpdateOptions(original["schema_update_options"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedSchemaUpdateOptions); val.IsValid() && !isEmptyValue(val) { - transformed["schemaUpdateOptions"] = transformedSchemaUpdateOptions - } - - transformedDestinationEncryptionConfiguration, err := expandBigQueryJobConfigurationQueryDestinationEncryptionConfiguration(original["destination_encryption_configuration"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedDestinationEncryptionConfiguration); val.IsValid() && !isEmptyValue(val) { - transformed["destinationEncryptionConfiguration"] = transformedDestinationEncryptionConfiguration - } - - transformedScriptOptions, err := expandBigQueryJobConfigurationQueryScriptOptions(original["script_options"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_bigquery_job_reflect.ValueOf(transformedScriptOptions); val.IsValid() && !isEmptyValue(val) { - transformed["scriptOptions"] = transformedScriptOptions - } - - return transformed, nil -} - -func expandBigQueryJobConfigurationQueryQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryDestinationTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId := original["project_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedDatasetId := original["dataset_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedTableId := original["table_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { - transformed["tableId"] = transformedTableId - } - - if parts := bigqueryTableRegexp.FindStringSubmatch(transformedTableId.(string)); parts != nil { - transformed["projectId"] = parts[1] - transformed["datasetId"] = parts[2] - transformed["tableId"] = parts[3] - } - - return transformed, nil -} - -func expandBigQueryJobConfigurationQueryUserDefinedFunctionResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResourceUri, err := 
expandBigQueryJobConfigurationQueryUserDefinedFunctionResourcesResourceUri(original["resource_uri"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedResourceUri); val.IsValid() && !isEmptyValue(val) { - transformed["resourceUri"] = transformedResourceUri - } - - transformedInlineCode, err := expandBigQueryJobConfigurationQueryUserDefinedFunctionResourcesInlineCode(original["inline_code"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedInlineCode); val.IsValid() && !isEmptyValue(val) { - transformed["inlineCode"] = transformedInlineCode - } - - req = append(req, transformed) - } - return req, nil -} - -func expandBigQueryJobConfigurationQueryUserDefinedFunctionResourcesResourceUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryUserDefinedFunctionResourcesInlineCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryCreateDisposition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryWriteDisposition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryDefaultDataset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId := original["project_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedDatasetId := 
original["dataset_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - if parts := bigqueryDatasetRegexp.FindStringSubmatch(transformedDatasetId.(string)); parts != nil { - transformed["projectId"] = parts[1] - transformed["datasetId"] = parts[2] - } - - return transformed, nil -} - -func expandBigQueryJobConfigurationQueryPriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryAllowLargeResults(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryUseQueryCache(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryFlattenResults(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryMaximumBillingTier(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryMaximumBytesBilled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryUseLegacySql(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryParameterMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQuerySchemaUpdateOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryDestinationEncryptionConfiguration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if 
len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKeyName, err := expandBigQueryJobConfigurationQueryDestinationEncryptionConfigurationKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName - } - - transformedKmsKeyVersion, err := expandBigQueryJobConfigurationQueryDestinationEncryptionConfigurationKmsKeyVersion(original["kms_key_version"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedKmsKeyVersion); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyVersion"] = transformedKmsKeyVersion - } - - return transformed, nil -} - -func expandBigQueryJobConfigurationQueryDestinationEncryptionConfigurationKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryDestinationEncryptionConfigurationKmsKeyVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryScriptOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedStatementTimeoutMs, err := expandBigQueryJobConfigurationQueryScriptOptionsStatementTimeoutMs(original["statement_timeout_ms"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedStatementTimeoutMs); val.IsValid() && !isEmptyValue(val) { - transformed["statementTimeoutMs"] = 
transformedStatementTimeoutMs - } - - transformedStatementByteBudget, err := expandBigQueryJobConfigurationQueryScriptOptionsStatementByteBudget(original["statement_byte_budget"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedStatementByteBudget); val.IsValid() && !isEmptyValue(val) { - transformed["statementByteBudget"] = transformedStatementByteBudget - } - - transformedKeyResultStatement, err := expandBigQueryJobConfigurationQueryScriptOptionsKeyResultStatement(original["key_result_statement"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedKeyResultStatement); val.IsValid() && !isEmptyValue(val) { - transformed["keyResultStatement"] = transformedKeyResultStatement - } - - return transformed, nil -} - -func expandBigQueryJobConfigurationQueryScriptOptionsStatementTimeoutMs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryScriptOptionsStatementByteBudget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationQueryScriptOptionsKeyResultStatement(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoad(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSourceUris, err := expandBigQueryJobConfigurationLoadSourceUris(original["source_uris"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedSourceUris); val.IsValid() && !isEmptyValue(val) { - transformed["sourceUris"] = transformedSourceUris - 
} - - transformedDestinationTable, err := expandBigQueryJobConfigurationLoadDestinationTable(original["destination_table"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedDestinationTable); val.IsValid() && !isEmptyValue(val) { - transformed["destinationTable"] = transformedDestinationTable - } - - transformedCreateDisposition, err := expandBigQueryJobConfigurationLoadCreateDisposition(original["create_disposition"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedCreateDisposition); val.IsValid() && !isEmptyValue(val) { - transformed["createDisposition"] = transformedCreateDisposition - } - - transformedWriteDisposition, err := expandBigQueryJobConfigurationLoadWriteDisposition(original["write_disposition"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedWriteDisposition); val.IsValid() && !isEmptyValue(val) { - transformed["writeDisposition"] = transformedWriteDisposition - } - - transformedNullMarker, err := expandBigQueryJobConfigurationLoadNullMarker(original["null_marker"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedNullMarker); val.IsValid() && !isEmptyValue(val) { - transformed["nullMarker"] = transformedNullMarker - } - - transformedFieldDelimiter, err := expandBigQueryJobConfigurationLoadFieldDelimiter(original["field_delimiter"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedFieldDelimiter); val.IsValid() && !isEmptyValue(val) { - transformed["fieldDelimiter"] = transformedFieldDelimiter - } - - transformedSkipLeadingRows, err := expandBigQueryJobConfigurationLoadSkipLeadingRows(original["skip_leading_rows"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_bigquery_job_reflect.ValueOf(transformedSkipLeadingRows); val.IsValid() && !isEmptyValue(val) { - transformed["skipLeadingRows"] = transformedSkipLeadingRows - } - - transformedEncoding, err := expandBigQueryJobConfigurationLoadEncoding(original["encoding"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedEncoding); val.IsValid() && !isEmptyValue(val) { - transformed["encoding"] = transformedEncoding - } - - transformedQuote, err := expandBigQueryJobConfigurationLoadQuote(original["quote"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedQuote); val.IsValid() && !isEmptyValue(val) { - transformed["quote"] = transformedQuote - } - - transformedMaxBadRecords, err := expandBigQueryJobConfigurationLoadMaxBadRecords(original["max_bad_records"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedMaxBadRecords); val.IsValid() && !isEmptyValue(val) { - transformed["maxBadRecords"] = transformedMaxBadRecords - } - - transformedAllowQuotedNewlines, err := expandBigQueryJobConfigurationLoadAllowQuotedNewlines(original["allow_quoted_newlines"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedAllowQuotedNewlines); val.IsValid() && !isEmptyValue(val) { - transformed["allowQuotedNewlines"] = transformedAllowQuotedNewlines - } - - transformedSourceFormat, err := expandBigQueryJobConfigurationLoadSourceFormat(original["source_format"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedSourceFormat); val.IsValid() && !isEmptyValue(val) { - transformed["sourceFormat"] = transformedSourceFormat - } - - transformedAllowJaggedRows, err := expandBigQueryJobConfigurationLoadAllowJaggedRows(original["allow_jagged_rows"], d, config) - if err != nil { - 
return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedAllowJaggedRows); val.IsValid() && !isEmptyValue(val) { - transformed["allowJaggedRows"] = transformedAllowJaggedRows - } - - transformedIgnoreUnknownValues, err := expandBigQueryJobConfigurationLoadIgnoreUnknownValues(original["ignore_unknown_values"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedIgnoreUnknownValues); val.IsValid() && !isEmptyValue(val) { - transformed["ignoreUnknownValues"] = transformedIgnoreUnknownValues - } - - transformedProjectionFields, err := expandBigQueryJobConfigurationLoadProjectionFields(original["projection_fields"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedProjectionFields); val.IsValid() && !isEmptyValue(val) { - transformed["projectionFields"] = transformedProjectionFields - } - - transformedAutodetect, err := expandBigQueryJobConfigurationLoadAutodetect(original["autodetect"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedAutodetect); val.IsValid() && !isEmptyValue(val) { - transformed["autodetect"] = transformedAutodetect - } - - transformedSchemaUpdateOptions, err := expandBigQueryJobConfigurationLoadSchemaUpdateOptions(original["schema_update_options"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedSchemaUpdateOptions); val.IsValid() && !isEmptyValue(val) { - transformed["schemaUpdateOptions"] = transformedSchemaUpdateOptions - } - - transformedTimePartitioning, err := expandBigQueryJobConfigurationLoadTimePartitioning(original["time_partitioning"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedTimePartitioning); val.IsValid() && !isEmptyValue(val) { - transformed["timePartitioning"] = 
transformedTimePartitioning - } - - transformedDestinationEncryptionConfiguration, err := expandBigQueryJobConfigurationLoadDestinationEncryptionConfiguration(original["destination_encryption_configuration"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedDestinationEncryptionConfiguration); val.IsValid() && !isEmptyValue(val) { - transformed["destinationEncryptionConfiguration"] = transformedDestinationEncryptionConfiguration - } - - return transformed, nil -} - -func expandBigQueryJobConfigurationLoadSourceUris(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadDestinationTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId := original["project_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedDatasetId := original["dataset_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedTableId := original["table_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { - transformed["tableId"] = transformedTableId - } - - if parts := bigqueryTableRegexp.FindStringSubmatch(transformedTableId.(string)); parts != nil { - transformed["projectId"] = parts[1] - transformed["datasetId"] = parts[2] - transformed["tableId"] = parts[3] - } - - return transformed, nil -} - -func expandBigQueryJobConfigurationLoadCreateDisposition(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadWriteDisposition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadNullMarker(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadFieldDelimiter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadSkipLeadingRows(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadEncoding(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadQuote(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadMaxBadRecords(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadAllowQuotedNewlines(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadSourceFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadAllowJaggedRows(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadIgnoreUnknownValues(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadProjectionFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadAutodetect(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadSchemaUpdateOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadTimePartitioning(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedType, err := expandBigQueryJobConfigurationLoadTimePartitioningType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - transformedExpirationMs, err := expandBigQueryJobConfigurationLoadTimePartitioningExpirationMs(original["expiration_ms"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedExpirationMs); val.IsValid() && !isEmptyValue(val) { - transformed["expirationMs"] = transformedExpirationMs - } - - transformedField, err := expandBigQueryJobConfigurationLoadTimePartitioningField(original["field"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedField); val.IsValid() && !isEmptyValue(val) { - transformed["field"] = transformedField - } - - return transformed, nil -} - -func expandBigQueryJobConfigurationLoadTimePartitioningType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadTimePartitioningExpirationMs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadTimePartitioningField(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadDestinationEncryptionConfiguration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKeyName, err := expandBigQueryJobConfigurationLoadDestinationEncryptionConfigurationKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName - } - - transformedKmsKeyVersion, err := expandBigQueryJobConfigurationLoadDestinationEncryptionConfigurationKmsKeyVersion(original["kms_key_version"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedKmsKeyVersion); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyVersion"] = transformedKmsKeyVersion - } - - return transformed, nil -} - -func expandBigQueryJobConfigurationLoadDestinationEncryptionConfigurationKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationLoadDestinationEncryptionConfigurationKmsKeyVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationCopy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSourceTables, err := expandBigQueryJobConfigurationCopySourceTables(original["source_tables"], d, config) - if err != nil { - return nil, err - } else 
if val := resource_bigquery_job_reflect.ValueOf(transformedSourceTables); val.IsValid() && !isEmptyValue(val) { - transformed["sourceTables"] = transformedSourceTables - } - - transformedDestinationTable, err := expandBigQueryJobConfigurationCopyDestinationTable(original["destination_table"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedDestinationTable); val.IsValid() && !isEmptyValue(val) { - transformed["destinationTable"] = transformedDestinationTable - } - - transformedCreateDisposition, err := expandBigQueryJobConfigurationCopyCreateDisposition(original["create_disposition"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedCreateDisposition); val.IsValid() && !isEmptyValue(val) { - transformed["createDisposition"] = transformedCreateDisposition - } - - transformedWriteDisposition, err := expandBigQueryJobConfigurationCopyWriteDisposition(original["write_disposition"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedWriteDisposition); val.IsValid() && !isEmptyValue(val) { - transformed["writeDisposition"] = transformedWriteDisposition - } - - transformedDestinationEncryptionConfiguration, err := expandBigQueryJobConfigurationCopyDestinationEncryptionConfiguration(original["destination_encryption_configuration"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedDestinationEncryptionConfiguration); val.IsValid() && !isEmptyValue(val) { - transformed["destinationEncryptionConfiguration"] = transformedDestinationEncryptionConfiguration - } - - return transformed, nil -} - -func expandBigQueryJobConfigurationCopySourceTables(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw 
== nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId := original["project_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedDatasetId := original["dataset_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedTableId := original["table_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { - transformed["tableId"] = transformedTableId - } - - tableRef := resource_bigquery_job_regexp.MustCompile("projects/(.+)/datasets/(.+)/tables/(.+)") - if parts := tableRef.FindStringSubmatch(transformedTableId.(string)); parts != nil { - transformed["projectId"] = parts[1] - transformed["datasetId"] = parts[2] - transformed["tableId"] = parts[3] - } - - req = append(req, transformed) - } - return req, nil -} - -func expandBigQueryJobConfigurationCopyDestinationTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId := original["project_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedDatasetId := original["dataset_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedTableId := original["table_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedTableId); val.IsValid() 
&& !isEmptyValue(val) { - transformed["tableId"] = transformedTableId - } - - if parts := bigqueryTableRegexp.FindStringSubmatch(transformedTableId.(string)); parts != nil { - transformed["projectId"] = parts[1] - transformed["datasetId"] = parts[2] - transformed["tableId"] = parts[3] - } - - return transformed, nil -} - -func expandBigQueryJobConfigurationCopyCreateDisposition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationCopyWriteDisposition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationCopyDestinationEncryptionConfiguration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKeyName, err := expandBigQueryJobConfigurationCopyDestinationEncryptionConfigurationKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName - } - - transformedKmsKeyVersion, err := expandBigQueryJobConfigurationCopyDestinationEncryptionConfigurationKmsKeyVersion(original["kms_key_version"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedKmsKeyVersion); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyVersion"] = transformedKmsKeyVersion - } - - return transformed, nil -} - -func expandBigQueryJobConfigurationCopyDestinationEncryptionConfigurationKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandBigQueryJobConfigurationCopyDestinationEncryptionConfigurationKmsKeyVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationExtract(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDestinationUris, err := expandBigQueryJobConfigurationExtractDestinationUris(original["destination_uris"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedDestinationUris); val.IsValid() && !isEmptyValue(val) { - transformed["destinationUris"] = transformedDestinationUris - } - - transformedPrintHeader, err := expandBigQueryJobConfigurationExtractPrintHeader(original["print_header"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedPrintHeader); val.IsValid() && !isEmptyValue(val) { - transformed["printHeader"] = transformedPrintHeader - } - - transformedFieldDelimiter, err := expandBigQueryJobConfigurationExtractFieldDelimiter(original["field_delimiter"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedFieldDelimiter); val.IsValid() && !isEmptyValue(val) { - transformed["fieldDelimiter"] = transformedFieldDelimiter - } - - transformedDestinationFormat, err := expandBigQueryJobConfigurationExtractDestinationFormat(original["destination_format"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedDestinationFormat); val.IsValid() && !isEmptyValue(val) { - transformed["destinationFormat"] = transformedDestinationFormat - } - - transformedCompression, err := 
expandBigQueryJobConfigurationExtractCompression(original["compression"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedCompression); val.IsValid() && !isEmptyValue(val) { - transformed["compression"] = transformedCompression - } - - transformedUseAvroLogicalTypes, err := expandBigQueryJobConfigurationExtractUseAvroLogicalTypes(original["use_avro_logical_types"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedUseAvroLogicalTypes); val.IsValid() && !isEmptyValue(val) { - transformed["useAvroLogicalTypes"] = transformedUseAvroLogicalTypes - } - - transformedSourceTable, err := expandBigQueryJobConfigurationExtractSourceTable(original["source_table"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedSourceTable); val.IsValid() && !isEmptyValue(val) { - transformed["sourceTable"] = transformedSourceTable - } - - transformedSourceModel, err := expandBigQueryJobConfigurationExtractSourceModel(original["source_model"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedSourceModel); val.IsValid() && !isEmptyValue(val) { - transformed["sourceModel"] = transformedSourceModel - } - - return transformed, nil -} - -func expandBigQueryJobConfigurationExtractDestinationUris(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationExtractPrintHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationExtractFieldDelimiter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationExtractDestinationFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { 
- return v, nil -} - -func expandBigQueryJobConfigurationExtractCompression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationExtractUseAvroLogicalTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationExtractSourceTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId := original["project_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedDatasetId := original["dataset_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedTableId := original["table_id"] - if val := resource_bigquery_job_reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { - transformed["tableId"] = transformedTableId - } - - if parts := bigqueryTableRegexp.FindStringSubmatch(transformedTableId.(string)); parts != nil { - transformed["projectId"] = parts[1] - transformed["datasetId"] = parts[2] - transformed["tableId"] = parts[3] - } - - return transformed, nil -} - -func expandBigQueryJobConfigurationExtractSourceModel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId, err := expandBigQueryJobConfigurationExtractSourceModelProjectId(original["project_id"], d, config) - if err != 
nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedDatasetId, err := expandBigQueryJobConfigurationExtractSourceModelDatasetId(original["dataset_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedModelId, err := expandBigQueryJobConfigurationExtractSourceModelModelId(original["model_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedModelId); val.IsValid() && !isEmptyValue(val) { - transformed["modelId"] = transformedModelId - } - - return transformed, nil -} - -func expandBigQueryJobConfigurationExtractSourceModelProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationExtractSourceModelDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobConfigurationExtractSourceModelModelId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobJobReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - transformed := make(map[string]interface{}) - transformedJobId, err := expandBigQueryJobJobReferenceJobId(d.Get("job_id"), d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_job_reflect.ValueOf(transformedJobId); val.IsValid() && !isEmptyValue(val) { - transformed["jobId"] = transformedJobId - } - - transformedLocation, err := expandBigQueryJobJobReferenceLocation(d.Get("location"), d, config) - if err != nil { - return nil, err - } else if val := 
resource_bigquery_job_reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - return transformed, nil -} - -func expandBigQueryJobJobReferenceJobId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryJobJobReferenceLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceBigQueryJobEncoder(d *resource_bigquery_job_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - project, err := getProject(d, meta.(*Config)) - if err != nil { - return nil, err - } - obj["jobReference"].(map[string]interface{})["project"] = project - return obj, nil -} - -func resourceBigqueryReservationReservation() *resource_bigquery_reservation_schema.Resource { - return &resource_bigquery_reservation_schema.Resource{ - Create: resourceBigqueryReservationReservationCreate, - Read: resourceBigqueryReservationReservationRead, - Update: resourceBigqueryReservationReservationUpdate, - Delete: resourceBigqueryReservationReservationDelete, - - Importer: &resource_bigquery_reservation_schema.ResourceImporter{ - State: resourceBigqueryReservationReservationImport, - }, - - Timeouts: &resource_bigquery_reservation_schema.ResourceTimeout{ - Create: resource_bigquery_reservation_schema.DefaultTimeout(4 * resource_bigquery_reservation_time.Minute), - Update: resource_bigquery_reservation_schema.DefaultTimeout(4 * resource_bigquery_reservation_time.Minute), - Delete: resource_bigquery_reservation_schema.DefaultTimeout(4 * resource_bigquery_reservation_time.Minute), - }, - - Schema: map[string]*resource_bigquery_reservation_schema.Schema{ - "name": { - Type: resource_bigquery_reservation_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the reservation. 
This field must only contain alphanumeric characters or dash.`, - }, - "slot_capacity": { - Type: resource_bigquery_reservation_schema.TypeInt, - Required: true, - Description: `Minimum slots available to this reservation. A slot is a unit of computational power in BigQuery, and serves as the -unit of parallelism. Queries using this reservation might use more slots during runtime if ignoreIdleSlots is set to false.`, - }, - "ignore_idle_slots": { - Type: resource_bigquery_reservation_schema.TypeBool, - Optional: true, - Description: `If false, any query using this reservation will use idle slots from other reservations within -the same admin project. If true, a query using this reservation will execute with the slot -capacity specified above at most.`, - Default: false, - }, - "location": { - Type: resource_bigquery_reservation_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The geographic location where the transfer config should reside. -Examples: US, EU, asia-northeast1. 
The default value is US.`, - Default: "US", - }, - "project": { - Type: resource_bigquery_reservation_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigqueryReservationReservationCreate(d *resource_bigquery_reservation_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - slotCapacityProp, err := expandBigqueryReservationReservationSlotCapacity(d.Get("slot_capacity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("slot_capacity"); !isEmptyValue(resource_bigquery_reservation_reflect.ValueOf(slotCapacityProp)) && (ok || !resource_bigquery_reservation_reflect.DeepEqual(v, slotCapacityProp)) { - obj["slotCapacity"] = slotCapacityProp - } - ignoreIdleSlotsProp, err := expandBigqueryReservationReservationIgnoreIdleSlots(d.Get("ignore_idle_slots"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ignore_idle_slots"); !isEmptyValue(resource_bigquery_reservation_reflect.ValueOf(ignoreIdleSlotsProp)) && (ok || !resource_bigquery_reservation_reflect.DeepEqual(v, ignoreIdleSlotsProp)) { - obj["ignoreIdleSlots"] = ignoreIdleSlotsProp - } - - url, err := replaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/reservations?reservationId={{name}}") - if err != nil { - return err - } - - resource_bigquery_reservation_log.Printf("[DEBUG] Creating new Reservation: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_reservation_fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, 
url, userAgent, obj, d.Timeout(resource_bigquery_reservation_schema.TimeoutCreate)) - if err != nil { - return resource_bigquery_reservation_fmt.Errorf("Error creating Reservation: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/reservations/{{name}}") - if err != nil { - return resource_bigquery_reservation_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_bigquery_reservation_log.Printf("[DEBUG] Finished creating Reservation %q: %#v", d.Id(), res) - - return resourceBigqueryReservationReservationRead(d, meta) -} - -func resourceBigqueryReservationReservationRead(d *resource_bigquery_reservation_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/reservations/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_reservation_fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_bigquery_reservation_fmt.Sprintf("BigqueryReservationReservation %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_bigquery_reservation_fmt.Errorf("Error reading Reservation: %s", err) - } - - if err := d.Set("slot_capacity", flattenBigqueryReservationReservationSlotCapacity(res["slotCapacity"], d, config)); err != nil { - return resource_bigquery_reservation_fmt.Errorf("Error reading Reservation: %s", err) - } - if err := d.Set("ignore_idle_slots", 
flattenBigqueryReservationReservationIgnoreIdleSlots(res["ignoreIdleSlots"], d, config)); err != nil { - return resource_bigquery_reservation_fmt.Errorf("Error reading Reservation: %s", err) - } - - return nil -} - -func resourceBigqueryReservationReservationUpdate(d *resource_bigquery_reservation_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_reservation_fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - slotCapacityProp, err := expandBigqueryReservationReservationSlotCapacity(d.Get("slot_capacity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("slot_capacity"); !isEmptyValue(resource_bigquery_reservation_reflect.ValueOf(v)) && (ok || !resource_bigquery_reservation_reflect.DeepEqual(v, slotCapacityProp)) { - obj["slotCapacity"] = slotCapacityProp - } - ignoreIdleSlotsProp, err := expandBigqueryReservationReservationIgnoreIdleSlots(d.Get("ignore_idle_slots"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ignore_idle_slots"); !isEmptyValue(resource_bigquery_reservation_reflect.ValueOf(v)) && (ok || !resource_bigquery_reservation_reflect.DeepEqual(v, ignoreIdleSlotsProp)) { - obj["ignoreIdleSlots"] = ignoreIdleSlotsProp - } - - url, err := replaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/reservations/{{name}}") - if err != nil { - return err - } - - resource_bigquery_reservation_log.Printf("[DEBUG] Updating Reservation %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("slot_capacity") { - updateMask = append(updateMask, "slotCapacity") - } - - if d.HasChange("ignore_idle_slots") { - updateMask = append(updateMask, 
"ignoreIdleSlots") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_bigquery_reservation_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_bigquery_reservation_schema.TimeoutUpdate)) - - if err != nil { - return resource_bigquery_reservation_fmt.Errorf("Error updating Reservation %q: %s", d.Id(), err) - } else { - resource_bigquery_reservation_log.Printf("[DEBUG] Finished updating Reservation %q: %#v", d.Id(), res) - } - - return resourceBigqueryReservationReservationRead(d, meta) -} - -func resourceBigqueryReservationReservationDelete(d *resource_bigquery_reservation_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_reservation_fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BigqueryReservationBasePath}}projects/{{project}}/locations/{{location}}/reservations/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_bigquery_reservation_log.Printf("[DEBUG] Deleting Reservation %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_bigquery_reservation_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Reservation") - } - - resource_bigquery_reservation_log.Printf("[DEBUG] Finished deleting Reservation %q: %#v", d.Id(), res) - return nil -} - -func 
resourceBigqueryReservationReservationImport(d *resource_bigquery_reservation_schema.ResourceData, meta interface{}) ([]*resource_bigquery_reservation_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/reservations/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/reservations/{{name}}") - if err != nil { - return nil, resource_bigquery_reservation_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_bigquery_reservation_schema.ResourceData{d}, nil -} - -func flattenBigqueryReservationReservationSlotCapacity(v interface{}, d *resource_bigquery_reservation_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_bigquery_reservation_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenBigqueryReservationReservationIgnoreIdleSlots(v interface{}, d *resource_bigquery_reservation_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBigqueryReservationReservationSlotCapacity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigqueryReservationReservationIgnoreIdleSlots(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceBigQueryRoutine() *resource_bigquery_routine_schema.Resource { - return &resource_bigquery_routine_schema.Resource{ - Create: resourceBigQueryRoutineCreate, - Read: resourceBigQueryRoutineRead, - Update: resourceBigQueryRoutineUpdate, - Delete: resourceBigQueryRoutineDelete, - - Importer: &resource_bigquery_routine_schema.ResourceImporter{ - 
State: resourceBigQueryRoutineImport, - }, - - Timeouts: &resource_bigquery_routine_schema.ResourceTimeout{ - Create: resource_bigquery_routine_schema.DefaultTimeout(4 * resource_bigquery_routine_time.Minute), - Update: resource_bigquery_routine_schema.DefaultTimeout(4 * resource_bigquery_routine_time.Minute), - Delete: resource_bigquery_routine_schema.DefaultTimeout(4 * resource_bigquery_routine_time.Minute), - }, - - Schema: map[string]*resource_bigquery_routine_schema.Schema{ - "definition_body": { - Type: resource_bigquery_routine_schema.TypeString, - Required: true, - Description: `The body of the routine. For functions, this is the expression in the AS clause. -If language=SQL, it is the substring inside (but excluding) the parentheses.`, - }, - "dataset_id": { - Type: resource_bigquery_routine_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the dataset containing this routine`, - }, - "routine_id": { - Type: resource_bigquery_routine_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the the routine. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 256 characters.`, - }, - - "arguments": { - Type: resource_bigquery_routine_schema.TypeList, - Optional: true, - Description: `Input/output argument of a function or a stored procedure.`, - Elem: &resource_bigquery_routine_schema.Resource{ - Schema: map[string]*resource_bigquery_routine_schema.Schema{ - "argument_kind": { - Type: resource_bigquery_routine_schema.TypeString, - Optional: true, - ValidateFunc: resource_bigquery_routine_validation.StringInSlice([]string{"FIXED_TYPE", "ANY_TYPE", ""}, false), - Description: `Defaults to FIXED_TYPE. 
Default value: "FIXED_TYPE" Possible values: ["FIXED_TYPE", "ANY_TYPE"]`, - Default: "FIXED_TYPE", - }, - "data_type": { - Type: resource_bigquery_routine_schema.TypeString, - Optional: true, - ValidateFunc: resource_bigquery_routine_validation.StringIsJSON, - StateFunc: func(v interface{}) string { - s, _ := resource_bigquery_routine_structure.NormalizeJsonString(v) - return s - }, - Description: `A JSON schema for the data type. Required unless argumentKind = ANY_TYPE. -~>**NOTE**: Because this field expects a JSON string, any changes to the string -will create a diff, even if the JSON itself hasn't changed. If the API returns -a different value for the same schema, e.g. it switched the order of values -or replaced STRUCT field type with RECORD field type, we currently cannot -suppress the recurring diff this causes. As a workaround, we recommend using -the schema as returned by the API.`, - }, - "mode": { - Type: resource_bigquery_routine_schema.TypeString, - Optional: true, - ValidateFunc: resource_bigquery_routine_validation.StringInSlice([]string{"IN", "OUT", "INOUT", ""}, false), - Description: `Specifies whether the argument is input or output. Can be set for procedures only. Possible values: ["IN", "OUT", "INOUT"]`, - }, - "name": { - Type: resource_bigquery_routine_schema.TypeString, - Optional: true, - Description: `The name of this argument. Can be absent for function return argument.`, - }, - }, - }, - }, - "description": { - Type: resource_bigquery_routine_schema.TypeString, - Optional: true, - Description: `The description of the routine if defined.`, - }, - "determinism_level": { - Type: resource_bigquery_routine_schema.TypeString, - Optional: true, - ValidateFunc: resource_bigquery_routine_validation.StringInSlice([]string{"DETERMINISM_LEVEL_UNSPECIFIED", "DETERMINISTIC", "NOT_DETERMINISTIC", ""}, false), - Description: `The determinism level of the JavaScript UDF if defined. 
Possible values: ["DETERMINISM_LEVEL_UNSPECIFIED", "DETERMINISTIC", "NOT_DETERMINISTIC"]`, - }, - "imported_libraries": { - Type: resource_bigquery_routine_schema.TypeList, - Optional: true, - Description: `Optional. If language = "JAVASCRIPT", this field stores the path of the -imported JAVASCRIPT libraries.`, - Elem: &resource_bigquery_routine_schema.Schema{ - Type: resource_bigquery_routine_schema.TypeString, - }, - }, - "language": { - Type: resource_bigquery_routine_schema.TypeString, - Optional: true, - ValidateFunc: resource_bigquery_routine_validation.StringInSlice([]string{"SQL", "JAVASCRIPT", ""}, false), - Description: `The language of the routine. Possible values: ["SQL", "JAVASCRIPT"]`, - }, - "return_type": { - Type: resource_bigquery_routine_schema.TypeString, - Optional: true, - ValidateFunc: resource_bigquery_routine_validation.StringIsJSON, - StateFunc: func(v interface{}) string { - s, _ := resource_bigquery_routine_structure.NormalizeJsonString(v) - return s - }, - Description: `A JSON schema for the return type. Optional if language = "SQL"; required otherwise. -If absent, the return type is inferred from definitionBody at query time in each query -that references this routine. If present, then the evaluated result will be cast to -the specified returned type at query time. ~>**NOTE**: Because this field expects a JSON -string, any changes to the string will create a diff, even if the JSON itself hasn't -changed. If the API returns a different value for the same schema, e.g. it switche -d the order of values or replaced STRUCT field type with RECORD field type, we currently -cannot suppress the recurring diff this causes. 
As a workaround, we recommend using -the schema as returned by the API.`, - }, - "routine_type": { - Type: resource_bigquery_routine_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_bigquery_routine_validation.StringInSlice([]string{"SCALAR_FUNCTION", "PROCEDURE", ""}, false), - Description: `The type of routine. Possible values: ["SCALAR_FUNCTION", "PROCEDURE"]`, - }, - "creation_time": { - Type: resource_bigquery_routine_schema.TypeInt, - Computed: true, - Description: `The time when this routine was created, in milliseconds since the -epoch.`, - }, - "last_modified_time": { - Type: resource_bigquery_routine_schema.TypeInt, - Computed: true, - Description: `The time when this routine was modified, in milliseconds since the -epoch.`, - }, - "project": { - Type: resource_bigquery_routine_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigQueryRoutineCreate(d *resource_bigquery_routine_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - routineReferenceProp, err := expandBigQueryRoutineRoutineReference(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(routineReferenceProp)) { - obj["routineReference"] = routineReferenceProp - } - routineTypeProp, err := expandBigQueryRoutineRoutineType(d.Get("routine_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("routine_type"); !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(routineTypeProp)) && (ok || !resource_bigquery_routine_reflect.DeepEqual(v, routineTypeProp)) { - obj["routineType"] = routineTypeProp - } - languageProp, err := expandBigQueryRoutineLanguage(d.Get("language"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("language"); !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(languageProp)) && (ok || !resource_bigquery_routine_reflect.DeepEqual(v, languageProp)) { - obj["language"] = languageProp - } - argumentsProp, err := expandBigQueryRoutineArguments(d.Get("arguments"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("arguments"); !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(argumentsProp)) && (ok || !resource_bigquery_routine_reflect.DeepEqual(v, argumentsProp)) { - obj["arguments"] = argumentsProp - } - returnTypeProp, err := expandBigQueryRoutineReturnType(d.Get("return_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("return_type"); !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(returnTypeProp)) && (ok || !resource_bigquery_routine_reflect.DeepEqual(v, returnTypeProp)) { - obj["returnType"] = returnTypeProp - } - importedLibrariesProp, err := expandBigQueryRoutineImportedLibraries(d.Get("imported_libraries"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("imported_libraries"); !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(importedLibrariesProp)) && (ok || !resource_bigquery_routine_reflect.DeepEqual(v, importedLibrariesProp)) { - obj["importedLibraries"] = importedLibrariesProp - } - definitionBodyProp, err := expandBigQueryRoutineDefinitionBody(d.Get("definition_body"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("definition_body"); !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(definitionBodyProp)) && (ok || !resource_bigquery_routine_reflect.DeepEqual(v, definitionBodyProp)) { - obj["definitionBody"] = definitionBodyProp - } - descriptionProp, err := expandBigQueryRoutineDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(descriptionProp)) && (ok || 
!resource_bigquery_routine_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - determinismLevelProp, err := expandBigQueryRoutineDeterminismLevel(d.Get("determinism_level"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("determinism_level"); !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(determinismLevelProp)) && (ok || !resource_bigquery_routine_reflect.DeepEqual(v, determinismLevelProp)) { - obj["determinismLevel"] = determinismLevelProp - } - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/routines") - if err != nil { - return err - } - - resource_bigquery_routine_log.Printf("[DEBUG] Creating new Routine: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_routine_fmt.Errorf("Error fetching project for Routine: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_bigquery_routine_schema.TimeoutCreate)) - if err != nil { - return resource_bigquery_routine_fmt.Errorf("Error creating Routine: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}") - if err != nil { - return resource_bigquery_routine_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_bigquery_routine_log.Printf("[DEBUG] Finished creating Routine %q: %#v", d.Id(), res) - - return resourceBigQueryRoutineRead(d, meta) -} - -func resourceBigQueryRoutineRead(d *resource_bigquery_routine_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_routine_fmt.Errorf("Error fetching project for Routine: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_bigquery_routine_fmt.Sprintf("BigQueryRoutine %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_bigquery_routine_fmt.Errorf("Error reading Routine: %s", err) - } - - if flattenedProp := flattenBigQueryRoutineRoutineReference(res["routineReference"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*resource_bigquery_routine_googleapi.Error); ok { - return resource_bigquery_routine_fmt.Errorf("Error reading Routine: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return resource_bigquery_routine_fmt.Errorf("Error setting %s: %s", k, err) - } - } - } - } - if err := d.Set("routine_type", flattenBigQueryRoutineRoutineType(res["routineType"], d, config)); err != nil { - return resource_bigquery_routine_fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("creation_time", flattenBigQueryRoutineCreationTime(res["creationTime"], d, config)); err != nil { - return resource_bigquery_routine_fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("last_modified_time", flattenBigQueryRoutineLastModifiedTime(res["lastModifiedTime"], d, config)); err != nil { - return resource_bigquery_routine_fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("language", 
flattenBigQueryRoutineLanguage(res["language"], d, config)); err != nil { - return resource_bigquery_routine_fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("arguments", flattenBigQueryRoutineArguments(res["arguments"], d, config)); err != nil { - return resource_bigquery_routine_fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("return_type", flattenBigQueryRoutineReturnType(res["returnType"], d, config)); err != nil { - return resource_bigquery_routine_fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("imported_libraries", flattenBigQueryRoutineImportedLibraries(res["importedLibraries"], d, config)); err != nil { - return resource_bigquery_routine_fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("definition_body", flattenBigQueryRoutineDefinitionBody(res["definitionBody"], d, config)); err != nil { - return resource_bigquery_routine_fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("description", flattenBigQueryRoutineDescription(res["description"], d, config)); err != nil { - return resource_bigquery_routine_fmt.Errorf("Error reading Routine: %s", err) - } - if err := d.Set("determinism_level", flattenBigQueryRoutineDeterminismLevel(res["determinismLevel"], d, config)); err != nil { - return resource_bigquery_routine_fmt.Errorf("Error reading Routine: %s", err) - } - - return nil -} - -func resourceBigQueryRoutineUpdate(d *resource_bigquery_routine_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_routine_fmt.Errorf("Error fetching project for Routine: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - routineReferenceProp, err := expandBigQueryRoutineRoutineReference(nil, d, config) - if err != nil { - return 
err - } else if !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(routineReferenceProp)) { - obj["routineReference"] = routineReferenceProp - } - routineTypeProp, err := expandBigQueryRoutineRoutineType(d.Get("routine_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("routine_type"); !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(v)) && (ok || !resource_bigquery_routine_reflect.DeepEqual(v, routineTypeProp)) { - obj["routineType"] = routineTypeProp - } - languageProp, err := expandBigQueryRoutineLanguage(d.Get("language"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("language"); !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(v)) && (ok || !resource_bigquery_routine_reflect.DeepEqual(v, languageProp)) { - obj["language"] = languageProp - } - argumentsProp, err := expandBigQueryRoutineArguments(d.Get("arguments"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("arguments"); !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(v)) && (ok || !resource_bigquery_routine_reflect.DeepEqual(v, argumentsProp)) { - obj["arguments"] = argumentsProp - } - returnTypeProp, err := expandBigQueryRoutineReturnType(d.Get("return_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("return_type"); !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(v)) && (ok || !resource_bigquery_routine_reflect.DeepEqual(v, returnTypeProp)) { - obj["returnType"] = returnTypeProp - } - importedLibrariesProp, err := expandBigQueryRoutineImportedLibraries(d.Get("imported_libraries"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("imported_libraries"); !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(v)) && (ok || !resource_bigquery_routine_reflect.DeepEqual(v, importedLibrariesProp)) { - obj["importedLibraries"] = importedLibrariesProp - } - definitionBodyProp, err := 
expandBigQueryRoutineDefinitionBody(d.Get("definition_body"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("definition_body"); !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(v)) && (ok || !resource_bigquery_routine_reflect.DeepEqual(v, definitionBodyProp)) { - obj["definitionBody"] = definitionBodyProp - } - descriptionProp, err := expandBigQueryRoutineDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(v)) && (ok || !resource_bigquery_routine_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - determinismLevelProp, err := expandBigQueryRoutineDeterminismLevel(d.Get("determinism_level"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("determinism_level"); !isEmptyValue(resource_bigquery_routine_reflect.ValueOf(v)) && (ok || !resource_bigquery_routine_reflect.DeepEqual(v, determinismLevelProp)) { - obj["determinismLevel"] = determinismLevelProp - } - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}") - if err != nil { - return err - } - - resource_bigquery_routine_log.Printf("[DEBUG] Updating Routine %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_bigquery_routine_schema.TimeoutUpdate)) - - if err != nil { - return resource_bigquery_routine_fmt.Errorf("Error updating Routine %q: %s", d.Id(), err) - } else { - resource_bigquery_routine_log.Printf("[DEBUG] Finished updating Routine %q: %#v", d.Id(), res) - } - - return resourceBigQueryRoutineRead(d, meta) -} - -func resourceBigQueryRoutineDelete(d *resource_bigquery_routine_schema.ResourceData, meta interface{}) error { - config := 
meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigquery_routine_fmt.Errorf("Error fetching project for Routine: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BigQueryBasePath}}projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_bigquery_routine_log.Printf("[DEBUG] Deleting Routine %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_bigquery_routine_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Routine") - } - - resource_bigquery_routine_log.Printf("[DEBUG] Finished deleting Routine %q: %#v", d.Id(), res) - return nil -} - -func resourceBigQueryRoutineImport(d *resource_bigquery_routine_schema.ResourceData, meta interface{}) ([]*resource_bigquery_routine_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/datasets/(?P[^/]+)/routines/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}") - if err != nil { - return nil, resource_bigquery_routine_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_bigquery_routine_schema.ResourceData{d}, nil -} - -func flattenBigQueryRoutineRoutineReference(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - 
transformed := make(map[string]interface{}) - transformed["dataset_id"] = - flattenBigQueryRoutineRoutineReferenceDatasetId(original["datasetId"], d, config) - transformed["routine_id"] = - flattenBigQueryRoutineRoutineReferenceRoutineId(original["routineId"], d, config) - return []interface{}{transformed} -} - -func flattenBigQueryRoutineRoutineReferenceDatasetId(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineRoutineReferenceRoutineId(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineRoutineType(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineCreationTime(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_bigquery_routine_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenBigQueryRoutineLastModifiedTime(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_bigquery_routine_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenBigQueryRoutineLanguage(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineArguments(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l 
{ - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenBigQueryRoutineArgumentsName(original["name"], d, config), - "argument_kind": flattenBigQueryRoutineArgumentsArgumentKind(original["argumentKind"], d, config), - "mode": flattenBigQueryRoutineArgumentsMode(original["mode"], d, config), - "data_type": flattenBigQueryRoutineArgumentsDataType(original["dataType"], d, config), - }) - } - return transformed -} - -func flattenBigQueryRoutineArgumentsName(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineArgumentsArgumentKind(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineArgumentsMode(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineArgumentsDataType(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - b, err := resource_bigquery_routine_json.Marshal(v) - if err != nil { - - resource_bigquery_routine_log.Printf("[ERROR] failed to marshal schema to JSON: %v", err) - } - return string(b) -} - -func flattenBigQueryRoutineReturnType(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - b, err := resource_bigquery_routine_json.Marshal(v) - if err != nil { - - resource_bigquery_routine_log.Printf("[ERROR] failed to marshal schema to JSON: %v", err) - } - return string(b) -} - -func flattenBigQueryRoutineImportedLibraries(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineDefinitionBody(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenBigQueryRoutineDescription(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigQueryRoutineDeterminismLevel(v interface{}, d *resource_bigquery_routine_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBigQueryRoutineRoutineReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - transformed := make(map[string]interface{}) - transformed["datasetId"] = d.Get("dataset_id") - project, _ := getProject(d, config) - transformed["projectId"] = project - transformed["routineId"] = d.Get("routine_id") - - return transformed, nil -} - -func expandBigQueryRoutineRoutineType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryRoutineLanguage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryRoutineArguments(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandBigQueryRoutineArgumentsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_routine_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedArgumentKind, err := expandBigQueryRoutineArgumentsArgumentKind(original["argument_kind"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_routine_reflect.ValueOf(transformedArgumentKind); val.IsValid() && !isEmptyValue(val) { - transformed["argumentKind"] = transformedArgumentKind - } - - transformedMode, err := 
expandBigQueryRoutineArgumentsMode(original["mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_routine_reflect.ValueOf(transformedMode); val.IsValid() && !isEmptyValue(val) { - transformed["mode"] = transformedMode - } - - transformedDataType, err := expandBigQueryRoutineArgumentsDataType(original["data_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigquery_routine_reflect.ValueOf(transformedDataType); val.IsValid() && !isEmptyValue(val) { - transformed["dataType"] = transformedDataType - } - - req = append(req, transformed) - } - return req, nil -} - -func expandBigQueryRoutineArgumentsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryRoutineArgumentsArgumentKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryRoutineArgumentsMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryRoutineArgumentsDataType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - b := []byte(v.(string)) - if len(b) == 0 { - return nil, nil - } - m := make(map[string]interface{}) - if err := resource_bigquery_routine_json.Unmarshal(b, &m); err != nil { - return nil, err - } - return m, nil -} - -func expandBigQueryRoutineReturnType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - b := []byte(v.(string)) - if len(b) == 0 { - return nil, nil - } - m := make(map[string]interface{}) - if err := resource_bigquery_routine_json.Unmarshal(b, &m); err != nil { - return nil, err - } - return m, nil -} - -func expandBigQueryRoutineImportedLibraries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryRoutineDefinitionBody(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandBigQueryRoutineDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigQueryRoutineDeterminismLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func bigQueryTableSortArrayByName(array []interface{}) { - resource_bigquery_table_sort.Slice(array, func(i, k int) bool { - return array[i].(map[string]interface{})["name"].(string) < array[k].(map[string]interface{})["name"].(string) - }) -} - -func bigQueryArrayToMapIndexedByName(array []interface{}) map[string]interface{} { - out := map[string]interface{}{} - for _, v := range array { - name := v.(map[string]interface{})["name"].(string) - out[name] = v - } - return out -} - -func bigQueryTablecheckNameExists(jsonList []interface{}) error { - for _, m := range jsonList { - if _, ok := m.(map[string]interface{})["name"]; !ok { - return resource_bigquery_table_fmt.Errorf("No name in schema %+v", m) - } - } - - return nil -} - -func jsonCompareWithMapKeyOverride(key string, a, b interface{}, compareMapKeyVal func(key string, val1, val2 map[string]interface{}) bool) (bool, error) { - switch a.(type) { - case []interface{}: - arrayA := a.([]interface{}) - arrayB, ok := b.([]interface{}) - if !ok { - return false, nil - } else if len(arrayA) != len(arrayB) { - return false, nil - } - - if key == "schema" || key == "fields" { - if err := bigQueryTablecheckNameExists(arrayA); err != nil { - return false, err - } - bigQueryTableSortArrayByName(arrayA) - if err := bigQueryTablecheckNameExists(arrayB); err != nil { - return false, err - } - bigQueryTableSortArrayByName(arrayB) - } - for i := range arrayA { - eq, err := jsonCompareWithMapKeyOverride(resource_bigquery_table_strconv.Itoa(i), arrayA[i], arrayB[i], compareMapKeyVal) - if err != nil { - return false, err - } else if !eq { - return false, nil - } - } - return true, nil - case 
map[string]interface{}: - objectA := a.(map[string]interface{}) - objectB, ok := b.(map[string]interface{}) - if !ok { - return false, nil - } - - var unionOfKeys map[string]bool = make(map[string]bool) - for subKey := range objectA { - unionOfKeys[subKey] = true - } - for subKey := range objectB { - unionOfKeys[subKey] = true - } - - for subKey := range unionOfKeys { - eq := compareMapKeyVal(subKey, objectA, objectB) - if !eq { - valA, ok1 := objectA[subKey] - valB, ok2 := objectB[subKey] - if !ok1 || !ok2 { - return false, nil - } - eq, err := jsonCompareWithMapKeyOverride(subKey, valA, valB, compareMapKeyVal) - if err != nil || !eq { - return false, err - } - } - } - return true, nil - case string, float64, bool, nil: - return a == b, nil - default: - resource_bigquery_table_log.Printf("[DEBUG] tried to iterate through json but encountered a non native type to json deserialization... please ensure you are passing a json object from json.Unmarshall") - return false, resource_bigquery_table_errors.New("unable to compare values") - } -} - -func valueIsInArray(value interface{}, array []interface{}) bool { - for _, item := range array { - if item == value { - return true - } - } - return false -} - -func bigQueryTableMapKeyOverride(key string, objectA, objectB map[string]interface{}) bool { - - valA := objectA[key] - valB := objectB[key] - switch key { - case "mode": - eq := bigQueryTableNormalizeMode(valA) == bigQueryTableNormalizeMode(valB) - return eq - case "description": - equivalentSet := []interface{}{nil, ""} - eq := valueIsInArray(valA, equivalentSet) && valueIsInArray(valB, equivalentSet) - return eq - case "type": - if valA == nil || valB == nil { - return false - } - return bigQueryTableTypeEq(valA.(string), valB.(string)) - } - - return false -} - -func bigQueryTableSchemaDiffSuppress(name, old, new string, _ *resource_bigquery_table_schema.ResourceData) bool { - - if old == "null" { - old = "[]" - } - var a, b interface{} - if err := 
resource_bigquery_table_json.Unmarshal([]byte(old), &a); err != nil { - resource_bigquery_table_log.Printf("[DEBUG] unable to unmarshal old json - %v", err) - } - if err := resource_bigquery_table_json.Unmarshal([]byte(new), &b); err != nil { - resource_bigquery_table_log.Printf("[DEBUG] unable to unmarshal new json - %v", err) - } - - eq, err := jsonCompareWithMapKeyOverride(name, a, b, bigQueryTableMapKeyOverride) - if err != nil { - resource_bigquery_table_log.Printf("[DEBUG] %v", err) - resource_bigquery_table_log.Printf("[DEBUG] Error comparing JSON: %v, %v", old, new) - } - - return eq -} - -func bigQueryTableTypeEq(old, new string) bool { - - oldUpper := resource_bigquery_table_strings.ToUpper(old) - newUpper := resource_bigquery_table_strings.ToUpper(new) - - equivalentSet1 := []interface{}{"INTEGER", "INT64"} - equivalentSet2 := []interface{}{"FLOAT", "FLOAT64"} - equivalentSet3 := []interface{}{"BOOLEAN", "BOOL"} - eq0 := oldUpper == newUpper - eq1 := valueIsInArray(oldUpper, equivalentSet1) && valueIsInArray(newUpper, equivalentSet1) - eq2 := valueIsInArray(oldUpper, equivalentSet2) && valueIsInArray(newUpper, equivalentSet2) - eq3 := valueIsInArray(oldUpper, equivalentSet3) && valueIsInArray(newUpper, equivalentSet3) - eq := eq0 || eq1 || eq2 || eq3 - return eq -} - -func bigQueryTableNormalizeMode(mode interface{}) string { - if mode == nil { - return "NULLABLE" - } - - return resource_bigquery_table_strings.ToUpper(mode.(string)) -} - -func bigQueryTableModeIsForceNew(old, new string) bool { - eq := old == new - reqToNull := old == "REQUIRED" && new == "NULLABLE" - return !eq && !reqToNull -} - -func resourceBigQueryTableSchemaIsChangeable(old, new interface{}) (bool, error) { - switch old.(type) { - case []interface{}: - arrayOld := old.([]interface{}) - arrayNew, ok := new.([]interface{}) - if !ok { - - return false, nil - } - if len(arrayOld) > len(arrayNew) { - - return false, nil - } - if err := bigQueryTablecheckNameExists(arrayOld); err != nil 
{ - return false, err - } - mapOld := bigQueryArrayToMapIndexedByName(arrayOld) - if err := bigQueryTablecheckNameExists(arrayNew); err != nil { - return false, err - } - mapNew := bigQueryArrayToMapIndexedByName(arrayNew) - for key := range mapNew { - - if _, ok := mapOld[key]; !ok { - items := mapNew[key].(map[string]interface{}) - for k := range items { - if k == "mode" && resource_bigquery_table_fmt.Sprintf("%v", items[k]) == "REQUIRED" { - return false, nil - } - } - } - } - for key := range mapOld { - - if _, ok := mapNew[key]; !ok { - return false, nil - } - if isChangable, err := - resourceBigQueryTableSchemaIsChangeable(mapOld[key], mapNew[key]); err != nil || !isChangable { - return false, err - } - } - return true, nil - case map[string]interface{}: - objectOld := old.(map[string]interface{}) - objectNew, ok := new.(map[string]interface{}) - if !ok { - - return false, nil - } - var unionOfKeys map[string]bool = make(map[string]bool) - for key := range objectOld { - unionOfKeys[key] = true - } - for key := range objectNew { - unionOfKeys[key] = true - } - for key := range unionOfKeys { - valOld := objectOld[key] - valNew := objectNew[key] - switch key { - case "name": - if valOld != valNew { - return false, nil - } - case "type": - if valOld == nil || valNew == nil { - - return true, nil - } - if !bigQueryTableTypeEq(valOld.(string), valNew.(string)) { - return false, nil - } - case "mode": - if bigQueryTableModeIsForceNew( - bigQueryTableNormalizeMode(valOld), - bigQueryTableNormalizeMode(valNew), - ) { - return false, nil - } - case "fields": - return resourceBigQueryTableSchemaIsChangeable(valOld, valNew) - - } - } - return true, nil - case string, float64, bool, nil: - - resource_bigquery_table_log.Printf("[DEBUG] comparison of generics hit... not expected") - return old == new, nil - default: - resource_bigquery_table_log.Printf("[DEBUG] tried to iterate through json but encountered a non native type to json deserialization... 
please ensure you are passing a json object from json.Unmarshall") - return false, resource_bigquery_table_errors.New("unable to compare values") - } -} - -func resourceBigQueryTableSchemaCustomizeDiffFunc(d TerraformResourceDiff) error { - if _, hasSchema := d.GetOk("schema"); hasSchema { - oldSchema, newSchema := d.GetChange("schema") - oldSchemaText := oldSchema.(string) - newSchemaText := newSchema.(string) - if oldSchemaText == "null" { - - oldSchemaText = "[]" - } - if newSchemaText == "null" { - newSchemaText = "[]" - } - var old, new interface{} - if err := resource_bigquery_table_json.Unmarshal([]byte(oldSchemaText), &old); err != nil { - - resource_bigquery_table_log.Printf("[DEBUG] unable to unmarshal json customized diff - %v", err) - } - if err := resource_bigquery_table_json.Unmarshal([]byte(newSchemaText), &new); err != nil { - - resource_bigquery_table_log.Printf("[DEBUG] unable to unmarshal json customized diff - %v", err) - } - isChangeable, err := resourceBigQueryTableSchemaIsChangeable(old, new) - if err != nil { - return err - } - if !isChangeable { - if err := d.ForceNew("schema"); err != nil { - return err - } - } - return nil - } - return nil -} - -func resourceBigQueryTableSchemaCustomizeDiff(_ resource_bigquery_table_context.Context, d *resource_bigquery_table_schema.ResourceDiff, meta interface{}) error { - return resourceBigQueryTableSchemaCustomizeDiffFunc(d) -} - -func resourceBigQueryTable() *resource_bigquery_table_schema.Resource { - return &resource_bigquery_table_schema.Resource{ - Create: resourceBigQueryTableCreate, - Read: resourceBigQueryTableRead, - Delete: resourceBigQueryTableDelete, - Update: resourceBigQueryTableUpdate, - Importer: &resource_bigquery_table_schema.ResourceImporter{ - State: resourceBigQueryTableImport, - }, - CustomizeDiff: resource_bigquery_table_customdiff.All( - resourceBigQueryTableSchemaCustomizeDiff, - ), - Schema: map[string]*resource_bigquery_table_schema.Schema{ - - "table_id": { - Type: 
resource_bigquery_table_schema.TypeString, - Required: true, - ForceNew: true, - Description: `A unique ID for the resource. Changing this forces a new resource to be created.`, - }, - - "dataset_id": { - Type: resource_bigquery_table_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The dataset ID to create the table in. Changing this forces a new resource to be created.`, - }, - - "project": { - Type: resource_bigquery_table_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs.`, - }, - - "description": { - Type: resource_bigquery_table_schema.TypeString, - Optional: true, - Description: `The field description.`, - }, - - "expiration_time": { - Type: resource_bigquery_table_schema.TypeInt, - Optional: true, - Computed: true, - Description: `The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.`, - }, - - "external_data_configuration": { - Type: resource_bigquery_table_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.`, - Elem: &resource_bigquery_table_schema.Resource{ - Schema: map[string]*resource_bigquery_table_schema.Schema{ - - "autodetect": { - Type: resource_bigquery_table_schema.TypeBool, - Required: true, - Description: `Let BigQuery try to autodetect the schema and format of the table.`, - }, - - "source_format": { - Type: resource_bigquery_table_schema.TypeString, - Required: true, - Description: `The data format. Supported values are: "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "PARQUET", "ORC" and "DATASTORE_BACKUP". 
To use "GOOGLE_SHEETS" the scopes must include "googleapis.com/auth/drive.readonly".`, - ValidateFunc: resource_bigquery_table_validation.StringInSlice([]string{ - "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "DATASTORE_BACKUP", "PARQUET", "ORC", "BIGTABLE", - }, false), - }, - - "source_uris": { - Type: resource_bigquery_table_schema.TypeList, - Required: true, - Description: `A list of the fully-qualified URIs that point to your data in Google Cloud.`, - Elem: &resource_bigquery_table_schema.Schema{Type: resource_bigquery_table_schema.TypeString}, - }, - - "compression": { - Type: resource_bigquery_table_schema.TypeString, - Optional: true, - ValidateFunc: resource_bigquery_table_validation.StringInSlice([]string{"NONE", "GZIP"}, false), - Default: "NONE", - Description: `The compression type of the data source. Valid values are "NONE" or "GZIP".`, - }, - - "schema": { - Type: resource_bigquery_table_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: resource_bigquery_table_validation.StringIsJSON, - StateFunc: func(v interface{}) string { - resource_bigquery_table_json, _ := resource_bigquery_table_structure.NormalizeJsonString(v) - return resource_bigquery_table_json - }, - Description: `A JSON schema for the external table. Schema is required for CSV and JSON formats and is disallowed for Google Cloud Bigtable, Cloud Datastore backups, and Avro formats when using external tables.`, - }, - - "csv_options": { - Type: resource_bigquery_table_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Additional properties to set if source_format is set to "CSV".`, - Elem: &resource_bigquery_table_schema.Resource{ - Schema: map[string]*resource_bigquery_table_schema.Schema{ - - "quote": { - Type: resource_bigquery_table_schema.TypeString, - Required: true, - Description: `The value that is used to quote data sections in a CSV file. 
If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in Terraform escaped as \". Due to limitations with Terraform default values, this value is required to be explicitly set.`, - }, - - "allow_jagged_rows": { - Type: resource_bigquery_table_schema.TypeBool, - Optional: true, - Default: false, - Description: `Indicates if BigQuery should accept rows that are missing trailing optional columns.`, - }, - - "allow_quoted_newlines": { - Type: resource_bigquery_table_schema.TypeBool, - Optional: true, - Default: false, - Description: `Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.`, - }, - - "encoding": { - Type: resource_bigquery_table_schema.TypeString, - Optional: true, - ValidateFunc: resource_bigquery_table_validation.StringInSlice([]string{"ISO-8859-1", "UTF-8"}, false), - Default: "UTF-8", - Description: `The character encoding of the data. 
The supported values are UTF-8 or ISO-8859-1.`, - }, - - "field_delimiter": { - Type: resource_bigquery_table_schema.TypeString, - Optional: true, - Default: ",", - Description: `The separator for fields in a CSV file.`, - }, - - "skip_leading_rows": { - Type: resource_bigquery_table_schema.TypeInt, - Optional: true, - Default: 0, - Description: `The number of rows at the top of a CSV file that BigQuery will skip when reading the data.`, - }, - }, - }, - }, - - "google_sheets_options": { - Type: resource_bigquery_table_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Additional options if source_format is set to "GOOGLE_SHEETS".`, - Elem: &resource_bigquery_table_schema.Resource{ - Schema: map[string]*resource_bigquery_table_schema.Schema{ - - "range": { - Type: resource_bigquery_table_schema.TypeString, - Optional: true, - Description: `Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20"`, - AtLeastOneOf: []string{ - "external_data_configuration.0.google_sheets_options.0.skip_leading_rows", - "external_data_configuration.0.google_sheets_options.0.range", - }, - }, - - "skip_leading_rows": { - Type: resource_bigquery_table_schema.TypeInt, - Optional: true, - Description: `The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.`, - AtLeastOneOf: []string{ - "external_data_configuration.0.google_sheets_options.0.skip_leading_rows", - "external_data_configuration.0.google_sheets_options.0.range", - }, - }, - }, - }, - }, - - "hive_partitioning_options": { - Type: resource_bigquery_table_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `When set, configures hive partitioning support. 
Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification.`, - Elem: &resource_bigquery_table_schema.Resource{ - Schema: map[string]*resource_bigquery_table_schema.Schema{ - - "mode": { - Type: resource_bigquery_table_schema.TypeString, - Optional: true, - Description: `When set, what mode of hive partitioning to use when reading data.`, - }, - - "require_partition_filter": { - Type: resource_bigquery_table_schema.TypeBool, - Optional: true, - Description: `If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.`, - }, - - "source_uri_prefix": { - Type: resource_bigquery_table_schema.TypeString, - Optional: true, - Description: `When hive partition detection is requested, a common for all source uris must be required. The prefix must end immediately before the partition key encoding begins.`, - }, - }, - }, - }, - - "ignore_unknown_values": { - Type: resource_bigquery_table_schema.TypeBool, - Optional: true, - Description: `Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. 
The default value is false.`, - }, - - "max_bad_records": { - Type: resource_bigquery_table_schema.TypeInt, - Optional: true, - Description: `The maximum number of bad records that BigQuery can ignore when reading data.`, - }, - }, - }, - }, - - "friendly_name": { - Type: resource_bigquery_table_schema.TypeString, - Optional: true, - Description: `A descriptive name for the table.`, - }, - - "labels": { - Type: resource_bigquery_table_schema.TypeMap, - Optional: true, - Elem: &resource_bigquery_table_schema.Schema{Type: resource_bigquery_table_schema.TypeString}, - Description: `A mapping of labels to assign to the resource.`, - }, - - "schema": { - Type: resource_bigquery_table_schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: resource_bigquery_table_validation.StringIsJSON, - StateFunc: func(v interface{}) string { - resource_bigquery_table_json, _ := resource_bigquery_table_structure.NormalizeJsonString(v) - return resource_bigquery_table_json - }, - DiffSuppressFunc: bigQueryTableSchemaDiffSuppress, - Description: `A JSON schema for the table.`, - }, - - "view": { - Type: resource_bigquery_table_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `If specified, configures this table as a view.`, - Elem: &resource_bigquery_table_schema.Resource{ - Schema: map[string]*resource_bigquery_table_schema.Schema{ - - "query": { - Type: resource_bigquery_table_schema.TypeString, - Required: true, - Description: `A query that BigQuery executes when the view is referenced.`, - }, - - "use_legacy_sql": { - Type: resource_bigquery_table_schema.TypeBool, - Optional: true, - Default: true, - Description: `Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. 
If set to false, the view will use BigQuery's standard SQL`, - }, - }, - }, - }, - - "materialized_view": { - Type: resource_bigquery_table_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `If specified, configures this table as a materialized view.`, - Elem: &resource_bigquery_table_schema.Resource{ - Schema: map[string]*resource_bigquery_table_schema.Schema{ - - "enable_refresh": { - Type: resource_bigquery_table_schema.TypeBool, - Optional: true, - Default: true, - Description: `Specifies if BigQuery should automatically refresh materialized view when the base table is updated. The default is true.`, - }, - - "refresh_interval_ms": { - Type: resource_bigquery_table_schema.TypeInt, - Default: 1800000, - Optional: true, - Description: `Specifies maximum frequency at which this materialized view will be refreshed. The default is 1800000`, - }, - - "query": { - Type: resource_bigquery_table_schema.TypeString, - Required: true, - ForceNew: true, - Description: `A query whose result is persisted.`, - }, - }, - }, - }, - - "time_partitioning": { - Type: resource_bigquery_table_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `If specified, configures time-based partitioning for this table.`, - Elem: &resource_bigquery_table_schema.Resource{ - Schema: map[string]*resource_bigquery_table_schema.Schema{ - - "expiration_ms": { - Type: resource_bigquery_table_schema.TypeInt, - Optional: true, - Computed: true, - Description: `Number of milliseconds for which to keep the storage for a partition.`, - }, - - "type": { - Type: resource_bigquery_table_schema.TypeString, - Required: true, - Description: `The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.`, - ValidateFunc: resource_bigquery_table_validation.StringInSlice([]string{"DAY", "HOUR", "MONTH", "YEAR"}, false), - }, - - "field": { - Type: resource_bigquery_table_schema.TypeString, - Optional: true, - ForceNew: 
true, - Description: `The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.`, - }, - - "require_partition_filter": { - Type: resource_bigquery_table_schema.TypeBool, - Optional: true, - Description: `If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.`, - }, - }, - }, - }, - - "range_partitioning": { - Type: resource_bigquery_table_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `If specified, configures range-based partitioning for this table.`, - Elem: &resource_bigquery_table_schema.Resource{ - Schema: map[string]*resource_bigquery_table_schema.Schema{ - - "field": { - Type: resource_bigquery_table_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The field used to determine how to create a range-based partition.`, - }, - - "range": { - Type: resource_bigquery_table_schema.TypeList, - Required: true, - MaxItems: 1, - Description: `Information required to partition based on ranges. Structure is documented below.`, - Elem: &resource_bigquery_table_schema.Resource{ - Schema: map[string]*resource_bigquery_table_schema.Schema{ - - "start": { - Type: resource_bigquery_table_schema.TypeInt, - Required: true, - Description: `Start of the range partitioning, inclusive.`, - }, - - "end": { - Type: resource_bigquery_table_schema.TypeInt, - Required: true, - Description: `End of the range partitioning, exclusive.`, - }, - - "interval": { - Type: resource_bigquery_table_schema.TypeInt, - Required: true, - Description: `The width of each range within the partition.`, - }, - }, - }, - }, - }, - }, - }, - - "clustering": { - Type: resource_bigquery_table_schema.TypeList, - Optional: true, - MaxItems: 4, - Description: `Specifies column names to use for data clustering. 
Up to four top-level columns are allowed, and should be specified in descending priority order.`, - Elem: &resource_bigquery_table_schema.Schema{Type: resource_bigquery_table_schema.TypeString}, - }, - "encryption_configuration": { - Type: resource_bigquery_table_schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Description: `Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user.`, - Elem: &resource_bigquery_table_schema.Resource{ - Schema: map[string]*resource_bigquery_table_schema.Schema{ - "kms_key_name": { - Type: resource_bigquery_table_schema.TypeString, - Required: true, - Description: `The self link or full name of a key which should be used to encrypt this table. Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the google_bigquery_default_service_account datasource and the google_kms_crypto_key_iam_binding resource.`, - }, - "kms_key_version": { - Type: resource_bigquery_table_schema.TypeString, - Computed: true, - Description: `The self link or full name of the kms key version used to encrypt this table.`, - }, - }, - }, - }, - - "creation_time": { - Type: resource_bigquery_table_schema.TypeInt, - Computed: true, - Description: `The time when this table was created, in milliseconds since the epoch.`, - }, - - "etag": { - Type: resource_bigquery_table_schema.TypeString, - Computed: true, - Description: `A hash of the resource.`, - }, - - "last_modified_time": { - Type: resource_bigquery_table_schema.TypeInt, - Computed: true, - Description: `The time when this table was last modified, in milliseconds since the epoch.`, - }, - - "location": { - Type: resource_bigquery_table_schema.TypeString, - Computed: true, - Description: `The geographic location where the table resides. 
This value is inherited from the dataset.`, - }, - - "num_bytes": { - Type: resource_bigquery_table_schema.TypeInt, - Computed: true, - Description: `The geographic location where the table resides. This value is inherited from the dataset.`, - }, - - "num_long_term_bytes": { - Type: resource_bigquery_table_schema.TypeInt, - Computed: true, - Description: `The number of bytes in the table that are considered "long-term storage".`, - }, - - "num_rows": { - Type: resource_bigquery_table_schema.TypeInt, - Computed: true, - Description: `The number of rows of data in this table, excluding any data in the streaming buffer.`, - }, - - "self_link": { - Type: resource_bigquery_table_schema.TypeString, - Computed: true, - Description: `The URI of the created resource.`, - }, - - "type": { - Type: resource_bigquery_table_schema.TypeString, - Computed: true, - Description: `Describes the table type.`, - }, - - "deletion_protection": { - Type: resource_bigquery_table_schema.TypeBool, - Optional: true, - Default: true, - Description: `Whether or not to allow Terraform to destroy the instance. 
Unless this field is set to false in Terraform state, a terraform destroy or terraform apply that would delete the instance will fail.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceTable(d *resource_bigquery_table_schema.ResourceData, meta interface{}) (*resource_bigquery_table_bigquery.Table, error) { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - table := &resource_bigquery_table_bigquery.Table{ - TableReference: &resource_bigquery_table_bigquery.TableReference{ - DatasetId: d.Get("dataset_id").(string), - TableId: d.Get("table_id").(string), - ProjectId: project, - }, - } - - if v, ok := d.GetOk("view"); ok { - table.View = expandView(v) - } - - if v, ok := d.GetOk("materialized_view"); ok { - table.MaterializedView = expandMaterializedView(v) - } - - if v, ok := d.GetOk("description"); ok { - table.Description = v.(string) - } - - if v, ok := d.GetOk("expiration_time"); ok { - table.ExpirationTime = int64(v.(int)) - } - - if v, ok := d.GetOk("external_data_configuration"); ok { - externalDataConfiguration, err := expandExternalDataConfiguration(v) - if err != nil { - return nil, err - } - - table.ExternalDataConfiguration = externalDataConfiguration - } - - if v, ok := d.GetOk("friendly_name"); ok { - table.FriendlyName = v.(string) - } - - if v, ok := d.GetOk("encryption_configuration.0.kms_key_name"); ok { - table.EncryptionConfiguration = &resource_bigquery_table_bigquery.EncryptionConfiguration{ - KmsKeyName: v.(string), - } - } - - if v, ok := d.GetOk("labels"); ok { - labels := map[string]string{} - - for k, v := range v.(map[string]interface{}) { - labels[k] = v.(string) - } - - table.Labels = labels - } - - if v, ok := d.GetOk("schema"); ok { - resource_bigquery_table_schema, err := expandSchema(v) - if err != nil { - return nil, err - } - table.Schema = resource_bigquery_table_schema - } - - if v, ok := d.GetOk("time_partitioning"); ok { - table.TimePartitioning = 
expandTimePartitioning(v) - } - - if v, ok := d.GetOk("range_partitioning"); ok { - rangePartitioning, err := expandRangePartitioning(v) - if err != nil { - return nil, err - } - - table.RangePartitioning = rangePartitioning - } - - if v, ok := d.GetOk("clustering"); ok { - table.Clustering = &resource_bigquery_table_bigquery.Clustering{ - Fields: convertStringArr(v.([]interface{})), - ForceSendFields: []string{"Fields"}, - } - } - - return table, nil -} - -func resourceBigQueryTableCreate(d *resource_bigquery_table_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - table, err := resourceTable(d, meta) - if err != nil { - return err - } - - datasetID := d.Get("dataset_id").(string) - - resource_bigquery_table_log.Printf("[INFO] Creating BigQuery table: %s", table.TableReference.TableId) - - res, err := config.NewBigQueryClient(userAgent).Tables.Insert(project, datasetID, table).Do() - if err != nil { - return err - } - - resource_bigquery_table_log.Printf("[INFO] BigQuery table %s has been created", res.Id) - d.SetId(resource_bigquery_table_fmt.Sprintf("projects/%s/datasets/%s/tables/%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId)) - - return resourceBigQueryTableRead(d, meta) -} - -func resourceBigQueryTableRead(d *resource_bigquery_table_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - resource_bigquery_table_log.Printf("[INFO] Reading BigQuery table: %s", d.Id()) - - project, err := getProject(d, config) - if err != nil { - return err - } - - datasetID := d.Get("dataset_id").(string) - tableID := d.Get("table_id").(string) - - res, err := 
config.NewBigQueryClient(userAgent).Tables.Get(project, datasetID, tableID).Do() - if err != nil { - return handleNotFoundError(err, d, resource_bigquery_table_fmt.Sprintf("BigQuery table %q", tableID)) - } - - if err := d.Set("project", project); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("description", res.Description); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("expiration_time", res.ExpirationTime); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting expiration_time: %s", err) - } - if err := d.Set("friendly_name", res.FriendlyName); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting friendly_name: %s", err) - } - if err := d.Set("labels", res.Labels); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting labels: %s", err) - } - if err := d.Set("creation_time", res.CreationTime); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting creation_time: %s", err) - } - if err := d.Set("etag", res.Etag); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting etag: %s", err) - } - if err := d.Set("last_modified_time", res.LastModifiedTime); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting last_modified_time: %s", err) - } - if err := d.Set("location", res.Location); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("num_bytes", res.NumBytes); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting num_bytes: %s", err) - } - if err := d.Set("table_id", res.TableReference.TableId); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting table_id: %s", err) - } - if err := d.Set("dataset_id", res.TableReference.DatasetId); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting dataset_id: %s", err) - } - 
if err := d.Set("num_long_term_bytes", res.NumLongTermBytes); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting num_long_term_bytes: %s", err) - } - if err := d.Set("num_rows", res.NumRows); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting num_rows: %s", err) - } - if err := d.Set("self_link", res.SelfLink); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("type", res.Type); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting type: %s", err) - } - - if res.ExternalDataConfiguration != nil { - externalDataConfiguration, err := flattenExternalDataConfiguration(res.ExternalDataConfiguration) - if err != nil { - return err - } - - if v, ok := d.GetOk("external_data_configuration"); ok { - - edc := v.([]interface{})[0].(map[string]interface{}) - if edc["schema"] != nil { - externalDataConfiguration[0]["schema"] = edc["schema"] - } - } - - if err := d.Set("external_data_configuration", externalDataConfiguration); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting external_data_configuration: %s", err) - } - } - - if res.TimePartitioning != nil { - if err := d.Set("time_partitioning", flattenTimePartitioning(res.TimePartitioning)); err != nil { - return err - } - } - - if res.RangePartitioning != nil { - if err := d.Set("range_partitioning", flattenRangePartitioning(res.RangePartitioning)); err != nil { - return err - } - } - - if res.Clustering != nil { - if err := d.Set("clustering", res.Clustering.Fields); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting clustering: %s", err) - } - } - if res.EncryptionConfiguration != nil { - if err := d.Set("encryption_configuration", flattenEncryptionConfiguration(res.EncryptionConfiguration)); err != nil { - return err - } - } - - if res.Schema != nil { - resource_bigquery_table_schema, err := flattenSchema(res.Schema) - if err != nil { - return err - } - if 
err := d.Set("schema", resource_bigquery_table_schema); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting schema: %s", err) - } - } - - if res.View != nil { - view := flattenView(res.View) - if err := d.Set("view", view); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting view: %s", err) - } - } - - if res.MaterializedView != nil { - materialized_view := flattenMaterializedView(res.MaterializedView) - - if err := d.Set("materialized_view", materialized_view); err != nil { - return resource_bigquery_table_fmt.Errorf("Error setting materialized view: %s", err) - } - } - - return nil -} - -func resourceBigQueryTableUpdate(d *resource_bigquery_table_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - table, err := resourceTable(d, meta) - if err != nil { - return err - } - - resource_bigquery_table_log.Printf("[INFO] Updating BigQuery table: %s", d.Id()) - - project, err := getProject(d, config) - if err != nil { - return err - } - - datasetID := d.Get("dataset_id").(string) - tableID := d.Get("table_id").(string) - - if _, err = config.NewBigQueryClient(userAgent).Tables.Update(project, datasetID, tableID, table).Do(); err != nil { - return err - } - - return resourceBigQueryTableRead(d, meta) -} - -func resourceBigQueryTableDelete(d *resource_bigquery_table_schema.ResourceData, meta interface{}) error { - if d.Get("deletion_protection").(bool) { - return resource_bigquery_table_fmt.Errorf("cannot destroy instance without setting deletion_protection=false and running `terraform apply`") - } - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - resource_bigquery_table_log.Printf("[INFO] Deleting BigQuery table: %s", d.Id()) - - project, err := getProject(d, config) - if err != nil { - return err - } - - datasetID := 
d.Get("dataset_id").(string) - tableID := d.Get("table_id").(string) - - if err := config.NewBigQueryClient(userAgent).Tables.Delete(project, datasetID, tableID).Do(); err != nil { - return err - } - - d.SetId("") - - return nil -} - -func expandExternalDataConfiguration(cfg interface{}) (*resource_bigquery_table_bigquery.ExternalDataConfiguration, error) { - raw := cfg.([]interface{})[0].(map[string]interface{}) - - edc := &resource_bigquery_table_bigquery.ExternalDataConfiguration{ - Autodetect: raw["autodetect"].(bool), - } - - sourceUris := []string{} - for _, rawSourceUri := range raw["source_uris"].([]interface{}) { - sourceUris = append(sourceUris, rawSourceUri.(string)) - } - if len(sourceUris) > 0 { - edc.SourceUris = sourceUris - } - - if v, ok := raw["compression"]; ok { - edc.Compression = v.(string) - } - if v, ok := raw["csv_options"]; ok { - edc.CsvOptions = expandCsvOptions(v) - } - if v, ok := raw["google_sheets_options"]; ok { - edc.GoogleSheetsOptions = expandGoogleSheetsOptions(v) - } - if v, ok := raw["hive_partitioning_options"]; ok { - edc.HivePartitioningOptions = expandHivePartitioningOptions(v) - } - if v, ok := raw["ignore_unknown_values"]; ok { - edc.IgnoreUnknownValues = v.(bool) - } - if v, ok := raw["max_bad_records"]; ok { - edc.MaxBadRecords = int64(v.(int)) - } - if v, ok := raw["schema"]; ok { - resource_bigquery_table_schema, err := expandSchema(v) - if err != nil { - return nil, err - } - edc.Schema = resource_bigquery_table_schema - } - if v, ok := raw["source_format"]; ok { - edc.SourceFormat = v.(string) - } - - return edc, nil - -} - -func flattenExternalDataConfiguration(edc *resource_bigquery_table_bigquery.ExternalDataConfiguration) ([]map[string]interface{}, error) { - result := map[string]interface{}{} - - result["autodetect"] = edc.Autodetect - result["source_uris"] = edc.SourceUris - - if edc.Compression != "" { - result["compression"] = edc.Compression - } - - if edc.CsvOptions != nil { - result["csv_options"] = 
flattenCsvOptions(edc.CsvOptions) - } - - if edc.GoogleSheetsOptions != nil { - result["google_sheets_options"] = flattenGoogleSheetsOptions(edc.GoogleSheetsOptions) - } - - if edc.HivePartitioningOptions != nil { - result["hive_partitioning_options"] = flattenHivePartitioningOptions(edc.HivePartitioningOptions) - } - - if edc.IgnoreUnknownValues { - result["ignore_unknown_values"] = edc.IgnoreUnknownValues - } - if edc.MaxBadRecords != 0 { - result["max_bad_records"] = edc.MaxBadRecords - } - - if edc.SourceFormat != "" { - result["source_format"] = edc.SourceFormat - } - - return []map[string]interface{}{result}, nil -} - -func expandCsvOptions(configured interface{}) *resource_bigquery_table_bigquery.CsvOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &resource_bigquery_table_bigquery.CsvOptions{} - - if v, ok := raw["allow_jagged_rows"]; ok { - opts.AllowJaggedRows = v.(bool) - } - - if v, ok := raw["allow_quoted_newlines"]; ok { - opts.AllowQuotedNewlines = v.(bool) - } - - if v, ok := raw["encoding"]; ok { - opts.Encoding = v.(string) - } - - if v, ok := raw["field_delimiter"]; ok { - opts.FieldDelimiter = v.(string) - } - - if v, ok := raw["skip_leading_rows"]; ok { - opts.SkipLeadingRows = int64(v.(int)) - } - - if v, ok := raw["quote"]; ok { - quote := v.(string) - opts.Quote = "e - } - - opts.ForceSendFields = []string{"Quote"} - - return opts -} - -func flattenCsvOptions(opts *resource_bigquery_table_bigquery.CsvOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.AllowJaggedRows { - result["allow_jagged_rows"] = opts.AllowJaggedRows - } - - if opts.AllowQuotedNewlines { - result["allow_quoted_newlines"] = opts.AllowQuotedNewlines - } - - if opts.Encoding != "" { - result["encoding"] = opts.Encoding - } - - if opts.FieldDelimiter != "" { - result["field_delimiter"] = opts.FieldDelimiter - } - - if opts.SkipLeadingRows != 0 { - 
result["skip_leading_rows"] = opts.SkipLeadingRows - } - - if opts.Quote != nil { - result["quote"] = *opts.Quote - } - - return []map[string]interface{}{result} -} - -func expandGoogleSheetsOptions(configured interface{}) *resource_bigquery_table_bigquery.GoogleSheetsOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &resource_bigquery_table_bigquery.GoogleSheetsOptions{} - - if v, ok := raw["range"]; ok { - opts.Range = v.(string) - } - - if v, ok := raw["skip_leading_rows"]; ok { - opts.SkipLeadingRows = int64(v.(int)) - } - return opts -} - -func flattenGoogleSheetsOptions(opts *resource_bigquery_table_bigquery.GoogleSheetsOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.Range != "" { - result["range"] = opts.Range - } - - if opts.SkipLeadingRows != 0 { - result["skip_leading_rows"] = opts.SkipLeadingRows - } - - return []map[string]interface{}{result} -} - -func expandHivePartitioningOptions(configured interface{}) *resource_bigquery_table_bigquery.HivePartitioningOptions { - if len(configured.([]interface{})) == 0 { - return nil - } - - raw := configured.([]interface{})[0].(map[string]interface{}) - opts := &resource_bigquery_table_bigquery.HivePartitioningOptions{} - - if v, ok := raw["mode"]; ok { - opts.Mode = v.(string) - } - - if v, ok := raw["require_partition_filter"]; ok { - opts.RequirePartitionFilter = v.(bool) - } - - if v, ok := raw["source_uri_prefix"]; ok { - opts.SourceUriPrefix = v.(string) - } - - return opts -} - -func flattenHivePartitioningOptions(opts *resource_bigquery_table_bigquery.HivePartitioningOptions) []map[string]interface{} { - result := map[string]interface{}{} - - if opts.Mode != "" { - result["mode"] = opts.Mode - } - - if opts.RequirePartitionFilter { - result["require_partition_filter"] = opts.RequirePartitionFilter - } - - if opts.SourceUriPrefix != "" { - result["source_uri_prefix"] = 
opts.SourceUriPrefix - } - - return []map[string]interface{}{result} -} - -func expandSchema(raw interface{}) (*resource_bigquery_table_bigquery.TableSchema, error) { - var fields []*resource_bigquery_table_bigquery.TableFieldSchema - - if len(raw.(string)) == 0 { - return nil, nil - } - - if err := resource_bigquery_table_json.Unmarshal([]byte(raw.(string)), &fields); err != nil { - return nil, err - } - - return &resource_bigquery_table_bigquery.TableSchema{Fields: fields}, nil -} - -func flattenSchema(tableSchema *resource_bigquery_table_bigquery.TableSchema) (string, error) { - resource_bigquery_table_schema, err := resource_bigquery_table_json.Marshal(tableSchema.Fields) - if err != nil { - return "", err - } - - return string(resource_bigquery_table_schema), nil -} - -func expandTimePartitioning(configured interface{}) *resource_bigquery_table_bigquery.TimePartitioning { - raw := configured.([]interface{})[0].(map[string]interface{}) - tp := &resource_bigquery_table_bigquery.TimePartitioning{Type: raw["type"].(string)} - - if v, ok := raw["field"]; ok { - tp.Field = v.(string) - } - - if v, ok := raw["expiration_ms"]; ok { - tp.ExpirationMs = int64(v.(int)) - } - - if v, ok := raw["require_partition_filter"]; ok { - tp.RequirePartitionFilter = v.(bool) - } - - return tp -} - -func expandRangePartitioning(configured interface{}) (*resource_bigquery_table_bigquery.RangePartitioning, error) { - if configured == nil { - return nil, nil - } - - rpList := configured.([]interface{}) - if len(rpList) == 0 || rpList[0] == nil { - return nil, resource_bigquery_table_errors.New("Error casting range partitioning interface to expected structure") - } - - rangePartJson := rpList[0].(map[string]interface{}) - rp := &resource_bigquery_table_bigquery.RangePartitioning{ - Field: rangePartJson["field"].(string), - } - - if v, ok := rangePartJson["range"]; ok && v != nil { - rangeLs := v.([]interface{}) - if len(rangeLs) != 1 || rangeLs[0] == nil { - return nil, 
resource_bigquery_table_errors.New("Non-empty range must be given for range partitioning") - } - - rangeJson := rangeLs[0].(map[string]interface{}) - rp.Range = &resource_bigquery_table_bigquery.RangePartitioningRange{ - Start: int64(rangeJson["start"].(int)), - End: int64(rangeJson["end"].(int)), - Interval: int64(rangeJson["interval"].(int)), - ForceSendFields: []string{"Start"}, - } - } - - return rp, nil -} - -func flattenEncryptionConfiguration(ec *resource_bigquery_table_bigquery.EncryptionConfiguration) []map[string]interface{} { - re := resource_bigquery_table_regexp.MustCompile(`(projects/.*/locations/.*/keyRings/.*/cryptoKeys/.*)/cryptoKeyVersions/.*`) - paths := re.FindStringSubmatch(ec.KmsKeyName) - - if len(paths) > 0 { - return []map[string]interface{}{ - { - "kms_key_name": paths[0], - "kms_key_version": ec.KmsKeyName, - }, - } - } - - return []map[string]interface{}{{"kms_key_name": ec.KmsKeyName, "kms_key_version": ""}} -} - -func flattenTimePartitioning(tp *resource_bigquery_table_bigquery.TimePartitioning) []map[string]interface{} { - result := map[string]interface{}{"type": tp.Type} - - if tp.Field != "" { - result["field"] = tp.Field - } - - if tp.ExpirationMs != 0 { - result["expiration_ms"] = tp.ExpirationMs - } - - if tp.RequirePartitionFilter { - result["require_partition_filter"] = tp.RequirePartitionFilter - } - - return []map[string]interface{}{result} -} - -func flattenRangePartitioning(rp *resource_bigquery_table_bigquery.RangePartitioning) []map[string]interface{} { - result := map[string]interface{}{ - "field": rp.Field, - "range": []map[string]interface{}{ - { - "start": rp.Range.Start, - "end": rp.Range.End, - "interval": rp.Range.Interval, - }, - }, - } - - return []map[string]interface{}{result} -} - -func expandView(configured interface{}) *resource_bigquery_table_bigquery.ViewDefinition { - raw := configured.([]interface{})[0].(map[string]interface{}) - vd := &resource_bigquery_table_bigquery.ViewDefinition{Query: 
raw["query"].(string)} - - if v, ok := raw["use_legacy_sql"]; ok { - vd.UseLegacySql = v.(bool) - vd.ForceSendFields = append(vd.ForceSendFields, "UseLegacySql") - } - - return vd -} - -func flattenView(vd *resource_bigquery_table_bigquery.ViewDefinition) []map[string]interface{} { - result := map[string]interface{}{"query": vd.Query} - result["use_legacy_sql"] = vd.UseLegacySql - - return []map[string]interface{}{result} -} - -func expandMaterializedView(configured interface{}) *resource_bigquery_table_bigquery.MaterializedViewDefinition { - raw := configured.([]interface{})[0].(map[string]interface{}) - mvd := &resource_bigquery_table_bigquery.MaterializedViewDefinition{Query: raw["query"].(string)} - - if v, ok := raw["enable_refresh"]; ok { - mvd.EnableRefresh = v.(bool) - mvd.ForceSendFields = append(mvd.ForceSendFields, "EnableRefresh") - } - - if v, ok := raw["refresh_interval_ms"]; ok { - mvd.RefreshIntervalMs = int64(v.(int)) - mvd.ForceSendFields = append(mvd.ForceSendFields, "RefreshIntervalMs") - } - - return mvd -} - -func flattenMaterializedView(mvd *resource_bigquery_table_bigquery.MaterializedViewDefinition) []map[string]interface{} { - result := map[string]interface{}{"query": mvd.Query} - result["enable_refresh"] = mvd.EnableRefresh - result["refresh_interval_ms"] = mvd.RefreshIntervalMs - - return []map[string]interface{}{result} -} - -func resourceBigQueryTableImport(d *resource_bigquery_table_schema.ResourceData, meta interface{}) ([]*resource_bigquery_table_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/datasets/(?P[^/]+)/tables/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - if err := d.Set("deletion_protection", true); err != nil { - return nil, resource_bigquery_table_fmt.Errorf("Error setting deletion_protection: %s", err) - } - - id, err := replaceVars(d, config, 
"projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}") - if err != nil { - return nil, resource_bigquery_table_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_bigquery_table_schema.ResourceData{d}, nil -} - -func resourceBigtableAppProfile() *resource_bigtable_app_profile_schema.Resource { - return &resource_bigtable_app_profile_schema.Resource{ - Create: resourceBigtableAppProfileCreate, - Read: resourceBigtableAppProfileRead, - Update: resourceBigtableAppProfileUpdate, - Delete: resourceBigtableAppProfileDelete, - - Importer: &resource_bigtable_app_profile_schema.ResourceImporter{ - State: resourceBigtableAppProfileImport, - }, - - Timeouts: &resource_bigtable_app_profile_schema.ResourceTimeout{ - Create: resource_bigtable_app_profile_schema.DefaultTimeout(4 * resource_bigtable_app_profile_time.Minute), - Update: resource_bigtable_app_profile_schema.DefaultTimeout(4 * resource_bigtable_app_profile_time.Minute), - Delete: resource_bigtable_app_profile_schema.DefaultTimeout(4 * resource_bigtable_app_profile_time.Minute), - }, - - Schema: map[string]*resource_bigtable_app_profile_schema.Schema{ - "app_profile_id": { - Type: resource_bigtable_app_profile_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The unique name of the app profile in the form '[_a-zA-Z0-9][-_.a-zA-Z0-9]*'.`, - }, - "description": { - Type: resource_bigtable_app_profile_schema.TypeString, - Optional: true, - Description: `Long form description of the use case for this app profile.`, - }, - "ignore_warnings": { - Type: resource_bigtable_app_profile_schema.TypeBool, - Optional: true, - Description: `If true, ignore safety checks when deleting/updating the app profile.`, - Default: false, - }, - "instance": { - Type: resource_bigtable_app_profile_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareResourceNames, - Description: `The name of the instance to create the app profile within.`, - }, - 
"multi_cluster_routing_use_any": { - Type: resource_bigtable_app_profile_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `If true, read/write requests are routed to the nearest cluster in the instance, and will fail over to the nearest cluster that is available -in the event of transient errors or delays. Clusters in a region are considered equidistant. Choosing this option sacrifices read-your-writes -consistency to improve availability.`, - ExactlyOneOf: []string{"single_cluster_routing", "multi_cluster_routing_use_any"}, - }, - "single_cluster_routing": { - Type: resource_bigtable_app_profile_schema.TypeList, - Optional: true, - Description: `Use a single-cluster routing policy.`, - MaxItems: 1, - Elem: &resource_bigtable_app_profile_schema.Resource{ - Schema: map[string]*resource_bigtable_app_profile_schema.Schema{ - "cluster_id": { - Type: resource_bigtable_app_profile_schema.TypeString, - Required: true, - Description: `The cluster to which read/write requests should be routed.`, - }, - "allow_transactional_writes": { - Type: resource_bigtable_app_profile_schema.TypeBool, - Optional: true, - Description: `If true, CheckAndMutateRow and ReadModifyWriteRow requests are allowed by this app profile. -It is unsafe to send these requests to the same table/row/column in multiple clusters.`, - }, - }, - }, - ExactlyOneOf: []string{"single_cluster_routing", "multi_cluster_routing_use_any"}, - }, - "name": { - Type: resource_bigtable_app_profile_schema.TypeString, - Computed: true, - Description: `The unique name of the requested app profile. 
Values are of the form 'projects//instances//appProfiles/'.`, - }, - "project": { - Type: resource_bigtable_app_profile_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigtableAppProfileCreate(d *resource_bigtable_app_profile_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandBigtableAppProfileDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_bigtable_app_profile_reflect.ValueOf(descriptionProp)) && (ok || !resource_bigtable_app_profile_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - multiClusterRoutingUseAnyProp, err := expandBigtableAppProfileMultiClusterRoutingUseAny(d.Get("multi_cluster_routing_use_any"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("multi_cluster_routing_use_any"); !isEmptyValue(resource_bigtable_app_profile_reflect.ValueOf(multiClusterRoutingUseAnyProp)) && (ok || !resource_bigtable_app_profile_reflect.DeepEqual(v, multiClusterRoutingUseAnyProp)) { - obj["multiClusterRoutingUseAny"] = multiClusterRoutingUseAnyProp - } - singleClusterRoutingProp, err := expandBigtableAppProfileSingleClusterRouting(d.Get("single_cluster_routing"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("single_cluster_routing"); !isEmptyValue(resource_bigtable_app_profile_reflect.ValueOf(singleClusterRoutingProp)) && (ok || !resource_bigtable_app_profile_reflect.DeepEqual(v, singleClusterRoutingProp)) { - obj["singleClusterRouting"] = singleClusterRoutingProp - } - - obj, err = resourceBigtableAppProfileEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{BigtableBasePath}}projects/{{project}}/instances/{{instance}}/appProfiles?appProfileId={{app_profile_id}}&ignoreWarnings={{ignore_warnings}}") - if err != nil { - return err - } - - resource_bigtable_app_profile_log.Printf("[DEBUG] Creating new AppProfile: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigtable_app_profile_fmt.Errorf("Error fetching project for AppProfile: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_bigtable_app_profile_schema.TimeoutCreate)) - if err != nil { - return resource_bigtable_app_profile_fmt.Errorf("Error creating AppProfile: %s", err) - } - if err := d.Set("name", flattenBigtableAppProfileName(res["name"], d, config)); err != nil { - return resource_bigtable_app_profile_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}") - if err != nil { - return resource_bigtable_app_profile_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_bigtable_app_profile_log.Printf("[DEBUG] Finished creating AppProfile %q: %#v", d.Id(), res) - - return resourceBigtableAppProfileRead(d, meta) -} - -func resourceBigtableAppProfileRead(d *resource_bigtable_app_profile_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigtableBasePath}}projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigtable_app_profile_fmt.Errorf("Error 
fetching project for AppProfile: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_bigtable_app_profile_fmt.Sprintf("BigtableAppProfile %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_bigtable_app_profile_fmt.Errorf("Error reading AppProfile: %s", err) - } - - if err := d.Set("name", flattenBigtableAppProfileName(res["name"], d, config)); err != nil { - return resource_bigtable_app_profile_fmt.Errorf("Error reading AppProfile: %s", err) - } - if err := d.Set("description", flattenBigtableAppProfileDescription(res["description"], d, config)); err != nil { - return resource_bigtable_app_profile_fmt.Errorf("Error reading AppProfile: %s", err) - } - if err := d.Set("multi_cluster_routing_use_any", flattenBigtableAppProfileMultiClusterRoutingUseAny(res["multiClusterRoutingUseAny"], d, config)); err != nil { - return resource_bigtable_app_profile_fmt.Errorf("Error reading AppProfile: %s", err) - } - if err := d.Set("single_cluster_routing", flattenBigtableAppProfileSingleClusterRouting(res["singleClusterRouting"], d, config)); err != nil { - return resource_bigtable_app_profile_fmt.Errorf("Error reading AppProfile: %s", err) - } - - return nil -} - -func resourceBigtableAppProfileUpdate(d *resource_bigtable_app_profile_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigtable_app_profile_fmt.Errorf("Error fetching project for AppProfile: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := 
expandBigtableAppProfileDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_bigtable_app_profile_reflect.ValueOf(v)) && (ok || !resource_bigtable_app_profile_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - singleClusterRoutingProp, err := expandBigtableAppProfileSingleClusterRouting(d.Get("single_cluster_routing"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("single_cluster_routing"); !isEmptyValue(resource_bigtable_app_profile_reflect.ValueOf(v)) && (ok || !resource_bigtable_app_profile_reflect.DeepEqual(v, singleClusterRoutingProp)) { - obj["singleClusterRouting"] = singleClusterRoutingProp - } - - obj, err = resourceBigtableAppProfileEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BigtableBasePath}}projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}?ignoreWarnings={{ignore_warnings}}") - if err != nil { - return err - } - - resource_bigtable_app_profile_log.Printf("[DEBUG] Updating AppProfile %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("single_cluster_routing") { - updateMask = append(updateMask, "singleClusterRouting") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_bigtable_app_profile_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_bigtable_app_profile_schema.TimeoutUpdate)) - - if err != nil { - return resource_bigtable_app_profile_fmt.Errorf("Error updating AppProfile %q: %s", d.Id(), err) - } else { - resource_bigtable_app_profile_log.Printf("[DEBUG] Finished 
updating AppProfile %q: %#v", d.Id(), res) - } - - return resourceBigtableAppProfileRead(d, meta) -} - -func resourceBigtableAppProfileDelete(d *resource_bigtable_app_profile_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_bigtable_app_profile_fmt.Errorf("Error fetching project for AppProfile: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BigtableBasePath}}projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}?ignoreWarnings={{ignore_warnings}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_bigtable_app_profile_log.Printf("[DEBUG] Deleting AppProfile %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_bigtable_app_profile_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "AppProfile") - } - - resource_bigtable_app_profile_log.Printf("[DEBUG] Finished deleting AppProfile %q: %#v", d.Id(), res) - return nil -} - -func resourceBigtableAppProfileImport(d *resource_bigtable_app_profile_schema.ResourceData, meta interface{}) ([]*resource_bigtable_app_profile_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/instances/(?P[^/]+)/appProfiles/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance}}/appProfiles/{{app_profile_id}}") - if err != nil { - return nil, resource_bigtable_app_profile_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - 
return []*resource_bigtable_app_profile_schema.ResourceData{d}, nil -} - -func flattenBigtableAppProfileName(v interface{}, d *resource_bigtable_app_profile_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigtableAppProfileDescription(v interface{}, d *resource_bigtable_app_profile_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigtableAppProfileMultiClusterRoutingUseAny(v interface{}, d *resource_bigtable_app_profile_schema.ResourceData, config *Config) interface{} { - return v != nil -} - -func flattenBigtableAppProfileSingleClusterRouting(v interface{}, d *resource_bigtable_app_profile_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cluster_id"] = - flattenBigtableAppProfileSingleClusterRoutingClusterId(original["clusterId"], d, config) - transformed["allow_transactional_writes"] = - flattenBigtableAppProfileSingleClusterRoutingAllowTransactionalWrites(original["allowTransactionalWrites"], d, config) - return []interface{}{transformed} -} - -func flattenBigtableAppProfileSingleClusterRoutingClusterId(v interface{}, d *resource_bigtable_app_profile_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBigtableAppProfileSingleClusterRoutingAllowTransactionalWrites(v interface{}, d *resource_bigtable_app_profile_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBigtableAppProfileDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigtableAppProfileMultiClusterRoutingUseAny(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil || !v.(bool) { - return nil, nil - } - - return resource_bigtable_app_profile_bigtableadmin.MultiClusterRoutingUseAny{}, nil -} - -func 
expandBigtableAppProfileSingleClusterRouting(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedClusterId, err := expandBigtableAppProfileSingleClusterRoutingClusterId(original["cluster_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigtable_app_profile_reflect.ValueOf(transformedClusterId); val.IsValid() && !isEmptyValue(val) { - transformed["clusterId"] = transformedClusterId - } - - transformedAllowTransactionalWrites, err := expandBigtableAppProfileSingleClusterRoutingAllowTransactionalWrites(original["allow_transactional_writes"], d, config) - if err != nil { - return nil, err - } else if val := resource_bigtable_app_profile_reflect.ValueOf(transformedAllowTransactionalWrites); val.IsValid() && !isEmptyValue(val) { - transformed["allowTransactionalWrites"] = transformedAllowTransactionalWrites - } - - return transformed, nil -} - -func expandBigtableAppProfileSingleClusterRoutingClusterId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBigtableAppProfileSingleClusterRoutingAllowTransactionalWrites(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceBigtableAppProfileEncoder(d *resource_bigtable_app_profile_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - if err := d.Set("instance", GetResourceNameFromSelfLink(d.Get("instance").(string))); err != nil { - return nil, resource_bigtable_app_profile_fmt.Errorf("Error setting instance: %s", err) - } - return obj, nil -} - -const ( - GCPolicyModeIntersection = "INTERSECTION" - GCPolicyModeUnion = "UNION" -) - -func resourceBigtableGCPolicyCustomizeDiffFunc(diff 
TerraformResourceDiff) error { - count := diff.Get("max_age.#").(int) - if count < 1 { - return nil - } - - oldDays, newDays := diff.GetChange("max_age.0.days") - oldDuration, newDuration := diff.GetChange("max_age.0.duration") - resource_bigtable_gc_policy_log.Printf("days: %v %v", oldDays, newDays) - resource_bigtable_gc_policy_log.Printf("duration: %v %v", oldDuration, newDuration) - - if oldDuration == "" && newDuration != "" { - - do, err := resource_bigtable_gc_policy_time.ParseDuration(newDuration.(string)) - if err != nil { - return err - } - dn := resource_bigtable_gc_policy_time.Hour * 24 * resource_bigtable_gc_policy_time.Duration(oldDays.(int)) - if do == dn { - err := diff.Clear("max_age.0.days") - if err != nil { - return err - } - err = diff.Clear("max_age.0.duration") - if err != nil { - return err - } - } - } - - return nil -} - -func resourceBigtableGCPolicyCustomizeDiff(_ resource_bigtable_gc_policy_context.Context, d *resource_bigtable_gc_policy_schema.ResourceDiff, meta interface{}) error { - return resourceBigtableGCPolicyCustomizeDiffFunc(d) -} - -func resourceBigtableGCPolicy() *resource_bigtable_gc_policy_schema.Resource { - return &resource_bigtable_gc_policy_schema.Resource{ - Create: resourceBigtableGCPolicyCreate, - Read: resourceBigtableGCPolicyRead, - Delete: resourceBigtableGCPolicyDestroy, - CustomizeDiff: resourceBigtableGCPolicyCustomizeDiff, - - Schema: map[string]*resource_bigtable_gc_policy_schema.Schema{ - "instance_name": { - Type: resource_bigtable_gc_policy_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareResourceNames, - Description: `The name of the Bigtable instance.`, - }, - - "table": { - Type: resource_bigtable_gc_policy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the table.`, - }, - - "column_family": { - Type: resource_bigtable_gc_policy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the column family.`, - }, 
- - "mode": { - Type: resource_bigtable_gc_policy_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `If multiple policies are set, you should choose between UNION OR INTERSECTION.`, - ValidateFunc: resource_bigtable_gc_policy_validation.StringInSlice([]string{GCPolicyModeIntersection, GCPolicyModeUnion}, false), - }, - - "max_age": { - Type: resource_bigtable_gc_policy_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `GC policy that applies to all cells older than the given age.`, - MaxItems: 1, - Elem: &resource_bigtable_gc_policy_schema.Resource{ - Schema: map[string]*resource_bigtable_gc_policy_schema.Schema{ - "days": { - Type: resource_bigtable_gc_policy_schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: true, - Deprecated: "Deprecated in favor of duration", - Description: `Number of days before applying GC policy.`, - ExactlyOneOf: []string{"max_age.0.days", "max_age.0.duration"}, - }, - "duration": { - Type: resource_bigtable_gc_policy_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `Duration before applying GC policy`, - ValidateFunc: validateDuration(), - ExactlyOneOf: []string{"max_age.0.days", "max_age.0.duration"}, - }, - }, - }, - }, - - "max_version": { - Type: resource_bigtable_gc_policy_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `GC policy that applies to all versions of a cell except for the most recent.`, - Elem: &resource_bigtable_gc_policy_schema.Resource{ - Schema: map[string]*resource_bigtable_gc_policy_schema.Schema{ - "number": { - Type: resource_bigtable_gc_policy_schema.TypeInt, - Required: true, - ForceNew: true, - Description: `Number of version before applying the GC policy.`, - }, - }, - }, - }, - - "project": { - Type: resource_bigtable_gc_policy_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigtableGCPolicyCreate(d *resource_bigtable_gc_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - ctx := resource_bigtable_gc_policy_context.Background() - - project, err := getProject(d, config) - if err != nil { - return err - } - - instanceName := GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return resource_bigtable_gc_policy_fmt.Errorf("Error starting admin client. %s", err) - } - if err := d.Set("instance_name", instanceName); err != nil { - return resource_bigtable_gc_policy_fmt.Errorf("Error setting instance_name: %s", err) - } - - defer c.Close() - - gcPolicy, err := generateBigtableGCPolicy(d) - if err != nil { - return err - } - - tableName := d.Get("table").(string) - columnFamily := d.Get("column_family").(string) - - err = retryTimeDuration(func() error { - reqErr := c.SetGCPolicy(ctx, tableName, columnFamily, gcPolicy) - return reqErr - }, d.Timeout(resource_bigtable_gc_policy_schema.TimeoutCreate), isBigTableRetryableError) - if err != nil { - return err - } - - table, err := c.TableInfo(ctx, tableName) - if err != nil { - return resource_bigtable_gc_policy_fmt.Errorf("Error retrieving table. Could not find %s in %s. 
%s", tableName, instanceName, err) - } - - for _, i := range table.FamilyInfos { - if i.Name == columnFamily { - d.SetId(i.GCPolicy) - } - } - - return resourceBigtableGCPolicyRead(d, meta) -} - -func resourceBigtableGCPolicyRead(d *resource_bigtable_gc_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - ctx := resource_bigtable_gc_policy_context.Background() - - project, err := getProject(d, config) - if err != nil { - return err - } - - instanceName := GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return resource_bigtable_gc_policy_fmt.Errorf("Error starting admin client. %s", err) - } - - defer c.Close() - - name := d.Get("table").(string) - ti, err := c.TableInfo(ctx, name) - if err != nil { - resource_bigtable_gc_policy_log.Printf("[WARN] Removing %s because it's gone", name) - d.SetId("") - return nil - } - - for _, fi := range ti.FamilyInfos { - if fi.Name == name { - d.SetId(fi.GCPolicy) - break - } - } - - if err := d.Set("project", project); err != nil { - return resource_bigtable_gc_policy_fmt.Errorf("Error setting project: %s", err) - } - - return nil -} - -func resourceBigtableGCPolicyDestroy(d *resource_bigtable_gc_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - ctx := resource_bigtable_gc_policy_context.Background() - - project, err := getProject(d, config) - if err != nil { - return err - } - - instanceName := GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return resource_bigtable_gc_policy_fmt.Errorf("Error starting admin client. 
%s", err) - } - - defer c.Close() - - err = retryTimeDuration(func() error { - reqErr := c.SetGCPolicy(ctx, d.Get("table").(string), d.Get("column_family").(string), resource_bigtable_gc_policy_bigtable.NoGcPolicy()) - return reqErr - }, d.Timeout(resource_bigtable_gc_policy_schema.TimeoutDelete), isBigTableRetryableError) - if err != nil { - return err - } - - d.SetId("") - - return nil -} - -func generateBigtableGCPolicy(d *resource_bigtable_gc_policy_schema.ResourceData) (resource_bigtable_gc_policy_bigtable.GCPolicy, error) { - var policies []resource_bigtable_gc_policy_bigtable.GCPolicy - mode := d.Get("mode").(string) - ma, aok := d.GetOk("max_age") - mv, vok := d.GetOk("max_version") - - if !aok && !vok { - return resource_bigtable_gc_policy_bigtable.NoGcPolicy(), nil - } - - if mode == "" && aok && vok { - return nil, resource_bigtable_gc_policy_fmt.Errorf("If multiple policies are set, mode can't be empty") - } - - if aok { - l, _ := ma.([]interface{}) - d, err := getMaxAgeDuration(l[0].(map[string]interface{})) - if err != nil { - return nil, err - } - - policies = append(policies, resource_bigtable_gc_policy_bigtable.MaxAgePolicy(d)) - } - - if vok { - l, _ := mv.([]interface{}) - n, _ := l[0].(map[string]interface{})["number"].(int) - - policies = append(policies, resource_bigtable_gc_policy_bigtable.MaxVersionsPolicy(n)) - } - - switch mode { - case GCPolicyModeUnion: - return resource_bigtable_gc_policy_bigtable.UnionPolicy(policies...), nil - case GCPolicyModeIntersection: - return resource_bigtable_gc_policy_bigtable.IntersectionPolicy(policies...), nil - } - - return policies[0], nil -} - -func getMaxAgeDuration(values map[string]interface{}) (resource_bigtable_gc_policy_time.Duration, error) { - d := values["duration"].(string) - if d != "" { - return resource_bigtable_gc_policy_time.ParseDuration(d) - } - - days := values["days"].(int) - - return resource_bigtable_gc_policy_time.Hour * 24 * resource_bigtable_gc_policy_time.Duration(days), nil -} 
- -func resourceBigtableInstance() *resource_bigtable_instance_schema.Resource { - return &resource_bigtable_instance_schema.Resource{ - Create: resourceBigtableInstanceCreate, - Read: resourceBigtableInstanceRead, - Update: resourceBigtableInstanceUpdate, - Delete: resourceBigtableInstanceDestroy, - - Importer: &resource_bigtable_instance_schema.ResourceImporter{ - State: resourceBigtableInstanceImport, - }, - - CustomizeDiff: resource_bigtable_instance_customdiff.All( - resourceBigtableInstanceClusterReorderTypeList, - ), - - SchemaVersion: 1, - StateUpgraders: []resource_bigtable_instance_schema.StateUpgrader{ - { - Type: resourceBigtableInstanceResourceV0().CoreConfigSchema().ImpliedType(), - Upgrade: resourceBigtableInstanceUpgradeV0, - Version: 0, - }, - }, - - Schema: map[string]*resource_bigtable_instance_schema.Schema{ - "name": { - Type: resource_bigtable_instance_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name (also called Instance Id in the Cloud Console) of the Cloud Bigtable instance.`, - }, - - "cluster": { - Type: resource_bigtable_instance_schema.TypeList, - Optional: true, - Computed: true, - Description: `A block of cluster configuration options. This can be specified at least once.`, - Elem: &resource_bigtable_instance_schema.Resource{ - Schema: map[string]*resource_bigtable_instance_schema.Schema{ - "cluster_id": { - Type: resource_bigtable_instance_schema.TypeString, - Required: true, - Description: `The ID of the Cloud Bigtable cluster.`, - }, - "zone": { - Type: resource_bigtable_instance_schema.TypeString, - Computed: true, - Optional: true, - Description: `The zone to create the Cloud Bigtable cluster in. Each cluster must have a different zone in the same region. 
Zones that support Bigtable instances are noted on the Cloud Bigtable locations page.`, - }, - "num_nodes": { - Type: resource_bigtable_instance_schema.TypeInt, - Optional: true, - - Computed: true, - ValidateFunc: resource_bigtable_instance_validation.IntAtLeast(1), - Description: `The number of nodes in your Cloud Bigtable cluster. Required, with a minimum of 1 for a PRODUCTION instance. Must be left unset for a DEVELOPMENT instance.`, - }, - "storage_type": { - Type: resource_bigtable_instance_schema.TypeString, - Optional: true, - Default: "SSD", - ValidateFunc: resource_bigtable_instance_validation.StringInSlice([]string{"SSD", "HDD"}, false), - Description: `The storage type to use. One of "SSD" or "HDD". Defaults to "SSD".`, - }, - "kms_key_name": { - Type: resource_bigtable_instance_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: `Describes the Cloud KMS encryption key that will be used to protect the destination Bigtable cluster. The requirements for this key are: 1) The Cloud Bigtable service account associated with the project that contains this cluster must be granted the cloudkms.cryptoKeyEncrypterDecrypter role on the CMEK key. 2) Only regional keys can be used and the region of the CMEK key must match the region of the cluster. 3) All clusters within an instance must use the same CMEK key. Values are of the form projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}`, - }, - }, - }, - }, - "display_name": { - Type: resource_bigtable_instance_schema.TypeString, - Optional: true, - Computed: true, - Description: `The human-readable display name of the Bigtable instance. Defaults to the instance name.`, - }, - - "instance_type": { - Type: resource_bigtable_instance_schema.TypeString, - Optional: true, - Default: "PRODUCTION", - ValidateFunc: resource_bigtable_instance_validation.StringInSlice([]string{"DEVELOPMENT", "PRODUCTION"}, false), - Description: `The instance type to create. 
One of "DEVELOPMENT" or "PRODUCTION". Defaults to "PRODUCTION".`, - Deprecated: `It is recommended to leave this field unspecified since the distinction between "DEVELOPMENT" and "PRODUCTION" instances is going away, and all instances will become "PRODUCTION" instances. This means that new and existing "DEVELOPMENT" instances will be converted to "PRODUCTION" instances. It is recommended for users to use "PRODUCTION" instances in any case, since a 1-node "PRODUCTION" instance is functionally identical to a "DEVELOPMENT" instance, but without the accompanying restrictions.`, - }, - - "deletion_protection": { - Type: resource_bigtable_instance_schema.TypeBool, - Optional: true, - Default: true, - Description: `Whether or not to allow Terraform to destroy the instance. Unless this field is set to false in Terraform state, a terraform destroy or terraform apply that would delete the instance will fail.`, - }, - - "labels": { - Type: resource_bigtable_instance_schema.TypeMap, - Optional: true, - Elem: &resource_bigtable_instance_schema.Schema{Type: resource_bigtable_instance_schema.TypeString}, - Description: `A mapping of labels to assign to the resource.`, - }, - - "project": { - Type: resource_bigtable_instance_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigtableInstanceCreate(d *resource_bigtable_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - ctx := resource_bigtable_instance_context.Background() - - project, err := getProject(d, config) - if err != nil { - return err - } - - conf := &resource_bigtable_instance_bigtable.InstanceWithClustersConfig{ - InstanceID: d.Get("name").(string), - } - - displayName, ok := d.GetOk("display_name") - if !ok { - displayName = conf.InstanceID - } - conf.DisplayName = displayName.(string) - - if _, ok := d.GetOk("labels"); ok { - conf.Labels = expandLabels(d) - } - - switch d.Get("instance_type").(string) { - case "DEVELOPMENT": - conf.InstanceType = resource_bigtable_instance_bigtable.DEVELOPMENT - case "PRODUCTION": - conf.InstanceType = resource_bigtable_instance_bigtable.PRODUCTION - } - - conf.Clusters, err = expandBigtableClusters(d.Get("cluster").([]interface{}), conf.InstanceID, config) - if err != nil { - return err - } - - c, err := config.BigTableClientFactory(userAgent).NewInstanceAdminClient(project) - if err != nil { - return resource_bigtable_instance_fmt.Errorf("Error starting instance admin client. %s", err) - } - - defer c.Close() - - err = c.CreateInstanceWithClusters(ctx, conf) - if err != nil { - return resource_bigtable_instance_fmt.Errorf("Error creating instance. 
%s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") - if err != nil { - return resource_bigtable_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return resourceBigtableInstanceRead(d, meta) -} - -func resourceBigtableInstanceRead(d *resource_bigtable_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - ctx := resource_bigtable_instance_context.Background() - - project, err := getProject(d, config) - if err != nil { - return err - } - - c, err := config.BigTableClientFactory(userAgent).NewInstanceAdminClient(project) - if err != nil { - return resource_bigtable_instance_fmt.Errorf("Error starting instance admin client. %s", err) - } - - defer c.Close() - - instanceName := d.Get("name").(string) - - instance, err := c.InstanceInfo(ctx, instanceName) - if err != nil { - resource_bigtable_instance_log.Printf("[WARN] Removing %s because it's gone", instanceName) - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_bigtable_instance_fmt.Errorf("Error setting project: %s", err) - } - - clusters, err := c.Clusters(ctx, instance.Name) - if err != nil { - return resource_bigtable_instance_fmt.Errorf("Error retrieving instance clusters. 
%s", err) - } - - clustersNewState := []map[string]interface{}{} - for _, cluster := range clusters { - clustersNewState = append(clustersNewState, flattenBigtableCluster(cluster)) - } - - resource_bigtable_instance_log.Printf("[DEBUG] Setting clusters in state: %#v", clustersNewState) - err = d.Set("cluster", clustersNewState) - if err != nil { - return resource_bigtable_instance_fmt.Errorf("Error setting clusters in state: %s", err.Error()) - } - - if err := d.Set("name", instance.Name); err != nil { - return resource_bigtable_instance_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("display_name", instance.DisplayName); err != nil { - return resource_bigtable_instance_fmt.Errorf("Error setting display_name: %s", err) - } - if err := d.Set("labels", instance.Labels); err != nil { - return resource_bigtable_instance_fmt.Errorf("Error setting labels: %s", err) - } - - return nil -} - -func resourceBigtableInstanceUpdate(d *resource_bigtable_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - ctx := resource_bigtable_instance_context.Background() - - project, err := getProject(d, config) - if err != nil { - return err - } - - c, err := config.BigTableClientFactory(userAgent).NewInstanceAdminClient(project) - if err != nil { - return resource_bigtable_instance_fmt.Errorf("Error starting instance admin client. 
%s", err) - } - defer c.Close() - - conf := &resource_bigtable_instance_bigtable.InstanceWithClustersConfig{ - InstanceID: d.Get("name").(string), - } - - displayName, ok := d.GetOk("display_name") - if !ok { - displayName = conf.InstanceID - } - conf.DisplayName = displayName.(string) - - if d.HasChange("labels") { - conf.Labels = expandLabels(d) - } - - switch d.Get("instance_type").(string) { - case "DEVELOPMENT": - conf.InstanceType = resource_bigtable_instance_bigtable.DEVELOPMENT - case "PRODUCTION": - conf.InstanceType = resource_bigtable_instance_bigtable.PRODUCTION - } - - conf.Clusters, err = expandBigtableClusters(d.Get("cluster").([]interface{}), conf.InstanceID, config) - if err != nil { - return err - } - - _, err = resource_bigtable_instance_bigtable.UpdateInstanceAndSyncClusters(ctx, c, conf) - if err != nil { - return resource_bigtable_instance_fmt.Errorf("Error updating instance. %s", err) - } - - return resourceBigtableInstanceRead(d, meta) -} - -func resourceBigtableInstanceDestroy(d *resource_bigtable_instance_schema.ResourceData, meta interface{}) error { - if d.Get("deletion_protection").(bool) { - return resource_bigtable_instance_fmt.Errorf("cannot destroy instance without setting deletion_protection=false and running `terraform apply`") - } - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - ctx := resource_bigtable_instance_context.Background() - - project, err := getProject(d, config) - if err != nil { - return err - } - - c, err := config.BigTableClientFactory(userAgent).NewInstanceAdminClient(project) - if err != nil { - return resource_bigtable_instance_fmt.Errorf("Error starting instance admin client. %s", err) - } - - defer c.Close() - - name := d.Get("name").(string) - err = c.DeleteInstance(ctx, name) - if err != nil { - return resource_bigtable_instance_fmt.Errorf("Error deleting instance. 
%s", err) - } - - d.SetId("") - - return nil -} - -func flattenBigtableCluster(c *resource_bigtable_instance_bigtable.ClusterInfo) map[string]interface{} { - var storageType string - switch c.StorageType { - case resource_bigtable_instance_bigtable.SSD: - storageType = "SSD" - case resource_bigtable_instance_bigtable.HDD: - storageType = "HDD" - } - - return map[string]interface{}{ - "zone": c.Zone, - "num_nodes": c.ServeNodes, - "cluster_id": c.Name, - "storage_type": storageType, - "kms_key_name": c.KMSKeyName, - } -} - -func expandBigtableClusters(clusters []interface{}, instanceID string, config *Config) ([]resource_bigtable_instance_bigtable.ClusterConfig, error) { - results := make([]resource_bigtable_instance_bigtable.ClusterConfig, 0, len(clusters)) - for _, c := range clusters { - cluster := c.(map[string]interface{}) - zone, err := getBigtableZone(cluster["zone"].(string), config) - if err != nil { - return nil, err - } - var storageType resource_bigtable_instance_bigtable.StorageType - switch cluster["storage_type"].(string) { - case "SSD": - storageType = resource_bigtable_instance_bigtable.SSD - case "HDD": - storageType = resource_bigtable_instance_bigtable.HDD - } - results = append(results, resource_bigtable_instance_bigtable.ClusterConfig{ - InstanceID: instanceID, - Zone: zone, - ClusterID: cluster["cluster_id"].(string), - NumNodes: int32(cluster["num_nodes"].(int)), - StorageType: storageType, - KMSKeyName: cluster["kms_key_name"].(string), - }) - } - return results, nil -} - -func getBigtableZone(z string, config *Config) (string, error) { - if z == "" { - if config.Zone != "" { - return config.Zone, nil - } - return "", resource_bigtable_instance_fmt.Errorf("cannot determine zone: set in cluster.0.zone, or set provider-level zone") - } - return GetResourceNameFromSelfLink(z), nil -} - -func resourceBigtableInstanceClusterReorderTypeList(_ resource_bigtable_instance_context.Context, diff *resource_bigtable_instance_schema.ResourceDiff, meta 
interface{}) error { - oldCount, newCount := diff.GetChange("cluster.#") - - if newCount.(int) < 1 { - return resource_bigtable_instance_fmt.Errorf("config is invalid: Too few cluster blocks: Should have at least 1 \"cluster\" block") - } - - n, _ := diff.GetChange("name") - if n == nil || n == "" { - return nil - } - - oldIds := []string{} - clusters := make(map[string]interface{}, newCount.(int)) - - for i := 0; i < oldCount.(int); i++ { - oldId, _ := diff.GetChange(resource_bigtable_instance_fmt.Sprintf("cluster.%d.cluster_id", i)) - if oldId != nil && oldId != "" { - oldIds = append(oldIds, oldId.(string)) - } - } - resource_bigtable_instance_log.Printf("[DEBUG] Saw old ids: %#v", oldIds) - - for i := 0; i < newCount.(int); i++ { - _, newId := diff.GetChange(resource_bigtable_instance_fmt.Sprintf("cluster.%d.cluster_id", i)) - _, c := diff.GetChange(resource_bigtable_instance_fmt.Sprintf("cluster.%d", i)) - clusters[newId.(string)] = c - } - - var orderedClusters []interface{} - for i := 0; i < newCount.(int); i++ { - - if i >= len(oldIds) { - orderedClusters = append(orderedClusters, nil) - continue - } - - oldId := oldIds[i] - if c, ok := clusters[oldId]; ok { - resource_bigtable_instance_log.Printf("[DEBUG] Matched: %#v", oldId) - orderedClusters = append(orderedClusters, c) - delete(clusters, oldId) - } else { - orderedClusters = append(orderedClusters, nil) - } - } - - resource_bigtable_instance_log.Printf("[DEBUG] Remaining clusters: %#v", clusters) - for _, elem := range clusters { - for i, e := range orderedClusters { - if e == nil { - orderedClusters[i] = elem - } - } - } - - err := diff.SetNew("cluster", orderedClusters) - if err != nil { - return resource_bigtable_instance_fmt.Errorf("Error setting cluster diff: %s", err) - } - - for i := 0; i < newCount.(int); i++ { - oldId, newId := diff.GetChange(resource_bigtable_instance_fmt.Sprintf("cluster.%d.cluster_id", i)) - if oldId != newId { - continue - } - - oZone, nZone := 
diff.GetChange(resource_bigtable_instance_fmt.Sprintf("cluster.%d.zone", i)) - if oZone != nZone { - err := diff.ForceNew(resource_bigtable_instance_fmt.Sprintf("cluster.%d.zone", i)) - if err != nil { - return resource_bigtable_instance_fmt.Errorf("Error setting cluster diff: %s", err) - } - } - - oST, nST := diff.GetChange(resource_bigtable_instance_fmt.Sprintf("cluster.%d.storage_type", i)) - if oST != nST { - err := diff.ForceNew(resource_bigtable_instance_fmt.Sprintf("cluster.%d.storage_type", i)) - if err != nil { - return resource_bigtable_instance_fmt.Errorf("Error setting cluster diff: %s", err) - } - } - } - - return nil -} - -func resourceBigtableInstanceImport(d *resource_bigtable_instance_schema.ResourceData, meta interface{}) ([]*resource_bigtable_instance_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/instances/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") - if err != nil { - return nil, resource_bigtable_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_bigtable_instance_schema.ResourceData{d}, nil -} - -func resourceBigtableInstanceResourceV0() *resource_bigtable_instance_migrate_schema.Resource { - return &resource_bigtable_instance_migrate_schema.Resource{ - Schema: map[string]*resource_bigtable_instance_migrate_schema.Schema{ - "name": { - Type: resource_bigtable_instance_migrate_schema.TypeString, - Required: true, - ForceNew: true, - }, - - "cluster": { - Type: resource_bigtable_instance_migrate_schema.TypeList, - Optional: true, - Computed: true, - Elem: &resource_bigtable_instance_migrate_schema.Resource{ - Schema: map[string]*resource_bigtable_instance_migrate_schema.Schema{ - "cluster_id": { - Type: resource_bigtable_instance_migrate_schema.TypeString, - Required: true, - }, - "zone": { - 
Type: resource_bigtable_instance_migrate_schema.TypeString, - Required: true, - }, - "num_nodes": { - Type: resource_bigtable_instance_migrate_schema.TypeInt, - Optional: true, - - Computed: true, - ValidateFunc: resource_bigtable_instance_migrate_validation.IntAtLeast(1), - }, - "storage_type": { - Type: resource_bigtable_instance_migrate_schema.TypeString, - Optional: true, - Default: "SSD", - ValidateFunc: resource_bigtable_instance_migrate_validation.StringInSlice([]string{"SSD", "HDD"}, false), - }, - }, - }, - }, - "display_name": { - Type: resource_bigtable_instance_migrate_schema.TypeString, - Optional: true, - Computed: true, - }, - - "instance_type": { - Type: resource_bigtable_instance_migrate_schema.TypeString, - Optional: true, - Default: "PRODUCTION", - ValidateFunc: resource_bigtable_instance_migrate_validation.StringInSlice([]string{"DEVELOPMENT", "PRODUCTION"}, false), - }, - - "project": { - Type: resource_bigtable_instance_migrate_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigtableInstanceUpgradeV0(_ resource_bigtable_instance_migrate_context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { - resource_bigtable_instance_migrate_log.Printf("[DEBUG] Attributes before migration: %#v", rawState) - - rawState["deletion_protection"] = true - - resource_bigtable_instance_migrate_log.Printf("[DEBUG] Attributes after migration: %#v", rawState) - return rawState, nil -} - -func resourceBigtableTable() *resource_bigtable_table_schema.Resource { - return &resource_bigtable_table_schema.Resource{ - Create: resourceBigtableTableCreate, - Read: resourceBigtableTableRead, - Update: resourceBigtableTableUpdate, - Delete: resourceBigtableTableDestroy, - - Importer: &resource_bigtable_table_schema.ResourceImporter{ - State: resourceBigtableTableImport, - }, - - Schema: map[string]*resource_bigtable_table_schema.Schema{ - "name": { - 
Type: resource_bigtable_table_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the table.`, - }, - - "column_family": { - Type: resource_bigtable_table_schema.TypeSet, - Optional: true, - Description: `A group of columns within a table which share a common configuration. This can be specified multiple times.`, - Elem: &resource_bigtable_table_schema.Resource{ - Schema: map[string]*resource_bigtable_table_schema.Schema{ - "family": { - Type: resource_bigtable_table_schema.TypeString, - Required: true, - Description: `The name of the column family.`, - }, - }, - }, - }, - - "instance_name": { - Type: resource_bigtable_table_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareResourceNames, - Description: `The name of the Bigtable instance.`, - }, - - "split_keys": { - Type: resource_bigtable_table_schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &resource_bigtable_table_schema.Schema{Type: resource_bigtable_table_schema.TypeString}, - Description: `A list of predefined keys to split the table on. !> Warning: Modifying the split_keys of an existing table will cause Terraform to delete/recreate the entire google_bigtable_table resource.`, - }, - - "project": { - Type: resource_bigtable_table_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBigtableTableCreate(d *resource_bigtable_table_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - ctx := resource_bigtable_table_context.Background() - - project, err := getProject(d, config) - if err != nil { - return err - } - - instanceName := GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return resource_bigtable_table_fmt.Errorf("Error starting admin client. %s", err) - } - if err := d.Set("instance_name", instanceName); err != nil { - return resource_bigtable_table_fmt.Errorf("Error setting instance_name: %s", err) - } - - defer c.Close() - - name := d.Get("name").(string) - if v, ok := d.GetOk("split_keys"); ok { - splitKeys := convertStringArr(v.([]interface{})) - - err = c.CreatePresplitTable(ctx, name, splitKeys) - if err != nil { - return resource_bigtable_table_fmt.Errorf("Error creating presplit table. %s", err) - } - } else { - - err = c.CreateTable(ctx, name) - if err != nil { - return resource_bigtable_table_fmt.Errorf("Error creating table. %s", err) - } - } - - if d.Get("column_family.#").(int) > 0 { - columns := d.Get("column_family").(*resource_bigtable_table_schema.Set).List() - - for _, co := range columns { - column := co.(map[string]interface{}) - - if v, ok := column["family"]; ok { - if err := c.CreateColumnFamily(ctx, name, v.(string)); err != nil { - return resource_bigtable_table_fmt.Errorf("Error creating column family %s. 
%s", v, err) - } - } - } - } - - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance_name}}/tables/{{name}}") - if err != nil { - return resource_bigtable_table_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return resourceBigtableTableRead(d, meta) -} - -func resourceBigtableTableRead(d *resource_bigtable_table_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - ctx := resource_bigtable_table_context.Background() - - project, err := getProject(d, config) - if err != nil { - return err - } - - instanceName := GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return resource_bigtable_table_fmt.Errorf("Error starting admin client. %s", err) - } - - defer c.Close() - - name := d.Get("name").(string) - table, err := c.TableInfo(ctx, name) - if err != nil { - resource_bigtable_table_log.Printf("[WARN] Removing %s because it's gone", name) - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_bigtable_table_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("column_family", flattenColumnFamily(table.Families)); err != nil { - return resource_bigtable_table_fmt.Errorf("Error setting column_family: %s", err) - } - - return nil -} - -func resourceBigtableTableUpdate(d *resource_bigtable_table_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - ctx := resource_bigtable_table_context.Background() - - project, err := getProject(d, config) - if err != nil { - return err - } - - instanceName := GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := 
config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return resource_bigtable_table_fmt.Errorf("Error starting admin client. %s", err) - } - defer c.Close() - - o, n := d.GetChange("column_family") - oSet := o.(*resource_bigtable_table_schema.Set) - nSet := n.(*resource_bigtable_table_schema.Set) - name := d.Get("name").(string) - - for _, new := range nSet.Difference(oSet).List() { - column := new.(map[string]interface{}) - - if v, ok := column["family"]; ok { - resource_bigtable_table_log.Printf("[DEBUG] adding column family %q", v) - if err := c.CreateColumnFamily(ctx, name, v.(string)); err != nil { - return resource_bigtable_table_fmt.Errorf("Error creating column family %q: %s", v, err) - } - } - } - - for _, old := range oSet.Difference(nSet).List() { - column := old.(map[string]interface{}) - - if v, ok := column["family"]; ok { - resource_bigtable_table_log.Printf("[DEBUG] removing column family %q", v) - if err := c.DeleteColumnFamily(ctx, name, v.(string)); err != nil { - return resource_bigtable_table_fmt.Errorf("Error deleting column family %q: %s", v, err) - } - } - } - - return resourceBigtableTableRead(d, meta) -} - -func resourceBigtableTableDestroy(d *resource_bigtable_table_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - ctx := resource_bigtable_table_context.Background() - - project, err := getProject(d, config) - if err != nil { - return err - } - - instanceName := GetResourceNameFromSelfLink(d.Get("instance_name").(string)) - c, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, instanceName) - if err != nil { - return resource_bigtable_table_fmt.Errorf("Error starting admin client. 
%s", err) - } - - defer c.Close() - - name := d.Get("name").(string) - err = c.DeleteTable(ctx, name) - if err != nil { - return resource_bigtable_table_fmt.Errorf("Error deleting table. %s", err) - } - - d.SetId("") - - return nil -} - -func flattenColumnFamily(families []string) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(families)) - - for _, f := range families { - data := make(map[string]interface{}) - data["family"] = f - result = append(result, data) - } - - return result -} - -func resourceBigtableTableImport(d *resource_bigtable_table_schema.ResourceData, meta interface{}) ([]*resource_bigtable_table_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/instances/(?P[^/]+)/tables/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance_name}}/tables/{{name}}") - if err != nil { - return nil, resource_bigtable_table_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_bigtable_table_schema.ResourceData{d}, nil -} - -func resourceBillingBudget() *resource_billing_budget_schema.Resource { - return &resource_billing_budget_schema.Resource{ - Create: resourceBillingBudgetCreate, - Read: resourceBillingBudgetRead, - Update: resourceBillingBudgetUpdate, - Delete: resourceBillingBudgetDelete, - - Importer: &resource_billing_budget_schema.ResourceImporter{ - State: resourceBillingBudgetImport, - }, - - Timeouts: &resource_billing_budget_schema.ResourceTimeout{ - Create: resource_billing_budget_schema.DefaultTimeout(4 * resource_billing_budget_time.Minute), - Update: resource_billing_budget_schema.DefaultTimeout(4 * resource_billing_budget_time.Minute), - Delete: resource_billing_budget_schema.DefaultTimeout(4 * resource_billing_budget_time.Minute), - }, - - SchemaVersion: 1, - StateUpgraders: 
[]resource_billing_budget_schema.StateUpgrader{ - { - Type: resourceBillingBudgetResourceV0().CoreConfigSchema().ImpliedType(), - Upgrade: resourceBillingBudgetUpgradeV0, - Version: 0, - }, - }, - - Schema: map[string]*resource_billing_budget_schema.Schema{ - "amount": { - Type: resource_billing_budget_schema.TypeList, - Required: true, - Description: `The budgeted amount for each usage period.`, - MaxItems: 1, - Elem: &resource_billing_budget_schema.Resource{ - Schema: map[string]*resource_billing_budget_schema.Schema{ - "last_period_amount": { - Type: resource_billing_budget_schema.TypeBool, - Optional: true, - Description: `Configures a budget amount that is automatically set to 100% of -last period's spend. -Boolean. Set value to true to use. Do not set to false, instead -use the 'specified_amount' block.`, - ExactlyOneOf: []string{"amount.0.specified_amount", "amount.0.last_period_amount"}, - }, - "specified_amount": { - Type: resource_billing_budget_schema.TypeList, - Optional: true, - Description: `A specified amount to use as the budget. currencyCode is -optional. If specified, it must match the currency of the -billing account. The currencyCode is provided on output.`, - MaxItems: 1, - Elem: &resource_billing_budget_schema.Resource{ - Schema: map[string]*resource_billing_budget_schema.Schema{ - "currency_code": { - Type: resource_billing_budget_schema.TypeString, - Computed: true, - Optional: true, - Description: `The 3-letter currency code defined in ISO 4217.`, - }, - "nanos": { - Type: resource_billing_budget_schema.TypeInt, - Optional: true, - Description: `Number of nano (10^-9) units of the amount. -The value must be between -999,999,999 and +999,999,999 -inclusive. If units is positive, nanos must be positive or -zero. If units is zero, nanos can be positive, zero, or -negative. If units is negative, nanos must be negative or -zero. 
For example $-1.75 is represented as units=-1 and -nanos=-750,000,000.`, - }, - "units": { - Type: resource_billing_budget_schema.TypeString, - Optional: true, - Description: `The whole units of the amount. For example if currencyCode -is "USD", then 1 unit is one US dollar.`, - }, - }, - }, - ExactlyOneOf: []string{"amount.0.specified_amount", "amount.0.last_period_amount"}, - }, - }, - }, - }, - "billing_account": { - Type: resource_billing_budget_schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the billing account to set a budget on.`, - }, - "threshold_rules": { - Type: resource_billing_budget_schema.TypeList, - Required: true, - Description: `Rules that trigger alerts (notifications of thresholds being -crossed) when spend exceeds the specified percentages of the -budget.`, - Elem: &resource_billing_budget_schema.Resource{ - Schema: map[string]*resource_billing_budget_schema.Schema{ - "threshold_percent": { - Type: resource_billing_budget_schema.TypeFloat, - Required: true, - Description: `Send an alert when this threshold is exceeded. This is a -1.0-based percentage, so 0.5 = 50%. Must be >= 0.`, - }, - "spend_basis": { - Type: resource_billing_budget_schema.TypeString, - Optional: true, - ValidateFunc: resource_billing_budget_validation.StringInSlice([]string{"CURRENT_SPEND", "FORECASTED_SPEND", ""}, false), - Description: `The type of basis used to determine if spend has passed -the threshold. 
Default value: "CURRENT_SPEND" Possible values: ["CURRENT_SPEND", "FORECASTED_SPEND"]`, - Default: "CURRENT_SPEND", - }, - }, - }, - }, - "all_updates_rule": { - Type: resource_billing_budget_schema.TypeList, - Optional: true, - Description: `Defines notifications that are sent on every update to the -billing account's spend, regardless of the thresholds defined -using threshold rules.`, - MaxItems: 1, - Elem: &resource_billing_budget_schema.Resource{ - Schema: map[string]*resource_billing_budget_schema.Schema{ - "disable_default_iam_recipients": { - Type: resource_billing_budget_schema.TypeBool, - Optional: true, - Description: `Boolean. When set to true, disables default notifications sent -when a threshold is exceeded. Default recipients are -those with Billing Account Administrators and Billing -Account Users IAM roles for the target account.`, - Default: false, - }, - "monitoring_notification_channels": { - Type: resource_billing_budget_schema.TypeList, - Optional: true, - Description: `The full resource name of a monitoring notification -channel in the form -projects/{project_id}/notificationChannels/{channel_id}. -A maximum of 5 channels are allowed.`, - Elem: &resource_billing_budget_schema.Schema{ - Type: resource_billing_budget_schema.TypeString, - }, - AtLeastOneOf: []string{"all_updates_rule.0.pubsub_topic", "all_updates_rule.0.monitoring_notification_channels"}, - }, - "pubsub_topic": { - Type: resource_billing_budget_schema.TypeString, - Optional: true, - Description: `The name of the Cloud Pub/Sub topic where budget related -messages will be published, in the form -projects/{project_id}/topics/{topic_id}. Updates are sent -at regular intervals to the topic.`, - AtLeastOneOf: []string{"all_updates_rule.0.pubsub_topic", "all_updates_rule.0.monitoring_notification_channels"}, - }, - "schema_version": { - Type: resource_billing_budget_schema.TypeString, - Optional: true, - Description: `The schema version of the notification. Only "1.0" is -accepted. 
It represents the JSON schema as defined in -https://cloud.google.com/billing/docs/how-to/budgets#notification_format.`, - Default: "1.0", - }, - }, - }, - }, - "budget_filter": { - Type: resource_billing_budget_schema.TypeList, - Computed: true, - Optional: true, - Description: `Filters that define which resources are used to compute the actual -spend against the budget.`, - MaxItems: 1, - Elem: &resource_billing_budget_schema.Resource{ - Schema: map[string]*resource_billing_budget_schema.Schema{ - "credit_types": { - Type: resource_billing_budget_schema.TypeList, - Computed: true, - Optional: true, - Description: `A set of subaccounts of the form billingAccounts/{account_id}, -specifying that usage from only this set of subaccounts should -be included in the budget. If a subaccount is set to the name of -the parent account, usage from the parent account will be included. -If the field is omitted, the report will include usage from the parent -account and all subaccounts, if they exist.`, - Elem: &resource_billing_budget_schema.Schema{ - Type: resource_billing_budget_schema.TypeString, - }, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, - }, - "credit_types_treatment": { - Type: resource_billing_budget_schema.TypeString, - Optional: true, - ValidateFunc: resource_billing_budget_validation.StringInSlice([]string{"INCLUDE_ALL_CREDITS", "EXCLUDE_ALL_CREDITS", "INCLUDE_SPECIFIED_CREDITS", ""}, false), - Description: `Specifies how credits should be treated when determining spend -for threshold calculations. 
Default value: "INCLUDE_ALL_CREDITS" Possible values: ["INCLUDE_ALL_CREDITS", "EXCLUDE_ALL_CREDITS", "INCLUDE_SPECIFIED_CREDITS"]`, - Default: "INCLUDE_ALL_CREDITS", - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, - }, - "labels": { - Type: resource_billing_budget_schema.TypeMap, - Computed: true, - Optional: true, - Description: `A single label and value pair specifying that usage from only -this set of labeled resources should be included in the budget.`, - Elem: &resource_billing_budget_schema.Schema{Type: resource_billing_budget_schema.TypeString}, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, - }, - "projects": { - Type: resource_billing_budget_schema.TypeSet, - Optional: true, - Description: `A set of projects of the form projects/{project_number}, -specifying that usage from only this set of projects should be -included in the budget. If omitted, the report will include -all usage for the billing account, regardless of which project -the usage occurred on.`, - Elem: &resource_billing_budget_schema.Schema{ - Type: resource_billing_budget_schema.TypeString, - }, - Set: resource_billing_budget_schema.HashString, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, - }, - "services": { - Type: resource_billing_budget_schema.TypeList, - Computed: true, - Optional: true, - Description: `A set of services of the form services/{service_id}, -specifying that usage from only this set of services should be -included in the budget. If omitted, the report will include -usage for all the services. 
The service names are available -through the Catalog API: -https://cloud.google.com/billing/v1/how-tos/catalog-api.`, - Elem: &resource_billing_budget_schema.Schema{ - Type: resource_billing_budget_schema.TypeString, - }, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, - }, - "subaccounts": { - Type: resource_billing_budget_schema.TypeList, - Computed: true, - Optional: true, - Description: `A set of subaccounts of the form billingAccounts/{account_id}, -specifying that usage from only this set of subaccounts should -be included in the budget. If a subaccount is set to the name of -the parent account, usage from the parent account will be included. -If the field is omitted, the report will include usage from the parent -account and all subaccounts, if they exist.`, - Elem: &resource_billing_budget_schema.Schema{ - Type: resource_billing_budget_schema.TypeString, - }, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, - }, - }, - }, - }, - "display_name": { - Type: resource_billing_budget_schema.TypeString, - Optional: true, - Description: `User data for display name in UI. Must be <= 60 chars.`, - }, - "name": { - Type: resource_billing_budget_schema.TypeString, - Computed: true, - Description: `Resource name of the budget. The resource name -implies the scope of a budget. 
Values are of the form -billingAccounts/{billingAccountId}/budgets/{budgetId}.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBillingBudgetCreate(d *resource_billing_budget_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandBillingBudgetDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_billing_budget_reflect.ValueOf(displayNameProp)) && (ok || !resource_billing_budget_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - budgetFilterProp, err := expandBillingBudgetBudgetFilter(d.Get("budget_filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("budget_filter"); !isEmptyValue(resource_billing_budget_reflect.ValueOf(budgetFilterProp)) && (ok || !resource_billing_budget_reflect.DeepEqual(v, budgetFilterProp)) { - obj["budgetFilter"] = budgetFilterProp - } - amountProp, err := expandBillingBudgetAmount(d.Get("amount"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("amount"); !isEmptyValue(resource_billing_budget_reflect.ValueOf(amountProp)) && (ok || !resource_billing_budget_reflect.DeepEqual(v, amountProp)) { - obj["amount"] = amountProp - } - thresholdRulesProp, err := expandBillingBudgetThresholdRules(d.Get("threshold_rules"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("threshold_rules"); !isEmptyValue(resource_billing_budget_reflect.ValueOf(thresholdRulesProp)) && (ok || !resource_billing_budget_reflect.DeepEqual(v, thresholdRulesProp)) { - obj["thresholdRules"] = thresholdRulesProp - } - notificationsRuleProp, err := expandBillingBudgetAllUpdatesRule(d.Get("all_updates_rule"), d, config) - if err != nil { - return err - } else if v, 
ok := d.GetOkExists("all_updates_rule"); !isEmptyValue(resource_billing_budget_reflect.ValueOf(notificationsRuleProp)) && (ok || !resource_billing_budget_reflect.DeepEqual(v, notificationsRuleProp)) { - obj["notificationsRule"] = notificationsRuleProp - } - - url, err := replaceVars(d, config, "{{BillingBasePath}}billingAccounts/{{billing_account}}/budgets") - if err != nil { - return err - } - - resource_billing_budget_log.Printf("[DEBUG] Creating new Budget: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_billing_budget_schema.TimeoutCreate)) - if err != nil { - return resource_billing_budget_fmt.Errorf("Error creating Budget: %s", err) - } - if err := d.Set("name", flattenBillingBudgetName(res["name"], d, config)); err != nil { - return resource_billing_budget_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "billingAccounts/{{billing_account}}/budgets/{{name}}") - if err != nil { - return resource_billing_budget_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_billing_budget_log.Printf("[DEBUG] Finished creating Budget %q: %#v", d.Id(), res) - - return resourceBillingBudgetRead(d, meta) -} - -func resourceBillingBudgetRead(d *resource_billing_budget_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BillingBasePath}}billingAccounts/{{billing_account}}/budgets/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return 
handleNotFoundError(err, d, resource_billing_budget_fmt.Sprintf("BillingBudget %q", d.Id())) - } - - if err := d.Set("name", flattenBillingBudgetName(res["name"], d, config)); err != nil { - return resource_billing_budget_fmt.Errorf("Error reading Budget: %s", err) - } - if err := d.Set("display_name", flattenBillingBudgetDisplayName(res["displayName"], d, config)); err != nil { - return resource_billing_budget_fmt.Errorf("Error reading Budget: %s", err) - } - if err := d.Set("budget_filter", flattenBillingBudgetBudgetFilter(res["budgetFilter"], d, config)); err != nil { - return resource_billing_budget_fmt.Errorf("Error reading Budget: %s", err) - } - if err := d.Set("amount", flattenBillingBudgetAmount(res["amount"], d, config)); err != nil { - return resource_billing_budget_fmt.Errorf("Error reading Budget: %s", err) - } - if err := d.Set("threshold_rules", flattenBillingBudgetThresholdRules(res["thresholdRules"], d, config)); err != nil { - return resource_billing_budget_fmt.Errorf("Error reading Budget: %s", err) - } - if err := d.Set("all_updates_rule", flattenBillingBudgetAllUpdatesRule(res["notificationsRule"], d, config)); err != nil { - return resource_billing_budget_fmt.Errorf("Error reading Budget: %s", err) - } - - return nil -} - -func resourceBillingBudgetUpdate(d *resource_billing_budget_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandBillingBudgetDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_billing_budget_reflect.ValueOf(v)) && (ok || !resource_billing_budget_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - budgetFilterProp, err := expandBillingBudgetBudgetFilter(d.Get("budget_filter"), 
d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("budget_filter"); !isEmptyValue(resource_billing_budget_reflect.ValueOf(v)) && (ok || !resource_billing_budget_reflect.DeepEqual(v, budgetFilterProp)) { - obj["budgetFilter"] = budgetFilterProp - } - amountProp, err := expandBillingBudgetAmount(d.Get("amount"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("amount"); !isEmptyValue(resource_billing_budget_reflect.ValueOf(v)) && (ok || !resource_billing_budget_reflect.DeepEqual(v, amountProp)) { - obj["amount"] = amountProp - } - thresholdRulesProp, err := expandBillingBudgetThresholdRules(d.Get("threshold_rules"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("threshold_rules"); !isEmptyValue(resource_billing_budget_reflect.ValueOf(v)) && (ok || !resource_billing_budget_reflect.DeepEqual(v, thresholdRulesProp)) { - obj["thresholdRules"] = thresholdRulesProp - } - notificationsRuleProp, err := expandBillingBudgetAllUpdatesRule(d.Get("all_updates_rule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("all_updates_rule"); !isEmptyValue(resource_billing_budget_reflect.ValueOf(v)) && (ok || !resource_billing_budget_reflect.DeepEqual(v, notificationsRuleProp)) { - obj["notificationsRule"] = notificationsRuleProp - } - - url, err := replaceVars(d, config, "{{BillingBasePath}}billingAccounts/{{billing_account}}/budgets/{{name}}") - if err != nil { - return err - } - - resource_billing_budget_log.Printf("[DEBUG] Updating Budget %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("budget_filter") { - updateMask = append(updateMask, "budgetFilter.projects") - } - - if d.HasChange("amount") { - updateMask = append(updateMask, "amount.specifiedAmount.currencyCode", - "amount.specifiedAmount.units", - "amount.specifiedAmount.nanos") - } - - if 
d.HasChange("threshold_rules") { - updateMask = append(updateMask, "thresholdRules") - } - - if d.HasChange("all_updates_rule") { - updateMask = append(updateMask, "notificationsRule.pubsubTopic", - "notificationsRule.schemaVersion", - "notificationsRule.monitoringNotificationChannels", - "notificationsRule.disableDefaultIamRecipients") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_billing_budget_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_billing_budget_schema.TimeoutUpdate)) - - if err != nil { - return resource_billing_budget_fmt.Errorf("Error updating Budget %q: %s", d.Id(), err) - } else { - resource_billing_budget_log.Printf("[DEBUG] Finished updating Budget %q: %#v", d.Id(), res) - } - - return resourceBillingBudgetRead(d, meta) -} - -func resourceBillingBudgetDelete(d *resource_billing_budget_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{BillingBasePath}}billingAccounts/{{billing_account}}/budgets/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_billing_budget_log.Printf("[DEBUG] Deleting Budget %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_billing_budget_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Budget") - } - - resource_billing_budget_log.Printf("[DEBUG] Finished deleting Budget %q: %#v", d.Id(), res) - return nil -} - -func resourceBillingBudgetImport(d 
*resource_billing_budget_schema.ResourceData, meta interface{}) ([]*resource_billing_budget_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "billingAccounts/(?P[^/]+)/budgets/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "billingAccounts/{{billing_account}}/budgets/{{name}}") - if err != nil { - return nil, resource_billing_budget_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_billing_budget_schema.ResourceData{d}, nil -} - -func flattenBillingBudgetName(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenBillingBudgetDisplayName(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBillingBudgetBudgetFilter(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["projects"] = - flattenBillingBudgetBudgetFilterProjects(original["projects"], d, config) - transformed["credit_types_treatment"] = - flattenBillingBudgetBudgetFilterCreditTypesTreatment(original["creditTypesTreatment"], d, config) - transformed["services"] = - flattenBillingBudgetBudgetFilterServices(original["services"], d, config) - transformed["credit_types"] = - flattenBillingBudgetBudgetFilterCreditTypes(original["creditTypes"], d, config) - transformed["subaccounts"] = - flattenBillingBudgetBudgetFilterSubaccounts(original["subaccounts"], d, config) - transformed["labels"] = - flattenBillingBudgetBudgetFilterLabels(original["labels"], d, config) - return []interface{}{transformed} -} - -func 
flattenBillingBudgetBudgetFilterProjects(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_billing_budget_schema.NewSet(resource_billing_budget_schema.HashString, v.([]interface{})) -} - -func flattenBillingBudgetBudgetFilterCreditTypesTreatment(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBillingBudgetBudgetFilterServices(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBillingBudgetBudgetFilterCreditTypes(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBillingBudgetBudgetFilterSubaccounts(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBillingBudgetBudgetFilterLabels(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - for key, val := range original { - l := val.([]interface{}) - for _, v := range l { - transformed[key] = v.(string) - } - } - return transformed -} - -func flattenBillingBudgetAmount(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["specified_amount"] = - flattenBillingBudgetAmountSpecifiedAmount(original["specifiedAmount"], d, config) - transformed["last_period_amount"] = - flattenBillingBudgetAmountLastPeriodAmount(original["lastPeriodAmount"], d, config) - return []interface{}{transformed} -} - -func flattenBillingBudgetAmountSpecifiedAmount(v 
interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["currency_code"] = - flattenBillingBudgetAmountSpecifiedAmountCurrencyCode(original["currencyCode"], d, config) - transformed["units"] = - flattenBillingBudgetAmountSpecifiedAmountUnits(original["units"], d, config) - transformed["nanos"] = - flattenBillingBudgetAmountSpecifiedAmountNanos(original["nanos"], d, config) - return []interface{}{transformed} -} - -func flattenBillingBudgetAmountSpecifiedAmountCurrencyCode(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBillingBudgetAmountSpecifiedAmountUnits(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBillingBudgetAmountSpecifiedAmountNanos(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_billing_budget_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenBillingBudgetAmountLastPeriodAmount(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - return v != nil -} - -func flattenBillingBudgetThresholdRules(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "threshold_percent": 
flattenBillingBudgetThresholdRulesThresholdPercent(original["thresholdPercent"], d, config), - "spend_basis": flattenBillingBudgetThresholdRulesSpendBasis(original["spendBasis"], d, config), - }) - } - return transformed -} - -func flattenBillingBudgetThresholdRulesThresholdPercent(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBillingBudgetThresholdRulesSpendBasis(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBillingBudgetAllUpdatesRule(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pubsub_topic"] = - flattenBillingBudgetAllUpdatesRulePubsubTopic(original["pubsubTopic"], d, config) - transformed["schema_version"] = - flattenBillingBudgetAllUpdatesRuleSchemaVersion(original["schemaVersion"], d, config) - transformed["monitoring_notification_channels"] = - flattenBillingBudgetAllUpdatesRuleMonitoringNotificationChannels(original["monitoringNotificationChannels"], d, config) - transformed["disable_default_iam_recipients"] = - flattenBillingBudgetAllUpdatesRuleDisableDefaultIamRecipients(original["disableDefaultIamRecipients"], d, config) - return []interface{}{transformed} -} - -func flattenBillingBudgetAllUpdatesRulePubsubTopic(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBillingBudgetAllUpdatesRuleSchemaVersion(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(resource_billing_budget_reflect.ValueOf(v)) { - return "1.0" - } - - return v -} - -func flattenBillingBudgetAllUpdatesRuleMonitoringNotificationChannels(v interface{}, d 
*resource_billing_budget_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBillingBudgetAllUpdatesRuleDisableDefaultIamRecipients(v interface{}, d *resource_billing_budget_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBillingBudgetDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBillingBudgetBudgetFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjects, err := expandBillingBudgetBudgetFilterProjects(original["projects"], d, config) - if err != nil { - return nil, err - } else if val := resource_billing_budget_reflect.ValueOf(transformedProjects); val.IsValid() && !isEmptyValue(val) { - transformed["projects"] = transformedProjects - } - - transformedCreditTypesTreatment, err := expandBillingBudgetBudgetFilterCreditTypesTreatment(original["credit_types_treatment"], d, config) - if err != nil { - return nil, err - } else if val := resource_billing_budget_reflect.ValueOf(transformedCreditTypesTreatment); val.IsValid() && !isEmptyValue(val) { - transformed["creditTypesTreatment"] = transformedCreditTypesTreatment - } - - transformedServices, err := expandBillingBudgetBudgetFilterServices(original["services"], d, config) - if err != nil { - return nil, err - } else if val := resource_billing_budget_reflect.ValueOf(transformedServices); val.IsValid() && !isEmptyValue(val) { - transformed["services"] = transformedServices - } - - transformedCreditTypes, err := expandBillingBudgetBudgetFilterCreditTypes(original["credit_types"], d, config) - if err != nil { - return nil, err - } else if val := resource_billing_budget_reflect.ValueOf(transformedCreditTypes); val.IsValid() && !isEmptyValue(val) { - 
transformed["creditTypes"] = transformedCreditTypes - } - - transformedSubaccounts, err := expandBillingBudgetBudgetFilterSubaccounts(original["subaccounts"], d, config) - if err != nil { - return nil, err - } else if val := resource_billing_budget_reflect.ValueOf(transformedSubaccounts); val.IsValid() && !isEmptyValue(val) { - transformed["subaccounts"] = transformedSubaccounts - } - - transformedLabels, err := expandBillingBudgetBudgetFilterLabels(original["labels"], d, config) - if err != nil { - return nil, err - } else if val := resource_billing_budget_reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - return transformed, nil -} - -func expandBillingBudgetBudgetFilterProjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_billing_budget_schema.Set).List() - return v, nil -} - -func expandBillingBudgetBudgetFilterCreditTypesTreatment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBillingBudgetBudgetFilterServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBillingBudgetBudgetFilterCreditTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBillingBudgetBudgetFilterSubaccounts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBillingBudgetBudgetFilterLabels(v interface{}, d TerraformResourceData, config *Config) (map[string][]string, error) { - if v == nil { - return map[string][]string{}, nil - } - m := make(map[string][]string) - for k, val := range v.(map[string]interface{}) { - m[k] = []string{val.(string)} - } - return m, nil -} - -func expandBillingBudgetAmount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 
0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSpecifiedAmount, err := expandBillingBudgetAmountSpecifiedAmount(original["specified_amount"], d, config) - if err != nil { - return nil, err - } else if val := resource_billing_budget_reflect.ValueOf(transformedSpecifiedAmount); val.IsValid() && !isEmptyValue(val) { - transformed["specifiedAmount"] = transformedSpecifiedAmount - } - - transformedLastPeriodAmount, err := expandBillingBudgetAmountLastPeriodAmount(original["last_period_amount"], d, config) - if err != nil { - return nil, err - } else if val := resource_billing_budget_reflect.ValueOf(transformedLastPeriodAmount); val.IsValid() && !isEmptyValue(val) { - transformed["lastPeriodAmount"] = transformedLastPeriodAmount - } - - return transformed, nil -} - -func expandBillingBudgetAmountSpecifiedAmount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCurrencyCode, err := expandBillingBudgetAmountSpecifiedAmountCurrencyCode(original["currency_code"], d, config) - if err != nil { - return nil, err - } else if val := resource_billing_budget_reflect.ValueOf(transformedCurrencyCode); val.IsValid() && !isEmptyValue(val) { - transformed["currencyCode"] = transformedCurrencyCode - } - - transformedUnits, err := expandBillingBudgetAmountSpecifiedAmountUnits(original["units"], d, config) - if err != nil { - return nil, err - } else if val := resource_billing_budget_reflect.ValueOf(transformedUnits); val.IsValid() && !isEmptyValue(val) { - transformed["units"] = transformedUnits - } - - transformedNanos, err := expandBillingBudgetAmountSpecifiedAmountNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val 
:= resource_billing_budget_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandBillingBudgetAmountSpecifiedAmountCurrencyCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBillingBudgetAmountSpecifiedAmountUnits(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBillingBudgetAmountSpecifiedAmountNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBillingBudgetAmountLastPeriodAmount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil || !v.(bool) { - return nil, nil - } - - return struct{}{}, nil -} - -func expandBillingBudgetThresholdRules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedThresholdPercent, err := expandBillingBudgetThresholdRulesThresholdPercent(original["threshold_percent"], d, config) - if err != nil { - return nil, err - } else { - transformed["thresholdPercent"] = transformedThresholdPercent - } - - transformedSpendBasis, err := expandBillingBudgetThresholdRulesSpendBasis(original["spend_basis"], d, config) - if err != nil { - return nil, err - } else if val := resource_billing_budget_reflect.ValueOf(transformedSpendBasis); val.IsValid() && !isEmptyValue(val) { - transformed["spendBasis"] = transformedSpendBasis - } - - req = append(req, transformed) - } - return req, nil -} - -func expandBillingBudgetThresholdRulesThresholdPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandBillingBudgetThresholdRulesSpendBasis(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBillingBudgetAllUpdatesRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubTopic, err := expandBillingBudgetAllUpdatesRulePubsubTopic(original["pubsub_topic"], d, config) - if err != nil { - return nil, err - } else if val := resource_billing_budget_reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubTopic"] = transformedPubsubTopic - } - - transformedSchemaVersion, err := expandBillingBudgetAllUpdatesRuleSchemaVersion(original["schema_version"], d, config) - if err != nil { - return nil, err - } else if val := resource_billing_budget_reflect.ValueOf(transformedSchemaVersion); val.IsValid() && !isEmptyValue(val) { - transformed["schemaVersion"] = transformedSchemaVersion - } - - transformedMonitoringNotificationChannels, err := expandBillingBudgetAllUpdatesRuleMonitoringNotificationChannels(original["monitoring_notification_channels"], d, config) - if err != nil { - return nil, err - } else if val := resource_billing_budget_reflect.ValueOf(transformedMonitoringNotificationChannels); val.IsValid() && !isEmptyValue(val) { - transformed["monitoringNotificationChannels"] = transformedMonitoringNotificationChannels - } - - transformedDisableDefaultIamRecipients, err := expandBillingBudgetAllUpdatesRuleDisableDefaultIamRecipients(original["disable_default_iam_recipients"], d, config) - if err != nil { - return nil, err - } else if val := resource_billing_budget_reflect.ValueOf(transformedDisableDefaultIamRecipients); val.IsValid() && !isEmptyValue(val) { - transformed["disableDefaultIamRecipients"] = transformedDisableDefaultIamRecipients 
- } - - return transformed, nil -} - -func expandBillingBudgetAllUpdatesRulePubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBillingBudgetAllUpdatesRuleSchemaVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBillingBudgetAllUpdatesRuleMonitoringNotificationChannels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBillingBudgetAllUpdatesRuleDisableDefaultIamRecipients(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceBillingBudgetResourceV0() *resource_billing_budget_schema.Resource { - return &resource_billing_budget_schema.Resource{ - Schema: map[string]*resource_billing_budget_schema.Schema{ - "amount": { - Type: resource_billing_budget_schema.TypeList, - Required: true, - Description: `The budgeted amount for each usage period.`, - MaxItems: 1, - Elem: &resource_billing_budget_schema.Resource{ - Schema: map[string]*resource_billing_budget_schema.Schema{ - "last_period_amount": { - Type: resource_billing_budget_schema.TypeBool, - Optional: true, - Description: `Configures a budget amount that is automatically set to 100% of -last period's spend. -Boolean. Set value to true to use. Do not set to false, instead -use the 'specified_amount' block.`, - ExactlyOneOf: []string{"amount.0.specified_amount", "amount.0.last_period_amount"}, - }, - "specified_amount": { - Type: resource_billing_budget_schema.TypeList, - Optional: true, - Description: `A specified amount to use as the budget. currencyCode is -optional. If specified, it must match the currency of the -billing account. 
The currencyCode is provided on output.`, - MaxItems: 1, - Elem: &resource_billing_budget_schema.Resource{ - Schema: map[string]*resource_billing_budget_schema.Schema{ - "currency_code": { - Type: resource_billing_budget_schema.TypeString, - Computed: true, - Optional: true, - Description: `The 3-letter currency code defined in ISO 4217.`, - }, - "nanos": { - Type: resource_billing_budget_schema.TypeInt, - Optional: true, - Description: `Number of nano (10^-9) units of the amount. -The value must be between -999,999,999 and +999,999,999 -inclusive. If units is positive, nanos must be positive or -zero. If units is zero, nanos can be positive, zero, or -negative. If units is negative, nanos must be negative or -zero. For example $-1.75 is represented as units=-1 and -nanos=-750,000,000.`, - }, - "units": { - Type: resource_billing_budget_schema.TypeString, - Optional: true, - Description: `The whole units of the amount. For example if currencyCode -is "USD", then 1 unit is one US dollar.`, - }, - }, - }, - ExactlyOneOf: []string{"amount.0.specified_amount", "amount.0.last_period_amount"}, - }, - }, - }, - }, - "billing_account": { - Type: resource_billing_budget_schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the billing account to set a budget on.`, - }, - "threshold_rules": { - Type: resource_billing_budget_schema.TypeList, - Required: true, - Description: `Rules that trigger alerts (notifications of thresholds being -crossed) when spend exceeds the specified percentages of the -budget.`, - Elem: &resource_billing_budget_schema.Resource{ - Schema: map[string]*resource_billing_budget_schema.Schema{ - "threshold_percent": { - Type: resource_billing_budget_schema.TypeFloat, - Required: true, - Description: `Send an alert when this threshold is exceeded. This is a -1.0-based percentage, so 0.5 = 50%. 
Must be >= 0.`, - }, - "spend_basis": { - Type: resource_billing_budget_schema.TypeString, - Optional: true, - ValidateFunc: resource_billing_budget_validation.StringInSlice([]string{"CURRENT_SPEND", "FORECASTED_SPEND", ""}, false), - Description: `The type of basis used to determine if spend has passed -the threshold. Default value: "CURRENT_SPEND" Possible values: ["CURRENT_SPEND", "FORECASTED_SPEND"]`, - Default: "CURRENT_SPEND", - }, - }, - }, - }, - "all_updates_rule": { - Type: resource_billing_budget_schema.TypeList, - Optional: true, - Description: `Defines notifications that are sent on every update to the -billing account's spend, regardless of the thresholds defined -using threshold rules.`, - MaxItems: 1, - Elem: &resource_billing_budget_schema.Resource{ - Schema: map[string]*resource_billing_budget_schema.Schema{ - "disable_default_iam_recipients": { - Type: resource_billing_budget_schema.TypeBool, - Optional: true, - Description: `Boolean. When set to true, disables default notifications sent -when a threshold is exceeded. Default recipients are -those with Billing Account Administrators and Billing -Account Users IAM roles for the target account.`, - Default: false, - }, - "monitoring_notification_channels": { - Type: resource_billing_budget_schema.TypeList, - Optional: true, - Description: `The full resource name of a monitoring notification -channel in the form -projects/{project_id}/notificationChannels/{channel_id}. -A maximum of 5 channels are allowed.`, - Elem: &resource_billing_budget_schema.Schema{ - Type: resource_billing_budget_schema.TypeString, - }, - AtLeastOneOf: []string{"all_updates_rule.0.pubsub_topic", "all_updates_rule.0.monitoring_notification_channels"}, - }, - "pubsub_topic": { - Type: resource_billing_budget_schema.TypeString, - Optional: true, - Description: `The name of the Cloud Pub/Sub topic where budget related -messages will be published, in the form -projects/{project_id}/topics/{topic_id}. 
Updates are sent -at regular intervals to the topic.`, - AtLeastOneOf: []string{"all_updates_rule.0.pubsub_topic", "all_updates_rule.0.monitoring_notification_channels"}, - }, - "schema_version": { - Type: resource_billing_budget_schema.TypeString, - Optional: true, - Description: `The schema version of the notification. Only "1.0" is -accepted. It represents the JSON schema as defined in -https://cloud.google.com/billing/docs/how-to/budgets#notification_format.`, - Default: "1.0", - }, - }, - }, - }, - "budget_filter": { - Type: resource_billing_budget_schema.TypeList, - Computed: true, - Optional: true, - Description: `Filters that define which resources are used to compute the actual -spend against the budget.`, - MaxItems: 1, - Elem: &resource_billing_budget_schema.Resource{ - Schema: map[string]*resource_billing_budget_schema.Schema{ - "credit_types": { - Type: resource_billing_budget_schema.TypeList, - Computed: true, - Optional: true, - Description: `A set of subaccounts of the form billingAccounts/{account_id}, -specifying that usage from only this set of subaccounts should -be included in the budget. If a subaccount is set to the name of -the parent account, usage from the parent account will be included. 
-If the field is omitted, the report will include usage from the parent -account and all subaccounts, if they exist.`, - Elem: &resource_billing_budget_schema.Schema{ - Type: resource_billing_budget_schema.TypeString, - }, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, - }, - "credit_types_treatment": { - Type: resource_billing_budget_schema.TypeString, - Optional: true, - ValidateFunc: resource_billing_budget_validation.StringInSlice([]string{"INCLUDE_ALL_CREDITS", "EXCLUDE_ALL_CREDITS", "INCLUDE_SPECIFIED_CREDITS", ""}, false), - Description: `Specifies how credits should be treated when determining spend -for threshold calculations. Default value: "INCLUDE_ALL_CREDITS" Possible values: ["INCLUDE_ALL_CREDITS", "EXCLUDE_ALL_CREDITS", "INCLUDE_SPECIFIED_CREDITS"]`, - Default: "INCLUDE_ALL_CREDITS", - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, - }, - "labels": { - Type: resource_billing_budget_schema.TypeMap, - Computed: true, - Optional: true, - Description: `A single label and value pair specifying that usage from only -this set of labeled resources should be included in the budget.`, - Elem: &resource_billing_budget_schema.Schema{Type: resource_billing_budget_schema.TypeString}, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, - }, - "projects": { - Type: resource_billing_budget_schema.TypeList, - Optional: true, - Description: `A set of projects of the form projects/{project_number}, -specifying that usage from only this set of projects should be -included in the budget. 
If omitted, the report will include -all usage for the billing account, regardless of which project -the usage occurred on.`, - Elem: &resource_billing_budget_schema.Schema{ - Type: resource_billing_budget_schema.TypeString, - }, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, - }, - "services": { - Type: resource_billing_budget_schema.TypeList, - Computed: true, - Optional: true, - Description: `A set of services of the form services/{service_id}, -specifying that usage from only this set of services should be -included in the budget. If omitted, the report will include -usage for all the services. The service names are available -through the Catalog API: -https://cloud.google.com/billing/v1/how-tos/catalog-api.`, - Elem: &resource_billing_budget_schema.Schema{ - Type: resource_billing_budget_schema.TypeString, - }, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, - }, - "subaccounts": { - Type: resource_billing_budget_schema.TypeList, - Computed: true, - Optional: true, - Description: `A set of subaccounts of the form billingAccounts/{account_id}, -specifying that usage from only this set of subaccounts should -be included in the budget. If a subaccount is set to the name of -the parent account, usage from the parent account will be included. 
-If the field is omitted, the report will include usage from the parent -account and all subaccounts, if they exist.`, - Elem: &resource_billing_budget_schema.Schema{ - Type: resource_billing_budget_schema.TypeString, - }, - AtLeastOneOf: []string{"budget_filter.0.projects", "budget_filter.0.credit_types_treatment", "budget_filter.0.services", "budget_filter.0.subaccounts", "budget_filter.0.labels"}, - }, - }, - }, - }, - "display_name": { - Type: resource_billing_budget_schema.TypeString, - Optional: true, - Description: `User data for display name in UI. Must be <= 60 chars.`, - }, - "name": { - Type: resource_billing_budget_schema.TypeString, - Computed: true, - Description: `Resource name of the budget. The resource name -implies the scope of a budget. Values are of the form -billingAccounts/{billingAccountId}/budgets/{budgetId}.`, - }, - }, - } -} - -func resourceBillingBudgetUpgradeV0(_ resource_billing_budget_context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { - resource_billing_budget_log.Printf("[DEBUG] Attributes before migration: %#v", rawState) - - rawState["name"] = GetResourceNameFromSelfLink(rawState["name"].(string)) - - resource_billing_budget_log.Printf("[DEBUG] Attributes after migration: %#v", rawState) - return rawState, nil -} - -func compareSignatureAlgorithm(_, old, new string, _ *resource_binary_authorization_attestor_schema.ResourceData) bool { - - normalizedAlgorithms := map[string]string{ - "ECDSA_P256_SHA256": "ECDSA_P256_SHA256", - "EC_SIGN_P256_SHA256": "ECDSA_P256_SHA256", - "ECDSA_P384_SHA384": "ECDSA_P384_SHA384", - "EC_SIGN_P384_SHA384": "ECDSA_P384_SHA384", - "ECDSA_P521_SHA512": "ECDSA_P521_SHA512", - "EC_SIGN_P521_SHA512": "ECDSA_P521_SHA512", - } - - normalizedOld := old - normalizedNew := new - - if normalized, ok := normalizedAlgorithms[old]; ok { - normalizedOld = normalized - } - if normalized, ok := normalizedAlgorithms[new]; ok { - normalizedNew = normalized - } - - if 
normalizedNew == normalizedOld { - return true - } - - return false -} - -func resourceBinaryAuthorizationAttestor() *resource_binary_authorization_attestor_schema.Resource { - return &resource_binary_authorization_attestor_schema.Resource{ - Create: resourceBinaryAuthorizationAttestorCreate, - Read: resourceBinaryAuthorizationAttestorRead, - Update: resourceBinaryAuthorizationAttestorUpdate, - Delete: resourceBinaryAuthorizationAttestorDelete, - - Importer: &resource_binary_authorization_attestor_schema.ResourceImporter{ - State: resourceBinaryAuthorizationAttestorImport, - }, - - Timeouts: &resource_binary_authorization_attestor_schema.ResourceTimeout{ - Create: resource_binary_authorization_attestor_schema.DefaultTimeout(4 * resource_binary_authorization_attestor_time.Minute), - Update: resource_binary_authorization_attestor_schema.DefaultTimeout(4 * resource_binary_authorization_attestor_time.Minute), - Delete: resource_binary_authorization_attestor_schema.DefaultTimeout(4 * resource_binary_authorization_attestor_time.Minute), - }, - - Schema: map[string]*resource_binary_authorization_attestor_schema.Schema{ - "attestation_authority_note": { - Type: resource_binary_authorization_attestor_schema.TypeList, - Required: true, - Description: `A Container Analysis ATTESTATION_AUTHORITY Note, created by the user.`, - MaxItems: 1, - Elem: &resource_binary_authorization_attestor_schema.Resource{ - Schema: map[string]*resource_binary_authorization_attestor_schema.Schema{ - "note_reference": { - Type: resource_binary_authorization_attestor_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The resource name of a ATTESTATION_AUTHORITY Note, created by the -user. If the Note is in a different project from the Attestor, it -should be specified in the format 'projects/*/notes/*' (or the legacy -'providers/*/notes/*'). This field may not be updated. 
-An attestation by this attestor is stored as a Container Analysis -ATTESTATION_AUTHORITY Occurrence that names a container image -and that links to this Note.`, - }, - "public_keys": { - Type: resource_binary_authorization_attestor_schema.TypeList, - Optional: true, - Description: `Public keys that verify attestations signed by this attestor. This -field may be updated. -If this field is non-empty, one of the specified public keys must -verify that an attestation was signed by this attestor for the -image specified in the admission request. -If this field is empty, this attestor always returns that no valid -attestations exist.`, - Elem: &resource_binary_authorization_attestor_schema.Resource{ - Schema: map[string]*resource_binary_authorization_attestor_schema.Schema{ - "ascii_armored_pgp_public_key": { - Type: resource_binary_authorization_attestor_schema.TypeString, - Optional: true, - Description: `ASCII-armored representation of a PGP public key, as the -entire output by the command -'gpg --export --armor foo@example.com' (either LF or CRLF -line endings). When using this field, id should be left -blank. The BinAuthz API handlers will calculate the ID -and fill it in automatically. BinAuthz computes this ID -as the OpenPGP RFC4880 V4 fingerprint, represented as -upper-case hex. If id is provided by the caller, it will -be overwritten by the API-calculated ID.`, - }, - "comment": { - Type: resource_binary_authorization_attestor_schema.TypeString, - Optional: true, - Description: `A descriptive comment. This field may be updated.`, - }, - "id": { - Type: resource_binary_authorization_attestor_schema.TypeString, - Computed: true, - Optional: true, - Description: `The ID of this public key. Signatures verified by BinAuthz -must include the ID of the public key that can be used to -verify them, and that ID must match the contents of this -field exactly. Additional restrictions on this field can -be imposed based on which public key type is encapsulated. 
-See the documentation on publicKey cases below for details.`, - }, - "pkix_public_key": { - Type: resource_binary_authorization_attestor_schema.TypeList, - Optional: true, - Description: `A raw PKIX SubjectPublicKeyInfo format public key. - -NOTE: id may be explicitly provided by the caller when using this -type of public key, but it MUST be a valid RFC3986 URI. If id is left -blank, a default one will be computed based on the digest of the DER -encoding of the public key.`, - MaxItems: 1, - Elem: &resource_binary_authorization_attestor_schema.Resource{ - Schema: map[string]*resource_binary_authorization_attestor_schema.Schema{ - "public_key_pem": { - Type: resource_binary_authorization_attestor_schema.TypeString, - Optional: true, - Description: `A PEM-encoded public key, as described in -'https://tools.ietf.org/html/rfc7468#section-13'`, - }, - "signature_algorithm": { - Type: resource_binary_authorization_attestor_schema.TypeString, - Optional: true, - Description: `The signature algorithm used to verify a message against -a signature using this key. These signature algorithm must -match the structure and any object identifiers encoded in -publicKeyPem (i.e. this algorithm must match that of the -public key).`, - }, - }, - }, - }, - }, - }, - }, - "delegation_service_account_email": { - Type: resource_binary_authorization_attestor_schema.TypeString, - Computed: true, - Description: `This field will contain the service account email address that -this Attestor will use as the principal when querying Container -Analysis. Attestor administrators must grant this service account -the IAM role needed to read attestations from the noteReference in -Container Analysis (containeranalysis.notes.occurrences.viewer). 
-This email address is fixed for the lifetime of the Attestor, but -callers should not make any other assumptions about the service -account email; future versions may use an email based on a -different naming pattern.`, - }, - }, - }, - }, - "name": { - Type: resource_binary_authorization_attestor_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name.`, - }, - "description": { - Type: resource_binary_authorization_attestor_schema.TypeString, - Optional: true, - Description: `A descriptive comment. This field may be updated. The field may be -displayed in chooser dialogs.`, - }, - "project": { - Type: resource_binary_authorization_attestor_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBinaryAuthorizationAttestorCreate(d *resource_binary_authorization_attestor_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandBinaryAuthorizationAttestorName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_binary_authorization_attestor_reflect.ValueOf(nameProp)) && (ok || !resource_binary_authorization_attestor_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandBinaryAuthorizationAttestorDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_binary_authorization_attestor_reflect.ValueOf(descriptionProp)) && (ok || !resource_binary_authorization_attestor_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - userOwnedGrafeasNoteProp, err := expandBinaryAuthorizationAttestorAttestationAuthorityNote(d.Get("attestation_authority_note"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attestation_authority_note"); !isEmptyValue(resource_binary_authorization_attestor_reflect.ValueOf(userOwnedGrafeasNoteProp)) && (ok || !resource_binary_authorization_attestor_reflect.DeepEqual(v, userOwnedGrafeasNoteProp)) { - obj["userOwnedGrafeasNote"] = userOwnedGrafeasNoteProp - } - - url, err := replaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/attestors?attestorId={{name}}") - if err != nil { - return err - } - - resource_binary_authorization_attestor_log.Printf("[DEBUG] Creating new Attestor: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_binary_authorization_attestor_fmt.Errorf("Error fetching project for Attestor: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_binary_authorization_attestor_schema.TimeoutCreate)) - if err != nil { - return resource_binary_authorization_attestor_fmt.Errorf("Error creating Attestor: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/attestors/{{name}}") - if err != nil { - return resource_binary_authorization_attestor_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_binary_authorization_attestor_log.Printf("[DEBUG] Finished creating Attestor %q: %#v", d.Id(), res) - - return resourceBinaryAuthorizationAttestorRead(d, meta) -} - -func resourceBinaryAuthorizationAttestorRead(d *resource_binary_authorization_attestor_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/attestors/{{name}}") - if err != nil { - return 
err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_binary_authorization_attestor_fmt.Errorf("Error fetching project for Attestor: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_binary_authorization_attestor_fmt.Sprintf("BinaryAuthorizationAttestor %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_binary_authorization_attestor_fmt.Errorf("Error reading Attestor: %s", err) - } - - if err := d.Set("name", flattenBinaryAuthorizationAttestorName(res["name"], d, config)); err != nil { - return resource_binary_authorization_attestor_fmt.Errorf("Error reading Attestor: %s", err) - } - if err := d.Set("description", flattenBinaryAuthorizationAttestorDescription(res["description"], d, config)); err != nil { - return resource_binary_authorization_attestor_fmt.Errorf("Error reading Attestor: %s", err) - } - if err := d.Set("attestation_authority_note", flattenBinaryAuthorizationAttestorAttestationAuthorityNote(res["userOwnedGrafeasNote"], d, config)); err != nil { - return resource_binary_authorization_attestor_fmt.Errorf("Error reading Attestor: %s", err) - } - - return nil -} - -func resourceBinaryAuthorizationAttestorUpdate(d *resource_binary_authorization_attestor_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_binary_authorization_attestor_fmt.Errorf("Error fetching project for Attestor: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := 
expandBinaryAuthorizationAttestorName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_binary_authorization_attestor_reflect.ValueOf(v)) && (ok || !resource_binary_authorization_attestor_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandBinaryAuthorizationAttestorDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_binary_authorization_attestor_reflect.ValueOf(v)) && (ok || !resource_binary_authorization_attestor_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - userOwnedGrafeasNoteProp, err := expandBinaryAuthorizationAttestorAttestationAuthorityNote(d.Get("attestation_authority_note"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attestation_authority_note"); !isEmptyValue(resource_binary_authorization_attestor_reflect.ValueOf(v)) && (ok || !resource_binary_authorization_attestor_reflect.DeepEqual(v, userOwnedGrafeasNoteProp)) { - obj["userOwnedGrafeasNote"] = userOwnedGrafeasNoteProp - } - - url, err := replaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/attestors/{{name}}") - if err != nil { - return err - } - - resource_binary_authorization_attestor_log.Printf("[DEBUG] Updating Attestor %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_binary_authorization_attestor_schema.TimeoutUpdate)) - - if err != nil { - return resource_binary_authorization_attestor_fmt.Errorf("Error updating Attestor %q: %s", d.Id(), err) - } else { - resource_binary_authorization_attestor_log.Printf("[DEBUG] Finished updating Attestor %q: %#v", d.Id(), res) - } - - return resourceBinaryAuthorizationAttestorRead(d, 
meta) -} - -func resourceBinaryAuthorizationAttestorDelete(d *resource_binary_authorization_attestor_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_binary_authorization_attestor_fmt.Errorf("Error fetching project for Attestor: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/attestors/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_binary_authorization_attestor_log.Printf("[DEBUG] Deleting Attestor %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_binary_authorization_attestor_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Attestor") - } - - resource_binary_authorization_attestor_log.Printf("[DEBUG] Finished deleting Attestor %q: %#v", d.Id(), res) - return nil -} - -func resourceBinaryAuthorizationAttestorImport(d *resource_binary_authorization_attestor_schema.ResourceData, meta interface{}) ([]*resource_binary_authorization_attestor_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/attestors/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/attestors/{{name}}") - if err != nil { - return nil, resource_binary_authorization_attestor_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_binary_authorization_attestor_schema.ResourceData{d}, nil -} - -func flattenBinaryAuthorizationAttestorName(v interface{}, d 
*resource_binary_authorization_attestor_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenBinaryAuthorizationAttestorDescription(v interface{}, d *resource_binary_authorization_attestor_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNote(v interface{}, d *resource_binary_authorization_attestor_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["note_reference"] = - flattenBinaryAuthorizationAttestorAttestationAuthorityNoteNoteReference(original["noteReference"], d, config) - transformed["public_keys"] = - flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeys(original["publicKeys"], d, config) - transformed["delegation_service_account_email"] = - flattenBinaryAuthorizationAttestorAttestationAuthorityNoteDelegationServiceAccountEmail(original["delegationServiceAccountEmail"], d, config) - return []interface{}{transformed} -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNoteNoteReference(v interface{}, d *resource_binary_authorization_attestor_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeys(v interface{}, d *resource_binary_authorization_attestor_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "comment": flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysComment(original["comment"], d, config), - "id": 
flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysId(original["id"], d, config), - "ascii_armored_pgp_public_key": flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysAsciiArmoredPgpPublicKey(original["asciiArmoredPgpPublicKey"], d, config), - "pkix_public_key": flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKey(original["pkixPublicKey"], d, config), - }) - } - return transformed -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysComment(v interface{}, d *resource_binary_authorization_attestor_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysId(v interface{}, d *resource_binary_authorization_attestor_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysAsciiArmoredPgpPublicKey(v interface{}, d *resource_binary_authorization_attestor_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKey(v interface{}, d *resource_binary_authorization_attestor_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["public_key_pem"] = - flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeyPublicKeyPem(original["publicKeyPem"], d, config) - transformed["signature_algorithm"] = - flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeySignatureAlgorithm(original["signatureAlgorithm"], d, config) - return []interface{}{transformed} -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeyPublicKeyPem(v interface{}, d 
*resource_binary_authorization_attestor_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeySignatureAlgorithm(v interface{}, d *resource_binary_authorization_attestor_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationAttestorAttestationAuthorityNoteDelegationServiceAccountEmail(v interface{}, d *resource_binary_authorization_attestor_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBinaryAuthorizationAttestorName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/attestors/{{name}}") -} - -func expandBinaryAuthorizationAttestorDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNote(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNoteReference, err := expandBinaryAuthorizationAttestorAttestationAuthorityNoteNoteReference(original["note_reference"], d, config) - if err != nil { - return nil, err - } else if val := resource_binary_authorization_attestor_reflect.ValueOf(transformedNoteReference); val.IsValid() && !isEmptyValue(val) { - transformed["noteReference"] = transformedNoteReference - } - - transformedPublicKeys, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeys(original["public_keys"], d, config) - if err != nil { - return nil, err - } else if val := resource_binary_authorization_attestor_reflect.ValueOf(transformedPublicKeys); val.IsValid() && !isEmptyValue(val) { - transformed["publicKeys"] = transformedPublicKeys - } - - 
transformedDelegationServiceAccountEmail, err := expandBinaryAuthorizationAttestorAttestationAuthorityNoteDelegationServiceAccountEmail(original["delegation_service_account_email"], d, config) - if err != nil { - return nil, err - } else if val := resource_binary_authorization_attestor_reflect.ValueOf(transformedDelegationServiceAccountEmail); val.IsValid() && !isEmptyValue(val) { - transformed["delegationServiceAccountEmail"] = transformedDelegationServiceAccountEmail - } - - return transformed, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNoteNoteReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - r := resource_binary_authorization_attestor_regexp.MustCompile("projects/(.+)/notes/(.+)") - if r.MatchString(v.(string)) { - return v.(string), nil - } - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - return resource_binary_authorization_attestor_fmt.Sprintf("projects/%s/notes/%s", project, v.(string)), nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeys(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedComment, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysComment(original["comment"], d, config) - if err != nil { - return nil, err - } else if val := resource_binary_authorization_attestor_reflect.ValueOf(transformedComment); val.IsValid() && !isEmptyValue(val) { - transformed["comment"] = transformedComment - } - - transformedId, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := resource_binary_authorization_attestor_reflect.ValueOf(transformedId); 
val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedAsciiArmoredPgpPublicKey, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysAsciiArmoredPgpPublicKey(original["ascii_armored_pgp_public_key"], d, config) - if err != nil { - return nil, err - } else if val := resource_binary_authorization_attestor_reflect.ValueOf(transformedAsciiArmoredPgpPublicKey); val.IsValid() && !isEmptyValue(val) { - transformed["asciiArmoredPgpPublicKey"] = transformedAsciiArmoredPgpPublicKey - } - - transformedPkixPublicKey, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKey(original["pkix_public_key"], d, config) - if err != nil { - return nil, err - } else if val := resource_binary_authorization_attestor_reflect.ValueOf(transformedPkixPublicKey); val.IsValid() && !isEmptyValue(val) { - transformed["pkixPublicKey"] = transformedPkixPublicKey - } - - req = append(req, transformed) - } - return req, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysComment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysAsciiArmoredPgpPublicKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPublicKeyPem, err := 
expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeyPublicKeyPem(original["public_key_pem"], d, config) - if err != nil { - return nil, err - } else if val := resource_binary_authorization_attestor_reflect.ValueOf(transformedPublicKeyPem); val.IsValid() && !isEmptyValue(val) { - transformed["publicKeyPem"] = transformedPublicKeyPem - } - - transformedSignatureAlgorithm, err := expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeySignatureAlgorithm(original["signature_algorithm"], d, config) - if err != nil { - return nil, err - } else if val := resource_binary_authorization_attestor_reflect.ValueOf(transformedSignatureAlgorithm); val.IsValid() && !isEmptyValue(val) { - transformed["signatureAlgorithm"] = transformedSignatureAlgorithm - } - - return transformed, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeyPublicKeyPem(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNotePublicKeysPkixPublicKeySignatureAlgorithm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationAttestorAttestationAuthorityNoteDelegationServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func defaultBinaryAuthorizationPolicy(project string) map[string]interface{} { - return map[string]interface{}{ - "name": resource_binary_authorization_policy_fmt.Sprintf("projects/%s/policy", project), - "admissionWhitelistPatterns": []interface{}{ - map[string]interface{}{ - "namePattern": "gcr.io/google_containers/*", - }, - }, - "defaultAdmissionRule": map[string]interface{}{ - "evaluationMode": "ALWAYS_ALLOW", - "enforcementMode": "ENFORCED_BLOCK_AND_AUDIT_LOG", - }, - } -} - -func resourceBinaryAuthorizationPolicy() 
*resource_binary_authorization_policy_schema.Resource { - return &resource_binary_authorization_policy_schema.Resource{ - Create: resourceBinaryAuthorizationPolicyCreate, - Read: resourceBinaryAuthorizationPolicyRead, - Update: resourceBinaryAuthorizationPolicyUpdate, - Delete: resourceBinaryAuthorizationPolicyDelete, - - Importer: &resource_binary_authorization_policy_schema.ResourceImporter{ - State: resourceBinaryAuthorizationPolicyImport, - }, - - Timeouts: &resource_binary_authorization_policy_schema.ResourceTimeout{ - Create: resource_binary_authorization_policy_schema.DefaultTimeout(4 * resource_binary_authorization_policy_time.Minute), - Update: resource_binary_authorization_policy_schema.DefaultTimeout(4 * resource_binary_authorization_policy_time.Minute), - Delete: resource_binary_authorization_policy_schema.DefaultTimeout(4 * resource_binary_authorization_policy_time.Minute), - }, - - Schema: map[string]*resource_binary_authorization_policy_schema.Schema{ - "default_admission_rule": { - Type: resource_binary_authorization_policy_schema.TypeList, - Required: true, - Description: `Default admission rule for a cluster without a per-cluster admission -rule.`, - MaxItems: 1, - Elem: &resource_binary_authorization_policy_schema.Resource{ - Schema: map[string]*resource_binary_authorization_policy_schema.Schema{ - "enforcement_mode": { - Type: resource_binary_authorization_policy_schema.TypeString, - Required: true, - ValidateFunc: resource_binary_authorization_policy_validation.StringInSlice([]string{"ENFORCED_BLOCK_AND_AUDIT_LOG", "DRYRUN_AUDIT_LOG_ONLY"}, false), - Description: `The action when a pod creation is denied by the admission rule. 
Possible values: ["ENFORCED_BLOCK_AND_AUDIT_LOG", "DRYRUN_AUDIT_LOG_ONLY"]`, - }, - "evaluation_mode": { - Type: resource_binary_authorization_policy_schema.TypeString, - Required: true, - ValidateFunc: resource_binary_authorization_policy_validation.StringInSlice([]string{"ALWAYS_ALLOW", "REQUIRE_ATTESTATION", "ALWAYS_DENY"}, false), - Description: `How this admission rule will be evaluated. Possible values: ["ALWAYS_ALLOW", "REQUIRE_ATTESTATION", "ALWAYS_DENY"]`, - }, - "require_attestations_by": { - Type: resource_binary_authorization_policy_schema.TypeSet, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The resource names of the attestors that must attest to a -container image. If the attestor is in a different project from the -policy, it should be specified in the format 'projects/*/attestors/*'. -Each attestor must exist before a policy can reference it. To add an -attestor to a policy the principal issuing the policy change -request must be able to read the attestor resource. - -Note: this field must be non-empty when the evaluation_mode field -specifies REQUIRE_ATTESTATION, otherwise it must be empty.`, - Elem: &resource_binary_authorization_policy_schema.Schema{ - Type: resource_binary_authorization_policy_schema.TypeString, - }, - Set: selfLinkNameHash, - }, - }, - }, - }, - "admission_whitelist_patterns": { - Type: resource_binary_authorization_policy_schema.TypeList, - Optional: true, - Description: `A whitelist of image patterns to exclude from admission rules. 
If an -image's name matches a whitelist pattern, the image's admission -requests will always be permitted regardless of your admission rules.`, - Elem: &resource_binary_authorization_policy_schema.Resource{ - Schema: map[string]*resource_binary_authorization_policy_schema.Schema{ - "name_pattern": { - Type: resource_binary_authorization_policy_schema.TypeString, - Required: true, - Description: `An image name pattern to whitelist, in the form -'registry/path/to/image'. This supports a trailing * as a -wildcard, but this is allowed only in text after the registry/ -part.`, - }, - }, - }, - }, - "cluster_admission_rules": { - Type: resource_binary_authorization_policy_schema.TypeSet, - Optional: true, - Description: `Per-cluster admission rules. An admission rule specifies either that -all container images used in a pod creation request must be attested -to by one or more attestors, that all pod creations will be allowed, -or that all pod creations will be denied. There can be at most one -admission rule per cluster spec. - - -Identifier format: '{{location}}.{{clusterId}}'. -A location is either a compute zone (e.g. 'us-central1-a') or a region -(e.g. 'us-central1').`, - Elem: &resource_binary_authorization_policy_schema.Resource{ - Schema: map[string]*resource_binary_authorization_policy_schema.Schema{ - "cluster": { - Type: resource_binary_authorization_policy_schema.TypeString, - Required: true, - }, - "enforcement_mode": { - Type: resource_binary_authorization_policy_schema.TypeString, - Required: true, - ValidateFunc: resource_binary_authorization_policy_validation.StringInSlice([]string{"ENFORCED_BLOCK_AND_AUDIT_LOG", "DRYRUN_AUDIT_LOG_ONLY"}, false), - Description: `The action when a pod creation is denied by the admission rule. 
Possible values: ["ENFORCED_BLOCK_AND_AUDIT_LOG", "DRYRUN_AUDIT_LOG_ONLY"]`, - }, - "evaluation_mode": { - Type: resource_binary_authorization_policy_schema.TypeString, - Required: true, - ValidateFunc: resource_binary_authorization_policy_validation.StringInSlice([]string{"ALWAYS_ALLOW", "REQUIRE_ATTESTATION", "ALWAYS_DENY"}, false), - Description: `How this admission rule will be evaluated. Possible values: ["ALWAYS_ALLOW", "REQUIRE_ATTESTATION", "ALWAYS_DENY"]`, - }, - "require_attestations_by": { - Type: resource_binary_authorization_policy_schema.TypeSet, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The resource names of the attestors that must attest to a -container image. If the attestor is in a different project from the -policy, it should be specified in the format 'projects/*/attestors/*'. -Each attestor must exist before a policy can reference it. To add an -attestor to a policy the principal issuing the policy change -request must be able to read the attestor resource. 
- -Note: this field must be non-empty when the evaluation_mode field -specifies REQUIRE_ATTESTATION, otherwise it must be empty.`, - Elem: &resource_binary_authorization_policy_schema.Schema{ - Type: resource_binary_authorization_policy_schema.TypeString, - }, - Set: selfLinkNameHash, - }, - }, - }, - Set: func(v interface{}) int { - - raw := v.(map[string]interface{}) - - copy := make((map[string]interface{})) - for key, value := range raw { - copy[key] = value - } - at := copy["require_attestations_by"].(*resource_binary_authorization_policy_schema.Set) - if at != nil { - t := convertAndMapStringArr(at.List(), GetResourceNameFromSelfLink) - copy["require_attestations_by"] = resource_binary_authorization_policy_schema.NewSet(selfLinkNameHash, convertStringArrToInterface(t)) - } - var buf resource_binary_authorization_policy_bytes.Buffer - resource_binary_authorization_policy_schema.SerializeResourceForHash(&buf, copy, resourceBinaryAuthorizationPolicy().Schema["cluster_admission_rules"].Elem.(*resource_binary_authorization_policy_schema.Resource)) - return hashcode(buf.String()) - }, - }, - "description": { - Type: resource_binary_authorization_policy_schema.TypeString, - Optional: true, - Description: `A descriptive comment.`, - }, - "global_policy_evaluation_mode": { - Type: resource_binary_authorization_policy_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_binary_authorization_policy_validation.StringInSlice([]string{"ENABLE", "DISABLE", ""}, false), - Description: `Controls the evaluation of a Google-maintained global admission policy -for common system-level images. Images not covered by the global -policy will be subject to the project admission policy. 
Possible values: ["ENABLE", "DISABLE"]`, - }, - "project": { - Type: resource_binary_authorization_policy_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBinaryAuthorizationPolicyCreate(d *resource_binary_authorization_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandBinaryAuthorizationPolicyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_binary_authorization_policy_reflect.ValueOf(descriptionProp)) && (ok || !resource_binary_authorization_policy_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - globalPolicyEvaluationModeProp, err := expandBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(d.Get("global_policy_evaluation_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("global_policy_evaluation_mode"); !isEmptyValue(resource_binary_authorization_policy_reflect.ValueOf(globalPolicyEvaluationModeProp)) && (ok || !resource_binary_authorization_policy_reflect.DeepEqual(v, globalPolicyEvaluationModeProp)) { - obj["globalPolicyEvaluationMode"] = globalPolicyEvaluationModeProp - } - admissionWhitelistPatternsProp, err := expandBinaryAuthorizationPolicyAdmissionWhitelistPatterns(d.Get("admission_whitelist_patterns"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("admission_whitelist_patterns"); !isEmptyValue(resource_binary_authorization_policy_reflect.ValueOf(admissionWhitelistPatternsProp)) && (ok || !resource_binary_authorization_policy_reflect.DeepEqual(v, admissionWhitelistPatternsProp)) { - obj["admissionWhitelistPatterns"] = admissionWhitelistPatternsProp - } - 
clusterAdmissionRulesProp, err := expandBinaryAuthorizationPolicyClusterAdmissionRules(d.Get("cluster_admission_rules"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cluster_admission_rules"); !isEmptyValue(resource_binary_authorization_policy_reflect.ValueOf(clusterAdmissionRulesProp)) && (ok || !resource_binary_authorization_policy_reflect.DeepEqual(v, clusterAdmissionRulesProp)) { - obj["clusterAdmissionRules"] = clusterAdmissionRulesProp - } - defaultAdmissionRuleProp, err := expandBinaryAuthorizationPolicyDefaultAdmissionRule(d.Get("default_admission_rule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_admission_rule"); !isEmptyValue(resource_binary_authorization_policy_reflect.ValueOf(defaultAdmissionRuleProp)) && (ok || !resource_binary_authorization_policy_reflect.DeepEqual(v, defaultAdmissionRuleProp)) { - obj["defaultAdmissionRule"] = defaultAdmissionRuleProp - } - - url, err := replaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/policy") - if err != nil { - return err - } - - resource_binary_authorization_policy_log.Printf("[DEBUG] Creating new Policy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_binary_authorization_policy_fmt.Errorf("Error fetching project for Policy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_binary_authorization_policy_schema.TimeoutCreate)) - if err != nil { - return resource_binary_authorization_policy_fmt.Errorf("Error creating Policy: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}") - if err != nil { - return resource_binary_authorization_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - 
resource_binary_authorization_policy_log.Printf("[DEBUG] Finished creating Policy %q: %#v", d.Id(), res) - - return resourceBinaryAuthorizationPolicyRead(d, meta) -} - -func resourceBinaryAuthorizationPolicyRead(d *resource_binary_authorization_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/policy") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_binary_authorization_policy_fmt.Errorf("Error fetching project for Policy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_binary_authorization_policy_fmt.Sprintf("BinaryAuthorizationPolicy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_binary_authorization_policy_fmt.Errorf("Error reading Policy: %s", err) - } - - if err := d.Set("description", flattenBinaryAuthorizationPolicyDescription(res["description"], d, config)); err != nil { - return resource_binary_authorization_policy_fmt.Errorf("Error reading Policy: %s", err) - } - if err := d.Set("global_policy_evaluation_mode", flattenBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(res["globalPolicyEvaluationMode"], d, config)); err != nil { - return resource_binary_authorization_policy_fmt.Errorf("Error reading Policy: %s", err) - } - if err := d.Set("admission_whitelist_patterns", flattenBinaryAuthorizationPolicyAdmissionWhitelistPatterns(res["admissionWhitelistPatterns"], d, config)); err != nil { - return resource_binary_authorization_policy_fmt.Errorf("Error reading Policy: %s", err) - } - if 
err := d.Set("cluster_admission_rules", flattenBinaryAuthorizationPolicyClusterAdmissionRules(res["clusterAdmissionRules"], d, config)); err != nil { - return resource_binary_authorization_policy_fmt.Errorf("Error reading Policy: %s", err) - } - if err := d.Set("default_admission_rule", flattenBinaryAuthorizationPolicyDefaultAdmissionRule(res["defaultAdmissionRule"], d, config)); err != nil { - return resource_binary_authorization_policy_fmt.Errorf("Error reading Policy: %s", err) - } - - return nil -} - -func resourceBinaryAuthorizationPolicyUpdate(d *resource_binary_authorization_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_binary_authorization_policy_fmt.Errorf("Error fetching project for Policy: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandBinaryAuthorizationPolicyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_binary_authorization_policy_reflect.ValueOf(v)) && (ok || !resource_binary_authorization_policy_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - globalPolicyEvaluationModeProp, err := expandBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(d.Get("global_policy_evaluation_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("global_policy_evaluation_mode"); !isEmptyValue(resource_binary_authorization_policy_reflect.ValueOf(v)) && (ok || !resource_binary_authorization_policy_reflect.DeepEqual(v, globalPolicyEvaluationModeProp)) { - obj["globalPolicyEvaluationMode"] = globalPolicyEvaluationModeProp - } - admissionWhitelistPatternsProp, err := 
expandBinaryAuthorizationPolicyAdmissionWhitelistPatterns(d.Get("admission_whitelist_patterns"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("admission_whitelist_patterns"); !isEmptyValue(resource_binary_authorization_policy_reflect.ValueOf(v)) && (ok || !resource_binary_authorization_policy_reflect.DeepEqual(v, admissionWhitelistPatternsProp)) { - obj["admissionWhitelistPatterns"] = admissionWhitelistPatternsProp - } - clusterAdmissionRulesProp, err := expandBinaryAuthorizationPolicyClusterAdmissionRules(d.Get("cluster_admission_rules"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cluster_admission_rules"); !isEmptyValue(resource_binary_authorization_policy_reflect.ValueOf(v)) && (ok || !resource_binary_authorization_policy_reflect.DeepEqual(v, clusterAdmissionRulesProp)) { - obj["clusterAdmissionRules"] = clusterAdmissionRulesProp - } - defaultAdmissionRuleProp, err := expandBinaryAuthorizationPolicyDefaultAdmissionRule(d.Get("default_admission_rule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_admission_rule"); !isEmptyValue(resource_binary_authorization_policy_reflect.ValueOf(v)) && (ok || !resource_binary_authorization_policy_reflect.DeepEqual(v, defaultAdmissionRuleProp)) { - obj["defaultAdmissionRule"] = defaultAdmissionRuleProp - } - - url, err := replaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/policy") - if err != nil { - return err - } - - resource_binary_authorization_policy_log.Printf("[DEBUG] Updating Policy %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_binary_authorization_policy_schema.TimeoutUpdate)) - - if err != nil { - return resource_binary_authorization_policy_fmt.Errorf("Error updating Policy %q: %s", d.Id(), err) - } else { - 
resource_binary_authorization_policy_log.Printf("[DEBUG] Finished updating Policy %q: %#v", d.Id(), res) - } - - return resourceBinaryAuthorizationPolicyRead(d, meta) -} - -func resourceBinaryAuthorizationPolicyDelete(d *resource_binary_authorization_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_binary_authorization_policy_fmt.Errorf("Error fetching project for Policy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{BinaryAuthorizationBasePath}}projects/{{project}}/policy") - if err != nil { - return err - } - - var obj map[string]interface{} - obj = defaultBinaryAuthorizationPolicy(d.Get("project").(string)) - resource_binary_authorization_policy_log.Printf("[DEBUG] Deleting Policy %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_binary_authorization_policy_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Policy") - } - - resource_binary_authorization_policy_log.Printf("[DEBUG] Finished deleting Policy %q: %#v", d.Id(), res) - return nil -} - -func resourceBinaryAuthorizationPolicyImport(d *resource_binary_authorization_policy_schema.ResourceData, meta interface{}) ([]*resource_binary_authorization_policy_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}") - if err != nil { - return nil, resource_binary_authorization_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return 
[]*resource_binary_authorization_policy_schema.ResourceData{d}, nil -} - -func flattenBinaryAuthorizationPolicyDescription(v interface{}, d *resource_binary_authorization_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(v interface{}, d *resource_binary_authorization_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationPolicyAdmissionWhitelistPatterns(v interface{}, d *resource_binary_authorization_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name_pattern": flattenBinaryAuthorizationPolicyAdmissionWhitelistPatternsNamePattern(original["namePattern"], d, config), - }) - } - return transformed -} - -func flattenBinaryAuthorizationPolicyAdmissionWhitelistPatternsNamePattern(v interface{}, d *resource_binary_authorization_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationPolicyClusterAdmissionRules(v interface{}, d *resource_binary_authorization_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.(map[string]interface{}) - transformed := make([]interface{}, 0, len(l)) - for k, raw := range l { - original := raw.(map[string]interface{}) - transformed = append(transformed, map[string]interface{}{ - "cluster": k, - "evaluation_mode": flattenBinaryAuthorizationPolicyClusterAdmissionRulesEvaluationMode(original["evaluationMode"], d, config), - "require_attestations_by": flattenBinaryAuthorizationPolicyClusterAdmissionRulesRequireAttestationsBy(original["requireAttestationsBy"], d, config), - "enforcement_mode": 
flattenBinaryAuthorizationPolicyClusterAdmissionRulesEnforcementMode(original["enforcementMode"], d, config), - }) - } - return transformed -} - -func flattenBinaryAuthorizationPolicyClusterAdmissionRulesEvaluationMode(v interface{}, d *resource_binary_authorization_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationPolicyClusterAdmissionRulesRequireAttestationsBy(v interface{}, d *resource_binary_authorization_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_binary_authorization_policy_schema.NewSet(selfLinkNameHash, v.([]interface{})) -} - -func flattenBinaryAuthorizationPolicyClusterAdmissionRulesEnforcementMode(v interface{}, d *resource_binary_authorization_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationPolicyDefaultAdmissionRule(v interface{}, d *resource_binary_authorization_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["evaluation_mode"] = - flattenBinaryAuthorizationPolicyDefaultAdmissionRuleEvaluationMode(original["evaluationMode"], d, config) - transformed["require_attestations_by"] = - flattenBinaryAuthorizationPolicyDefaultAdmissionRuleRequireAttestationsBy(original["requireAttestationsBy"], d, config) - transformed["enforcement_mode"] = - flattenBinaryAuthorizationPolicyDefaultAdmissionRuleEnforcementMode(original["enforcementMode"], d, config) - return []interface{}{transformed} -} - -func flattenBinaryAuthorizationPolicyDefaultAdmissionRuleEvaluationMode(v interface{}, d *resource_binary_authorization_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenBinaryAuthorizationPolicyDefaultAdmissionRuleRequireAttestationsBy(v interface{}, d 
*resource_binary_authorization_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_binary_authorization_policy_schema.NewSet(selfLinkNameHash, v.([]interface{})) -} - -func flattenBinaryAuthorizationPolicyDefaultAdmissionRuleEnforcementMode(v interface{}, d *resource_binary_authorization_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandBinaryAuthorizationPolicyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationPolicyAdmissionWhitelistPatterns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNamePattern, err := expandBinaryAuthorizationPolicyAdmissionWhitelistPatternsNamePattern(original["name_pattern"], d, config) - if err != nil { - return nil, err - } else if val := resource_binary_authorization_policy_reflect.ValueOf(transformedNamePattern); val.IsValid() && !isEmptyValue(val) { - transformed["namePattern"] = transformedNamePattern - } - - req = append(req, transformed) - } - return req, nil -} - -func expandBinaryAuthorizationPolicyAdmissionWhitelistPatternsNamePattern(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationPolicyClusterAdmissionRules(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - m := make(map[string]interface{}) - for _, raw := range 
v.(*resource_binary_authorization_policy_schema.Set).List() { - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEvaluationMode, err := expandBinaryAuthorizationPolicyClusterAdmissionRulesEvaluationMode(original["evaluation_mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_binary_authorization_policy_reflect.ValueOf(transformedEvaluationMode); val.IsValid() && !isEmptyValue(val) { - transformed["evaluationMode"] = transformedEvaluationMode - } - - transformedRequireAttestationsBy, err := expandBinaryAuthorizationPolicyClusterAdmissionRulesRequireAttestationsBy(original["require_attestations_by"], d, config) - if err != nil { - return nil, err - } else if val := resource_binary_authorization_policy_reflect.ValueOf(transformedRequireAttestationsBy); val.IsValid() && !isEmptyValue(val) { - transformed["requireAttestationsBy"] = transformedRequireAttestationsBy - } - - transformedEnforcementMode, err := expandBinaryAuthorizationPolicyClusterAdmissionRulesEnforcementMode(original["enforcement_mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_binary_authorization_policy_reflect.ValueOf(transformedEnforcementMode); val.IsValid() && !isEmptyValue(val) { - transformed["enforcementMode"] = transformedEnforcementMode - } - - transformedCluster, err := expandString(original["cluster"], d, config) - if err != nil { - return nil, err - } - m[transformedCluster] = transformed - } - return m, nil -} - -func expandBinaryAuthorizationPolicyClusterAdmissionRulesEvaluationMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationPolicyClusterAdmissionRulesRequireAttestationsBy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - r := resource_binary_authorization_policy_regexp.MustCompile("projects/(.+)/attestors/(.+)") - - var project string - var err error - for 
_, s := range v.(*resource_binary_authorization_policy_schema.Set).List() { - if !r.MatchString(s.(string)) { - project, err = getProject(d, config) - if err != nil { - return []interface{}{}, err - } - break - } - } - - return convertAndMapStringArr(v.(*resource_binary_authorization_policy_schema.Set).List(), func(s string) string { - if r.MatchString(s) { - return s - } - - return resource_binary_authorization_policy_fmt.Sprintf("projects/%s/attestors/%s", project, s) - }), nil -} - -func expandBinaryAuthorizationPolicyClusterAdmissionRulesEnforcementMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationPolicyDefaultAdmissionRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEvaluationMode, err := expandBinaryAuthorizationPolicyDefaultAdmissionRuleEvaluationMode(original["evaluation_mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_binary_authorization_policy_reflect.ValueOf(transformedEvaluationMode); val.IsValid() && !isEmptyValue(val) { - transformed["evaluationMode"] = transformedEvaluationMode - } - - transformedRequireAttestationsBy, err := expandBinaryAuthorizationPolicyDefaultAdmissionRuleRequireAttestationsBy(original["require_attestations_by"], d, config) - if err != nil { - return nil, err - } else if val := resource_binary_authorization_policy_reflect.ValueOf(transformedRequireAttestationsBy); val.IsValid() && !isEmptyValue(val) { - transformed["requireAttestationsBy"] = transformedRequireAttestationsBy - } - - transformedEnforcementMode, err := expandBinaryAuthorizationPolicyDefaultAdmissionRuleEnforcementMode(original["enforcement_mode"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_binary_authorization_policy_reflect.ValueOf(transformedEnforcementMode); val.IsValid() && !isEmptyValue(val) { - transformed["enforcementMode"] = transformedEnforcementMode - } - - return transformed, nil -} - -func expandBinaryAuthorizationPolicyDefaultAdmissionRuleEvaluationMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandBinaryAuthorizationPolicyDefaultAdmissionRuleRequireAttestationsBy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - r := resource_binary_authorization_policy_regexp.MustCompile("projects/(.+)/attestors/(.+)") - - var project string - var err error - for _, s := range v.(*resource_binary_authorization_policy_schema.Set).List() { - if !r.MatchString(s.(string)) { - project, err = getProject(d, config) - if err != nil { - return []interface{}{}, err - } - break - } - } - - return convertAndMapStringArr(v.(*resource_binary_authorization_policy_schema.Set).List(), func(s string) string { - if r.MatchString(s) { - return s - } - - return resource_binary_authorization_policy_fmt.Sprintf("projects/%s/attestors/%s", project, s) - }), nil -} - -func expandBinaryAuthorizationPolicyDefaultAdmissionRuleEnforcementMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceCloudAssetFolderFeed() *resource_cloud_asset_folder_feed_schema.Resource { - return &resource_cloud_asset_folder_feed_schema.Resource{ - Create: resourceCloudAssetFolderFeedCreate, - Read: resourceCloudAssetFolderFeedRead, - Update: resourceCloudAssetFolderFeedUpdate, - Delete: resourceCloudAssetFolderFeedDelete, - - Importer: &resource_cloud_asset_folder_feed_schema.ResourceImporter{ - State: resourceCloudAssetFolderFeedImport, - }, - - Timeouts: &resource_cloud_asset_folder_feed_schema.ResourceTimeout{ - Create: resource_cloud_asset_folder_feed_schema.DefaultTimeout(4 * resource_cloud_asset_folder_feed_time.Minute), - 
Update: resource_cloud_asset_folder_feed_schema.DefaultTimeout(4 * resource_cloud_asset_folder_feed_time.Minute), - Delete: resource_cloud_asset_folder_feed_schema.DefaultTimeout(4 * resource_cloud_asset_folder_feed_time.Minute), - }, - - Schema: map[string]*resource_cloud_asset_folder_feed_schema.Schema{ - "billing_project": { - Type: resource_cloud_asset_folder_feed_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The project whose identity will be used when sending messages to the -destination pubsub topic. It also specifies the project for API -enablement check, quota, and billing.`, - }, - "feed_id": { - Type: resource_cloud_asset_folder_feed_schema.TypeString, - Required: true, - ForceNew: true, - Description: `This is the client-assigned asset feed identifier and it needs to be unique under a specific parent.`, - }, - "feed_output_config": { - Type: resource_cloud_asset_folder_feed_schema.TypeList, - Required: true, - Description: `Output configuration for asset feed destination.`, - MaxItems: 1, - Elem: &resource_cloud_asset_folder_feed_schema.Resource{ - Schema: map[string]*resource_cloud_asset_folder_feed_schema.Schema{ - "pubsub_destination": { - Type: resource_cloud_asset_folder_feed_schema.TypeList, - Required: true, - Description: `Destination on Cloud Pubsub.`, - MaxItems: 1, - Elem: &resource_cloud_asset_folder_feed_schema.Resource{ - Schema: map[string]*resource_cloud_asset_folder_feed_schema.Schema{ - "topic": { - Type: resource_cloud_asset_folder_feed_schema.TypeString, - Required: true, - Description: `Destination on Cloud Pubsub topic.`, - }, - }, - }, - }, - }, - }, - }, - "folder": { - Type: resource_cloud_asset_folder_feed_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The folder this feed should be created in.`, - }, - "asset_names": { - Type: resource_cloud_asset_folder_feed_schema.TypeList, - Optional: true, - Description: `A list of the full names of the assets to receive updates. 
You must specify either or both of -assetNames and assetTypes. Only asset updates matching specified assetNames and assetTypes are -exported to the feed. For example: //compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1. -See https://cloud.google.com/apis/design/resourceNames#fullResourceName for more info.`, - Elem: &resource_cloud_asset_folder_feed_schema.Schema{ - Type: resource_cloud_asset_folder_feed_schema.TypeString, - }, - }, - "asset_types": { - Type: resource_cloud_asset_folder_feed_schema.TypeList, - Optional: true, - Description: `A list of types of the assets to receive updates. You must specify either or both of assetNames -and assetTypes. Only asset updates matching specified assetNames and assetTypes are exported to -the feed. For example: "compute.googleapis.com/Disk" -See https://cloud.google.com/asset-inventory/docs/supported-asset-types for a list of all -supported asset types.`, - Elem: &resource_cloud_asset_folder_feed_schema.Schema{ - Type: resource_cloud_asset_folder_feed_schema.TypeString, - }, - }, - "condition": { - Type: resource_cloud_asset_folder_feed_schema.TypeList, - Optional: true, - Description: `A condition which determines whether an asset update should be published. If specified, an asset -will be returned only when the expression evaluates to true. When set, expression field -must be a valid CEL expression on a TemporalAsset with name temporal_asset. Example: a Feed with -expression "temporal_asset.deleted == true" will only publish Asset deletions. 
Other fields of -condition are optional.`, - MaxItems: 1, - Elem: &resource_cloud_asset_folder_feed_schema.Resource{ - Schema: map[string]*resource_cloud_asset_folder_feed_schema.Schema{ - "expression": { - Type: resource_cloud_asset_folder_feed_schema.TypeString, - Required: true, - Description: `Textual representation of an expression in Common Expression Language syntax.`, - }, - "description": { - Type: resource_cloud_asset_folder_feed_schema.TypeString, - Optional: true, - Description: `Description of the expression. This is a longer text which describes the expression, -e.g. when hovered over it in a UI.`, - }, - "location": { - Type: resource_cloud_asset_folder_feed_schema.TypeString, - Optional: true, - Description: `String indicating the location of the expression for error reporting, e.g. a file -name and a position in the file.`, - }, - "title": { - Type: resource_cloud_asset_folder_feed_schema.TypeString, - Optional: true, - Description: `Title for the expression, i.e. a short string describing its purpose. -This can be used e.g. in UIs which allow to enter the expression.`, - }, - }, - }, - }, - "content_type": { - Type: resource_cloud_asset_folder_feed_schema.TypeString, - Optional: true, - ValidateFunc: resource_cloud_asset_folder_feed_validation.StringInSlice([]string{"CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "ACCESS_POLICY", ""}, false), - Description: `Asset content type. If not specified, no content but the asset name and type will be returned. Possible values: ["CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "ACCESS_POLICY"]`, - }, - "folder_id": { - Type: resource_cloud_asset_folder_feed_schema.TypeString, - Computed: true, - Description: `The ID of the folder where this feed has been created. 
Both [FOLDER_NUMBER] -and folders/[FOLDER_NUMBER] are accepted.`, - }, - "name": { - Type: resource_cloud_asset_folder_feed_schema.TypeString, - Computed: true, - Description: `The format will be folders/{folder_number}/feeds/{client-assigned_feed_identifier}.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudAssetFolderFeedCreate(d *resource_cloud_asset_folder_feed_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - assetNamesProp, err := expandCloudAssetFolderFeedAssetNames(d.Get("asset_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_names"); !isEmptyValue(resource_cloud_asset_folder_feed_reflect.ValueOf(assetNamesProp)) && (ok || !resource_cloud_asset_folder_feed_reflect.DeepEqual(v, assetNamesProp)) { - obj["assetNames"] = assetNamesProp - } - assetTypesProp, err := expandCloudAssetFolderFeedAssetTypes(d.Get("asset_types"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_types"); !isEmptyValue(resource_cloud_asset_folder_feed_reflect.ValueOf(assetTypesProp)) && (ok || !resource_cloud_asset_folder_feed_reflect.DeepEqual(v, assetTypesProp)) { - obj["assetTypes"] = assetTypesProp - } - contentTypeProp, err := expandCloudAssetFolderFeedContentType(d.Get("content_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("content_type"); !isEmptyValue(resource_cloud_asset_folder_feed_reflect.ValueOf(contentTypeProp)) && (ok || !resource_cloud_asset_folder_feed_reflect.DeepEqual(v, contentTypeProp)) { - obj["contentType"] = contentTypeProp - } - feedOutputConfigProp, err := expandCloudAssetFolderFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("feed_output_config"); 
!isEmptyValue(resource_cloud_asset_folder_feed_reflect.ValueOf(feedOutputConfigProp)) && (ok || !resource_cloud_asset_folder_feed_reflect.DeepEqual(v, feedOutputConfigProp)) { - obj["feedOutputConfig"] = feedOutputConfigProp - } - conditionProp, err := expandCloudAssetFolderFeedCondition(d.Get("condition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("condition"); !isEmptyValue(resource_cloud_asset_folder_feed_reflect.ValueOf(conditionProp)) && (ok || !resource_cloud_asset_folder_feed_reflect.DeepEqual(v, conditionProp)) { - obj["condition"] = conditionProp - } - - obj, err = resourceCloudAssetFolderFeedEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}folders/{{folder_id}}/feeds?feedId={{feed_id}}") - if err != nil { - return err - } - - resource_cloud_asset_folder_feed_log.Printf("[DEBUG] Creating new FolderFeed: %#v", obj) - billingProject := "" - - if parts := resource_cloud_asset_folder_feed_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - origUserProjectOverride := config.UserProjectOverride - config.UserProjectOverride = true - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_asset_folder_feed_schema.TimeoutCreate)) - if err != nil { - return resource_cloud_asset_folder_feed_fmt.Errorf("Error creating FolderFeed: %s", err) - } - if err := d.Set("name", flattenCloudAssetFolderFeedName(res["name"], d, config)); err != nil { - return resource_cloud_asset_folder_feed_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_cloud_asset_folder_feed_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - config.UserProjectOverride = 
origUserProjectOverride - - resource_cloud_asset_folder_feed_log.Printf("[DEBUG] Finished creating FolderFeed %q: %#v", d.Id(), res) - - return resourceCloudAssetFolderFeedRead(d, meta) -} - -func resourceCloudAssetFolderFeedRead(d *resource_cloud_asset_folder_feed_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if parts := resource_cloud_asset_folder_feed_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_cloud_asset_folder_feed_fmt.Sprintf("CloudAssetFolderFeed %q", d.Id())) - } - - if err := d.Set("folder_id", flattenCloudAssetFolderFeedFolderId(res["folder_id"], d, config)); err != nil { - return resource_cloud_asset_folder_feed_fmt.Errorf("Error reading FolderFeed: %s", err) - } - if err := d.Set("name", flattenCloudAssetFolderFeedName(res["name"], d, config)); err != nil { - return resource_cloud_asset_folder_feed_fmt.Errorf("Error reading FolderFeed: %s", err) - } - if err := d.Set("asset_names", flattenCloudAssetFolderFeedAssetNames(res["assetNames"], d, config)); err != nil { - return resource_cloud_asset_folder_feed_fmt.Errorf("Error reading FolderFeed: %s", err) - } - if err := d.Set("asset_types", flattenCloudAssetFolderFeedAssetTypes(res["assetTypes"], d, config)); err != nil { - return resource_cloud_asset_folder_feed_fmt.Errorf("Error reading FolderFeed: %s", err) - } - if err := d.Set("content_type", flattenCloudAssetFolderFeedContentType(res["contentType"], d, config)); err != nil { - return 
resource_cloud_asset_folder_feed_fmt.Errorf("Error reading FolderFeed: %s", err) - } - if err := d.Set("feed_output_config", flattenCloudAssetFolderFeedFeedOutputConfig(res["feedOutputConfig"], d, config)); err != nil { - return resource_cloud_asset_folder_feed_fmt.Errorf("Error reading FolderFeed: %s", err) - } - if err := d.Set("condition", flattenCloudAssetFolderFeedCondition(res["condition"], d, config)); err != nil { - return resource_cloud_asset_folder_feed_fmt.Errorf("Error reading FolderFeed: %s", err) - } - - return nil -} - -func resourceCloudAssetFolderFeedUpdate(d *resource_cloud_asset_folder_feed_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - assetNamesProp, err := expandCloudAssetFolderFeedAssetNames(d.Get("asset_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_names"); !isEmptyValue(resource_cloud_asset_folder_feed_reflect.ValueOf(v)) && (ok || !resource_cloud_asset_folder_feed_reflect.DeepEqual(v, assetNamesProp)) { - obj["assetNames"] = assetNamesProp - } - assetTypesProp, err := expandCloudAssetFolderFeedAssetTypes(d.Get("asset_types"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_types"); !isEmptyValue(resource_cloud_asset_folder_feed_reflect.ValueOf(v)) && (ok || !resource_cloud_asset_folder_feed_reflect.DeepEqual(v, assetTypesProp)) { - obj["assetTypes"] = assetTypesProp - } - contentTypeProp, err := expandCloudAssetFolderFeedContentType(d.Get("content_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("content_type"); !isEmptyValue(resource_cloud_asset_folder_feed_reflect.ValueOf(v)) && (ok || !resource_cloud_asset_folder_feed_reflect.DeepEqual(v, contentTypeProp)) { - obj["contentType"] = contentTypeProp - } - feedOutputConfigProp, err 
:= expandCloudAssetFolderFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("feed_output_config"); !isEmptyValue(resource_cloud_asset_folder_feed_reflect.ValueOf(v)) && (ok || !resource_cloud_asset_folder_feed_reflect.DeepEqual(v, feedOutputConfigProp)) { - obj["feedOutputConfig"] = feedOutputConfigProp - } - conditionProp, err := expandCloudAssetFolderFeedCondition(d.Get("condition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("condition"); !isEmptyValue(resource_cloud_asset_folder_feed_reflect.ValueOf(v)) && (ok || !resource_cloud_asset_folder_feed_reflect.DeepEqual(v, conditionProp)) { - obj["condition"] = conditionProp - } - - obj, err = resourceCloudAssetFolderFeedEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - resource_cloud_asset_folder_feed_log.Printf("[DEBUG] Updating FolderFeed %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("asset_names") { - updateMask = append(updateMask, "assetNames") - } - - if d.HasChange("asset_types") { - updateMask = append(updateMask, "assetTypes") - } - - if d.HasChange("content_type") { - updateMask = append(updateMask, "contentType") - } - - if d.HasChange("feed_output_config") { - updateMask = append(updateMask, "feedOutputConfig") - } - - if d.HasChange("condition") { - updateMask = append(updateMask, "condition") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_cloud_asset_folder_feed_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - if parts := resource_cloud_asset_folder_feed_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, 
"PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_asset_folder_feed_schema.TimeoutUpdate)) - - if err != nil { - return resource_cloud_asset_folder_feed_fmt.Errorf("Error updating FolderFeed %q: %s", d.Id(), err) - } else { - resource_cloud_asset_folder_feed_log.Printf("[DEBUG] Finished updating FolderFeed %q: %#v", d.Id(), res) - } - - return resourceCloudAssetFolderFeedRead(d, meta) -} - -func resourceCloudAssetFolderFeedDelete(d *resource_cloud_asset_folder_feed_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - if parts := resource_cloud_asset_folder_feed_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - resource_cloud_asset_folder_feed_log.Printf("[DEBUG] Deleting FolderFeed %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_asset_folder_feed_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "FolderFeed") - } - - resource_cloud_asset_folder_feed_log.Printf("[DEBUG] Finished deleting FolderFeed %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudAssetFolderFeedImport(d *resource_cloud_asset_folder_feed_schema.ResourceData, meta interface{}) ([]*resource_cloud_asset_folder_feed_schema.ResourceData, error) { - if err := d.Set("name", d.Id()); err != nil { - return nil, err - } - return []*resource_cloud_asset_folder_feed_schema.ResourceData{d}, nil -} - -func flattenCloudAssetFolderFeedFolderId(v interface{}, d *resource_cloud_asset_folder_feed_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenCloudAssetFolderFeedName(v interface{}, d *resource_cloud_asset_folder_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedAssetNames(v interface{}, d *resource_cloud_asset_folder_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedAssetTypes(v interface{}, d *resource_cloud_asset_folder_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedContentType(v interface{}, d *resource_cloud_asset_folder_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedFeedOutputConfig(v interface{}, d *resource_cloud_asset_folder_feed_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pubsub_destination"] = - flattenCloudAssetFolderFeedFeedOutputConfigPubsubDestination(original["pubsubDestination"], d, config) - return []interface{}{transformed} -} - -func flattenCloudAssetFolderFeedFeedOutputConfigPubsubDestination(v interface{}, d *resource_cloud_asset_folder_feed_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["topic"] = - flattenCloudAssetFolderFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) - return []interface{}{transformed} -} - -func flattenCloudAssetFolderFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d *resource_cloud_asset_folder_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedCondition(v interface{}, d *resource_cloud_asset_folder_feed_schema.ResourceData, config *Config) 
interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["expression"] = - flattenCloudAssetFolderFeedConditionExpression(original["expression"], d, config) - transformed["title"] = - flattenCloudAssetFolderFeedConditionTitle(original["title"], d, config) - transformed["description"] = - flattenCloudAssetFolderFeedConditionDescription(original["description"], d, config) - transformed["location"] = - flattenCloudAssetFolderFeedConditionLocation(original["location"], d, config) - return []interface{}{transformed} -} - -func flattenCloudAssetFolderFeedConditionExpression(v interface{}, d *resource_cloud_asset_folder_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedConditionTitle(v interface{}, d *resource_cloud_asset_folder_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedConditionDescription(v interface{}, d *resource_cloud_asset_folder_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetFolderFeedConditionLocation(v interface{}, d *resource_cloud_asset_folder_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudAssetFolderFeedAssetNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetFolderFeedAssetTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetFolderFeedContentType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetFolderFeedFeedOutputConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original 
:= raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubDestination, err := expandCloudAssetFolderFeedFeedOutputConfigPubsubDestination(original["pubsub_destination"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_folder_feed_reflect.ValueOf(transformedPubsubDestination); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubDestination"] = transformedPubsubDestination - } - - return transformed, nil -} - -func expandCloudAssetFolderFeedFeedOutputConfigPubsubDestination(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTopic, err := expandCloudAssetFolderFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_folder_feed_reflect.ValueOf(transformedTopic); val.IsValid() && !isEmptyValue(val) { - transformed["topic"] = transformedTopic - } - - return transformed, nil -} - -func expandCloudAssetFolderFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetFolderFeedCondition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExpression, err := expandCloudAssetFolderFeedConditionExpression(original["expression"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_folder_feed_reflect.ValueOf(transformedExpression); val.IsValid() && !isEmptyValue(val) { - transformed["expression"] = transformedExpression 
- } - - transformedTitle, err := expandCloudAssetFolderFeedConditionTitle(original["title"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_folder_feed_reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { - transformed["title"] = transformedTitle - } - - transformedDescription, err := expandCloudAssetFolderFeedConditionDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_folder_feed_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedLocation, err := expandCloudAssetFolderFeedConditionLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_folder_feed_reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - return transformed, nil -} - -func expandCloudAssetFolderFeedConditionExpression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetFolderFeedConditionTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetFolderFeedConditionDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetFolderFeedConditionLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceCloudAssetFolderFeedEncoder(d *resource_cloud_asset_folder_feed_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - if folder, ok := d.GetOkExists("folder"); ok { - if err := d.Set("folder_id", resource_cloud_asset_folder_feed_strings.TrimPrefix(folder.(string), "folders/")); err != nil { - return nil, 
resource_cloud_asset_folder_feed_fmt.Errorf("Error setting folder_id: %s", err) - } - } - - newObj := make(map[string]interface{}) - newObj["feed"] = obj - return newObj, nil -} - -func resourceCloudAssetOrganizationFeed() *resource_cloud_asset_organization_feed_schema.Resource { - return &resource_cloud_asset_organization_feed_schema.Resource{ - Create: resourceCloudAssetOrganizationFeedCreate, - Read: resourceCloudAssetOrganizationFeedRead, - Update: resourceCloudAssetOrganizationFeedUpdate, - Delete: resourceCloudAssetOrganizationFeedDelete, - - Importer: &resource_cloud_asset_organization_feed_schema.ResourceImporter{ - State: resourceCloudAssetOrganizationFeedImport, - }, - - Timeouts: &resource_cloud_asset_organization_feed_schema.ResourceTimeout{ - Create: resource_cloud_asset_organization_feed_schema.DefaultTimeout(4 * resource_cloud_asset_organization_feed_time.Minute), - Update: resource_cloud_asset_organization_feed_schema.DefaultTimeout(4 * resource_cloud_asset_organization_feed_time.Minute), - Delete: resource_cloud_asset_organization_feed_schema.DefaultTimeout(4 * resource_cloud_asset_organization_feed_time.Minute), - }, - - Schema: map[string]*resource_cloud_asset_organization_feed_schema.Schema{ - "billing_project": { - Type: resource_cloud_asset_organization_feed_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The project whose identity will be used when sending messages to the -destination pubsub topic. 
It also specifies the project for API -enablement check, quota, and billing.`, - }, - "feed_id": { - Type: resource_cloud_asset_organization_feed_schema.TypeString, - Required: true, - ForceNew: true, - Description: `This is the client-assigned asset feed identifier and it needs to be unique under a specific parent.`, - }, - "feed_output_config": { - Type: resource_cloud_asset_organization_feed_schema.TypeList, - Required: true, - Description: `Output configuration for asset feed destination.`, - MaxItems: 1, - Elem: &resource_cloud_asset_organization_feed_schema.Resource{ - Schema: map[string]*resource_cloud_asset_organization_feed_schema.Schema{ - "pubsub_destination": { - Type: resource_cloud_asset_organization_feed_schema.TypeList, - Required: true, - Description: `Destination on Cloud Pubsub.`, - MaxItems: 1, - Elem: &resource_cloud_asset_organization_feed_schema.Resource{ - Schema: map[string]*resource_cloud_asset_organization_feed_schema.Schema{ - "topic": { - Type: resource_cloud_asset_organization_feed_schema.TypeString, - Required: true, - Description: `Destination on Cloud Pubsub topic.`, - }, - }, - }, - }, - }, - }, - }, - "org_id": { - Type: resource_cloud_asset_organization_feed_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The organization this feed should be created in.`, - }, - "asset_names": { - Type: resource_cloud_asset_organization_feed_schema.TypeList, - Optional: true, - Description: `A list of the full names of the assets to receive updates. You must specify either or both of -assetNames and assetTypes. Only asset updates matching specified assetNames and assetTypes are -exported to the feed. For example: //compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1. 
-See https://cloud.google.com/apis/design/resourceNames#fullResourceName for more info.`, - Elem: &resource_cloud_asset_organization_feed_schema.Schema{ - Type: resource_cloud_asset_organization_feed_schema.TypeString, - }, - }, - "asset_types": { - Type: resource_cloud_asset_organization_feed_schema.TypeList, - Optional: true, - Description: `A list of types of the assets to receive updates. You must specify either or both of assetNames -and assetTypes. Only asset updates matching specified assetNames and assetTypes are exported to -the feed. For example: "compute.googleapis.com/Disk" -See https://cloud.google.com/asset-inventory/docs/supported-asset-types for a list of all -supported asset types.`, - Elem: &resource_cloud_asset_organization_feed_schema.Schema{ - Type: resource_cloud_asset_organization_feed_schema.TypeString, - }, - }, - "condition": { - Type: resource_cloud_asset_organization_feed_schema.TypeList, - Optional: true, - Description: `A condition which determines whether an asset update should be published. If specified, an asset -will be returned only when the expression evaluates to true. When set, expression field -must be a valid CEL expression on a TemporalAsset with name temporal_asset. Example: a Feed with -expression "temporal_asset.deleted == true" will only publish Asset deletions. Other fields of -condition are optional.`, - MaxItems: 1, - Elem: &resource_cloud_asset_organization_feed_schema.Resource{ - Schema: map[string]*resource_cloud_asset_organization_feed_schema.Schema{ - "expression": { - Type: resource_cloud_asset_organization_feed_schema.TypeString, - Required: true, - Description: `Textual representation of an expression in Common Expression Language syntax.`, - }, - "description": { - Type: resource_cloud_asset_organization_feed_schema.TypeString, - Optional: true, - Description: `Description of the expression. This is a longer text which describes the expression, -e.g. 
when hovered over it in a UI.`, - }, - "location": { - Type: resource_cloud_asset_organization_feed_schema.TypeString, - Optional: true, - Description: `String indicating the location of the expression for error reporting, e.g. a file -name and a position in the file.`, - }, - "title": { - Type: resource_cloud_asset_organization_feed_schema.TypeString, - Optional: true, - Description: `Title for the expression, i.e. a short string describing its purpose. -This can be used e.g. in UIs which allow to enter the expression.`, - }, - }, - }, - }, - "content_type": { - Type: resource_cloud_asset_organization_feed_schema.TypeString, - Optional: true, - ValidateFunc: resource_cloud_asset_organization_feed_validation.StringInSlice([]string{"CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "ACCESS_POLICY", ""}, false), - Description: `Asset content type. If not specified, no content but the asset name and type will be returned. Possible values: ["CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "ACCESS_POLICY"]`, - }, - "name": { - Type: resource_cloud_asset_organization_feed_schema.TypeString, - Computed: true, - Description: `The format will be organizations/{organization_number}/feeds/{client-assigned_feed_identifier}.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudAssetOrganizationFeedCreate(d *resource_cloud_asset_organization_feed_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - assetNamesProp, err := expandCloudAssetOrganizationFeedAssetNames(d.Get("asset_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_names"); !isEmptyValue(resource_cloud_asset_organization_feed_reflect.ValueOf(assetNamesProp)) && (ok || !resource_cloud_asset_organization_feed_reflect.DeepEqual(v, assetNamesProp)) { - obj["assetNames"] = 
assetNamesProp - } - assetTypesProp, err := expandCloudAssetOrganizationFeedAssetTypes(d.Get("asset_types"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_types"); !isEmptyValue(resource_cloud_asset_organization_feed_reflect.ValueOf(assetTypesProp)) && (ok || !resource_cloud_asset_organization_feed_reflect.DeepEqual(v, assetTypesProp)) { - obj["assetTypes"] = assetTypesProp - } - contentTypeProp, err := expandCloudAssetOrganizationFeedContentType(d.Get("content_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("content_type"); !isEmptyValue(resource_cloud_asset_organization_feed_reflect.ValueOf(contentTypeProp)) && (ok || !resource_cloud_asset_organization_feed_reflect.DeepEqual(v, contentTypeProp)) { - obj["contentType"] = contentTypeProp - } - feedOutputConfigProp, err := expandCloudAssetOrganizationFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("feed_output_config"); !isEmptyValue(resource_cloud_asset_organization_feed_reflect.ValueOf(feedOutputConfigProp)) && (ok || !resource_cloud_asset_organization_feed_reflect.DeepEqual(v, feedOutputConfigProp)) { - obj["feedOutputConfig"] = feedOutputConfigProp - } - conditionProp, err := expandCloudAssetOrganizationFeedCondition(d.Get("condition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("condition"); !isEmptyValue(resource_cloud_asset_organization_feed_reflect.ValueOf(conditionProp)) && (ok || !resource_cloud_asset_organization_feed_reflect.DeepEqual(v, conditionProp)) { - obj["condition"] = conditionProp - } - - obj, err = resourceCloudAssetOrganizationFeedEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}organizations/{{org_id}}/feeds?feedId={{feed_id}}") - if err != nil { - return err - } - - resource_cloud_asset_organization_feed_log.Printf("[DEBUG] Creating new 
OrganizationFeed: %#v", obj) - billingProject := "" - - if parts := resource_cloud_asset_organization_feed_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - origUserProjectOverride := config.UserProjectOverride - config.UserProjectOverride = true - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_asset_organization_feed_schema.TimeoutCreate)) - if err != nil { - return resource_cloud_asset_organization_feed_fmt.Errorf("Error creating OrganizationFeed: %s", err) - } - if err := d.Set("name", flattenCloudAssetOrganizationFeedName(res["name"], d, config)); err != nil { - return resource_cloud_asset_organization_feed_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_cloud_asset_organization_feed_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - config.UserProjectOverride = origUserProjectOverride - - resource_cloud_asset_organization_feed_log.Printf("[DEBUG] Finished creating OrganizationFeed %q: %#v", d.Id(), res) - - return resourceCloudAssetOrganizationFeedRead(d, meta) -} - -func resourceCloudAssetOrganizationFeedRead(d *resource_cloud_asset_organization_feed_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if parts := resource_cloud_asset_organization_feed_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err 
:= sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_cloud_asset_organization_feed_fmt.Sprintf("CloudAssetOrganizationFeed %q", d.Id())) - } - - if err := d.Set("name", flattenCloudAssetOrganizationFeedName(res["name"], d, config)); err != nil { - return resource_cloud_asset_organization_feed_fmt.Errorf("Error reading OrganizationFeed: %s", err) - } - if err := d.Set("asset_names", flattenCloudAssetOrganizationFeedAssetNames(res["assetNames"], d, config)); err != nil { - return resource_cloud_asset_organization_feed_fmt.Errorf("Error reading OrganizationFeed: %s", err) - } - if err := d.Set("asset_types", flattenCloudAssetOrganizationFeedAssetTypes(res["assetTypes"], d, config)); err != nil { - return resource_cloud_asset_organization_feed_fmt.Errorf("Error reading OrganizationFeed: %s", err) - } - if err := d.Set("content_type", flattenCloudAssetOrganizationFeedContentType(res["contentType"], d, config)); err != nil { - return resource_cloud_asset_organization_feed_fmt.Errorf("Error reading OrganizationFeed: %s", err) - } - if err := d.Set("feed_output_config", flattenCloudAssetOrganizationFeedFeedOutputConfig(res["feedOutputConfig"], d, config)); err != nil { - return resource_cloud_asset_organization_feed_fmt.Errorf("Error reading OrganizationFeed: %s", err) - } - if err := d.Set("condition", flattenCloudAssetOrganizationFeedCondition(res["condition"], d, config)); err != nil { - return resource_cloud_asset_organization_feed_fmt.Errorf("Error reading OrganizationFeed: %s", err) - } - - return nil -} - -func resourceCloudAssetOrganizationFeedUpdate(d *resource_cloud_asset_organization_feed_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - assetNamesProp, err := 
expandCloudAssetOrganizationFeedAssetNames(d.Get("asset_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_names"); !isEmptyValue(resource_cloud_asset_organization_feed_reflect.ValueOf(v)) && (ok || !resource_cloud_asset_organization_feed_reflect.DeepEqual(v, assetNamesProp)) { - obj["assetNames"] = assetNamesProp - } - assetTypesProp, err := expandCloudAssetOrganizationFeedAssetTypes(d.Get("asset_types"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_types"); !isEmptyValue(resource_cloud_asset_organization_feed_reflect.ValueOf(v)) && (ok || !resource_cloud_asset_organization_feed_reflect.DeepEqual(v, assetTypesProp)) { - obj["assetTypes"] = assetTypesProp - } - contentTypeProp, err := expandCloudAssetOrganizationFeedContentType(d.Get("content_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("content_type"); !isEmptyValue(resource_cloud_asset_organization_feed_reflect.ValueOf(v)) && (ok || !resource_cloud_asset_organization_feed_reflect.DeepEqual(v, contentTypeProp)) { - obj["contentType"] = contentTypeProp - } - feedOutputConfigProp, err := expandCloudAssetOrganizationFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("feed_output_config"); !isEmptyValue(resource_cloud_asset_organization_feed_reflect.ValueOf(v)) && (ok || !resource_cloud_asset_organization_feed_reflect.DeepEqual(v, feedOutputConfigProp)) { - obj["feedOutputConfig"] = feedOutputConfigProp - } - conditionProp, err := expandCloudAssetOrganizationFeedCondition(d.Get("condition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("condition"); !isEmptyValue(resource_cloud_asset_organization_feed_reflect.ValueOf(v)) && (ok || !resource_cloud_asset_organization_feed_reflect.DeepEqual(v, conditionProp)) { - obj["condition"] = conditionProp - } - - obj, err = 
resourceCloudAssetOrganizationFeedEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - resource_cloud_asset_organization_feed_log.Printf("[DEBUG] Updating OrganizationFeed %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("asset_names") { - updateMask = append(updateMask, "assetNames") - } - - if d.HasChange("asset_types") { - updateMask = append(updateMask, "assetTypes") - } - - if d.HasChange("content_type") { - updateMask = append(updateMask, "contentType") - } - - if d.HasChange("feed_output_config") { - updateMask = append(updateMask, "feedOutputConfig") - } - - if d.HasChange("condition") { - updateMask = append(updateMask, "condition") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_cloud_asset_organization_feed_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - if parts := resource_cloud_asset_organization_feed_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_asset_organization_feed_schema.TimeoutUpdate)) - - if err != nil { - return resource_cloud_asset_organization_feed_fmt.Errorf("Error updating OrganizationFeed %q: %s", d.Id(), err) - } else { - resource_cloud_asset_organization_feed_log.Printf("[DEBUG] Finished updating OrganizationFeed %q: %#v", d.Id(), res) - } - - return resourceCloudAssetOrganizationFeedRead(d, meta) -} - -func resourceCloudAssetOrganizationFeedDelete(d *resource_cloud_asset_organization_feed_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := 
"" - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - if parts := resource_cloud_asset_organization_feed_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - resource_cloud_asset_organization_feed_log.Printf("[DEBUG] Deleting OrganizationFeed %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_asset_organization_feed_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "OrganizationFeed") - } - - resource_cloud_asset_organization_feed_log.Printf("[DEBUG] Finished deleting OrganizationFeed %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudAssetOrganizationFeedImport(d *resource_cloud_asset_organization_feed_schema.ResourceData, meta interface{}) ([]*resource_cloud_asset_organization_feed_schema.ResourceData, error) { - if err := d.Set("name", d.Id()); err != nil { - return nil, err - } - return []*resource_cloud_asset_organization_feed_schema.ResourceData{d}, nil -} - -func flattenCloudAssetOrganizationFeedName(v interface{}, d *resource_cloud_asset_organization_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetOrganizationFeedAssetNames(v interface{}, d *resource_cloud_asset_organization_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetOrganizationFeedAssetTypes(v interface{}, d *resource_cloud_asset_organization_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetOrganizationFeedContentType(v interface{}, d *resource_cloud_asset_organization_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetOrganizationFeedFeedOutputConfig(v 
interface{}, d *resource_cloud_asset_organization_feed_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pubsub_destination"] = - flattenCloudAssetOrganizationFeedFeedOutputConfigPubsubDestination(original["pubsubDestination"], d, config) - return []interface{}{transformed} -} - -func flattenCloudAssetOrganizationFeedFeedOutputConfigPubsubDestination(v interface{}, d *resource_cloud_asset_organization_feed_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["topic"] = - flattenCloudAssetOrganizationFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) - return []interface{}{transformed} -} - -func flattenCloudAssetOrganizationFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d *resource_cloud_asset_organization_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetOrganizationFeedCondition(v interface{}, d *resource_cloud_asset_organization_feed_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["expression"] = - flattenCloudAssetOrganizationFeedConditionExpression(original["expression"], d, config) - transformed["title"] = - flattenCloudAssetOrganizationFeedConditionTitle(original["title"], d, config) - transformed["description"] = - flattenCloudAssetOrganizationFeedConditionDescription(original["description"], d, config) - transformed["location"] = - flattenCloudAssetOrganizationFeedConditionLocation(original["location"], d, config) - return []interface{}{transformed} -} - 
-func flattenCloudAssetOrganizationFeedConditionExpression(v interface{}, d *resource_cloud_asset_organization_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetOrganizationFeedConditionTitle(v interface{}, d *resource_cloud_asset_organization_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetOrganizationFeedConditionDescription(v interface{}, d *resource_cloud_asset_organization_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetOrganizationFeedConditionLocation(v interface{}, d *resource_cloud_asset_organization_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudAssetOrganizationFeedAssetNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetOrganizationFeedAssetTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetOrganizationFeedContentType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetOrganizationFeedFeedOutputConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubDestination, err := expandCloudAssetOrganizationFeedFeedOutputConfigPubsubDestination(original["pubsub_destination"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_organization_feed_reflect.ValueOf(transformedPubsubDestination); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubDestination"] = transformedPubsubDestination - } - - return transformed, nil -} - -func expandCloudAssetOrganizationFeedFeedOutputConfigPubsubDestination(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTopic, err := expandCloudAssetOrganizationFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_organization_feed_reflect.ValueOf(transformedTopic); val.IsValid() && !isEmptyValue(val) { - transformed["topic"] = transformedTopic - } - - return transformed, nil -} - -func expandCloudAssetOrganizationFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetOrganizationFeedCondition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExpression, err := expandCloudAssetOrganizationFeedConditionExpression(original["expression"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_organization_feed_reflect.ValueOf(transformedExpression); val.IsValid() && !isEmptyValue(val) { - transformed["expression"] = transformedExpression - } - - transformedTitle, err := expandCloudAssetOrganizationFeedConditionTitle(original["title"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_organization_feed_reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { - transformed["title"] = transformedTitle - } - - transformedDescription, err := expandCloudAssetOrganizationFeedConditionDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_cloud_asset_organization_feed_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedLocation, err := expandCloudAssetOrganizationFeedConditionLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_organization_feed_reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - return transformed, nil -} - -func expandCloudAssetOrganizationFeedConditionExpression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetOrganizationFeedConditionTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetOrganizationFeedConditionDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetOrganizationFeedConditionLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceCloudAssetOrganizationFeedEncoder(d *resource_cloud_asset_organization_feed_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - if folder, ok := d.GetOkExists("folder"); ok { - if err := d.Set("folder_id", resource_cloud_asset_organization_feed_strings.TrimPrefix(folder.(string), "folders/")); err != nil { - return nil, resource_cloud_asset_organization_feed_fmt.Errorf("Error setting folder_id: %s", err) - } - } - - newObj := make(map[string]interface{}) - newObj["feed"] = obj - return newObj, nil -} - -func resourceCloudAssetProjectFeed() *resource_cloud_asset_project_feed_schema.Resource { - return &resource_cloud_asset_project_feed_schema.Resource{ - Create: resourceCloudAssetProjectFeedCreate, - Read: resourceCloudAssetProjectFeedRead, - Update: 
resourceCloudAssetProjectFeedUpdate, - Delete: resourceCloudAssetProjectFeedDelete, - - Importer: &resource_cloud_asset_project_feed_schema.ResourceImporter{ - State: resourceCloudAssetProjectFeedImport, - }, - - Timeouts: &resource_cloud_asset_project_feed_schema.ResourceTimeout{ - Create: resource_cloud_asset_project_feed_schema.DefaultTimeout(4 * resource_cloud_asset_project_feed_time.Minute), - Update: resource_cloud_asset_project_feed_schema.DefaultTimeout(4 * resource_cloud_asset_project_feed_time.Minute), - Delete: resource_cloud_asset_project_feed_schema.DefaultTimeout(4 * resource_cloud_asset_project_feed_time.Minute), - }, - - Schema: map[string]*resource_cloud_asset_project_feed_schema.Schema{ - "feed_id": { - Type: resource_cloud_asset_project_feed_schema.TypeString, - Required: true, - ForceNew: true, - Description: `This is the client-assigned asset feed identifier and it needs to be unique under a specific parent.`, - }, - "feed_output_config": { - Type: resource_cloud_asset_project_feed_schema.TypeList, - Required: true, - Description: `Output configuration for asset feed destination.`, - MaxItems: 1, - Elem: &resource_cloud_asset_project_feed_schema.Resource{ - Schema: map[string]*resource_cloud_asset_project_feed_schema.Schema{ - "pubsub_destination": { - Type: resource_cloud_asset_project_feed_schema.TypeList, - Required: true, - Description: `Destination on Cloud Pubsub.`, - MaxItems: 1, - Elem: &resource_cloud_asset_project_feed_schema.Resource{ - Schema: map[string]*resource_cloud_asset_project_feed_schema.Schema{ - "topic": { - Type: resource_cloud_asset_project_feed_schema.TypeString, - Required: true, - Description: `Destination on Cloud Pubsub topic.`, - }, - }, - }, - }, - }, - }, - }, - "asset_names": { - Type: resource_cloud_asset_project_feed_schema.TypeList, - Optional: true, - Description: `A list of the full names of the assets to receive updates. You must specify either or both of -assetNames and assetTypes. 
Only asset updates matching specified assetNames and assetTypes are -exported to the feed. For example: //compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1. -See https://cloud.google.com/apis/design/resourceNames#fullResourceName for more info.`, - Elem: &resource_cloud_asset_project_feed_schema.Schema{ - Type: resource_cloud_asset_project_feed_schema.TypeString, - }, - }, - "asset_types": { - Type: resource_cloud_asset_project_feed_schema.TypeList, - Optional: true, - Description: `A list of types of the assets to receive updates. You must specify either or both of assetNames -and assetTypes. Only asset updates matching specified assetNames and assetTypes are exported to -the feed. For example: "compute.googleapis.com/Disk" -See https://cloud.google.com/asset-inventory/docs/supported-asset-types for a list of all -supported asset types.`, - Elem: &resource_cloud_asset_project_feed_schema.Schema{ - Type: resource_cloud_asset_project_feed_schema.TypeString, - }, - }, - "billing_project": { - Type: resource_cloud_asset_project_feed_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The project whose identity will be used when sending messages to the -destination pubsub topic. It also specifies the project for API -enablement check, quota, and billing. If not specified, the resource's -project will be used.`, - }, - "condition": { - Type: resource_cloud_asset_project_feed_schema.TypeList, - Optional: true, - Description: `A condition which determines whether an asset update should be published. If specified, an asset -will be returned only when the expression evaluates to true. When set, expression field -must be a valid CEL expression on a TemporalAsset with name temporal_asset. Example: a Feed with -expression "temporal_asset.deleted == true" will only publish Asset deletions. 
Other fields of -condition are optional.`, - MaxItems: 1, - Elem: &resource_cloud_asset_project_feed_schema.Resource{ - Schema: map[string]*resource_cloud_asset_project_feed_schema.Schema{ - "expression": { - Type: resource_cloud_asset_project_feed_schema.TypeString, - Required: true, - Description: `Textual representation of an expression in Common Expression Language syntax.`, - }, - "description": { - Type: resource_cloud_asset_project_feed_schema.TypeString, - Optional: true, - Description: `Description of the expression. This is a longer text which describes the expression, -e.g. when hovered over it in a UI.`, - }, - "location": { - Type: resource_cloud_asset_project_feed_schema.TypeString, - Optional: true, - Description: `String indicating the location of the expression for error reporting, e.g. a file -name and a position in the file.`, - }, - "title": { - Type: resource_cloud_asset_project_feed_schema.TypeString, - Optional: true, - Description: `Title for the expression, i.e. a short string describing its purpose. -This can be used e.g. in UIs which allow to enter the expression.`, - }, - }, - }, - }, - "content_type": { - Type: resource_cloud_asset_project_feed_schema.TypeString, - Optional: true, - ValidateFunc: resource_cloud_asset_project_feed_validation.StringInSlice([]string{"CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "ACCESS_POLICY", ""}, false), - Description: `Asset content type. If not specified, no content but the asset name and type will be returned. 
Possible values: ["CONTENT_TYPE_UNSPECIFIED", "RESOURCE", "IAM_POLICY", "ORG_POLICY", "ACCESS_POLICY"]`, - }, - "name": { - Type: resource_cloud_asset_project_feed_schema.TypeString, - Computed: true, - Description: `The format will be projects/{projectNumber}/feeds/{client-assigned_feed_identifier}.`, - }, - "project": { - Type: resource_cloud_asset_project_feed_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudAssetProjectFeedCreate(d *resource_cloud_asset_project_feed_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - assetNamesProp, err := expandCloudAssetProjectFeedAssetNames(d.Get("asset_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_names"); !isEmptyValue(resource_cloud_asset_project_feed_reflect.ValueOf(assetNamesProp)) && (ok || !resource_cloud_asset_project_feed_reflect.DeepEqual(v, assetNamesProp)) { - obj["assetNames"] = assetNamesProp - } - assetTypesProp, err := expandCloudAssetProjectFeedAssetTypes(d.Get("asset_types"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_types"); !isEmptyValue(resource_cloud_asset_project_feed_reflect.ValueOf(assetTypesProp)) && (ok || !resource_cloud_asset_project_feed_reflect.DeepEqual(v, assetTypesProp)) { - obj["assetTypes"] = assetTypesProp - } - contentTypeProp, err := expandCloudAssetProjectFeedContentType(d.Get("content_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("content_type"); !isEmptyValue(resource_cloud_asset_project_feed_reflect.ValueOf(contentTypeProp)) && (ok || !resource_cloud_asset_project_feed_reflect.DeepEqual(v, contentTypeProp)) { - obj["contentType"] = contentTypeProp - } - feedOutputConfigProp, err := 
expandCloudAssetProjectFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("feed_output_config"); !isEmptyValue(resource_cloud_asset_project_feed_reflect.ValueOf(feedOutputConfigProp)) && (ok || !resource_cloud_asset_project_feed_reflect.DeepEqual(v, feedOutputConfigProp)) { - obj["feedOutputConfig"] = feedOutputConfigProp - } - conditionProp, err := expandCloudAssetProjectFeedCondition(d.Get("condition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("condition"); !isEmptyValue(resource_cloud_asset_project_feed_reflect.ValueOf(conditionProp)) && (ok || !resource_cloud_asset_project_feed_reflect.DeepEqual(v, conditionProp)) { - obj["condition"] = conditionProp - } - - obj, err = resourceCloudAssetProjectFeedEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}projects/{{project}}/feeds?feedId={{feed_id}}") - if err != nil { - return err - } - - resource_cloud_asset_project_feed_log.Printf("[DEBUG] Creating new ProjectFeed: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_asset_project_feed_fmt.Errorf("Error fetching project for ProjectFeed: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - origUserProjectOverride := config.UserProjectOverride - config.UserProjectOverride = true - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_asset_project_feed_schema.TimeoutCreate)) - if err != nil { - return resource_cloud_asset_project_feed_fmt.Errorf("Error creating ProjectFeed: %s", err) - } - if err := d.Set("name", flattenCloudAssetProjectFeedName(res["name"], d, config)); err != nil { - return resource_cloud_asset_project_feed_fmt.Errorf(`Error setting computed identity field "name": %s`, 
err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_cloud_asset_project_feed_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - config.UserProjectOverride = origUserProjectOverride - - resource_cloud_asset_project_feed_log.Printf("[DEBUG] Finished creating ProjectFeed %q: %#v", d.Id(), res) - - return resourceCloudAssetProjectFeedRead(d, meta) -} - -func resourceCloudAssetProjectFeedRead(d *resource_cloud_asset_project_feed_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_asset_project_feed_fmt.Errorf("Error fetching project for ProjectFeed: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_cloud_asset_project_feed_fmt.Sprintf("CloudAssetProjectFeed %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_cloud_asset_project_feed_fmt.Errorf("Error reading ProjectFeed: %s", err) - } - - if err := d.Set("name", flattenCloudAssetProjectFeedName(res["name"], d, config)); err != nil { - return resource_cloud_asset_project_feed_fmt.Errorf("Error reading ProjectFeed: %s", err) - } - if err := d.Set("asset_names", flattenCloudAssetProjectFeedAssetNames(res["assetNames"], d, config)); err != nil { - return resource_cloud_asset_project_feed_fmt.Errorf("Error reading ProjectFeed: %s", err) - } - if err := d.Set("asset_types", flattenCloudAssetProjectFeedAssetTypes(res["assetTypes"], d, config)); err != nil { - return 
resource_cloud_asset_project_feed_fmt.Errorf("Error reading ProjectFeed: %s", err) - } - if err := d.Set("content_type", flattenCloudAssetProjectFeedContentType(res["contentType"], d, config)); err != nil { - return resource_cloud_asset_project_feed_fmt.Errorf("Error reading ProjectFeed: %s", err) - } - if err := d.Set("feed_output_config", flattenCloudAssetProjectFeedFeedOutputConfig(res["feedOutputConfig"], d, config)); err != nil { - return resource_cloud_asset_project_feed_fmt.Errorf("Error reading ProjectFeed: %s", err) - } - if err := d.Set("condition", flattenCloudAssetProjectFeedCondition(res["condition"], d, config)); err != nil { - return resource_cloud_asset_project_feed_fmt.Errorf("Error reading ProjectFeed: %s", err) - } - - return nil -} - -func resourceCloudAssetProjectFeedUpdate(d *resource_cloud_asset_project_feed_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_asset_project_feed_fmt.Errorf("Error fetching project for ProjectFeed: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - assetNamesProp, err := expandCloudAssetProjectFeedAssetNames(d.Get("asset_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_names"); !isEmptyValue(resource_cloud_asset_project_feed_reflect.ValueOf(v)) && (ok || !resource_cloud_asset_project_feed_reflect.DeepEqual(v, assetNamesProp)) { - obj["assetNames"] = assetNamesProp - } - assetTypesProp, err := expandCloudAssetProjectFeedAssetTypes(d.Get("asset_types"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("asset_types"); !isEmptyValue(resource_cloud_asset_project_feed_reflect.ValueOf(v)) && (ok || !resource_cloud_asset_project_feed_reflect.DeepEqual(v, assetTypesProp)) { - 
obj["assetTypes"] = assetTypesProp - } - contentTypeProp, err := expandCloudAssetProjectFeedContentType(d.Get("content_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("content_type"); !isEmptyValue(resource_cloud_asset_project_feed_reflect.ValueOf(v)) && (ok || !resource_cloud_asset_project_feed_reflect.DeepEqual(v, contentTypeProp)) { - obj["contentType"] = contentTypeProp - } - feedOutputConfigProp, err := expandCloudAssetProjectFeedFeedOutputConfig(d.Get("feed_output_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("feed_output_config"); !isEmptyValue(resource_cloud_asset_project_feed_reflect.ValueOf(v)) && (ok || !resource_cloud_asset_project_feed_reflect.DeepEqual(v, feedOutputConfigProp)) { - obj["feedOutputConfig"] = feedOutputConfigProp - } - conditionProp, err := expandCloudAssetProjectFeedCondition(d.Get("condition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("condition"); !isEmptyValue(resource_cloud_asset_project_feed_reflect.ValueOf(v)) && (ok || !resource_cloud_asset_project_feed_reflect.DeepEqual(v, conditionProp)) { - obj["condition"] = conditionProp - } - - obj, err = resourceCloudAssetProjectFeedEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - resource_cloud_asset_project_feed_log.Printf("[DEBUG] Updating ProjectFeed %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("asset_names") { - updateMask = append(updateMask, "assetNames") - } - - if d.HasChange("asset_types") { - updateMask = append(updateMask, "assetTypes") - } - - if d.HasChange("content_type") { - updateMask = append(updateMask, "contentType") - } - - if d.HasChange("feed_output_config") { - updateMask = append(updateMask, "feedOutputConfig") - } - - if d.HasChange("condition") { - updateMask = append(updateMask, "condition") - } - - url, err 
= addQueryParams(url, map[string]string{"updateMask": resource_cloud_asset_project_feed_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_asset_project_feed_schema.TimeoutUpdate)) - - if err != nil { - return resource_cloud_asset_project_feed_fmt.Errorf("Error updating ProjectFeed %q: %s", d.Id(), err) - } else { - resource_cloud_asset_project_feed_log.Printf("[DEBUG] Finished updating ProjectFeed %q: %#v", d.Id(), res) - } - - return resourceCloudAssetProjectFeedRead(d, meta) -} - -func resourceCloudAssetProjectFeedDelete(d *resource_cloud_asset_project_feed_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_asset_project_feed_fmt.Errorf("Error fetching project for ProjectFeed: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CloudAssetBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_cloud_asset_project_feed_log.Printf("[DEBUG] Deleting ProjectFeed %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_asset_project_feed_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ProjectFeed") - } - - resource_cloud_asset_project_feed_log.Printf("[DEBUG] Finished deleting ProjectFeed %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudAssetProjectFeedImport(d *resource_cloud_asset_project_feed_schema.ResourceData, meta interface{}) 
([]*resource_cloud_asset_project_feed_schema.ResourceData, error) { - if err := d.Set("name", d.Id()); err != nil { - return nil, err - } - return []*resource_cloud_asset_project_feed_schema.ResourceData{d}, nil -} - -func flattenCloudAssetProjectFeedName(v interface{}, d *resource_cloud_asset_project_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetProjectFeedAssetNames(v interface{}, d *resource_cloud_asset_project_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetProjectFeedAssetTypes(v interface{}, d *resource_cloud_asset_project_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetProjectFeedContentType(v interface{}, d *resource_cloud_asset_project_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetProjectFeedFeedOutputConfig(v interface{}, d *resource_cloud_asset_project_feed_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pubsub_destination"] = - flattenCloudAssetProjectFeedFeedOutputConfigPubsubDestination(original["pubsubDestination"], d, config) - return []interface{}{transformed} -} - -func flattenCloudAssetProjectFeedFeedOutputConfigPubsubDestination(v interface{}, d *resource_cloud_asset_project_feed_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["topic"] = - flattenCloudAssetProjectFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) - return []interface{}{transformed} -} - -func flattenCloudAssetProjectFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d 
*resource_cloud_asset_project_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetProjectFeedCondition(v interface{}, d *resource_cloud_asset_project_feed_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["expression"] = - flattenCloudAssetProjectFeedConditionExpression(original["expression"], d, config) - transformed["title"] = - flattenCloudAssetProjectFeedConditionTitle(original["title"], d, config) - transformed["description"] = - flattenCloudAssetProjectFeedConditionDescription(original["description"], d, config) - transformed["location"] = - flattenCloudAssetProjectFeedConditionLocation(original["location"], d, config) - return []interface{}{transformed} -} - -func flattenCloudAssetProjectFeedConditionExpression(v interface{}, d *resource_cloud_asset_project_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetProjectFeedConditionTitle(v interface{}, d *resource_cloud_asset_project_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetProjectFeedConditionDescription(v interface{}, d *resource_cloud_asset_project_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudAssetProjectFeedConditionLocation(v interface{}, d *resource_cloud_asset_project_feed_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudAssetProjectFeedAssetNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetProjectFeedAssetTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetProjectFeedContentType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
return v, nil -} - -func expandCloudAssetProjectFeedFeedOutputConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubDestination, err := expandCloudAssetProjectFeedFeedOutputConfigPubsubDestination(original["pubsub_destination"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_project_feed_reflect.ValueOf(transformedPubsubDestination); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubDestination"] = transformedPubsubDestination - } - - return transformed, nil -} - -func expandCloudAssetProjectFeedFeedOutputConfigPubsubDestination(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTopic, err := expandCloudAssetProjectFeedFeedOutputConfigPubsubDestinationTopic(original["topic"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_project_feed_reflect.ValueOf(transformedTopic); val.IsValid() && !isEmptyValue(val) { - transformed["topic"] = transformedTopic - } - - return transformed, nil -} - -func expandCloudAssetProjectFeedFeedOutputConfigPubsubDestinationTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetProjectFeedCondition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExpression, err := 
expandCloudAssetProjectFeedConditionExpression(original["expression"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_project_feed_reflect.ValueOf(transformedExpression); val.IsValid() && !isEmptyValue(val) { - transformed["expression"] = transformedExpression - } - - transformedTitle, err := expandCloudAssetProjectFeedConditionTitle(original["title"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_project_feed_reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { - transformed["title"] = transformedTitle - } - - transformedDescription, err := expandCloudAssetProjectFeedConditionDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_project_feed_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedLocation, err := expandCloudAssetProjectFeedConditionLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_asset_project_feed_reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - return transformed, nil -} - -func expandCloudAssetProjectFeedConditionExpression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetProjectFeedConditionTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetProjectFeedConditionDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudAssetProjectFeedConditionLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceCloudAssetProjectFeedEncoder(d 
*resource_cloud_asset_project_feed_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - if folder, ok := d.GetOkExists("folder"); ok { - if err := d.Set("folder_id", resource_cloud_asset_project_feed_strings.TrimPrefix(folder.(string), "folders/")); err != nil { - return nil, resource_cloud_asset_project_feed_fmt.Errorf("Error setting folder_id: %s", err) - } - } - - newObj := make(map[string]interface{}) - newObj["feed"] = obj - return newObj, nil -} - -func resourceCloudIdentityGroup() *resource_cloud_identity_group_schema.Resource { - return &resource_cloud_identity_group_schema.Resource{ - Create: resourceCloudIdentityGroupCreate, - Read: resourceCloudIdentityGroupRead, - Update: resourceCloudIdentityGroupUpdate, - Delete: resourceCloudIdentityGroupDelete, - - Importer: &resource_cloud_identity_group_schema.ResourceImporter{ - State: resourceCloudIdentityGroupImport, - }, - - Timeouts: &resource_cloud_identity_group_schema.ResourceTimeout{ - Create: resource_cloud_identity_group_schema.DefaultTimeout(6 * resource_cloud_identity_group_time.Minute), - Update: resource_cloud_identity_group_schema.DefaultTimeout(4 * resource_cloud_identity_group_time.Minute), - Delete: resource_cloud_identity_group_schema.DefaultTimeout(4 * resource_cloud_identity_group_time.Minute), - }, - - Schema: map[string]*resource_cloud_identity_group_schema.Schema{ - "group_key": { - Type: resource_cloud_identity_group_schema.TypeList, - Required: true, - ForceNew: true, - Description: `EntityKey of the Group.`, - MaxItems: 1, - Elem: &resource_cloud_identity_group_schema.Resource{ - Schema: map[string]*resource_cloud_identity_group_schema.Schema{ - "id": { - Type: resource_cloud_identity_group_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the entity. - -For Google-managed entities, the id must be the email address of an existing -group or user. 
- -For external-identity-mapped entities, the id must be a string conforming -to the Identity Source's requirements. - -Must be unique within a namespace.`, - }, - "namespace": { - Type: resource_cloud_identity_group_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The namespace in which the entity exists. - -If not specified, the EntityKey represents a Google-managed entity -such as a Google user or a Google Group. - -If specified, the EntityKey represents an external-identity-mapped group. -The namespace must correspond to an identity source created in Admin Console -and must be in the form of 'identitysources/{identity_source_id}'.`, - }, - }, - }, - }, - "labels": { - Type: resource_cloud_identity_group_schema.TypeMap, - Required: true, - ForceNew: true, - Description: `The labels that apply to the Group. - -Must not contain more than one entry. Must contain the entry -'cloudidentity.googleapis.com/groups.discussion_forum': '' if the Group is a Google Group or -'system/groups/external': '' if the Group is an external-identity-mapped group.`, - Elem: &resource_cloud_identity_group_schema.Schema{Type: resource_cloud_identity_group_schema.TypeString}, - }, - "parent": { - Type: resource_cloud_identity_group_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name of the entity under which this Group resides in the -Cloud Identity resource hierarchy. - -Must be of the form identitysources/{identity_source_id} for external-identity-mapped -groups or customers/{customer_id} for Google Groups.`, - }, - "description": { - Type: resource_cloud_identity_group_schema.TypeString, - Optional: true, - Description: `An extended description to help users determine the purpose of a Group. 
-Must not be longer than 4,096 characters.`, - }, - "display_name": { - Type: resource_cloud_identity_group_schema.TypeString, - Optional: true, - Description: `The display name of the Group.`, - }, - "initial_group_config": { - Type: resource_cloud_identity_group_schema.TypeString, - Optional: true, - ValidateFunc: resource_cloud_identity_group_validation.StringInSlice([]string{"INITIAL_GROUP_CONFIG_UNSPECIFIED", "WITH_INITIAL_OWNER", "EMPTY", ""}, false), - Description: `The initial configuration options for creating a Group. - -See the -[API reference](https://cloud.google.com/identity/docs/reference/rest/v1beta1/groups/create#initialgroupconfig) -for possible values. Default value: "EMPTY" Possible values: ["INITIAL_GROUP_CONFIG_UNSPECIFIED", "WITH_INITIAL_OWNER", "EMPTY"]`, - Default: "EMPTY", - }, - "create_time": { - Type: resource_cloud_identity_group_schema.TypeString, - Computed: true, - Description: `The time when the Group was created.`, - }, - "name": { - Type: resource_cloud_identity_group_schema.TypeString, - Computed: true, - Description: `Resource name of the Group in the format: groups/{group_id}, where group_id -is the unique ID assigned to the Group.`, - }, - "update_time": { - Type: resource_cloud_identity_group_schema.TypeString, - Computed: true, - Description: `The time when the Group was last updated.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudIdentityGroupCreate(d *resource_cloud_identity_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - groupKeyProp, err := expandCloudIdentityGroupGroupKey(d.Get("group_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("group_key"); !isEmptyValue(resource_cloud_identity_group_reflect.ValueOf(groupKeyProp)) && (ok || !resource_cloud_identity_group_reflect.DeepEqual(v, groupKeyProp)) { 
- obj["groupKey"] = groupKeyProp - } - parentProp, err := expandCloudIdentityGroupParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(resource_cloud_identity_group_reflect.ValueOf(parentProp)) && (ok || !resource_cloud_identity_group_reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - displayNameProp, err := expandCloudIdentityGroupDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_cloud_identity_group_reflect.ValueOf(displayNameProp)) && (ok || !resource_cloud_identity_group_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandCloudIdentityGroupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_cloud_identity_group_reflect.ValueOf(descriptionProp)) && (ok || !resource_cloud_identity_group_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandCloudIdentityGroupLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_cloud_identity_group_reflect.ValueOf(labelsProp)) && (ok || !resource_cloud_identity_group_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}groups?initialGroupConfig={{initial_group_config}}") - if err != nil { - return err - } - - resource_cloud_identity_group_log.Printf("[DEBUG] Creating new Group: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_identity_group_schema.TimeoutCreate)) - if err != nil { 
- return resource_cloud_identity_group_fmt.Errorf("Error creating Group: %s", err) - } - if err := d.Set("name", flattenCloudIdentityGroupName(res["name"], d, config)); err != nil { - return resource_cloud_identity_group_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_cloud_identity_group_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return resource_cloud_identity_group_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return resource_cloud_identity_group_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return resource_cloud_identity_group_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - err = PollingWaitTime(resourceCloudIdentityGroupPollRead(d, meta), PollCheckForExistenceWith403, "Creating Group", d.Timeout(resource_cloud_identity_group_schema.TimeoutCreate), 10) - if err != nil { - return resource_cloud_identity_group_fmt.Errorf("Error waiting to create Group: %s", err) - } - - resource_cloud_identity_group_log.Printf("[DEBUG] Finished creating Group %q: %#v", d.Id(), res) - - return resourceCloudIdentityGroupRead(d, meta) -} - -func resourceCloudIdentityGroupPollRead(d *resource_cloud_identity_group_schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, 
config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return res, err - } - return res, nil - } -} - -func resourceCloudIdentityGroupRead(d *resource_cloud_identity_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_cloud_identity_group_fmt.Sprintf("CloudIdentityGroup %q", d.Id())) - } - - if err := d.Set("name", flattenCloudIdentityGroupName(res["name"], d, config)); err != nil { - return resource_cloud_identity_group_fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("group_key", flattenCloudIdentityGroupGroupKey(res["groupKey"], d, config)); err != nil { - return resource_cloud_identity_group_fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("parent", flattenCloudIdentityGroupParent(res["parent"], d, config)); err != nil { - return resource_cloud_identity_group_fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("display_name", flattenCloudIdentityGroupDisplayName(res["displayName"], d, config)); err != nil { - return resource_cloud_identity_group_fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("description", flattenCloudIdentityGroupDescription(res["description"], d, config)); err != nil { - return resource_cloud_identity_group_fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("create_time", flattenCloudIdentityGroupCreateTime(res["createTime"], d, config)); err != nil { - return 
resource_cloud_identity_group_fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("update_time", flattenCloudIdentityGroupUpdateTime(res["updateTime"], d, config)); err != nil { - return resource_cloud_identity_group_fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("labels", flattenCloudIdentityGroupLabels(res["labels"], d, config)); err != nil { - return resource_cloud_identity_group_fmt.Errorf("Error reading Group: %s", err) - } - - return nil -} - -func resourceCloudIdentityGroupUpdate(d *resource_cloud_identity_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandCloudIdentityGroupDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_cloud_identity_group_reflect.ValueOf(v)) && (ok || !resource_cloud_identity_group_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandCloudIdentityGroupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_cloud_identity_group_reflect.ValueOf(v)) && (ok || !resource_cloud_identity_group_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") - if err != nil { - return err - } - - resource_cloud_identity_group_log.Printf("[DEBUG] Updating Group %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - url, err = addQueryParams(url, 
map[string]string{"updateMask": resource_cloud_identity_group_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_identity_group_schema.TimeoutUpdate)) - - if err != nil { - return resource_cloud_identity_group_fmt.Errorf("Error updating Group %q: %s", d.Id(), err) - } else { - resource_cloud_identity_group_log.Printf("[DEBUG] Finished updating Group %q: %#v", d.Id(), res) - } - - return resourceCloudIdentityGroupRead(d, meta) -} - -func resourceCloudIdentityGroupDelete(d *resource_cloud_identity_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_cloud_identity_group_log.Printf("[DEBUG] Deleting Group %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_identity_group_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Group") - } - - resource_cloud_identity_group_log.Printf("[DEBUG] Finished deleting Group %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudIdentityGroupImport(d *resource_cloud_identity_group_schema.ResourceData, meta interface{}) ([]*resource_cloud_identity_group_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - name := d.Get("name").(string) - - if err := d.Set("name", name); err != nil { - return nil, 
resource_cloud_identity_group_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name) - return []*resource_cloud_identity_group_schema.ResourceData{d}, nil -} - -func flattenCloudIdentityGroupName(v interface{}, d *resource_cloud_identity_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupGroupKey(v interface{}, d *resource_cloud_identity_group_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["id"] = - flattenCloudIdentityGroupGroupKeyId(original["id"], d, config) - transformed["namespace"] = - flattenCloudIdentityGroupGroupKeyNamespace(original["namespace"], d, config) - return []interface{}{transformed} -} - -func flattenCloudIdentityGroupGroupKeyId(v interface{}, d *resource_cloud_identity_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupGroupKeyNamespace(v interface{}, d *resource_cloud_identity_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupParent(v interface{}, d *resource_cloud_identity_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupDisplayName(v interface{}, d *resource_cloud_identity_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupDescription(v interface{}, d *resource_cloud_identity_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupCreateTime(v interface{}, d *resource_cloud_identity_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupUpdateTime(v interface{}, d *resource_cloud_identity_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupLabels(v 
interface{}, d *resource_cloud_identity_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudIdentityGroupGroupKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedId, err := expandCloudIdentityGroupGroupKeyId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_identity_group_reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedNamespace, err := expandCloudIdentityGroupGroupKeyNamespace(original["namespace"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_identity_group_reflect.ValueOf(transformedNamespace); val.IsValid() && !isEmptyValue(val) { - transformed["namespace"] = transformedNamespace - } - - return transformed, nil -} - -func expandCloudIdentityGroupGroupKeyId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdentityGroupGroupKeyNamespace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdentityGroupParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdentityGroupDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdentityGroupDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdentityGroupLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range 
v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceCloudIdentityGroupMembership() *resource_cloud_identity_group_membership_schema.Resource { - return &resource_cloud_identity_group_membership_schema.Resource{ - Create: resourceCloudIdentityGroupMembershipCreate, - Read: resourceCloudIdentityGroupMembershipRead, - Update: resourceCloudIdentityGroupMembershipUpdate, - Delete: resourceCloudIdentityGroupMembershipDelete, - - Importer: &resource_cloud_identity_group_membership_schema.ResourceImporter{ - State: resourceCloudIdentityGroupMembershipImport, - }, - - Timeouts: &resource_cloud_identity_group_membership_schema.ResourceTimeout{ - Create: resource_cloud_identity_group_membership_schema.DefaultTimeout(4 * resource_cloud_identity_group_membership_time.Minute), - Update: resource_cloud_identity_group_membership_schema.DefaultTimeout(4 * resource_cloud_identity_group_membership_time.Minute), - Delete: resource_cloud_identity_group_membership_schema.DefaultTimeout(4 * resource_cloud_identity_group_membership_time.Minute), - }, - - Schema: map[string]*resource_cloud_identity_group_membership_schema.Schema{ - "group": { - Type: resource_cloud_identity_group_membership_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the Group to create this membership in.`, - }, - "roles": { - Type: resource_cloud_identity_group_membership_schema.TypeSet, - Required: true, - Description: `The MembershipRoles that apply to the Membership. 
-Must not contain duplicate MembershipRoles with the same name.`, - Elem: cloudidentityGroupMembershipRolesSchema(), - }, - "preferred_member_key": { - Type: resource_cloud_identity_group_membership_schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: `EntityKey of the member.`, - MaxItems: 1, - Elem: &resource_cloud_identity_group_membership_schema.Resource{ - Schema: map[string]*resource_cloud_identity_group_membership_schema.Schema{ - "id": { - Type: resource_cloud_identity_group_membership_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the entity. - -For Google-managed entities, the id must be the email address of an existing -group or user. - -For external-identity-mapped entities, the id must be a string conforming -to the Identity Source's requirements. - -Must be unique within a namespace.`, - }, - "namespace": { - Type: resource_cloud_identity_group_membership_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The namespace in which the entity exists. - -If not specified, the EntityKey represents a Google-managed entity -such as a Google user or a Google Group. - -If specified, the EntityKey represents an external-identity-mapped group. 
-The namespace must correspond to an identity source created in Admin Console -and must be in the form of 'identitysources/{identity_source_id}'.`, - }, - }, - }, - ExactlyOneOf: []string{"preferred_member_key"}, - }, - "create_time": { - Type: resource_cloud_identity_group_membership_schema.TypeString, - Computed: true, - Description: `The time when the Membership was created.`, - }, - "name": { - Type: resource_cloud_identity_group_membership_schema.TypeString, - Computed: true, - Description: `The resource name of the Membership, of the form groups/{group_id}/memberships/{membership_id}.`, - }, - "type": { - Type: resource_cloud_identity_group_membership_schema.TypeString, - Computed: true, - Description: `The type of the membership.`, - }, - "update_time": { - Type: resource_cloud_identity_group_membership_schema.TypeString, - Computed: true, - Description: `The time when the Membership was last updated.`, - }, - }, - UseJSONNumber: true, - } -} - -func cloudidentityGroupMembershipRolesSchema() *resource_cloud_identity_group_membership_schema.Resource { - return &resource_cloud_identity_group_membership_schema.Resource{ - Schema: map[string]*resource_cloud_identity_group_membership_schema.Schema{ - "name": { - Type: resource_cloud_identity_group_membership_schema.TypeString, - Required: true, - ValidateFunc: resource_cloud_identity_group_membership_validation.StringInSlice([]string{"OWNER", "MANAGER", "MEMBER"}, false), - Description: `The name of the MembershipRole. Must be one of OWNER, MANAGER, MEMBER. 
Possible values: ["OWNER", "MANAGER", "MEMBER"]`, - }, - }, - } -} - -func resourceCloudIdentityGroupMembershipCreate(d *resource_cloud_identity_group_membership_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - preferredMemberKeyProp, err := expandCloudIdentityGroupMembershipPreferredMemberKey(d.Get("preferred_member_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("preferred_member_key"); !isEmptyValue(resource_cloud_identity_group_membership_reflect.ValueOf(preferredMemberKeyProp)) && (ok || !resource_cloud_identity_group_membership_reflect.DeepEqual(v, preferredMemberKeyProp)) { - obj["preferredMemberKey"] = preferredMemberKeyProp - } - rolesProp, err := expandCloudIdentityGroupMembershipRoles(d.Get("roles"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("roles"); !isEmptyValue(resource_cloud_identity_group_membership_reflect.ValueOf(rolesProp)) && (ok || !resource_cloud_identity_group_membership_reflect.DeepEqual(v, rolesProp)) { - obj["roles"] = rolesProp - } - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}{{group}}/memberships") - if err != nil { - return err - } - - resource_cloud_identity_group_membership_log.Printf("[DEBUG] Creating new GroupMembership: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_identity_group_membership_schema.TimeoutCreate)) - if err != nil { - return resource_cloud_identity_group_membership_fmt.Errorf("Error creating GroupMembership: %s", err) - } - if err := d.Set("name", flattenCloudIdentityGroupMembershipName(res["name"], d, config)); err != nil { - return 
resource_cloud_identity_group_membership_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_cloud_identity_group_membership_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return resource_cloud_identity_group_membership_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return resource_cloud_identity_group_membership_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return resource_cloud_identity_group_membership_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - resource_cloud_identity_group_membership_log.Printf("[DEBUG] Finished creating GroupMembership %q: %#v", d.Id(), res) - - return resourceCloudIdentityGroupMembershipRead(d, meta) -} - -func resourceCloudIdentityGroupMembershipRead(d *resource_cloud_identity_group_membership_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_cloud_identity_group_membership_fmt.Sprintf("CloudIdentityGroupMembership %q", d.Id())) - } - - if err := d.Set("name", flattenCloudIdentityGroupMembershipName(res["name"], d, config)); err != nil { - return 
resource_cloud_identity_group_membership_fmt.Errorf("Error reading GroupMembership: %s", err) - } - if err := d.Set("preferred_member_key", flattenCloudIdentityGroupMembershipPreferredMemberKey(res["preferredMemberKey"], d, config)); err != nil { - return resource_cloud_identity_group_membership_fmt.Errorf("Error reading GroupMembership: %s", err) - } - if err := d.Set("create_time", flattenCloudIdentityGroupMembershipCreateTime(res["createTime"], d, config)); err != nil { - return resource_cloud_identity_group_membership_fmt.Errorf("Error reading GroupMembership: %s", err) - } - if err := d.Set("update_time", flattenCloudIdentityGroupMembershipUpdateTime(res["updateTime"], d, config)); err != nil { - return resource_cloud_identity_group_membership_fmt.Errorf("Error reading GroupMembership: %s", err) - } - if err := d.Set("roles", flattenCloudIdentityGroupMembershipRoles(res["roles"], d, config)); err != nil { - return resource_cloud_identity_group_membership_fmt.Errorf("Error reading GroupMembership: %s", err) - } - if err := d.Set("type", flattenCloudIdentityGroupMembershipType(res["type"], d, config)); err != nil { - return resource_cloud_identity_group_membership_fmt.Errorf("Error reading GroupMembership: %s", err) - } - - return nil -} - -func resourceCloudIdentityGroupMembershipUpdate(d *resource_cloud_identity_group_membership_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - d.Partial(true) - - if d.HasChange("roles") { - obj := make(map[string]interface{}) - - rolesProp, err := expandCloudIdentityGroupMembershipRoles(d.Get("roles"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("roles"); !isEmptyValue(resource_cloud_identity_group_membership_reflect.ValueOf(v)) && (ok || !resource_cloud_identity_group_membership_reflect.DeepEqual(v, rolesProp)) { - obj["roles"] = rolesProp - 
} - - obj, err = resourceCloudIdentityGroupMembershipUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}:modifyMembershipRoles") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_identity_group_membership_schema.TimeoutUpdate)) - if err != nil { - return resource_cloud_identity_group_membership_fmt.Errorf("Error updating GroupMembership %q: %s", d.Id(), err) - } else { - resource_cloud_identity_group_membership_log.Printf("[DEBUG] Finished updating GroupMembership %q: %#v", d.Id(), res) - } - - } - - d.Partial(false) - - return resourceCloudIdentityGroupMembershipRead(d, meta) -} - -func resourceCloudIdentityGroupMembershipDelete(d *resource_cloud_identity_group_membership_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{CloudIdentityBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_cloud_identity_group_membership_log.Printf("[DEBUG] Deleting GroupMembership %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_identity_group_membership_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GroupMembership") - } - - resource_cloud_identity_group_membership_log.Printf("[DEBUG] Finished deleting GroupMembership %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudIdentityGroupMembershipImport(d *resource_cloud_identity_group_membership_schema.ResourceData, meta 
interface{}) ([]*resource_cloud_identity_group_membership_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P.+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return nil, resource_cloud_identity_group_membership_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - group := resource_cloud_identity_group_membership_regexp.MustCompile(`groups/[^/]+`).FindString(id) - if err := d.Set("group", group); err != nil { - return nil, resource_cloud_identity_group_membership_fmt.Errorf("Error setting group property: %s", err) - } - - return []*resource_cloud_identity_group_membership_schema.ResourceData{d}, nil -} - -func flattenCloudIdentityGroupMembershipName(v interface{}, d *resource_cloud_identity_group_membership_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupMembershipPreferredMemberKey(v interface{}, d *resource_cloud_identity_group_membership_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["id"] = - flattenCloudIdentityGroupMembershipPreferredMemberKeyId(original["id"], d, config) - transformed["namespace"] = - flattenCloudIdentityGroupMembershipPreferredMemberKeyNamespace(original["namespace"], d, config) - return []interface{}{transformed} -} - -func flattenCloudIdentityGroupMembershipPreferredMemberKeyId(v interface{}, d *resource_cloud_identity_group_membership_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupMembershipPreferredMemberKeyNamespace(v interface{}, d *resource_cloud_identity_group_membership_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupMembershipCreateTime(v interface{}, d 
*resource_cloud_identity_group_membership_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupMembershipUpdateTime(v interface{}, d *resource_cloud_identity_group_membership_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupMembershipRoles(v interface{}, d *resource_cloud_identity_group_membership_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_cloud_identity_group_membership_schema.NewSet(resource_cloud_identity_group_membership_schema.HashResource(cloudidentityGroupMembershipRolesSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "name": flattenCloudIdentityGroupMembershipRolesName(original["name"], d, config), - }) - } - return transformed -} - -func flattenCloudIdentityGroupMembershipRolesName(v interface{}, d *resource_cloud_identity_group_membership_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIdentityGroupMembershipType(v interface{}, d *resource_cloud_identity_group_membership_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudIdentityGroupMembershipPreferredMemberKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedId, err := expandCloudIdentityGroupMembershipPreferredMemberKeyId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_identity_group_membership_reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedNamespace, err := 
expandCloudIdentityGroupMembershipPreferredMemberKeyNamespace(original["namespace"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_identity_group_membership_reflect.ValueOf(transformedNamespace); val.IsValid() && !isEmptyValue(val) { - transformed["namespace"] = transformedNamespace - } - - return transformed, nil -} - -func expandCloudIdentityGroupMembershipPreferredMemberKeyId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdentityGroupMembershipPreferredMemberKeyNamespace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIdentityGroupMembershipRoles(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_cloud_identity_group_membership_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudIdentityGroupMembershipRolesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_identity_group_membership_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudIdentityGroupMembershipRolesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceCloudIdentityGroupMembershipUpdateEncoder(d *resource_cloud_identity_group_membership_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - b, a := d.GetChange("roles") - before := b.(*resource_cloud_identity_group_membership_schema.Set) - after := a.(*resource_cloud_identity_group_membership_schema.Set) - - 
addRoles := after.Difference(before).List() - var removeRoles []string - for _, r := range before.Difference(after).List() { - removeRoles = append(removeRoles, r.(map[string]interface{})["name"].(string)) - } - req := map[string]interface{}{"addRoles": addRoles, "removeRoles": removeRoles} - return req, nil -} - -var domainMappingGoogleProvidedLabels = []string{ - "cloud.googleapis.com/location", - "run.googleapis.com/overrideAt", -} - -func domainMappingLabelDiffSuppress(k, old, new string, d *resource_cloud_run_domain_mapping_schema.ResourceData) bool { - - for _, label := range domainMappingGoogleProvidedLabels { - if resource_cloud_run_domain_mapping_strings.Contains(k, label) && new == "" { - return true - } - } - - if resource_cloud_run_domain_mapping_strings.Contains(k, "labels.%") { - return true - } - - return false -} - -func resourceCloudRunDomainMapping() *resource_cloud_run_domain_mapping_schema.Resource { - return &resource_cloud_run_domain_mapping_schema.Resource{ - Create: resourceCloudRunDomainMappingCreate, - Read: resourceCloudRunDomainMappingRead, - Delete: resourceCloudRunDomainMappingDelete, - - Importer: &resource_cloud_run_domain_mapping_schema.ResourceImporter{ - State: resourceCloudRunDomainMappingImport, - }, - - Timeouts: &resource_cloud_run_domain_mapping_schema.ResourceTimeout{ - Create: resource_cloud_run_domain_mapping_schema.DefaultTimeout(6 * resource_cloud_run_domain_mapping_time.Minute), - Delete: resource_cloud_run_domain_mapping_schema.DefaultTimeout(4 * resource_cloud_run_domain_mapping_time.Minute), - }, - - Schema: map[string]*resource_cloud_run_domain_mapping_schema.Schema{ - "location": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The location of the cloud run instance. 
eg us-central1`, - }, - "metadata": { - Type: resource_cloud_run_domain_mapping_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Metadata associated with this DomainMapping.`, - MaxItems: 1, - Elem: &resource_cloud_run_domain_mapping_schema.Resource{ - Schema: map[string]*resource_cloud_run_domain_mapping_schema.Schema{ - "namespace": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Required: true, - ForceNew: true, - Description: `In Cloud Run the namespace must be equal to either the -project ID or project number.`, - }, - "annotations": { - Type: resource_cloud_run_domain_mapping_schema.TypeMap, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: cloudrunAnnotationDiffSuppress, - Description: `Annotations is a key value map stored with a resource that -may be set by external tools to store and retrieve arbitrary metadata. More -info: http://kubernetes.io/docs/user-guide/annotations - -**Note**: The Cloud Run API may add additional annotations that were not provided in your config. -If terraform plan shows a diff where a server-side annotation is added, you can add it to your config -or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field.`, - Elem: &resource_cloud_run_domain_mapping_schema.Schema{Type: resource_cloud_run_domain_mapping_schema.TypeString}, - }, - "labels": { - Type: resource_cloud_run_domain_mapping_schema.TypeMap, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: domainMappingLabelDiffSuppress, - Description: `Map of string keys and values that can be used to organize and categorize -(scope and select) objects. May match selectors of replication controllers -and routes. 
-More info: http://kubernetes.io/docs/user-guide/labels`, - Elem: &resource_cloud_run_domain_mapping_schema.Schema{Type: resource_cloud_run_domain_mapping_schema.TypeString}, - }, - "generation": { - Type: resource_cloud_run_domain_mapping_schema.TypeInt, - Computed: true, - Description: `A sequence number representing a specific generation of the desired state.`, - }, - "resource_version": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Computed: true, - Description: `An opaque value that represents the internal version of this object that -can be used by clients to determine when objects have changed. May be used -for optimistic concurrency, change detection, and the watch operation on a -resource or set of resources. They may only be valid for a -particular resource or set of resources. - -More info: -https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency`, - }, - "self_link": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Computed: true, - Description: `SelfLink is a URL representing this object.`, - }, - "uid": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Computed: true, - Description: `UID is a unique id generated by the server on successful creation of a resource and is not -allowed to change on PUT operations. 
- -More info: http://kubernetes.io/docs/user-guide/identifiers#uids`, - }, - }, - }, - }, - "name": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name should be a [verified](https://support.google.com/webmasters/answer/9008080) domain`, - }, - "spec": { - Type: resource_cloud_run_domain_mapping_schema.TypeList, - Required: true, - ForceNew: true, - Description: `The spec for this DomainMapping.`, - MaxItems: 1, - Elem: &resource_cloud_run_domain_mapping_schema.Resource{ - Schema: map[string]*resource_cloud_run_domain_mapping_schema.Schema{ - "route_name": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the Cloud Run Service that this DomainMapping applies to. -The route must exist.`, - }, - "certificate_mode": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_cloud_run_domain_mapping_validation.StringInSlice([]string{"NONE", "AUTOMATIC", ""}, false), - Description: `The mode of the certificate. Default value: "AUTOMATIC" Possible values: ["NONE", "AUTOMATIC"]`, - Default: "AUTOMATIC", - }, - "force_override": { - Type: resource_cloud_run_domain_mapping_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `If set, the mapping will override any mapping set before this spec was set. 
-It is recommended that the user leaves this empty to receive an error -warning about a potential conflict and only set it once the respective UI -has given such a warning.`, - }, - }, - }, - }, - "status": { - Type: resource_cloud_run_domain_mapping_schema.TypeList, - Computed: true, - Description: `The current status of the DomainMapping.`, - Elem: &resource_cloud_run_domain_mapping_schema.Resource{ - Schema: map[string]*resource_cloud_run_domain_mapping_schema.Schema{ - "resource_records": { - Type: resource_cloud_run_domain_mapping_schema.TypeList, - Optional: true, - Description: `The resource records required to configure this domain mapping. These -records must be added to the domain's DNS configuration in order to -serve the application via this domain mapping.`, - Elem: &resource_cloud_run_domain_mapping_schema.Resource{ - Schema: map[string]*resource_cloud_run_domain_mapping_schema.Schema{ - "type": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Optional: true, - ValidateFunc: resource_cloud_run_domain_mapping_validation.StringInSlice([]string{"A", "AAAA", "CNAME", ""}, false), - Description: `Resource record type. Example: 'AAAA'. Possible values: ["A", "AAAA", "CNAME"]`, - }, - "name": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Computed: true, - Description: `Relative name of the object affected by this record. Only applicable for -'CNAME' records. Example: 'www'.`, - }, - "rrdata": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Computed: true, - Description: `Data for this record. 
Values vary by record type, as defined in RFC 1035 -(section 5) and RFC 1034 (section 3.6.1).`, - }, - }, - }, - }, - "conditions": { - Type: resource_cloud_run_domain_mapping_schema.TypeList, - Computed: true, - Description: `Array of observed DomainMappingConditions, indicating the current state -of the DomainMapping.`, - Elem: &resource_cloud_run_domain_mapping_schema.Resource{ - Schema: map[string]*resource_cloud_run_domain_mapping_schema.Schema{ - "message": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Computed: true, - Description: `Human readable message indicating details about the current status.`, - }, - "reason": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Computed: true, - Description: `One-word CamelCase reason for the condition's current status.`, - }, - "status": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Computed: true, - Description: `Status of the condition, one of True, False, Unknown.`, - }, - "type": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Computed: true, - Description: `Type of domain mapping condition.`, - }, - }, - }, - }, - "mapped_route_name": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Computed: true, - Description: `The name of the route that the mapping currently points to.`, - }, - "observed_generation": { - Type: resource_cloud_run_domain_mapping_schema.TypeInt, - Computed: true, - Description: `ObservedGeneration is the 'Generation' of the DomainMapping that -was last processed by the controller.`, - }, - }, - }, - }, - "project": { - Type: resource_cloud_run_domain_mapping_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudRunDomainMappingCreate(d *resource_cloud_run_domain_mapping_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return 
err - } - - obj := make(map[string]interface{}) - specProp, err := expandCloudRunDomainMappingSpec(d.Get("spec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("spec"); !isEmptyValue(resource_cloud_run_domain_mapping_reflect.ValueOf(specProp)) && (ok || !resource_cloud_run_domain_mapping_reflect.DeepEqual(v, specProp)) { - obj["spec"] = specProp - } - metadataProp, err := expandCloudRunDomainMappingMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(resource_cloud_run_domain_mapping_reflect.ValueOf(metadataProp)) && (ok || !resource_cloud_run_domain_mapping_reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - - obj, err = resourceCloudRunDomainMappingEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudRunBasePath}}apis/domains.cloudrun.com/v1/namespaces/{{project}}/domainmappings") - if err != nil { - return err - } - - resource_cloud_run_domain_mapping_log.Printf("[DEBUG] Creating new DomainMapping: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_run_domain_mapping_fmt.Errorf("Error fetching project for DomainMapping: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_run_domain_mapping_schema.TimeoutCreate), isCloudRunCreationConflict) - if err != nil { - return resource_cloud_run_domain_mapping_fmt.Errorf("Error creating DomainMapping: %s", err) - } - - id, err := replaceVars(d, config, "locations/{{location}}/namespaces/{{project}}/domainmappings/{{name}}") - if err != nil { - return resource_cloud_run_domain_mapping_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = 
PollingWaitTime(resourceCloudRunDomainMappingPollRead(d, meta), PollCheckKnativeStatusFunc(res), "Creating DomainMapping", d.Timeout(resource_cloud_run_domain_mapping_schema.TimeoutCreate), 1) - if err != nil { - return resource_cloud_run_domain_mapping_fmt.Errorf("Error waiting to create DomainMapping: %s", err) - } - - resource_cloud_run_domain_mapping_log.Printf("[DEBUG] Finished creating DomainMapping %q: %#v", d.Id(), res) - - return resourceCloudRunDomainMappingRead(d, meta) -} - -func resourceCloudRunDomainMappingPollRead(d *resource_cloud_run_domain_mapping_schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{CloudRunBasePath}}apis/domains.cloudrun.com/v1/namespaces/{{project}}/domainmappings/{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, resource_cloud_run_domain_mapping_fmt.Errorf("Error fetching project for DomainMapping: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isCloudRunCreationConflict) - if err != nil { - return res, err - } - res, err = resourceCloudRunDomainMappingDecoder(d, meta, res) - if err != nil { - return nil, err - } - if res == nil { - - return nil, &resource_cloud_run_domain_mapping_googleapi.Error{ - Code: 404, - Message: "could not find object CloudRunDomainMapping", - } - } - - return res, nil - } -} - -func resourceCloudRunDomainMappingRead(d *resource_cloud_run_domain_mapping_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } 
- - url, err := replaceVars(d, config, "{{CloudRunBasePath}}apis/domains.cloudrun.com/v1/namespaces/{{project}}/domainmappings/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_run_domain_mapping_fmt.Errorf("Error fetching project for DomainMapping: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isCloudRunCreationConflict) - if err != nil { - return handleNotFoundError(err, d, resource_cloud_run_domain_mapping_fmt.Sprintf("CloudRunDomainMapping %q", d.Id())) - } - - res, err = resourceCloudRunDomainMappingDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_cloud_run_domain_mapping_log.Printf("[DEBUG] Removing CloudRunDomainMapping because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_cloud_run_domain_mapping_fmt.Errorf("Error reading DomainMapping: %s", err) - } - - if err := d.Set("status", flattenCloudRunDomainMappingStatus(res["status"], d, config)); err != nil { - return resource_cloud_run_domain_mapping_fmt.Errorf("Error reading DomainMapping: %s", err) - } - if err := d.Set("spec", flattenCloudRunDomainMappingSpec(res["spec"], d, config)); err != nil { - return resource_cloud_run_domain_mapping_fmt.Errorf("Error reading DomainMapping: %s", err) - } - if err := d.Set("metadata", flattenCloudRunDomainMappingMetadata(res["metadata"], d, config)); err != nil { - return resource_cloud_run_domain_mapping_fmt.Errorf("Error reading DomainMapping: %s", err) - } - - return nil -} - -func resourceCloudRunDomainMappingDelete(d *resource_cloud_run_domain_mapping_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_run_domain_mapping_fmt.Errorf("Error fetching project for DomainMapping: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CloudRunBasePath}}apis/domains.cloudrun.com/v1/namespaces/{{project}}/domainmappings/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_cloud_run_domain_mapping_log.Printf("[DEBUG] Deleting DomainMapping %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_run_domain_mapping_schema.TimeoutDelete), isCloudRunCreationConflict) - if err != nil { - return handleNotFoundError(err, d, "DomainMapping") - } - - resource_cloud_run_domain_mapping_log.Printf("[DEBUG] Finished deleting DomainMapping %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudRunDomainMappingImport(d *resource_cloud_run_domain_mapping_schema.ResourceData, meta interface{}) ([]*resource_cloud_run_domain_mapping_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "locations/(?P[^/]+)/namespaces/(?P[^/]+)/domainmappings/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "locations/{{location}}/namespaces/{{project}}/domainmappings/{{name}}") - if err != nil { - return nil, resource_cloud_run_domain_mapping_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_cloud_run_domain_mapping_schema.ResourceData{d}, nil -} - -func flattenCloudRunDomainMappingStatus(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original 
:= v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["conditions"] = - flattenCloudRunDomainMappingStatusConditions(original["conditions"], d, config) - transformed["observed_generation"] = - flattenCloudRunDomainMappingStatusObservedGeneration(original["observedGeneration"], d, config) - transformed["resource_records"] = - flattenCloudRunDomainMappingStatusResourceRecords(original["resourceRecords"], d, config) - transformed["mapped_route_name"] = - flattenCloudRunDomainMappingStatusMappedRouteName(original["mappedRouteName"], d, config) - return []interface{}{transformed} -} - -func flattenCloudRunDomainMappingStatusConditions(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "message": flattenCloudRunDomainMappingStatusConditionsMessage(original["message"], d, config), - "status": flattenCloudRunDomainMappingStatusConditionsStatus(original["status"], d, config), - "reason": flattenCloudRunDomainMappingStatusConditionsReason(original["reason"], d, config), - "type": flattenCloudRunDomainMappingStatusConditionsType(original["type"], d, config), - }) - } - return transformed -} - -func flattenCloudRunDomainMappingStatusConditionsMessage(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingStatusConditionsStatus(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingStatusConditionsReason(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingStatusConditionsType(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingStatusObservedGeneration(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_cloud_run_domain_mapping_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenCloudRunDomainMappingStatusResourceRecords(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "type": flattenCloudRunDomainMappingStatusResourceRecordsType(original["type"], d, config), - "rrdata": flattenCloudRunDomainMappingStatusResourceRecordsRrdata(original["rrdata"], d, config), - "name": flattenCloudRunDomainMappingStatusResourceRecordsName(original["name"], d, config), - }) - } - return transformed -} - -func flattenCloudRunDomainMappingStatusResourceRecordsType(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingStatusResourceRecordsRrdata(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingStatusResourceRecordsName(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingStatusMappedRouteName(v 
interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingSpec(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["force_override"] = - flattenCloudRunDomainMappingSpecForceOverride(original["forceOverride"], d, config) - transformed["route_name"] = - flattenCloudRunDomainMappingSpecRouteName(original["routeName"], d, config) - transformed["certificate_mode"] = - flattenCloudRunDomainMappingSpecCertificateMode(original["certificateMode"], d, config) - return []interface{}{transformed} -} - -func flattenCloudRunDomainMappingSpecForceOverride(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - - return d.Get("spec.0.force_override") -} - -func flattenCloudRunDomainMappingSpecRouteName(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingSpecCertificateMode(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingMetadata(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["labels"] = - flattenCloudRunDomainMappingMetadataLabels(original["labels"], d, config) - transformed["generation"] = - flattenCloudRunDomainMappingMetadataGeneration(original["generation"], d, config) - transformed["resource_version"] = - 
flattenCloudRunDomainMappingMetadataResourceVersion(original["resourceVersion"], d, config) - transformed["self_link"] = - flattenCloudRunDomainMappingMetadataSelfLink(original["selfLink"], d, config) - transformed["uid"] = - flattenCloudRunDomainMappingMetadataUid(original["uid"], d, config) - transformed["namespace"] = - flattenCloudRunDomainMappingMetadataNamespace(original["namespace"], d, config) - transformed["annotations"] = - flattenCloudRunDomainMappingMetadataAnnotations(original["annotations"], d, config) - return []interface{}{transformed} -} - -func flattenCloudRunDomainMappingMetadataLabels(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingMetadataGeneration(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_cloud_run_domain_mapping_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenCloudRunDomainMappingMetadataResourceVersion(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingMetadataSelfLink(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingMetadataUid(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunDomainMappingMetadataNamespace(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) interface{} { - return d.Get("project") -} - -func flattenCloudRunDomainMappingMetadataAnnotations(v interface{}, d *resource_cloud_run_domain_mapping_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func expandCloudRunDomainMappingSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedForceOverride, err := expandCloudRunDomainMappingSpecForceOverride(original["force_override"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_domain_mapping_reflect.ValueOf(transformedForceOverride); val.IsValid() && !isEmptyValue(val) { - transformed["forceOverride"] = transformedForceOverride - } - - transformedRouteName, err := expandCloudRunDomainMappingSpecRouteName(original["route_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_domain_mapping_reflect.ValueOf(transformedRouteName); val.IsValid() && !isEmptyValue(val) { - transformed["routeName"] = transformedRouteName - } - - transformedCertificateMode, err := expandCloudRunDomainMappingSpecCertificateMode(original["certificate_mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_domain_mapping_reflect.ValueOf(transformedCertificateMode); val.IsValid() && !isEmptyValue(val) { - transformed["certificateMode"] = transformedCertificateMode - } - - return transformed, nil -} - -func expandCloudRunDomainMappingSpecForceOverride(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunDomainMappingSpecRouteName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return GetResourceNameFromSelfLink(v.(string)), nil -} - -func expandCloudRunDomainMappingSpecCertificateMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunDomainMappingMetadata(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLabels, err := expandCloudRunDomainMappingMetadataLabels(original["labels"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_domain_mapping_reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - transformedGeneration, err := expandCloudRunDomainMappingMetadataGeneration(original["generation"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_domain_mapping_reflect.ValueOf(transformedGeneration); val.IsValid() && !isEmptyValue(val) { - transformed["generation"] = transformedGeneration - } - - transformedResourceVersion, err := expandCloudRunDomainMappingMetadataResourceVersion(original["resource_version"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_domain_mapping_reflect.ValueOf(transformedResourceVersion); val.IsValid() && !isEmptyValue(val) { - transformed["resourceVersion"] = transformedResourceVersion - } - - transformedSelfLink, err := expandCloudRunDomainMappingMetadataSelfLink(original["self_link"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_domain_mapping_reflect.ValueOf(transformedSelfLink); val.IsValid() && !isEmptyValue(val) { - transformed["selfLink"] = transformedSelfLink - } - - transformedUid, err := expandCloudRunDomainMappingMetadataUid(original["uid"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_domain_mapping_reflect.ValueOf(transformedUid); val.IsValid() && !isEmptyValue(val) { - transformed["uid"] = transformedUid - } - - transformedNamespace, err := expandCloudRunDomainMappingMetadataNamespace(original["namespace"], d, config) - if err != nil { - return nil, err 
- } else if val := resource_cloud_run_domain_mapping_reflect.ValueOf(transformedNamespace); val.IsValid() && !isEmptyValue(val) { - transformed["namespace"] = transformedNamespace - } - - transformedAnnotations, err := expandCloudRunDomainMappingMetadataAnnotations(original["annotations"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_domain_mapping_reflect.ValueOf(transformedAnnotations); val.IsValid() && !isEmptyValue(val) { - transformed["annotations"] = transformedAnnotations - } - - return transformed, nil -} - -func expandCloudRunDomainMappingMetadataLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunDomainMappingMetadataGeneration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunDomainMappingMetadataResourceVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunDomainMappingMetadataSelfLink(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunDomainMappingMetadataUid(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunDomainMappingMetadataNamespace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunDomainMappingMetadataAnnotations(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceCloudRunDomainMappingEncoder(d 
*resource_cloud_run_domain_mapping_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - name := d.Get("name").(string) - metadata := obj["metadata"].(map[string]interface{}) - metadata["name"] = name - - obj["apiVersion"] = "domains.cloudrun.com/v1" - obj["kind"] = "DomainMapping" - return obj, nil -} - -func resourceCloudRunDomainMappingDecoder(d *resource_cloud_run_domain_mapping_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - - if obj, ok := res["metadata"]; ok { - if meta, ok := obj.(map[string]interface{}); ok { - res["name"] = meta["name"] - } else { - return nil, resource_cloud_run_domain_mapping_fmt.Errorf("Unable to decode 'metadata' block from API response.") - } - } - return res, nil -} - -func revisionNameCustomizeDiff(_ resource_cloud_run_service_context.Context, diff *resource_cloud_run_service_schema.ResourceDiff, v interface{}) error { - autogen := diff.Get("autogenerate_revision_name").(bool) - if autogen && diff.HasChange("template.0.metadata.0.name") { - return resource_cloud_run_service_fmt.Errorf("google_cloud_run_service: `template.metadata.name` cannot be set while `autogenerate_revision_name` is true. 
Please remove the field or set `autogenerate_revision_name` to false.") - } - - return nil -} - -var cloudRunGoogleProvidedAnnotations = resource_cloud_run_service_regexp.MustCompile(`serving\.knative\.dev/(?:(?:creator)|(?:lastModifier))$|run\.googleapis\.com/(?:(?:ingress-status))$|cloud\.googleapis\.com/(?:(?:location))`) - -func cloudrunAnnotationDiffSuppress(k, old, new string, d *resource_cloud_run_service_schema.ResourceData) bool { - - if cloudRunGoogleProvidedAnnotations.MatchString(k) && new == "" { - return true - } - - if resource_cloud_run_service_strings.Contains(k, "annotations.%") { - return true - } - - return false -} - -var cloudRunGoogleProvidedTemplateAnnotations = resource_cloud_run_service_regexp.MustCompile(`template\.0\.metadata\.0\.annotations\.run\.googleapis\.com/sandbox`) - -func cloudrunTemplateAnnotationDiffSuppress(k, old, new string, d *resource_cloud_run_service_schema.ResourceData) bool { - - if cloudRunGoogleProvidedTemplateAnnotations.MatchString(k) && - old == "gvisor" && new == "" { - return true - } - - return false -} - -var cloudRunGoogleProvidedLabels = resource_cloud_run_service_regexp.MustCompile(`cloud\.googleapis\.com/(?:(?:location))`) - -func cloudrunLabelDiffSuppress(k, old, new string, d *resource_cloud_run_service_schema.ResourceData) bool { - - if cloudRunGoogleProvidedLabels.MatchString(k) && new == "" { - return true - } - - if resource_cloud_run_service_strings.Contains(k, "labels.%") { - return true - } - - return false -} - -func resourceCloudRunService() *resource_cloud_run_service_schema.Resource { - return &resource_cloud_run_service_schema.Resource{ - Create: resourceCloudRunServiceCreate, - Read: resourceCloudRunServiceRead, - Update: resourceCloudRunServiceUpdate, - Delete: resourceCloudRunServiceDelete, - - Importer: &resource_cloud_run_service_schema.ResourceImporter{ - State: resourceCloudRunServiceImport, - }, - - Timeouts: &resource_cloud_run_service_schema.ResourceTimeout{ - Create: 
resource_cloud_run_service_schema.DefaultTimeout(6 * resource_cloud_run_service_time.Minute), - Update: resource_cloud_run_service_schema.DefaultTimeout(15 * resource_cloud_run_service_time.Minute), - Delete: resource_cloud_run_service_schema.DefaultTimeout(4 * resource_cloud_run_service_time.Minute), - }, - - SchemaVersion: 1, - CustomizeDiff: revisionNameCustomizeDiff, - - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "location": { - Type: resource_cloud_run_service_schema.TypeString, - Required: true, - Description: `The location of the cloud run instance. eg us-central1`, - }, - "name": { - Type: resource_cloud_run_service_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name must be unique within a namespace, within a Cloud Run region. -Is required when creating resources. Name is primarily intended -for creation idempotence and configuration definition. Cannot be updated. -More info: http://kubernetes.io/docs/user-guide/identifiers#names`, - }, - "template": { - Type: resource_cloud_run_service_schema.TypeList, - Optional: true, - Description: `template holds the latest specification for the Revision to -be stamped out. The template references the container image, and may also -include labels and annotations that should be attached to the Revision. -To correlate a Revision, and/or to force a Revision to be created when the -spec doesn't otherwise change, a nonce label may be provided in the -template metadata. 
For more details, see: -https://github.com/knative/serving/blob/master/docs/client-conventions.md#associate-modifications-with-revisions - -Cloud Run does not currently support referencing a build that is -responsible for materializing the container image from source.`, - MaxItems: 1, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "spec": { - Type: resource_cloud_run_service_schema.TypeList, - Computed: true, - Optional: true, - Description: `RevisionSpec holds the desired state of the Revision (from the client).`, - MaxItems: 1, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "containers": { - Type: resource_cloud_run_service_schema.TypeList, - Computed: true, - Optional: true, - Description: `Container defines the unit of execution for this Revision. -In the context of a Revision, we disallow a number of the fields of -this Container, including: name, ports, and volumeMounts. -The runtime contract is documented here: -https://github.com/knative/serving/blob/master/docs/runtime-contract.md`, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "image": { - Type: resource_cloud_run_service_schema.TypeString, - Required: true, - Description: `Docker image name. This is most often a reference to a container located -in the container registry, such as gcr.io/cloudrun/hello -More info: https://kubernetes.io/docs/concepts/containers/images`, - }, - "args": { - Type: resource_cloud_run_service_schema.TypeList, - Optional: true, - Description: `Arguments to the entrypoint. -The docker image's CMD is used if this is not provided. -Variable references $(VAR_NAME) are expanded using the container's -environment. If a variable cannot be resolved, the reference in the input -string will be unchanged. 
The $(VAR_NAME) syntax can be escaped with a -double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, -regardless of whether the variable exists or not. -More info: -https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell`, - Elem: &resource_cloud_run_service_schema.Schema{ - Type: resource_cloud_run_service_schema.TypeString, - }, - }, - "command": { - Type: resource_cloud_run_service_schema.TypeList, - Optional: true, - Description: `Entrypoint array. Not executed within a shell. -The docker image's ENTRYPOINT is used if this is not provided. -Variable references $(VAR_NAME) are expanded using the container's -environment. If a variable cannot be resolved, the reference in the input -string will be unchanged. The $(VAR_NAME) syntax can be escaped with a -double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, -regardless of whether the variable exists or not. -More info: -https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell`, - Elem: &resource_cloud_run_service_schema.Schema{ - Type: resource_cloud_run_service_schema.TypeString, - }, - }, - "env": { - Type: resource_cloud_run_service_schema.TypeSet, - Optional: true, - Description: `List of environment variables to set in the container.`, - Elem: cloudrunServiceSpecTemplateSpecContainersContainersEnvSchema(), - }, - "env_from": { - Type: resource_cloud_run_service_schema.TypeList, - Optional: true, - Deprecated: "Not supported by Cloud Run fully managed", - ForceNew: true, - Description: `List of sources to populate environment variables in the container. -All invalid keys will be reported as an event when the container is starting. -When a key exists in multiple sources, the value associated with the last source will -take precedence. 
Values defined by an Env with a duplicate key will take -precedence.`, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "config_map_ref": { - Type: resource_cloud_run_service_schema.TypeList, - Optional: true, - Description: `The ConfigMap to select from.`, - MaxItems: 1, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "local_object_reference": { - Type: resource_cloud_run_service_schema.TypeList, - Optional: true, - Description: `The ConfigMap to select from.`, - MaxItems: 1, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "name": { - Type: resource_cloud_run_service_schema.TypeString, - Required: true, - Description: `Name of the referent. -More info: -https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names`, - }, - }, - }, - }, - "optional": { - Type: resource_cloud_run_service_schema.TypeBool, - Optional: true, - Description: `Specify whether the ConfigMap must be defined`, - }, - }, - }, - }, - "prefix": { - Type: resource_cloud_run_service_schema.TypeString, - Optional: true, - Description: `An optional identifier to prepend to each key in the ConfigMap.`, - }, - "secret_ref": { - Type: resource_cloud_run_service_schema.TypeList, - Optional: true, - Description: `The Secret to select from.`, - MaxItems: 1, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "local_object_reference": { - Type: resource_cloud_run_service_schema.TypeList, - Optional: true, - Description: `The Secret to select from.`, - MaxItems: 1, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "name": { - Type: resource_cloud_run_service_schema.TypeString, - Required: true, - Description: `Name of the referent. 
-More info: -https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names`, - }, - }, - }, - }, - "optional": { - Type: resource_cloud_run_service_schema.TypeBool, - Optional: true, - Description: `Specify whether the Secret must be defined`, - }, - }, - }, - }, - }, - }, - }, - "ports": { - Type: resource_cloud_run_service_schema.TypeList, - Computed: true, - Optional: true, - Description: `List of open ports in the container. -More Info: -https://cloud.google.com/run/docs/reference/rest/v1/RevisionSpec#ContainerPort`, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "container_port": { - Type: resource_cloud_run_service_schema.TypeInt, - Required: true, - Description: `Port number.`, - }, - "name": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Optional: true, - Description: `Name of the port.`, - }, - "protocol": { - Type: resource_cloud_run_service_schema.TypeString, - Optional: true, - Description: `Protocol used on port. Defaults to TCP.`, - }, - }, - }, - }, - "resources": { - Type: resource_cloud_run_service_schema.TypeList, - Computed: true, - Optional: true, - Description: `Compute Resources required by this container. Used to set values such as max memory -More info: -https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits`, - MaxItems: 1, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "limits": { - Type: resource_cloud_run_service_schema.TypeMap, - Computed: true, - Optional: true, - Description: `Limits describes the maximum amount of compute resources allowed. 
-The values of the map is string form of the 'quantity' k8s type: -https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go`, - Elem: &resource_cloud_run_service_schema.Schema{Type: resource_cloud_run_service_schema.TypeString}, - }, - "requests": { - Type: resource_cloud_run_service_schema.TypeMap, - Optional: true, - Description: `Requests describes the minimum amount of compute resources required. -If Requests is omitted for a container, it defaults to Limits if that is -explicitly specified, otherwise to an implementation-defined value. -The values of the map is string form of the 'quantity' k8s type: -https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go`, - Elem: &resource_cloud_run_service_schema.Schema{Type: resource_cloud_run_service_schema.TypeString}, - }, - }, - }, - }, - "volume_mounts": { - Type: resource_cloud_run_service_schema.TypeList, - Optional: true, - Description: `Volume to mount into the container's filesystem. -Only supports SecretVolumeSources.`, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "mount_path": { - Type: resource_cloud_run_service_schema.TypeString, - Required: true, - Description: `Path within the container at which the volume should be mounted. Must -not contain ':'.`, - }, - "name": { - Type: resource_cloud_run_service_schema.TypeString, - Required: true, - Description: `This must match the Name of a Volume.`, - }, - }, - }, - }, - "working_dir": { - Type: resource_cloud_run_service_schema.TypeString, - Optional: true, - Deprecated: "Not supported by Cloud Run fully managed", - ForceNew: true, - Description: `Container's working directory. 
-If not specified, the container runtime's default will be used, which -might be configured in the container image.`, - }, - }, - }, - }, - "container_concurrency": { - Type: resource_cloud_run_service_schema.TypeInt, - Computed: true, - Optional: true, - Description: `ContainerConcurrency specifies the maximum allowed in-flight (concurrent) -requests per container of the Revision. Values are: -- '0' thread-safe, the system should manage the max concurrency. This is - the default value. -- '1' not-thread-safe. Single concurrency -- '2-N' thread-safe, max concurrency of N`, - }, - "service_account_name": { - Type: resource_cloud_run_service_schema.TypeString, - Optional: true, - Description: `Email address of the IAM service account associated with the revision of the -service. The service account represents the identity of the running revision, -and determines what permissions the revision has. If not provided, the revision -will use the project's default service account.`, - }, - "timeout_seconds": { - Type: resource_cloud_run_service_schema.TypeInt, - Computed: true, - Optional: true, - Description: `TimeoutSeconds holds the max duration the instance is allowed for responding to a request.`, - }, - "volumes": { - Type: resource_cloud_run_service_schema.TypeList, - Optional: true, - Description: `Volume represents a named volume in a container.`, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "name": { - Type: resource_cloud_run_service_schema.TypeString, - Required: true, - Description: `Volume's name.`, - }, - "secret": { - Type: resource_cloud_run_service_schema.TypeList, - Required: true, - Description: `The secret's value will be presented as the content of a file whose -name is defined in the item path. 
If no items are defined, the name of -the file is the secret_name.`, - MaxItems: 1, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "secret_name": { - Type: resource_cloud_run_service_schema.TypeString, - Required: true, - Description: `The name of the secret in Cloud Secret Manager. By default, the secret -is assumed to be in the same project. -If the secret is in another project, you must define an alias. -An alias definition has the form: -:projects//secrets/. -If multiple alias definitions are needed, they must be separated by -commas. -The alias definitions must be set on the run.googleapis.com/secrets -annotation.`, - }, - "items": { - Type: resource_cloud_run_service_schema.TypeList, - Optional: true, - Description: `If unspecified, the volume will expose a file whose name is the -secret_name. -If specified, the key will be used as the version to fetch from Cloud -Secret Manager and the path will be the name of the file exposed in the -volume. When items are defined, they must specify a key and a path.`, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "key": { - Type: resource_cloud_run_service_schema.TypeString, - Required: true, - Description: `The Cloud Secret Manager secret version. -Can be 'latest' for the latest value or an integer for a specific version.`, - }, - "path": { - Type: resource_cloud_run_service_schema.TypeString, - Required: true, - Description: `The relative path of the file to map the key to. -May not be an absolute path. -May not contain the path element '..'. 
-May not start with the string '..'.`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "serving_state": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Deprecated: "Not supported by Cloud Run fully managed", - Description: `ServingState holds a value describing the state the resources -are in for this Revision. -It is expected -that the system will manipulate this based on routability and load.`, - }, - }, - }, - }, - "metadata": { - Type: resource_cloud_run_service_schema.TypeList, - Computed: true, - Optional: true, - Description: `Optional metadata for this Revision, including labels and annotations. -Name will be generated by the Configuration. To set minimum instances -for this revision, use the "autoscaling.knative.dev/minScale" annotation -key. To set maximum instances for this revision, use the -"autoscaling.knative.dev/maxScale" annotation key. To set Cloud SQL -connections for the revision, use the "run.googleapis.com/cloudsql-instances" -annotation key.`, - MaxItems: 1, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "annotations": { - Type: resource_cloud_run_service_schema.TypeMap, - Computed: true, - Optional: true, - DiffSuppressFunc: cloudrunTemplateAnnotationDiffSuppress, - Description: `Annotations is a key value map stored with a resource that -may be set by external tools to store and retrieve arbitrary metadata. More -info: http://kubernetes.io/docs/user-guide/annotations - -**Note**: The Cloud Run API may add additional annotations that were not provided in your config. 
-If terraform plan shows a diff where a server-side annotation is added, you can add it to your config -or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field.`, - Elem: &resource_cloud_run_service_schema.Schema{Type: resource_cloud_run_service_schema.TypeString}, - }, - "labels": { - Type: resource_cloud_run_service_schema.TypeMap, - Optional: true, - Description: `Map of string keys and values that can be used to organize and categorize -(scope and select) objects. May match selectors of replication controllers -and routes. -More info: http://kubernetes.io/docs/user-guide/labels`, - Elem: &resource_cloud_run_service_schema.Schema{Type: resource_cloud_run_service_schema.TypeString}, - }, - "name": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Optional: true, - Description: `Name must be unique within a namespace, within a Cloud Run region. -Is required when creating resources. Name is primarily intended -for creation idempotence and configuration definition. Cannot be updated. -More info: http://kubernetes.io/docs/user-guide/identifiers#names`, - }, - "namespace": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Optional: true, - Description: `In Cloud Run the namespace must be equal to either the -project ID or project number. It will default to the resource's project.`, - }, - "generation": { - Type: resource_cloud_run_service_schema.TypeInt, - Computed: true, - Description: `A sequence number representing a specific generation of the desired state.`, - }, - "resource_version": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Description: `An opaque value that represents the internal version of this object that -can be used by clients to determine when objects have changed. May be used -for optimistic concurrency, change detection, and the watch operation on a -resource or set of resources. 
They may only be valid for a -particular resource or set of resources. - -More info: -https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency`, - }, - "self_link": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Description: `SelfLink is a URL representing this object.`, - }, - "uid": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Description: `UID is a unique id generated by the server on successful creation of a resource and is not -allowed to change on PUT operations. - -More info: http://kubernetes.io/docs/user-guide/identifiers#uids`, - }, - }, - }, - }, - }, - }, - }, - "traffic": { - Type: resource_cloud_run_service_schema.TypeList, - Computed: true, - Optional: true, - Description: `Traffic specifies how to distribute traffic over a collection of Knative Revisions -and Configurations`, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "percent": { - Type: resource_cloud_run_service_schema.TypeInt, - Required: true, - Description: `Percent specifies percent of the traffic to this Revision or Configuration.`, - }, - "latest_revision": { - Type: resource_cloud_run_service_schema.TypeBool, - Optional: true, - Description: `LatestRevision may be optionally provided to indicate that the latest ready -Revision of the Configuration should be used for this traffic target. 
When -provided LatestRevision must be true if RevisionName is empty; it must be -false when RevisionName is non-empty.`, - }, - "revision_name": { - Type: resource_cloud_run_service_schema.TypeString, - Optional: true, - Description: `RevisionName of a specific revision to which to send this portion of traffic.`, - }, - }, - }, - }, - - "metadata": { - Type: resource_cloud_run_service_schema.TypeList, - Computed: true, - Optional: true, - Description: `Metadata associated with this Service, including name, namespace, labels, -and annotations.`, - MaxItems: 1, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "annotations": { - Type: resource_cloud_run_service_schema.TypeMap, - Computed: true, - Optional: true, - DiffSuppressFunc: cloudrunAnnotationDiffSuppress, - Description: `Annotations is a key value map stored with a resource that -may be set by external tools to store and retrieve arbitrary metadata. More -info: http://kubernetes.io/docs/user-guide/annotations - -**Note**: The Cloud Run API may add additional annotations that were not provided in your config. -If terraform plan shows a diff where a server-side annotation is added, you can add it to your config -or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field. - -Cloud Run (fully managed) uses the following annotation keys to configure features on a Service: - -- 'run.googleapis.com/ingress' sets the [ingress settings](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--ingress) - for the Service. 
For example, '"run.googleapis.com/ingress" = "all"'.`, - Elem: &resource_cloud_run_service_schema.Schema{Type: resource_cloud_run_service_schema.TypeString}, - }, - "labels": { - Type: resource_cloud_run_service_schema.TypeMap, - Computed: true, - Optional: true, - DiffSuppressFunc: cloudrunLabelDiffSuppress, - Description: `Map of string keys and values that can be used to organize and categorize -(scope and select) objects. May match selectors of replication controllers -and routes. -More info: http://kubernetes.io/docs/user-guide/labels`, - Elem: &resource_cloud_run_service_schema.Schema{Type: resource_cloud_run_service_schema.TypeString}, - }, - "namespace": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Optional: true, - Description: `In Cloud Run the namespace must be equal to either the -project ID or project number.`, - }, - "generation": { - Type: resource_cloud_run_service_schema.TypeInt, - Computed: true, - Description: `A sequence number representing a specific generation of the desired state.`, - }, - "resource_version": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Description: `An opaque value that represents the internal version of this object that -can be used by clients to determine when objects have changed. May be used -for optimistic concurrency, change detection, and the watch operation on a -resource or set of resources. They may only be valid for a -particular resource or set of resources. 
- -More info: -https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency`, - }, - "self_link": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Description: `SelfLink is a URL representing this object.`, - }, - "uid": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Description: `UID is a unique id generated by the server on successful creation of a resource and is not -allowed to change on PUT operations. - -More info: http://kubernetes.io/docs/user-guide/identifiers#uids`, - }, - }, - }, - }, - "status": { - Type: resource_cloud_run_service_schema.TypeList, - Computed: true, - Description: `The current status of the Service.`, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "conditions": { - Type: resource_cloud_run_service_schema.TypeList, - Computed: true, - Description: `Array of observed Service Conditions, indicating the current ready state of the service.`, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "message": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Description: `Human readable message indicating details about the current status.`, - }, - "reason": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Description: `One-word CamelCase reason for the condition's current status.`, - }, - "status": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Description: `Status of the condition, one of True, False, Unknown.`, - }, - "type": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Description: `Type of domain mapping condition.`, - }, - }, - }, - }, - "latest_created_revision_name": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Description: `From ConfigurationStatus. 
LatestCreatedRevisionName is the last revision that was created -from this Service's Configuration. It might not be ready yet, for that use -LatestReadyRevisionName.`, - }, - "latest_ready_revision_name": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Description: `From ConfigurationStatus. LatestReadyRevisionName holds the name of the latest Revision -stamped out from this Service's Configuration that has had its "Ready" condition become -"True".`, - }, - "observed_generation": { - Type: resource_cloud_run_service_schema.TypeInt, - Computed: true, - Description: `ObservedGeneration is the 'Generation' of the Route that was last processed by the -controller. - -Clients polling for completed reconciliation should poll until observedGeneration = -metadata.generation and the Ready condition's status is True or False.`, - }, - "url": { - Type: resource_cloud_run_service_schema.TypeString, - Computed: true, - Description: `From RouteStatus. URL holds the url that will distribute traffic over the provided traffic -targets. 
It generally has the form -https://{route-hash}-{project-hash}-{cluster-level-suffix}.a.run.app`, - }, - }, - }, - }, - "autogenerate_revision_name": { - Type: resource_cloud_run_service_schema.TypeBool, - Optional: true, - Default: false, - }, - "project": { - Type: resource_cloud_run_service_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func cloudrunServiceSpecTemplateSpecContainersContainersEnvSchema() *resource_cloud_run_service_schema.Resource { - return &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "name": { - Type: resource_cloud_run_service_schema.TypeString, - Optional: true, - Description: `Name of the environment variable.`, - }, - "value": { - Type: resource_cloud_run_service_schema.TypeString, - Optional: true, - Description: `Variable references $(VAR_NAME) are expanded -using the previous defined environment variables in the container and -any route environment variables. If a variable cannot be resolved, -the reference in the input string will be unchanged. The $(VAR_NAME) -syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped -references will never be expanded, regardless of whether the variable -exists or not. -Defaults to "".`, - }, - "value_from": { - Type: resource_cloud_run_service_schema.TypeList, - Optional: true, - Description: `Source for the environment variable's value. 
Only supports secret_key_ref.`, - MaxItems: 1, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "secret_key_ref": { - Type: resource_cloud_run_service_schema.TypeList, - Required: true, - Description: `Selects a key (version) of a secret in Secret Manager.`, - MaxItems: 1, - Elem: &resource_cloud_run_service_schema.Resource{ - Schema: map[string]*resource_cloud_run_service_schema.Schema{ - "key": { - Type: resource_cloud_run_service_schema.TypeString, - Required: true, - Description: `A Cloud Secret Manager secret version. Must be 'latest' for the latest -version or an integer for a specific version.`, - }, - "name": { - Type: resource_cloud_run_service_schema.TypeString, - Required: true, - Description: `The name of the secret in Cloud Secret Manager. By default, the secret -is assumed to be in the same project. -If the secret is in another project, you must define an alias. -You set the in this field, and create an annotation with the -following structure -"run.googleapis.com/secrets" = ":projects//secrets/". 
-If multiple alias definitions are needed, they must be separated by -commas in the annotation field.`, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func resourceCloudRunServiceCreate(d *resource_cloud_run_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - specProp, err := expandCloudRunServiceSpec(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(resource_cloud_run_service_reflect.ValueOf(specProp)) { - obj["spec"] = specProp - } - metadataProp, err := expandCloudRunServiceMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(resource_cloud_run_service_reflect.ValueOf(metadataProp)) && (ok || !resource_cloud_run_service_reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - - obj, err = resourceCloudRunServiceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudRunBasePath}}apis/serving.knative.dev/v1/namespaces/{{project}}/services") - if err != nil { - return err - } - - resource_cloud_run_service_log.Printf("[DEBUG] Creating new Service: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_run_service_fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_run_service_schema.TimeoutCreate), isCloudRunCreationConflict) - if err != nil { - return resource_cloud_run_service_fmt.Errorf("Error creating Service: %s", err) - } - - id, err := replaceVars(d, config, 
"locations/{{location}}/namespaces/{{project}}/services/{{name}}") - if err != nil { - return resource_cloud_run_service_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = PollingWaitTime(resourceCloudRunServicePollRead(d, meta), PollCheckKnativeStatusFunc(res), "Creating Service", d.Timeout(resource_cloud_run_service_schema.TimeoutCreate), 1) - if err != nil { - return resource_cloud_run_service_fmt.Errorf("Error waiting to create Service: %s", err) - } - - resource_cloud_run_service_log.Printf("[DEBUG] Finished creating Service %q: %#v", d.Id(), res) - - return resourceCloudRunServiceRead(d, meta) -} - -func resourceCloudRunServicePollRead(d *resource_cloud_run_service_schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{CloudRunBasePath}}apis/serving.knative.dev/v1/namespaces/{{project}}/services/{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, resource_cloud_run_service_fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isCloudRunCreationConflict) - if err != nil { - return res, err - } - res, err = resourceCloudRunServiceDecoder(d, meta, res) - if err != nil { - return nil, err - } - if res == nil { - - return nil, &resource_cloud_run_service_googleapi.Error{ - Code: 404, - Message: "could not find object CloudRunService", - } - } - - return res, nil - } -} - -func resourceCloudRunServiceRead(d *resource_cloud_run_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudRunBasePath}}apis/serving.knative.dev/v1/namespaces/{{project}}/services/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_run_service_fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isCloudRunCreationConflict) - if err != nil { - return handleNotFoundError(err, d, resource_cloud_run_service_fmt.Sprintf("CloudRunService %q", d.Id())) - } - - res, err = resourceCloudRunServiceDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_cloud_run_service_log.Printf("[DEBUG] Removing CloudRunService because it no longer exists.") - d.SetId("") - return nil - } - - if _, ok := d.GetOkExists("autogenerate_revision_name"); !ok { - if err := d.Set("autogenerate_revision_name", false); err != nil { - return resource_cloud_run_service_fmt.Errorf("Error setting autogenerate_revision_name: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return resource_cloud_run_service_fmt.Errorf("Error reading Service: %s", err) - } - - if flattenedProp := flattenCloudRunServiceSpec(res["spec"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*resource_cloud_run_service_googleapi.Error); ok { - return resource_cloud_run_service_fmt.Errorf("Error reading Service: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return resource_cloud_run_service_fmt.Errorf("Error setting %s: %s", k, err) - } - } - } - } - if err := d.Set("status", 
flattenCloudRunServiceStatus(res["status"], d, config)); err != nil { - return resource_cloud_run_service_fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("metadata", flattenCloudRunServiceMetadata(res["metadata"], d, config)); err != nil { - return resource_cloud_run_service_fmt.Errorf("Error reading Service: %s", err) - } - - return nil -} - -func resourceCloudRunServiceUpdate(d *resource_cloud_run_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_run_service_fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - specProp, err := expandCloudRunServiceSpec(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(resource_cloud_run_service_reflect.ValueOf(specProp)) { - obj["spec"] = specProp - } - metadataProp, err := expandCloudRunServiceMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(resource_cloud_run_service_reflect.ValueOf(v)) && (ok || !resource_cloud_run_service_reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - - obj, err = resourceCloudRunServiceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudRunBasePath}}apis/serving.knative.dev/v1/namespaces/{{project}}/services/{{name}}") - if err != nil { - return err - } - - resource_cloud_run_service_log.Printf("[DEBUG] Updating Service %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_run_service_schema.TimeoutUpdate), 
isCloudRunCreationConflict) - - if err != nil { - return resource_cloud_run_service_fmt.Errorf("Error updating Service %q: %s", d.Id(), err) - } else { - resource_cloud_run_service_log.Printf("[DEBUG] Finished updating Service %q: %#v", d.Id(), res) - } - - err = PollingWaitTime(resourceCloudRunServicePollRead(d, meta), PollCheckKnativeStatusFunc(res), "Updating Service", d.Timeout(resource_cloud_run_service_schema.TimeoutUpdate), 1) - if err != nil { - return err - } - - return resourceCloudRunServiceRead(d, meta) -} - -func resourceCloudRunServiceDelete(d *resource_cloud_run_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_run_service_fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CloudRunBasePath}}apis/serving.knative.dev/v1/namespaces/{{project}}/services/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_cloud_run_service_log.Printf("[DEBUG] Deleting Service %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_run_service_schema.TimeoutDelete), isCloudRunCreationConflict) - if err != nil { - return handleNotFoundError(err, d, "Service") - } - - resource_cloud_run_service_log.Printf("[DEBUG] Finished deleting Service %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudRunServiceImport(d *resource_cloud_run_service_schema.ResourceData, meta interface{}) ([]*resource_cloud_run_service_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - 
"locations/(?P[^/]+)/namespaces/(?P[^/]+)/services/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "locations/{{location}}/namespaces/{{project}}/services/{{name}}") - if err != nil { - return nil, resource_cloud_run_service_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if err := d.Set("autogenerate_revision_name", false); err != nil { - return nil, resource_cloud_run_service_fmt.Errorf("Error setting autogenerate_revision_name: %s", err) - } - - return []*resource_cloud_run_service_schema.ResourceData{d}, nil -} - -func flattenCloudRunServiceSpec(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["traffic"] = - flattenCloudRunServiceSpecTraffic(original["traffic"], d, config) - transformed["template"] = - flattenCloudRunServiceSpecTemplate(original["template"], d, config) - return []interface{}{transformed} -} - -func flattenCloudRunServiceSpecTraffic(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "revision_name": flattenCloudRunServiceSpecTrafficRevisionName(original["revisionName"], d, config), - "percent": flattenCloudRunServiceSpecTrafficPercent(original["percent"], d, config), - "latest_revision": flattenCloudRunServiceSpecTrafficLatestRevision(original["latestRevision"], d, config), - }) - } - return transformed -} - -func flattenCloudRunServiceSpecTrafficRevisionName(v interface{}, d 
*resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTrafficPercent(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_cloud_run_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenCloudRunServiceSpecTrafficLatestRevision(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplate(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["metadata"] = - flattenCloudRunServiceSpecTemplateMetadata(original["metadata"], d, config) - transformed["spec"] = - flattenCloudRunServiceSpecTemplateSpec(original["spec"], d, config) - return []interface{}{transformed} -} - -func flattenCloudRunServiceSpecTemplateMetadata(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["labels"] = - flattenCloudRunServiceSpecTemplateMetadataLabels(original["labels"], d, config) - transformed["generation"] = - flattenCloudRunServiceSpecTemplateMetadataGeneration(original["generation"], d, config) - transformed["resource_version"] = - flattenCloudRunServiceSpecTemplateMetadataResourceVersion(original["resourceVersion"], d, config) - transformed["self_link"] = - flattenCloudRunServiceSpecTemplateMetadataSelfLink(original["selfLink"], d, config) - 
transformed["uid"] = - flattenCloudRunServiceSpecTemplateMetadataUid(original["uid"], d, config) - transformed["namespace"] = - flattenCloudRunServiceSpecTemplateMetadataNamespace(original["namespace"], d, config) - transformed["annotations"] = - flattenCloudRunServiceSpecTemplateMetadataAnnotations(original["annotations"], d, config) - transformed["name"] = - flattenCloudRunServiceSpecTemplateMetadataName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenCloudRunServiceSpecTemplateMetadataLabels(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateMetadataGeneration(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_cloud_run_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenCloudRunServiceSpecTemplateMetadataResourceVersion(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateMetadataSelfLink(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateMetadataUid(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateMetadataNamespace(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateMetadataAnnotations(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateMetadataName(v interface{}, d 
*resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpec(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["containers"] = - flattenCloudRunServiceSpecTemplateSpecContainers(original["containers"], d, config) - transformed["container_concurrency"] = - flattenCloudRunServiceSpecTemplateSpecContainerConcurrency(original["containerConcurrency"], d, config) - transformed["timeout_seconds"] = - flattenCloudRunServiceSpecTemplateSpecTimeoutSeconds(original["timeoutSeconds"], d, config) - transformed["service_account_name"] = - flattenCloudRunServiceSpecTemplateSpecServiceAccountName(original["serviceAccountName"], d, config) - transformed["volumes"] = - flattenCloudRunServiceSpecTemplateSpecVolumes(original["volumes"], d, config) - transformed["serving_state"] = - flattenCloudRunServiceSpecTemplateSpecServingState(original["servingState"], d, config) - return []interface{}{transformed} -} - -func flattenCloudRunServiceSpecTemplateSpecContainers(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "working_dir": flattenCloudRunServiceSpecTemplateSpecContainersWorkingDir(original["workingDir"], d, config), - "args": flattenCloudRunServiceSpecTemplateSpecContainersArgs(original["args"], d, config), - "env_from": flattenCloudRunServiceSpecTemplateSpecContainersEnvFrom(original["envFrom"], d, config), - "image": 
flattenCloudRunServiceSpecTemplateSpecContainersImage(original["image"], d, config), - "command": flattenCloudRunServiceSpecTemplateSpecContainersCommand(original["command"], d, config), - "env": flattenCloudRunServiceSpecTemplateSpecContainersEnv(original["env"], d, config), - "ports": flattenCloudRunServiceSpecTemplateSpecContainersPorts(original["ports"], d, config), - "resources": flattenCloudRunServiceSpecTemplateSpecContainersResources(original["resources"], d, config), - "volume_mounts": flattenCloudRunServiceSpecTemplateSpecContainersVolumeMounts(original["volumeMounts"], d, config), - }) - } - return transformed -} - -func flattenCloudRunServiceSpecTemplateSpecContainersWorkingDir(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersArgs(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFrom(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "prefix": flattenCloudRunServiceSpecTemplateSpecContainersEnvFromPrefix(original["prefix"], d, config), - "config_map_ref": flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRef(original["configMapRef"], d, config), - "secret_ref": flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRef(original["secretRef"], d, config), - }) - } - return transformed -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromPrefix(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRef(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["optional"] = - flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefOptional(original["optional"], d, config) - transformed["local_object_reference"] = - flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReference(original["localObjectReference"], d, config) - return []interface{}{transformed} -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefOptional(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReference(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReferenceName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReferenceName(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRef(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["local_object_reference"] 
= - flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReference(original["localObjectReference"], d, config) - transformed["optional"] = - flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefOptional(original["optional"], d, config) - return []interface{}{transformed} -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReference(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReferenceName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReferenceName(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefOptional(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersImage(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersCommand(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnv(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_cloud_run_service_schema.NewSet(resource_cloud_run_service_schema.HashResource(cloudrunServiceSpecTemplateSpecContainersContainersEnvSchema()), []interface{}{}) - for _, raw 
:= range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "name": flattenCloudRunServiceSpecTemplateSpecContainersEnvName(original["name"], d, config), - "value": flattenCloudRunServiceSpecTemplateSpecContainersEnvValue(original["value"], d, config), - "value_from": flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFrom(original["valueFrom"], d, config), - }) - } - return transformed -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvName(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvValue(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFrom(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["secret_key_ref"] = - flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRef(original["secretKeyRef"], d, config) - return []interface{}{transformed} -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRef(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key"] = - flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefKey(original["key"], d, config) - transformed["name"] = - flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefName(original["name"], d, config) - return []interface{}{transformed} -} - -func 
flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefKey(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefName(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersPorts(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenCloudRunServiceSpecTemplateSpecContainersPortsName(original["name"], d, config), - "protocol": flattenCloudRunServiceSpecTemplateSpecContainersPortsProtocol(original["protocol"], d, config), - "container_port": flattenCloudRunServiceSpecTemplateSpecContainersPortsContainerPort(original["containerPort"], d, config), - }) - } - return transformed -} - -func flattenCloudRunServiceSpecTemplateSpecContainersPortsName(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersPortsProtocol(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersPortsContainerPort(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_cloud_run_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func 
flattenCloudRunServiceSpecTemplateSpecContainersResources(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["limits"] = - flattenCloudRunServiceSpecTemplateSpecContainersResourcesLimits(original["limits"], d, config) - transformed["requests"] = - flattenCloudRunServiceSpecTemplateSpecContainersResourcesRequests(original["requests"], d, config) - return []interface{}{transformed} -} - -func flattenCloudRunServiceSpecTemplateSpecContainersResourcesLimits(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersResourcesRequests(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersVolumeMounts(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "mount_path": flattenCloudRunServiceSpecTemplateSpecContainersVolumeMountsMountPath(original["mountPath"], d, config), - "name": flattenCloudRunServiceSpecTemplateSpecContainersVolumeMountsName(original["name"], d, config), - }) - } - return transformed -} - -func flattenCloudRunServiceSpecTemplateSpecContainersVolumeMountsMountPath(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainersVolumeMountsName(v interface{}, d *resource_cloud_run_service_schema.ResourceData, 
config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecContainerConcurrency(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_cloud_run_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenCloudRunServiceSpecTemplateSpecTimeoutSeconds(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_cloud_run_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenCloudRunServiceSpecTemplateSpecServiceAccountName(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecVolumes(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenCloudRunServiceSpecTemplateSpecVolumesName(original["name"], d, config), - "secret": flattenCloudRunServiceSpecTemplateSpecVolumesSecret(original["secret"], d, config), - }) - } - return transformed -} - -func flattenCloudRunServiceSpecTemplateSpecVolumesName(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecVolumesSecret(v interface{}, d *resource_cloud_run_service_schema.ResourceData, 
config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["secret_name"] = - flattenCloudRunServiceSpecTemplateSpecVolumesSecretSecretName(original["secretName"], d, config) - transformed["items"] = - flattenCloudRunServiceSpecTemplateSpecVolumesSecretItems(original["items"], d, config) - return []interface{}{transformed} -} - -func flattenCloudRunServiceSpecTemplateSpecVolumesSecretSecretName(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecVolumesSecretItems(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "key": flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsKey(original["key"], d, config), - "path": flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsPath(original["path"], d, config), - }) - } - return transformed -} - -func flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsKey(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecVolumesSecretItemsPath(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceSpecTemplateSpecServingState(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceStatus(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { 
- return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["conditions"] = - flattenCloudRunServiceStatusConditions(original["conditions"], d, config) - transformed["url"] = - flattenCloudRunServiceStatusUrl(original["url"], d, config) - transformed["observed_generation"] = - flattenCloudRunServiceStatusObservedGeneration(original["observedGeneration"], d, config) - transformed["latest_created_revision_name"] = - flattenCloudRunServiceStatusLatestCreatedRevisionName(original["latestCreatedRevisionName"], d, config) - transformed["latest_ready_revision_name"] = - flattenCloudRunServiceStatusLatestReadyRevisionName(original["latestReadyRevisionName"], d, config) - return []interface{}{transformed} -} - -func flattenCloudRunServiceStatusConditions(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "message": flattenCloudRunServiceStatusConditionsMessage(original["message"], d, config), - "status": flattenCloudRunServiceStatusConditionsStatus(original["status"], d, config), - "reason": flattenCloudRunServiceStatusConditionsReason(original["reason"], d, config), - "type": flattenCloudRunServiceStatusConditionsType(original["type"], d, config), - }) - } - return transformed -} - -func flattenCloudRunServiceStatusConditionsMessage(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceStatusConditionsStatus(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceStatusConditionsReason(v 
interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceStatusConditionsType(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceStatusUrl(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceStatusObservedGeneration(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_cloud_run_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenCloudRunServiceStatusLatestCreatedRevisionName(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceStatusLatestReadyRevisionName(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceMetadata(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["labels"] = - flattenCloudRunServiceMetadataLabels(original["labels"], d, config) - transformed["generation"] = - flattenCloudRunServiceMetadataGeneration(original["generation"], d, config) - transformed["resource_version"] = - flattenCloudRunServiceMetadataResourceVersion(original["resourceVersion"], d, config) - transformed["self_link"] = - flattenCloudRunServiceMetadataSelfLink(original["selfLink"], d, config) - transformed["uid"] = - flattenCloudRunServiceMetadataUid(original["uid"], d, config) - 
transformed["namespace"] = - flattenCloudRunServiceMetadataNamespace(original["namespace"], d, config) - transformed["annotations"] = - flattenCloudRunServiceMetadataAnnotations(original["annotations"], d, config) - return []interface{}{transformed} -} - -func flattenCloudRunServiceMetadataLabels(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceMetadataGeneration(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_cloud_run_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenCloudRunServiceMetadataResourceVersion(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceMetadataSelfLink(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceMetadataUid(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudRunServiceMetadataNamespace(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return d.Get("project") -} - -func flattenCloudRunServiceMetadataAnnotations(v interface{}, d *resource_cloud_run_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudRunServiceSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - transformed := make(map[string]interface{}) - transformedTraffic, err := expandCloudRunServiceSpecTraffic(d.Get("traffic"), d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedTraffic); 
val.IsValid() && !isEmptyValue(val) { - transformed["traffic"] = transformedTraffic - } - - transformedTemplate, err := expandCloudRunServiceSpecTemplate(d.Get("template"), d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedTemplate); val.IsValid() && !isEmptyValue(val) { - transformed["template"] = transformedTemplate - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTraffic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRevisionName, err := expandCloudRunServiceSpecTrafficRevisionName(original["revision_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedRevisionName); val.IsValid() && !isEmptyValue(val) { - transformed["revisionName"] = transformedRevisionName - } - - transformedPercent, err := expandCloudRunServiceSpecTrafficPercent(original["percent"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedPercent); val.IsValid() && !isEmptyValue(val) { - transformed["percent"] = transformedPercent - } - - transformedLatestRevision, err := expandCloudRunServiceSpecTrafficLatestRevision(original["latest_revision"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedLatestRevision); val.IsValid() && !isEmptyValue(val) { - transformed["latestRevision"] = transformedLatestRevision - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunServiceSpecTrafficRevisionName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandCloudRunServiceSpecTrafficPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTrafficLatestRevision(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMetadata, err := expandCloudRunServiceSpecTemplateMetadata(original["metadata"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedMetadata); val.IsValid() && !isEmptyValue(val) { - transformed["metadata"] = transformedMetadata - } - - transformedSpec, err := expandCloudRunServiceSpecTemplateSpec(original["spec"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedSpec); val.IsValid() && !isEmptyValue(val) { - transformed["spec"] = transformedSpec - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateMetadata(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLabels, err := expandCloudRunServiceSpecTemplateMetadataLabels(original["labels"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - transformedGeneration, err := expandCloudRunServiceSpecTemplateMetadataGeneration(original["generation"], d, config) - if err != nil 
{ - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedGeneration); val.IsValid() && !isEmptyValue(val) { - transformed["generation"] = transformedGeneration - } - - transformedResourceVersion, err := expandCloudRunServiceSpecTemplateMetadataResourceVersion(original["resource_version"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedResourceVersion); val.IsValid() && !isEmptyValue(val) { - transformed["resourceVersion"] = transformedResourceVersion - } - - transformedSelfLink, err := expandCloudRunServiceSpecTemplateMetadataSelfLink(original["self_link"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedSelfLink); val.IsValid() && !isEmptyValue(val) { - transformed["selfLink"] = transformedSelfLink - } - - transformedUid, err := expandCloudRunServiceSpecTemplateMetadataUid(original["uid"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedUid); val.IsValid() && !isEmptyValue(val) { - transformed["uid"] = transformedUid - } - - transformedNamespace, err := expandCloudRunServiceSpecTemplateMetadataNamespace(original["namespace"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedNamespace); val.IsValid() && !isEmptyValue(val) { - transformed["namespace"] = transformedNamespace - } - - transformedAnnotations, err := expandCloudRunServiceSpecTemplateMetadataAnnotations(original["annotations"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedAnnotations); val.IsValid() && !isEmptyValue(val) { - transformed["annotations"] = transformedAnnotations - } - - transformedName, err := expandCloudRunServiceSpecTemplateMetadataName(original["name"], d, config) - if err != nil { - return nil, 
err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateMetadataLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunServiceSpecTemplateMetadataGeneration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateMetadataResourceVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateMetadataSelfLink(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateMetadataUid(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateMetadataNamespace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - project, err := getProject(d, config) - if err != nil { - return project, nil - } - } - return v, nil -} - -func expandCloudRunServiceSpecTemplateMetadataAnnotations(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunServiceSpecTemplateMetadataName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if d.Get("autogenerate_revision_name") == true { - return nil, nil - } - return v, nil -} - -func 
expandCloudRunServiceSpecTemplateSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedContainers, err := expandCloudRunServiceSpecTemplateSpecContainers(original["containers"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedContainers); val.IsValid() && !isEmptyValue(val) { - transformed["containers"] = transformedContainers - } - - transformedContainerConcurrency, err := expandCloudRunServiceSpecTemplateSpecContainerConcurrency(original["container_concurrency"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedContainerConcurrency); val.IsValid() && !isEmptyValue(val) { - transformed["containerConcurrency"] = transformedContainerConcurrency - } - - transformedTimeoutSeconds, err := expandCloudRunServiceSpecTemplateSpecTimeoutSeconds(original["timeout_seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["timeoutSeconds"] = transformedTimeoutSeconds - } - - transformedServiceAccountName, err := expandCloudRunServiceSpecTemplateSpecServiceAccountName(original["service_account_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedServiceAccountName); val.IsValid() && !isEmptyValue(val) { - transformed["serviceAccountName"] = transformedServiceAccountName - } - - transformedVolumes, err := expandCloudRunServiceSpecTemplateSpecVolumes(original["volumes"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedVolumes); 
val.IsValid() && !isEmptyValue(val) { - transformed["volumes"] = transformedVolumes - } - - transformedServingState, err := expandCloudRunServiceSpecTemplateSpecServingState(original["serving_state"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedServingState); val.IsValid() && !isEmptyValue(val) { - transformed["servingState"] = transformedServingState - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWorkingDir, err := expandCloudRunServiceSpecTemplateSpecContainersWorkingDir(original["working_dir"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedWorkingDir); val.IsValid() && !isEmptyValue(val) { - transformed["workingDir"] = transformedWorkingDir - } - - transformedArgs, err := expandCloudRunServiceSpecTemplateSpecContainersArgs(original["args"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedArgs); val.IsValid() && !isEmptyValue(val) { - transformed["args"] = transformedArgs - } - - transformedEnvFrom, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFrom(original["env_from"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedEnvFrom); val.IsValid() && !isEmptyValue(val) { - transformed["envFrom"] = transformedEnvFrom - } - - transformedImage, err := expandCloudRunServiceSpecTemplateSpecContainersImage(original["image"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_cloud_run_service_reflect.ValueOf(transformedImage); val.IsValid() && !isEmptyValue(val) { - transformed["image"] = transformedImage - } - - transformedCommand, err := expandCloudRunServiceSpecTemplateSpecContainersCommand(original["command"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedCommand); val.IsValid() && !isEmptyValue(val) { - transformed["command"] = transformedCommand - } - - transformedEnv, err := expandCloudRunServiceSpecTemplateSpecContainersEnv(original["env"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedEnv); val.IsValid() && !isEmptyValue(val) { - transformed["env"] = transformedEnv - } - - transformedPorts, err := expandCloudRunServiceSpecTemplateSpecContainersPorts(original["ports"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedPorts); val.IsValid() && !isEmptyValue(val) { - transformed["ports"] = transformedPorts - } - - transformedResources, err := expandCloudRunServiceSpecTemplateSpecContainersResources(original["resources"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { - transformed["resources"] = transformedResources - } - - transformedVolumeMounts, err := expandCloudRunServiceSpecTemplateSpecContainersVolumeMounts(original["volume_mounts"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedVolumeMounts); val.IsValid() && !isEmptyValue(val) { - transformed["volumeMounts"] = transformedVolumeMounts - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersWorkingDir(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil 
-} - -func expandCloudRunServiceSpecTemplateSpecContainersArgs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPrefix, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromPrefix(original["prefix"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedPrefix); val.IsValid() && !isEmptyValue(val) { - transformed["prefix"] = transformedPrefix - } - - transformedConfigMapRef, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRef(original["config_map_ref"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedConfigMapRef); val.IsValid() && !isEmptyValue(val) { - transformed["configMapRef"] = transformedConfigMapRef - } - - transformedSecretRef, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRef(original["secret_ref"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedSecretRef); val.IsValid() && !isEmptyValue(val) { - transformed["secretRef"] = transformedSecretRef - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFromPrefix(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRef(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return 
nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedOptional, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefOptional(original["optional"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedOptional); val.IsValid() && !isEmptyValue(val) { - transformed["optional"] = transformedOptional - } - - transformedLocalObjectReference, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReference(original["local_object_reference"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedLocalObjectReference); val.IsValid() && !isEmptyValue(val) { - transformed["localObjectReference"] = transformedLocalObjectReference - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefOptional(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReferenceName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFromConfigMapRefLocalObjectReferenceName(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRef(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLocalObjectReference, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReference(original["local_object_reference"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedLocalObjectReference); val.IsValid() && !isEmptyValue(val) { - transformed["localObjectReference"] = transformedLocalObjectReference - } - - transformedOptional, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefOptional(original["optional"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedOptional); val.IsValid() && !isEmptyValue(val) { - transformed["optional"] = transformedOptional - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReferenceName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func 
expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefLocalObjectReferenceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvFromSecretRefOptional(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersCommand(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnv(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_cloud_run_service_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersEnvName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedValue, err := expandCloudRunServiceSpecTemplateSpecContainersEnvValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - transformedValueFrom, err := expandCloudRunServiceSpecTemplateSpecContainersEnvValueFrom(original["value_from"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedValueFrom); val.IsValid() && 
!isEmptyValue(val) { - transformed["valueFrom"] = transformedValueFrom - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvValueFrom(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSecretKeyRef, err := expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRef(original["secret_key_ref"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedSecretKeyRef); val.IsValid() && !isEmptyValue(val) { - transformed["secretKeyRef"] = transformedSecretKeyRef - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRef(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefName(original["name"], d, config) - if err 
!= nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersEnvValueFromSecretKeyRefName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersPorts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersPortsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedProtocol, err := expandCloudRunServiceSpecTemplateSpecContainersPortsProtocol(original["protocol"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedProtocol); val.IsValid() && !isEmptyValue(val) { - transformed["protocol"] = transformedProtocol - } - - transformedContainerPort, err := expandCloudRunServiceSpecTemplateSpecContainersPortsContainerPort(original["container_port"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedContainerPort); val.IsValid() && !isEmptyValue(val) { - transformed["containerPort"] = transformedContainerPort - } - - req = append(req, transformed) - } - return 
req, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersPortsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersPortsProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersPortsContainerPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLimits, err := expandCloudRunServiceSpecTemplateSpecContainersResourcesLimits(original["limits"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedLimits); val.IsValid() && !isEmptyValue(val) { - transformed["limits"] = transformedLimits - } - - transformedRequests, err := expandCloudRunServiceSpecTemplateSpecContainersResourcesRequests(original["requests"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedRequests); val.IsValid() && !isEmptyValue(val) { - transformed["requests"] = transformedRequests - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersResourcesLimits(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersResourcesRequests(v interface{}, d TerraformResourceData, config 
*Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersVolumeMounts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMountPath, err := expandCloudRunServiceSpecTemplateSpecContainersVolumeMountsMountPath(original["mount_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedMountPath); val.IsValid() && !isEmptyValue(val) { - transformed["mountPath"] = transformedMountPath - } - - transformedName, err := expandCloudRunServiceSpecTemplateSpecContainersVolumeMountsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersVolumeMountsMountPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainersVolumeMountsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecContainerConcurrency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandCloudRunServiceSpecTemplateSpecServiceAccountName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecVolumes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudRunServiceSpecTemplateSpecVolumesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedSecret, err := expandCloudRunServiceSpecTemplateSpecVolumesSecret(original["secret"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedSecret); val.IsValid() && !isEmptyValue(val) { - transformed["secret"] = transformedSecret - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunServiceSpecTemplateSpecVolumesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecVolumesSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSecretName, err := expandCloudRunServiceSpecTemplateSpecVolumesSecretSecretName(original["secret_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedSecretName); val.IsValid() && !isEmptyValue(val) { - transformed["secretName"] = transformedSecretName 
- } - - transformedItems, err := expandCloudRunServiceSpecTemplateSpecVolumesSecretItems(original["items"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedItems); val.IsValid() && !isEmptyValue(val) { - transformed["items"] = transformedItems - } - - return transformed, nil -} - -func expandCloudRunServiceSpecTemplateSpecVolumesSecretSecretName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecVolumesSecretItems(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - transformedPath, err := expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecVolumesSecretItemsPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceSpecTemplateSpecServingState(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceMetadata(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLabels, err := expandCloudRunServiceMetadataLabels(original["labels"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - transformedGeneration, err := expandCloudRunServiceMetadataGeneration(original["generation"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedGeneration); val.IsValid() && !isEmptyValue(val) { - transformed["generation"] = transformedGeneration - } - - transformedResourceVersion, err := expandCloudRunServiceMetadataResourceVersion(original["resource_version"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedResourceVersion); val.IsValid() && !isEmptyValue(val) { - transformed["resourceVersion"] = transformedResourceVersion - } - - transformedSelfLink, err := expandCloudRunServiceMetadataSelfLink(original["self_link"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedSelfLink); val.IsValid() && !isEmptyValue(val) { - transformed["selfLink"] = transformedSelfLink - } - - transformedUid, err := expandCloudRunServiceMetadataUid(original["uid"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedUid); val.IsValid() && !isEmptyValue(val) { - transformed["uid"] = transformedUid - } - - transformedNamespace, err := 
expandCloudRunServiceMetadataNamespace(original["namespace"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedNamespace); val.IsValid() && !isEmptyValue(val) { - transformed["namespace"] = transformedNamespace - } - - transformedAnnotations, err := expandCloudRunServiceMetadataAnnotations(original["annotations"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_run_service_reflect.ValueOf(transformedAnnotations); val.IsValid() && !isEmptyValue(val) { - transformed["annotations"] = transformedAnnotations - } - - return transformed, nil -} - -func expandCloudRunServiceMetadataLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudRunServiceMetadataGeneration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceMetadataResourceVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceMetadataSelfLink(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceMetadataUid(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudRunServiceMetadataNamespace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - project, err := getProject(d, config) - if err != nil { - return project, nil - } - } - return v, nil -} - -func expandCloudRunServiceMetadataAnnotations(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := 
make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceCloudRunServiceEncoder(d *resource_cloud_run_service_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - name := d.Get("name").(string) - if obj["metadata"] == nil { - obj["metadata"] = make(map[string]interface{}) - } - metadata := obj["metadata"].(map[string]interface{}) - metadata["name"] = name - - obj["apiVersion"] = "serving.knative.dev/v1" - obj["kind"] = "Service" - return obj, nil -} - -func resourceCloudRunServiceDecoder(d *resource_cloud_run_service_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - - if obj, ok := res["metadata"]; ok { - if meta, ok := obj.(map[string]interface{}); ok { - res["name"] = meta["name"] - } else { - return nil, resource_cloud_run_service_fmt.Errorf("Unable to decode 'metadata' block from API response.") - } - } - return res, nil -} - -func validateAuthHeaders(_ resource_cloud_scheduler_job_context.Context, diff *resource_cloud_scheduler_job_schema.ResourceDiff, v interface{}) error { - httpBlock := diff.Get("http_target.0").(map[string]interface{}) - - if httpBlock != nil { - oauth := httpBlock["oauth_token"] - oidc := httpBlock["oidc_token"] - - if oauth != nil && oidc != nil { - if len(oidc.([]interface{})) > 0 && len(oauth.([]interface{})) > 0 { - return resource_cloud_scheduler_job_fmt.Errorf("Error in http_target: only one of oauth_token or oidc_token can be specified, but not both.") - } - } - } - - return nil -} - -func authHeaderDiffSuppress(k, old, new string, d *resource_cloud_scheduler_job_schema.ResourceData) bool { - - b := resource_cloud_scheduler_job_strings.Split(k, ".") - if b[0] == "http_target" && len(b) > 4 { - block := b[2] - attr := b[4] - - if block == "oauth_token" && attr == "scope" { - if old == canonicalizeServiceScope("cloud-platform") && new == "" { - return 
true - } - } - - if block == "oidc_token" && attr == "audience" { - uri := d.Get(resource_cloud_scheduler_job_strings.Join(b[0:2], ".") + ".uri") - if old == uri && new == "" { - return true - } - } - - } - - return false -} - -func validateHttpHeaders() resource_cloud_scheduler_job_schema.SchemaValidateFunc { - return func(i interface{}, k string) (s []string, es []error) { - headers := i.(map[string]interface{}) - if _, ok := headers["Content-Length"]; ok { - es = append(es, resource_cloud_scheduler_job_fmt.Errorf("Cannot set the Content-Length header on %s", k)) - return - } - r := resource_cloud_scheduler_job_regexp.MustCompile(`(X-Google-|X-AppEngine-).*`) - for key := range headers { - if r.MatchString(key) { - es = append(es, resource_cloud_scheduler_job_fmt.Errorf("Cannot set the %s header on %s", key, k)) - return - } - } - - return - } -} - -func resourceCloudSchedulerJob() *resource_cloud_scheduler_job_schema.Resource { - return &resource_cloud_scheduler_job_schema.Resource{ - Create: resourceCloudSchedulerJobCreate, - Read: resourceCloudSchedulerJobRead, - Update: resourceCloudSchedulerJobUpdate, - Delete: resourceCloudSchedulerJobDelete, - - Importer: &resource_cloud_scheduler_job_schema.ResourceImporter{ - State: resourceCloudSchedulerJobImport, - }, - - Timeouts: &resource_cloud_scheduler_job_schema.ResourceTimeout{ - Create: resource_cloud_scheduler_job_schema.DefaultTimeout(4 * resource_cloud_scheduler_job_time.Minute), - Update: resource_cloud_scheduler_job_schema.DefaultTimeout(4 * resource_cloud_scheduler_job_time.Minute), - Delete: resource_cloud_scheduler_job_schema.DefaultTimeout(4 * resource_cloud_scheduler_job_time.Minute), - }, - - CustomizeDiff: validateAuthHeaders, - - Schema: map[string]*resource_cloud_scheduler_job_schema.Schema{ - "name": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the job.`, - }, - "app_engine_http_target": { - Type: 
resource_cloud_scheduler_job_schema.TypeList, - Optional: true, - Description: `App Engine HTTP target. -If the job providers a App Engine HTTP target the cron will -send a request to the service instance`, - MaxItems: 1, - Elem: &resource_cloud_scheduler_job_schema.Resource{ - Schema: map[string]*resource_cloud_scheduler_job_schema.Schema{ - "relative_uri": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Required: true, - Description: `The relative URI. -The relative URL must begin with "/" and must be a valid HTTP relative URL. -It can contain a path, query string arguments, and \# fragments. -If the relative URL is empty, then the root path "/" will be used. -No spaces are allowed, and the maximum length allowed is 2083 characters`, - }, - "app_engine_routing": { - Type: resource_cloud_scheduler_job_schema.TypeList, - Optional: true, - Description: `App Engine Routing setting for the job.`, - MaxItems: 1, - Elem: &resource_cloud_scheduler_job_schema.Resource{ - Schema: map[string]*resource_cloud_scheduler_job_schema.Schema{ - "instance": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Optional: true, - Description: `App instance. -By default, the job is sent to an instance which is available when the job is attempted.`, - AtLeastOneOf: []string{"app_engine_http_target.0.app_engine_routing.0.service", "app_engine_http_target.0.app_engine_routing.0.version", "app_engine_http_target.0.app_engine_routing.0.instance"}, - }, - "service": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Optional: true, - Description: `App service. 
-By default, the job is sent to the service which is the default service when the job is attempted.`, - AtLeastOneOf: []string{"app_engine_http_target.0.app_engine_routing.0.service", "app_engine_http_target.0.app_engine_routing.0.version", "app_engine_http_target.0.app_engine_routing.0.instance"}, - }, - "version": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Optional: true, - Description: `App version. -By default, the job is sent to the version which is the default version when the job is attempted.`, - AtLeastOneOf: []string{"app_engine_http_target.0.app_engine_routing.0.service", "app_engine_http_target.0.app_engine_routing.0.version", "app_engine_http_target.0.app_engine_routing.0.instance"}, - }, - }, - }, - }, - "body": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Optional: true, - ValidateFunc: validateBase64String, - Description: `HTTP request body. -A request body is allowed only if the HTTP method is POST or PUT. -It will result in invalid argument error to set a body on a job with an incompatible HttpMethod. - -A base64-encoded string.`, - }, - "headers": { - Type: resource_cloud_scheduler_job_schema.TypeMap, - Optional: true, - ValidateFunc: validateHttpHeaders(), - Description: `HTTP request headers. -This map contains the header field names and values. -Headers can be set when the job is created.`, - Elem: &resource_cloud_scheduler_job_schema.Schema{Type: resource_cloud_scheduler_job_schema.TypeString}, - }, - "http_method": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Optional: true, - Description: `Which HTTP method to use for the request.`, - }, - }, - }, - ExactlyOneOf: []string{"pubsub_target", "http_target", "app_engine_http_target"}, - }, - "attempt_deadline": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Optional: true, - DiffSuppressFunc: emptyOrDefaultStringSuppress("180s"), - Description: `The deadline for job attempts. 
If the request handler does not respond by this deadline then the request is -cancelled and the attempt is marked as a DEADLINE_EXCEEDED failure. The failed attempt can be viewed in -execution logs. Cloud Scheduler will retry the job according to the RetryConfig. -The allowed duration for this deadline is: -* For HTTP targets, between 15 seconds and 30 minutes. -* For App Engine HTTP targets, between 15 seconds and 24 hours. -* **Note**: For PubSub targets, this field is ignored - setting it will introduce an unresolvable diff. -A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s"`, - Default: "180s", - }, - "description": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Optional: true, - Description: `A human-readable description for the job. -This string must not contain more than 500 characters.`, - }, - "http_target": { - Type: resource_cloud_scheduler_job_schema.TypeList, - Optional: true, - Description: `HTTP target. -If the job providers a http_target the cron will -send a request to the targeted url`, - MaxItems: 1, - Elem: &resource_cloud_scheduler_job_schema.Resource{ - Schema: map[string]*resource_cloud_scheduler_job_schema.Schema{ - "uri": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Required: true, - Description: `The full URI path that the request will be sent to.`, - }, - "body": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Optional: true, - ValidateFunc: validateBase64String, - Description: `HTTP request body. -A request body is allowed only if the HTTP method is POST, PUT, or PATCH. -It is an error to set body on a job with an incompatible HttpMethod. - -A base64-encoded string.`, - }, - "headers": { - Type: resource_cloud_scheduler_job_schema.TypeMap, - Optional: true, - ValidateFunc: validateHttpHeaders(), - Description: `This map contains the header field names and values. 
-Repeated headers are not supported, but a header value can contain commas.`, - Elem: &resource_cloud_scheduler_job_schema.Schema{Type: resource_cloud_scheduler_job_schema.TypeString}, - }, - "http_method": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Optional: true, - Description: `Which HTTP method to use for the request.`, - }, - "oauth_token": { - Type: resource_cloud_scheduler_job_schema.TypeList, - Optional: true, - DiffSuppressFunc: authHeaderDiffSuppress, - Description: `Contains information needed for generating an OAuth token. -This type of authorization should be used when sending requests to a GCP endpoint.`, - MaxItems: 1, - Elem: &resource_cloud_scheduler_job_schema.Resource{ - Schema: map[string]*resource_cloud_scheduler_job_schema.Schema{ - "service_account_email": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Required: true, - Description: `Service account email to be used for generating OAuth token. -The service account must be within the same project as the job.`, - }, - "scope": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Optional: true, - Description: `OAuth scope to be used for generating OAuth access token. If not specified, -"https://www.googleapis.com/auth/cloud-platform" will be used.`, - }, - }, - }, - }, - "oidc_token": { - Type: resource_cloud_scheduler_job_schema.TypeList, - Optional: true, - DiffSuppressFunc: authHeaderDiffSuppress, - Description: `Contains information needed for generating an OpenID Connect token. -This type of authorization should be used when sending requests to third party endpoints or Cloud Run.`, - MaxItems: 1, - Elem: &resource_cloud_scheduler_job_schema.Resource{ - Schema: map[string]*resource_cloud_scheduler_job_schema.Schema{ - "service_account_email": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Required: true, - Description: `Service account email to be used for generating OAuth token. 
-The service account must be within the same project as the job.`, - }, - "audience": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Optional: true, - Description: `Audience to be used when generating OIDC token. If not specified, -the URI specified in target will be used.`, - }, - }, - }, - }, - }, - }, - ExactlyOneOf: []string{"pubsub_target", "http_target", "app_engine_http_target"}, - }, - "pubsub_target": { - Type: resource_cloud_scheduler_job_schema.TypeList, - Optional: true, - Description: `Pub/Sub target -If the job providers a Pub/Sub target the cron will publish -a message to the provided topic`, - MaxItems: 1, - Elem: &resource_cloud_scheduler_job_schema.Resource{ - Schema: map[string]*resource_cloud_scheduler_job_schema.Schema{ - "topic_name": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Required: true, - Description: `The full resource name for the Cloud Pub/Sub topic to which -messages will be published when a job is delivered. ~>**NOTE:** -The topic name must be in the same format as required by PubSub's -PublishRequest.name, e.g. 'projects/my-project/topics/my-topic'.`, - }, - "attributes": { - Type: resource_cloud_scheduler_job_schema.TypeMap, - Optional: true, - Description: `Attributes for PubsubMessage. -Pubsub message must contain either non-empty data, or at least one attribute.`, - Elem: &resource_cloud_scheduler_job_schema.Schema{Type: resource_cloud_scheduler_job_schema.TypeString}, - }, - "data": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Optional: true, - ValidateFunc: validateBase64String, - Description: `The message payload for PubsubMessage. -Pubsub message must contain either non-empty data, or at least one attribute. 
- - A base64-encoded string.`, - }, - }, - }, - ExactlyOneOf: []string{"pubsub_target", "http_target", "app_engine_http_target"}, - }, - "region": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Region where the scheduler job resides. If it is not provided, Terraform will use the provider default.`, - }, - "retry_config": { - Type: resource_cloud_scheduler_job_schema.TypeList, - Optional: true, - Description: `By default, if a job does not complete successfully, -meaning that an acknowledgement is not received from the handler, -then it will be retried with exponential backoff according to the settings`, - MaxItems: 1, - Elem: &resource_cloud_scheduler_job_schema.Resource{ - Schema: map[string]*resource_cloud_scheduler_job_schema.Schema{ - "max_backoff_duration": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Computed: true, - Optional: true, - Description: `The maximum amount of time to wait before retrying a job after it fails. -A duration in seconds with up to nine fractional digits, terminated by 's'.`, - AtLeastOneOf: []string{"retry_config.0.retry_count", "retry_config.0.max_retry_duration", "retry_config.0.min_backoff_duration", "retry_config.0.max_backoff_duration", "retry_config.0.max_doublings"}, - }, - "max_doublings": { - Type: resource_cloud_scheduler_job_schema.TypeInt, - Computed: true, - Optional: true, - Description: `The time between retries will double maxDoublings times. 
-A job's retry interval starts at minBackoffDuration, -then doubles maxDoublings times, then increases linearly, -and finally retries retries at intervals of maxBackoffDuration up to retryCount times.`, - AtLeastOneOf: []string{"retry_config.0.retry_count", "retry_config.0.max_retry_duration", "retry_config.0.min_backoff_duration", "retry_config.0.max_backoff_duration", "retry_config.0.max_doublings"}, - }, - "max_retry_duration": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Computed: true, - Optional: true, - Description: `The time limit for retrying a failed job, measured from time when an execution was first attempted. -If specified with retryCount, the job will be retried until both limits are reached. -A duration in seconds with up to nine fractional digits, terminated by 's'.`, - AtLeastOneOf: []string{"retry_config.0.retry_count", "retry_config.0.max_retry_duration", "retry_config.0.min_backoff_duration", "retry_config.0.max_backoff_duration", "retry_config.0.max_doublings"}, - }, - "min_backoff_duration": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Computed: true, - Optional: true, - Description: `The minimum amount of time to wait before retrying a job after it fails. -A duration in seconds with up to nine fractional digits, terminated by 's'.`, - AtLeastOneOf: []string{"retry_config.0.retry_count", "retry_config.0.max_retry_duration", "retry_config.0.min_backoff_duration", "retry_config.0.max_backoff_duration", "retry_config.0.max_doublings"}, - }, - "retry_count": { - Type: resource_cloud_scheduler_job_schema.TypeInt, - Computed: true, - Optional: true, - Description: `The number of attempts that the system will make to run a -job using the exponential backoff procedure described by maxDoublings. 
-Values greater than 5 and negative values are not allowed.`, - AtLeastOneOf: []string{"retry_config.0.retry_count", "retry_config.0.max_retry_duration", "retry_config.0.min_backoff_duration", "retry_config.0.max_backoff_duration", "retry_config.0.max_doublings"}, - }, - }, - }, - }, - "schedule": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Optional: true, - Description: `Describes the schedule on which the job will be executed.`, - }, - "time_zone": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Optional: true, - Description: `Specifies the time zone to be used in interpreting schedule. -The value of this field must be a time zone name from the tz database.`, - Default: "Etc/UTC", - }, - "project": { - Type: resource_cloud_scheduler_job_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudSchedulerJobCreate(d *resource_cloud_scheduler_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandCloudSchedulerJobName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(nameProp)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandCloudSchedulerJobDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(descriptionProp)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - scheduleProp, err := expandCloudSchedulerJobSchedule(d.Get("schedule"), d, config) - if err != nil { - return err - } else 
if v, ok := d.GetOkExists("schedule"); !isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(scheduleProp)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, scheduleProp)) { - obj["schedule"] = scheduleProp - } - timeZoneProp, err := expandCloudSchedulerJobTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(timeZoneProp)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - attemptDeadlineProp, err := expandCloudSchedulerJobAttemptDeadline(d.Get("attempt_deadline"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attempt_deadline"); !isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(attemptDeadlineProp)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, attemptDeadlineProp)) { - obj["attemptDeadline"] = attemptDeadlineProp - } - retryConfigProp, err := expandCloudSchedulerJobRetryConfig(d.Get("retry_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("retry_config"); !isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(retryConfigProp)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, retryConfigProp)) { - obj["retryConfig"] = retryConfigProp - } - pubsubTargetProp, err := expandCloudSchedulerJobPubsubTarget(d.Get("pubsub_target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("pubsub_target"); !isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(pubsubTargetProp)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, pubsubTargetProp)) { - obj["pubsubTarget"] = pubsubTargetProp - } - appEngineHttpTargetProp, err := expandCloudSchedulerJobAppEngineHttpTarget(d.Get("app_engine_http_target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("app_engine_http_target"); 
!isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(appEngineHttpTargetProp)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, appEngineHttpTargetProp)) { - obj["appEngineHttpTarget"] = appEngineHttpTargetProp - } - httpTargetProp, err := expandCloudSchedulerJobHttpTarget(d.Get("http_target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("http_target"); !isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(httpTargetProp)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, httpTargetProp)) { - obj["httpTarget"] = httpTargetProp - } - - url, err := replaceVars(d, config, "{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs") - if err != nil { - return err - } - - resource_cloud_scheduler_job_log.Printf("[DEBUG] Creating new Job: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error fetching project for Job: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_scheduler_job_schema.TimeoutCreate)) - if err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error creating Job: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/jobs/{{name}}") - if err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_cloud_scheduler_job_log.Printf("[DEBUG] Finished creating Job %q: %#v", d.Id(), res) - - return resourceCloudSchedulerJobRead(d, meta) -} - -func resourceCloudSchedulerJobRead(d *resource_cloud_scheduler_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - 
url, err := replaceVars(d, config, "{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error fetching project for Job: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_cloud_scheduler_job_fmt.Sprintf("CloudSchedulerJob %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error reading Job: %s", err) - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - if err := d.Set("region", region); err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error reading Job: %s", err) - } - - if err := d.Set("name", flattenCloudSchedulerJobName(res["name"], d, config)); err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error reading Job: %s", err) - } - if err := d.Set("description", flattenCloudSchedulerJobDescription(res["description"], d, config)); err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error reading Job: %s", err) - } - if err := d.Set("schedule", flattenCloudSchedulerJobSchedule(res["schedule"], d, config)); err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error reading Job: %s", err) - } - if err := d.Set("time_zone", flattenCloudSchedulerJobTimeZone(res["timeZone"], d, config)); err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error reading Job: %s", err) - } - if err := d.Set("attempt_deadline", flattenCloudSchedulerJobAttemptDeadline(res["attemptDeadline"], d, config)); err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error reading Job: %s", err) - } - if err := 
d.Set("retry_config", flattenCloudSchedulerJobRetryConfig(res["retryConfig"], d, config)); err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error reading Job: %s", err) - } - if err := d.Set("pubsub_target", flattenCloudSchedulerJobPubsubTarget(res["pubsubTarget"], d, config)); err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error reading Job: %s", err) - } - if err := d.Set("app_engine_http_target", flattenCloudSchedulerJobAppEngineHttpTarget(res["appEngineHttpTarget"], d, config)); err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error reading Job: %s", err) - } - if err := d.Set("http_target", flattenCloudSchedulerJobHttpTarget(res["httpTarget"], d, config)); err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error reading Job: %s", err) - } - - return nil -} - -func resourceCloudSchedulerJobUpdate(d *resource_cloud_scheduler_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error fetching project for Job: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandCloudSchedulerJobDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(v)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - scheduleProp, err := expandCloudSchedulerJobSchedule(d.Get("schedule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("schedule"); !isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(v)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, scheduleProp)) { - 
obj["schedule"] = scheduleProp - } - timeZoneProp, err := expandCloudSchedulerJobTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(v)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - attemptDeadlineProp, err := expandCloudSchedulerJobAttemptDeadline(d.Get("attempt_deadline"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attempt_deadline"); !isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(v)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, attemptDeadlineProp)) { - obj["attemptDeadline"] = attemptDeadlineProp - } - retryConfigProp, err := expandCloudSchedulerJobRetryConfig(d.Get("retry_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("retry_config"); !isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(v)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, retryConfigProp)) { - obj["retryConfig"] = retryConfigProp - } - pubsubTargetProp, err := expandCloudSchedulerJobPubsubTarget(d.Get("pubsub_target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("pubsub_target"); !isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(v)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, pubsubTargetProp)) { - obj["pubsubTarget"] = pubsubTargetProp - } - appEngineHttpTargetProp, err := expandCloudSchedulerJobAppEngineHttpTarget(d.Get("app_engine_http_target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("app_engine_http_target"); !isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(v)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, appEngineHttpTargetProp)) { - obj["appEngineHttpTarget"] = appEngineHttpTargetProp - } - httpTargetProp, err := 
expandCloudSchedulerJobHttpTarget(d.Get("http_target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("http_target"); !isEmptyValue(resource_cloud_scheduler_job_reflect.ValueOf(v)) && (ok || !resource_cloud_scheduler_job_reflect.DeepEqual(v, httpTargetProp)) { - obj["httpTarget"] = httpTargetProp - } - - url, err := replaceVars(d, config, "{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}") - if err != nil { - return err - } - - resource_cloud_scheduler_job_log.Printf("[DEBUG] Updating Job %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_scheduler_job_schema.TimeoutUpdate)) - - if err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error updating Job %q: %s", d.Id(), err) - } else { - resource_cloud_scheduler_job_log.Printf("[DEBUG] Finished updating Job %q: %#v", d.Id(), res) - } - - return resourceCloudSchedulerJobRead(d, meta) -} - -func resourceCloudSchedulerJobDelete(d *resource_cloud_scheduler_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_scheduler_job_fmt.Errorf("Error fetching project for Job: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CloudSchedulerBasePath}}projects/{{project}}/locations/{{region}}/jobs/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_cloud_scheduler_job_log.Printf("[DEBUG] Deleting Job %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, 
obj, d.Timeout(resource_cloud_scheduler_job_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Job") - } - - resource_cloud_scheduler_job_log.Printf("[DEBUG] Finished deleting Job %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudSchedulerJobImport(d *resource_cloud_scheduler_job_schema.ResourceData, meta interface{}) ([]*resource_cloud_scheduler_job_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/jobs/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/jobs/{{name}}") - if err != nil { - return nil, resource_cloud_scheduler_job_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_cloud_scheduler_job_schema.ResourceData{d}, nil -} - -func flattenCloudSchedulerJobName(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenCloudSchedulerJobDescription(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobSchedule(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobTimeZone(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobAttemptDeadline(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobRetryConfig(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := 
v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["retry_count"] = - flattenCloudSchedulerJobRetryConfigRetryCount(original["retryCount"], d, config) - transformed["max_retry_duration"] = - flattenCloudSchedulerJobRetryConfigMaxRetryDuration(original["maxRetryDuration"], d, config) - transformed["min_backoff_duration"] = - flattenCloudSchedulerJobRetryConfigMinBackoffDuration(original["minBackoffDuration"], d, config) - transformed["max_backoff_duration"] = - flattenCloudSchedulerJobRetryConfigMaxBackoffDuration(original["maxBackoffDuration"], d, config) - transformed["max_doublings"] = - flattenCloudSchedulerJobRetryConfigMaxDoublings(original["maxDoublings"], d, config) - return []interface{}{transformed} -} - -func flattenCloudSchedulerJobRetryConfigRetryCount(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_cloud_scheduler_job_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenCloudSchedulerJobRetryConfigMaxRetryDuration(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobRetryConfigMinBackoffDuration(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobRetryConfigMaxBackoffDuration(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobRetryConfigMaxDoublings(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_cloud_scheduler_job_strconv.ParseInt(strVal, 
10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenCloudSchedulerJobPubsubTarget(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["topic_name"] = - flattenCloudSchedulerJobPubsubTargetTopicName(original["topicName"], d, config) - transformed["data"] = - flattenCloudSchedulerJobPubsubTargetData(original["data"], d, config) - transformed["attributes"] = - flattenCloudSchedulerJobPubsubTargetAttributes(original["attributes"], d, config) - return []interface{}{transformed} -} - -func flattenCloudSchedulerJobPubsubTargetTopicName(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobPubsubTargetData(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobPubsubTargetAttributes(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobAppEngineHttpTarget(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["http_method"] = - flattenCloudSchedulerJobAppEngineHttpTargetHttpMethod(original["httpMethod"], d, config) - transformed["app_engine_routing"] = - flattenCloudSchedulerJobAppEngineHttpTargetAppEngineRouting(original["appEngineRouting"], d, config) - transformed["relative_uri"] = - flattenCloudSchedulerJobAppEngineHttpTargetRelativeUri(original["relativeUri"], 
d, config) - transformed["body"] = - flattenCloudSchedulerJobAppEngineHttpTargetBody(original["body"], d, config) - transformed["headers"] = - flattenCloudSchedulerJobAppEngineHttpTargetHeaders(original["headers"], d, config) - return []interface{}{transformed} -} - -func flattenCloudSchedulerJobAppEngineHttpTargetHttpMethod(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobAppEngineHttpTargetAppEngineRouting(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - if stateV, ok := d.GetOk("app_engine_http_target"); ok && len(stateV.([]interface{})) > 0 { - return d.Get("app_engine_http_target.0.app_engine_routing") - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["service"] = original["service"] - transformed["version"] = original["version"] - transformed["instance"] = original["instance"] - return []interface{}{transformed} -} - -func flattenCloudSchedulerJobAppEngineHttpTargetRelativeUri(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobAppEngineHttpTargetBody(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobAppEngineHttpTargetHeaders(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - var headers = v.(map[string]interface{}) - if v, ok := headers["User-Agent"]; ok { - if v.(string) == "AppEngine-Google; (+http://code.google.com/appengine)" { - delete(headers, "User-Agent") - } else if v.(string) == "Google-Cloud-Scheduler" { - delete(headers, "User-Agent") - } else { - headers["User-Agent"] = 
resource_cloud_scheduler_job_strings.TrimSpace(resource_cloud_scheduler_job_strings.Replace(v.(string), "AppEngine-Google; (+http://code.google.com/appengine)", "", -1)) - } - } - if v, ok := headers["Content-Type"]; ok { - if v.(string) == "application/octet-stream" { - delete(headers, "Content-Type") - } - } - r := resource_cloud_scheduler_job_regexp.MustCompile(`(X-Google-|X-AppEngine-|Content-Length).*`) - for key := range headers { - if r.MatchString(key) { - delete(headers, key) - } - } - return headers -} - -func flattenCloudSchedulerJobHttpTarget(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["uri"] = - flattenCloudSchedulerJobHttpTargetUri(original["uri"], d, config) - transformed["http_method"] = - flattenCloudSchedulerJobHttpTargetHttpMethod(original["httpMethod"], d, config) - transformed["body"] = - flattenCloudSchedulerJobHttpTargetBody(original["body"], d, config) - transformed["headers"] = - flattenCloudSchedulerJobHttpTargetHeaders(original["headers"], d, config) - transformed["oauth_token"] = - flattenCloudSchedulerJobHttpTargetOauthToken(original["oauthToken"], d, config) - transformed["oidc_token"] = - flattenCloudSchedulerJobHttpTargetOidcToken(original["oidcToken"], d, config) - return []interface{}{transformed} -} - -func flattenCloudSchedulerJobHttpTargetUri(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobHttpTargetHttpMethod(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobHttpTargetBody(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudSchedulerJobHttpTargetHeaders(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - var headers = v.(map[string]interface{}) - if v, ok := headers["User-Agent"]; ok { - if v.(string) == "AppEngine-Google; (+http://code.google.com/appengine)" { - delete(headers, "User-Agent") - } else if v.(string) == "Google-Cloud-Scheduler" { - delete(headers, "User-Agent") - } else { - headers["User-Agent"] = resource_cloud_scheduler_job_strings.TrimSpace(resource_cloud_scheduler_job_strings.Replace(v.(string), "AppEngine-Google; (+http://code.google.com/appengine)", "", -1)) - } - } - if v, ok := headers["Content-Type"]; ok { - if v.(string) == "application/octet-stream" { - delete(headers, "Content-Type") - } - } - r := resource_cloud_scheduler_job_regexp.MustCompile(`(X-Google-|X-AppEngine-|Content-Length).*`) - for key := range headers { - if r.MatchString(key) { - delete(headers, key) - } - } - return headers -} - -func flattenCloudSchedulerJobHttpTargetOauthToken(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["service_account_email"] = - flattenCloudSchedulerJobHttpTargetOauthTokenServiceAccountEmail(original["serviceAccountEmail"], d, config) - transformed["scope"] = - flattenCloudSchedulerJobHttpTargetOauthTokenScope(original["scope"], d, config) - return []interface{}{transformed} -} - -func flattenCloudSchedulerJobHttpTargetOauthTokenServiceAccountEmail(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobHttpTargetOauthTokenScope(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobHttpTargetOidcToken(v 
interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["service_account_email"] = - flattenCloudSchedulerJobHttpTargetOidcTokenServiceAccountEmail(original["serviceAccountEmail"], d, config) - transformed["audience"] = - flattenCloudSchedulerJobHttpTargetOidcTokenAudience(original["audience"], d, config) - return []interface{}{transformed} -} - -func flattenCloudSchedulerJobHttpTargetOidcTokenServiceAccountEmail(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudSchedulerJobHttpTargetOidcTokenAudience(v interface{}, d *resource_cloud_scheduler_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudSchedulerJobName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/locations/{{region}}/jobs/{{name}}") -} - -func expandCloudSchedulerJobDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobAttemptDeadline(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobRetryConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - 
transformedRetryCount, err := expandCloudSchedulerJobRetryConfigRetryCount(original["retry_count"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedRetryCount); val.IsValid() && !isEmptyValue(val) { - transformed["retryCount"] = transformedRetryCount - } - - transformedMaxRetryDuration, err := expandCloudSchedulerJobRetryConfigMaxRetryDuration(original["max_retry_duration"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedMaxRetryDuration); val.IsValid() && !isEmptyValue(val) { - transformed["maxRetryDuration"] = transformedMaxRetryDuration - } - - transformedMinBackoffDuration, err := expandCloudSchedulerJobRetryConfigMinBackoffDuration(original["min_backoff_duration"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedMinBackoffDuration); val.IsValid() && !isEmptyValue(val) { - transformed["minBackoffDuration"] = transformedMinBackoffDuration - } - - transformedMaxBackoffDuration, err := expandCloudSchedulerJobRetryConfigMaxBackoffDuration(original["max_backoff_duration"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedMaxBackoffDuration); val.IsValid() && !isEmptyValue(val) { - transformed["maxBackoffDuration"] = transformedMaxBackoffDuration - } - - transformedMaxDoublings, err := expandCloudSchedulerJobRetryConfigMaxDoublings(original["max_doublings"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedMaxDoublings); val.IsValid() && !isEmptyValue(val) { - transformed["maxDoublings"] = transformedMaxDoublings - } - - return transformed, nil -} - -func expandCloudSchedulerJobRetryConfigRetryCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandCloudSchedulerJobRetryConfigMaxRetryDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobRetryConfigMinBackoffDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobRetryConfigMaxBackoffDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobRetryConfigMaxDoublings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobPubsubTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTopicName, err := expandCloudSchedulerJobPubsubTargetTopicName(original["topic_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedTopicName); val.IsValid() && !isEmptyValue(val) { - transformed["topicName"] = transformedTopicName - } - - transformedData, err := expandCloudSchedulerJobPubsubTargetData(original["data"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedData); val.IsValid() && !isEmptyValue(val) { - transformed["data"] = transformedData - } - - transformedAttributes, err := expandCloudSchedulerJobPubsubTargetAttributes(original["attributes"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedAttributes); val.IsValid() && !isEmptyValue(val) { - transformed["attributes"] = transformedAttributes - } - - return transformed, nil -} - -func expandCloudSchedulerJobPubsubTargetTopicName(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobPubsubTargetData(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobPubsubTargetAttributes(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudSchedulerJobAppEngineHttpTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHttpMethod, err := expandCloudSchedulerJobAppEngineHttpTargetHttpMethod(original["http_method"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedHttpMethod); val.IsValid() && !isEmptyValue(val) { - transformed["httpMethod"] = transformedHttpMethod - } - - transformedAppEngineRouting, err := expandCloudSchedulerJobAppEngineHttpTargetAppEngineRouting(original["app_engine_routing"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedAppEngineRouting); val.IsValid() && !isEmptyValue(val) { - transformed["appEngineRouting"] = transformedAppEngineRouting - } - - transformedRelativeUri, err := expandCloudSchedulerJobAppEngineHttpTargetRelativeUri(original["relative_uri"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedRelativeUri); val.IsValid() && !isEmptyValue(val) { - transformed["relativeUri"] = transformedRelativeUri - } - - transformedBody, err := 
expandCloudSchedulerJobAppEngineHttpTargetBody(original["body"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedBody); val.IsValid() && !isEmptyValue(val) { - transformed["body"] = transformedBody - } - - transformedHeaders, err := expandCloudSchedulerJobAppEngineHttpTargetHeaders(original["headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["headers"] = transformedHeaders - } - - return transformed, nil -} - -func expandCloudSchedulerJobAppEngineHttpTargetHttpMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobAppEngineHttpTargetAppEngineRouting(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedService, err := expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - transformedVersion, err := expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - transformedInstance, err := expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingInstance(original["instance"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_cloud_scheduler_job_reflect.ValueOf(transformedInstance); val.IsValid() && !isEmptyValue(val) { - transformed["instance"] = transformedInstance - } - - return transformed, nil -} - -func expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobAppEngineHttpTargetAppEngineRoutingInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobAppEngineHttpTargetRelativeUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobAppEngineHttpTargetBody(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobAppEngineHttpTargetHeaders(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudSchedulerJobHttpTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUri, err := expandCloudSchedulerJobHttpTargetUri(original["uri"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedUri); val.IsValid() && !isEmptyValue(val) { - transformed["uri"] = transformedUri - } - - transformedHttpMethod, err := 
expandCloudSchedulerJobHttpTargetHttpMethod(original["http_method"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedHttpMethod); val.IsValid() && !isEmptyValue(val) { - transformed["httpMethod"] = transformedHttpMethod - } - - transformedBody, err := expandCloudSchedulerJobHttpTargetBody(original["body"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedBody); val.IsValid() && !isEmptyValue(val) { - transformed["body"] = transformedBody - } - - transformedHeaders, err := expandCloudSchedulerJobHttpTargetHeaders(original["headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["headers"] = transformedHeaders - } - - transformedOauthToken, err := expandCloudSchedulerJobHttpTargetOauthToken(original["oauth_token"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedOauthToken); val.IsValid() && !isEmptyValue(val) { - transformed["oauthToken"] = transformedOauthToken - } - - transformedOidcToken, err := expandCloudSchedulerJobHttpTargetOidcToken(original["oidc_token"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedOidcToken); val.IsValid() && !isEmptyValue(val) { - transformed["oidcToken"] = transformedOidcToken - } - - return transformed, nil -} - -func expandCloudSchedulerJobHttpTargetUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobHttpTargetHttpMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobHttpTargetBody(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobHttpTargetHeaders(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudSchedulerJobHttpTargetOauthToken(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServiceAccountEmail, err := expandCloudSchedulerJobHttpTargetOauthTokenServiceAccountEmail(original["service_account_email"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !isEmptyValue(val) { - transformed["serviceAccountEmail"] = transformedServiceAccountEmail - } - - transformedScope, err := expandCloudSchedulerJobHttpTargetOauthTokenScope(original["scope"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedScope); val.IsValid() && !isEmptyValue(val) { - transformed["scope"] = transformedScope - } - - return transformed, nil -} - -func expandCloudSchedulerJobHttpTargetOauthTokenServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobHttpTargetOauthTokenScope(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobHttpTargetOidcToken(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServiceAccountEmail, err := expandCloudSchedulerJobHttpTargetOidcTokenServiceAccountEmail(original["service_account_email"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !isEmptyValue(val) { - transformed["serviceAccountEmail"] = transformedServiceAccountEmail - } - - transformedAudience, err := expandCloudSchedulerJobHttpTargetOidcTokenAudience(original["audience"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_scheduler_job_reflect.ValueOf(transformedAudience); val.IsValid() && !isEmptyValue(val) { - transformed["audience"] = transformedAudience - } - - return transformed, nil -} - -func expandCloudSchedulerJobHttpTargetOidcTokenServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudSchedulerJobHttpTargetOidcTokenAudience(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func suppressOmittedMaxDuration(_, old, new string, _ *resource_cloud_tasks_queue_schema.ResourceData) bool { - if old == "" && new == "0s" { - resource_cloud_tasks_queue_log.Printf("[INFO] max retry is 0s and api omitted field, suppressing diff") - return true - } - return false -} - -func resourceCloudTasksQueue() *resource_cloud_tasks_queue_schema.Resource { - return &resource_cloud_tasks_queue_schema.Resource{ - Create: resourceCloudTasksQueueCreate, - Read: resourceCloudTasksQueueRead, - Update: resourceCloudTasksQueueUpdate, - Delete: resourceCloudTasksQueueDelete, - - Importer: &resource_cloud_tasks_queue_schema.ResourceImporter{ - State: resourceCloudTasksQueueImport, - }, - - Timeouts: &resource_cloud_tasks_queue_schema.ResourceTimeout{ - Create: resource_cloud_tasks_queue_schema.DefaultTimeout(4 * 
// resourceCloudTasksQueue defines the google_cloud_tasks_queue resource: its
// CRUD entry points, import support, per-operation timeouts, and the attribute
// schema mirroring the Cloud Tasks Queue API object.
func resourceCloudTasksQueue() *resource_cloud_tasks_queue_schema.Resource {
	return &resource_cloud_tasks_queue_schema.Resource{
		Create: resourceCloudTasksQueueCreate,
		Read:   resourceCloudTasksQueueRead,
		Update: resourceCloudTasksQueueUpdate,
		Delete: resourceCloudTasksQueueDelete,

		Importer: &resource_cloud_tasks_queue_schema.ResourceImporter{
			State: resourceCloudTasksQueueImport,
		},

		// All operations share the generated 4-minute timeout.
		Timeouts: &resource_cloud_tasks_queue_schema.ResourceTimeout{
			Create: resource_cloud_tasks_queue_schema.DefaultTimeout(4 * resource_cloud_tasks_queue_time.Minute),
			Update: resource_cloud_tasks_queue_schema.DefaultTimeout(4 * resource_cloud_tasks_queue_time.Minute),
			Delete: resource_cloud_tasks_queue_schema.DefaultTimeout(4 * resource_cloud_tasks_queue_time.Minute),
		},

		Schema: map[string]*resource_cloud_tasks_queue_schema.Schema{
			// location and name are ForceNew: changing either recreates the queue.
			"location": {
				Type:        resource_cloud_tasks_queue_schema.TypeString,
				Required:    true,
				ForceNew:    true,
				Description: `The location of the queue`,
			},
			"app_engine_routing_override": {
				Type:     resource_cloud_tasks_queue_schema.TypeList,
				Optional: true,
				Description: `Overrides for task-level appEngineRouting. These settings apply only
to App Engine tasks in this queue`,
				MaxItems: 1,
				Elem: &resource_cloud_tasks_queue_schema.Resource{
					Schema: map[string]*resource_cloud_tasks_queue_schema.Schema{
						"instance": {
							Type:     resource_cloud_tasks_queue_schema.TypeString,
							Optional: true,
							Description: `App instance.

By default, the task is sent to an instance which is available when the task is attempted.`,
						},
						"service": {
							Type:     resource_cloud_tasks_queue_schema.TypeString,
							Optional: true,
							Description: `App service.

By default, the task is sent to the service which is the default service when the task is attempted.`,
						},
						"version": {
							Type:     resource_cloud_tasks_queue_schema.TypeString,
							Optional: true,
							Description: `App version.

By default, the task is sent to the version which is the default version when the task is attempted.`,
						},
						// host is output-only: the API computes it from the routing fields.
						"host": {
							Type:        resource_cloud_tasks_queue_schema.TypeString,
							Computed:    true,
							Description: `The host that the task is sent to.`,
						},
					},
				},
			},
			"name": {
				Type:        resource_cloud_tasks_queue_schema.TypeString,
				Optional:    true,
				ForceNew:    true,
				Description: `The queue name.`,
			},
			// rate_limits and retry_config are Computed+Optional: the API fills
			// server-side defaults when the block is not configured.
			"rate_limits": {
				Type:     resource_cloud_tasks_queue_schema.TypeList,
				Computed: true,
				Optional: true,
				Description: `Rate limits for task dispatches.

The queue's actual dispatch rate is the result of:

* Number of tasks in the queue
* User-specified throttling: rateLimits, retryConfig, and the queue's state.
* System throttling due to 429 (Too Many Requests) or 503 (Service
  Unavailable) responses from the worker, high error rates, or to
  smooth sudden large traffic spikes.`,
				MaxItems: 1,
				Elem: &resource_cloud_tasks_queue_schema.Resource{
					Schema: map[string]*resource_cloud_tasks_queue_schema.Schema{
						"max_concurrent_dispatches": {
							Type:     resource_cloud_tasks_queue_schema.TypeInt,
							Computed: true,
							Optional: true,
							Description: `The maximum number of concurrent tasks that Cloud Tasks allows to
be dispatched for this queue. After this threshold has been
reached, Cloud Tasks stops dispatching tasks until the number of
concurrent requests decreases.`,
						},
						"max_dispatches_per_second": {
							Type:     resource_cloud_tasks_queue_schema.TypeFloat,
							Computed: true,
							Optional: true,
							Description: `The maximum rate at which tasks are dispatched from this queue.

If unspecified when the queue is created, Cloud Tasks will pick the default.`,
						},
						// max_burst_size is output-only (Computed, no Optional).
						"max_burst_size": {
							Type:     resource_cloud_tasks_queue_schema.TypeInt,
							Computed: true,
							Description: `The max burst size.

Max burst size limits how fast tasks in queue are processed when many tasks are
in the queue and the rate is high. This field allows the queue to have a high
rate so processing starts shortly after a task is enqueued, but still limits
resource usage when many tasks are enqueued in a short period of time.`,
						},
					},
				},
			},
			"retry_config": {
				Type:        resource_cloud_tasks_queue_schema.TypeList,
				Computed:    true,
				Optional:    true,
				Description: `Settings that determine the retry behavior.`,
				MaxItems:    1,
				Elem: &resource_cloud_tasks_queue_schema.Resource{
					Schema: map[string]*resource_cloud_tasks_queue_schema.Schema{
						"max_attempts": {
							Type:     resource_cloud_tasks_queue_schema.TypeInt,
							Computed: true,
							Optional: true,
							Description: `Number of attempts per task.

Cloud Tasks will attempt the task maxAttempts times (that is, if
the first attempt fails, then there will be maxAttempts - 1
retries). Must be >= -1.

If unspecified when the queue is created, Cloud Tasks will pick
the default.

-1 indicates unlimited attempts.`,
						},
						"max_backoff": {
							Type:     resource_cloud_tasks_queue_schema.TypeString,
							Computed: true,
							Optional: true,
							Description: `A task will be scheduled for retry between minBackoff and
maxBackoff duration after it fails, if the queue's RetryConfig
specifies that the task should be retried.`,
						},
						"max_doublings": {
							Type:     resource_cloud_tasks_queue_schema.TypeInt,
							Computed: true,
							Optional: true,
							Description: `The time between retries will double maxDoublings times.

A task's retry interval starts at minBackoff, then doubles maxDoublings times,
then increases linearly, and finally retries retries at intervals of maxBackoff
up to maxAttempts times.`,
						},
						"max_retry_duration": {
							Type:     resource_cloud_tasks_queue_schema.TypeString,
							Computed: true,
							Optional: true,
							// "" from the API and "0s" in config are equivalent; see
							// suppressOmittedMaxDuration.
							DiffSuppressFunc: suppressOmittedMaxDuration,
							Description: `If positive, maxRetryDuration specifies the time limit for
retrying a failed task, measured from when the task was first
attempted. Once maxRetryDuration time has passed and the task has
been attempted maxAttempts times, no further attempts will be
made and the task will be deleted.

If zero, then the task age is unlimited.`,
						},
						"min_backoff": {
							Type:     resource_cloud_tasks_queue_schema.TypeString,
							Computed: true,
							Optional: true,
							Description: `A task will be scheduled for retry between minBackoff and
maxBackoff duration after it fails, if the queue's RetryConfig
specifies that the task should be retried.`,
						},
					},
				},
			},
			"stackdriver_logging_config": {
				Type:        resource_cloud_tasks_queue_schema.TypeList,
				Optional:    true,
				Description: `Configuration options for writing logs to Stackdriver Logging.`,
				MaxItems:    1,
				Elem: &resource_cloud_tasks_queue_schema.Resource{
					Schema: map[string]*resource_cloud_tasks_queue_schema.Schema{
						"sampling_ratio": {
							Type:     resource_cloud_tasks_queue_schema.TypeFloat,
							Required: true,
							Description: `Specifies the fraction of operations to write to Stackdriver Logging.
This field may contain any value between 0.0 and 1.0, inclusive. 0.0 is the
default and means that no operations are logged.`,
						},
					},
				},
			},
			// project defaults from the provider configuration when omitted.
			"project": {
				Type:     resource_cloud_tasks_queue_schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
		},
		UseJSONNumber: true,
	}
}
retryConfigProp)) { - obj["retryConfig"] = retryConfigProp - } - stackdriverLoggingConfigProp, err := expandCloudTasksQueueStackdriverLoggingConfig(d.Get("stackdriver_logging_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("stackdriver_logging_config"); !isEmptyValue(resource_cloud_tasks_queue_reflect.ValueOf(stackdriverLoggingConfigProp)) && (ok || !resource_cloud_tasks_queue_reflect.DeepEqual(v, stackdriverLoggingConfigProp)) { - obj["stackdriverLoggingConfig"] = stackdriverLoggingConfigProp - } - - url, err := replaceVars(d, config, "{{CloudTasksBasePath}}projects/{{project}}/locations/{{location}}/queues") - if err != nil { - return err - } - - resource_cloud_tasks_queue_log.Printf("[DEBUG] Creating new Queue: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_tasks_queue_fmt.Errorf("Error fetching project for Queue: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_tasks_queue_schema.TimeoutCreate)) - if err != nil { - return resource_cloud_tasks_queue_fmt.Errorf("Error creating Queue: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/queues/{{name}}") - if err != nil { - return resource_cloud_tasks_queue_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_cloud_tasks_queue_log.Printf("[DEBUG] Finished creating Queue %q: %#v", d.Id(), res) - - return resourceCloudTasksQueueRead(d, meta) -} - -func resourceCloudTasksQueueRead(d *resource_cloud_tasks_queue_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{CloudTasksBasePath}}projects/{{project}}/locations/{{location}}/queues/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloud_tasks_queue_fmt.Errorf("Error fetching project for Queue: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_cloud_tasks_queue_fmt.Sprintf("CloudTasksQueue %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_cloud_tasks_queue_fmt.Errorf("Error reading Queue: %s", err) - } - - if err := d.Set("name", flattenCloudTasksQueueName(res["name"], d, config)); err != nil { - return resource_cloud_tasks_queue_fmt.Errorf("Error reading Queue: %s", err) - } - if err := d.Set("app_engine_routing_override", flattenCloudTasksQueueAppEngineRoutingOverride(res["appEngineRoutingOverride"], d, config)); err != nil { - return resource_cloud_tasks_queue_fmt.Errorf("Error reading Queue: %s", err) - } - if err := d.Set("rate_limits", flattenCloudTasksQueueRateLimits(res["rateLimits"], d, config)); err != nil { - return resource_cloud_tasks_queue_fmt.Errorf("Error reading Queue: %s", err) - } - if err := d.Set("retry_config", flattenCloudTasksQueueRetryConfig(res["retryConfig"], d, config)); err != nil { - return resource_cloud_tasks_queue_fmt.Errorf("Error reading Queue: %s", err) - } - if err := d.Set("stackdriver_logging_config", flattenCloudTasksQueueStackdriverLoggingConfig(res["stackdriverLoggingConfig"], d, config)); err != nil { - return resource_cloud_tasks_queue_fmt.Errorf("Error reading Queue: %s", err) - } - - return nil -} - -func resourceCloudTasksQueueUpdate(d *resource_cloud_tasks_queue_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
// resourceCloudTasksQueueUpdate PATCHes the queue with only the changed
// fields, using an updateMask built from d.HasChange, then re-reads state.
func resourceCloudTasksQueueUpdate(d *resource_cloud_tasks_queue_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	billingProject := ""

	project, err := getProject(d, config)
	if err != nil {
		return resource_cloud_tasks_queue_fmt.Errorf("Error fetching project for Queue: %s", err)
	}
	billingProject = project

	// Build the request body. Unlike Create, the emptiness check here is on
	// the raw config value v (generated asymmetry, preserved as-is).
	obj := make(map[string]interface{})
	appEngineRoutingOverrideProp, err := expandCloudTasksQueueAppEngineRoutingOverride(d.Get("app_engine_routing_override"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("app_engine_routing_override"); !isEmptyValue(resource_cloud_tasks_queue_reflect.ValueOf(v)) && (ok || !resource_cloud_tasks_queue_reflect.DeepEqual(v, appEngineRoutingOverrideProp)) {
		obj["appEngineRoutingOverride"] = appEngineRoutingOverrideProp
	}
	rateLimitsProp, err := expandCloudTasksQueueRateLimits(d.Get("rate_limits"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("rate_limits"); !isEmptyValue(resource_cloud_tasks_queue_reflect.ValueOf(v)) && (ok || !resource_cloud_tasks_queue_reflect.DeepEqual(v, rateLimitsProp)) {
		obj["rateLimits"] = rateLimitsProp
	}
	retryConfigProp, err := expandCloudTasksQueueRetryConfig(d.Get("retry_config"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("retry_config"); !isEmptyValue(resource_cloud_tasks_queue_reflect.ValueOf(v)) && (ok || !resource_cloud_tasks_queue_reflect.DeepEqual(v, retryConfigProp)) {
		obj["retryConfig"] = retryConfigProp
	}
	stackdriverLoggingConfigProp, err := expandCloudTasksQueueStackdriverLoggingConfig(d.Get("stackdriver_logging_config"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("stackdriver_logging_config"); !isEmptyValue(resource_cloud_tasks_queue_reflect.ValueOf(v)) && (ok || !resource_cloud_tasks_queue_reflect.DeepEqual(v, stackdriverLoggingConfigProp)) {
		obj["stackdriverLoggingConfig"] = stackdriverLoggingConfigProp
	}

	url, err := replaceVars(d, config, "{{CloudTasksBasePath}}projects/{{project}}/locations/{{location}}/queues/{{name}}")
	if err != nil {
		return err
	}

	resource_cloud_tasks_queue_log.Printf("[DEBUG] Updating Queue %q: %#v", d.Id(), obj)
	// updateMask tells the API which fields the PATCH is allowed to touch.
	updateMask := []string{}

	if d.HasChange("app_engine_routing_override") {
		updateMask = append(updateMask, "appEngineRoutingOverride")
	}

	if d.HasChange("rate_limits") {
		updateMask = append(updateMask, "rateLimits")
	}

	if d.HasChange("retry_config") {
		updateMask = append(updateMask, "retryConfig")
	}

	if d.HasChange("stackdriver_logging_config") {
		updateMask = append(updateMask, "stackdriverLoggingConfig")
	}

	url, err = addQueryParams(url, map[string]string{"updateMask": resource_cloud_tasks_queue_strings.Join(updateMask, ",")})
	if err != nil {
		return err
	}

	// An explicitly configured billing project overrides the resource project.
	if bp, err := getBillingProject(d, config); err == nil {
		billingProject = bp
	}

	res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_tasks_queue_schema.TimeoutUpdate))

	if err != nil {
		return resource_cloud_tasks_queue_fmt.Errorf("Error updating Queue %q: %s", d.Id(), err)
	} else {
		resource_cloud_tasks_queue_log.Printf("[DEBUG] Finished updating Queue %q: %#v", d.Id(), res)
	}

	return resourceCloudTasksQueueRead(d, meta)
}
resource_cloud_tasks_queue_log.Printf("[DEBUG] Deleting Queue %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_cloud_tasks_queue_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Queue") - } - - resource_cloud_tasks_queue_log.Printf("[DEBUG] Finished deleting Queue %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudTasksQueueImport(d *resource_cloud_tasks_queue_schema.ResourceData, meta interface{}) ([]*resource_cloud_tasks_queue_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/queues/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/queues/{{name}}") - if err != nil { - return nil, resource_cloud_tasks_queue_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_cloud_tasks_queue_schema.ResourceData{d}, nil -} - -func flattenCloudTasksQueueName(v interface{}, d *resource_cloud_tasks_queue_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenCloudTasksQueueAppEngineRoutingOverride(v interface{}, d *resource_cloud_tasks_queue_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host"] = original["host"] - if override, ok := d.GetOk("app_engine_routing_override"); ok && len(override.([]interface{})) > 0 { - transformed["service"] = d.Get("app_engine_routing_override.0.service") - transformed["version"] = 
d.Get("app_engine_routing_override.0.version") - transformed["instance"] = d.Get("app_engine_routing_override.0.instance") - } - return []interface{}{transformed} -} - -func flattenCloudTasksQueueRateLimits(v interface{}, d *resource_cloud_tasks_queue_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["max_dispatches_per_second"] = - flattenCloudTasksQueueRateLimitsMaxDispatchesPerSecond(original["maxDispatchesPerSecond"], d, config) - transformed["max_concurrent_dispatches"] = - flattenCloudTasksQueueRateLimitsMaxConcurrentDispatches(original["maxConcurrentDispatches"], d, config) - transformed["max_burst_size"] = - flattenCloudTasksQueueRateLimitsMaxBurstSize(original["maxBurstSize"], d, config) - return []interface{}{transformed} -} - -func flattenCloudTasksQueueRateLimitsMaxDispatchesPerSecond(v interface{}, d *resource_cloud_tasks_queue_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudTasksQueueRateLimitsMaxConcurrentDispatches(v interface{}, d *resource_cloud_tasks_queue_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_cloud_tasks_queue_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenCloudTasksQueueRateLimitsMaxBurstSize(v interface{}, d *resource_cloud_tasks_queue_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_cloud_tasks_queue_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenCloudTasksQueueRetryConfig(v interface{}, d 
*resource_cloud_tasks_queue_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["max_attempts"] = - flattenCloudTasksQueueRetryConfigMaxAttempts(original["maxAttempts"], d, config) - transformed["max_retry_duration"] = - flattenCloudTasksQueueRetryConfigMaxRetryDuration(original["maxRetryDuration"], d, config) - transformed["min_backoff"] = - flattenCloudTasksQueueRetryConfigMinBackoff(original["minBackoff"], d, config) - transformed["max_backoff"] = - flattenCloudTasksQueueRetryConfigMaxBackoff(original["maxBackoff"], d, config) - transformed["max_doublings"] = - flattenCloudTasksQueueRetryConfigMaxDoublings(original["maxDoublings"], d, config) - return []interface{}{transformed} -} - -func flattenCloudTasksQueueRetryConfigMaxAttempts(v interface{}, d *resource_cloud_tasks_queue_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_cloud_tasks_queue_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenCloudTasksQueueRetryConfigMaxRetryDuration(v interface{}, d *resource_cloud_tasks_queue_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudTasksQueueRetryConfigMinBackoff(v interface{}, d *resource_cloud_tasks_queue_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudTasksQueueRetryConfigMaxBackoff(v interface{}, d *resource_cloud_tasks_queue_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudTasksQueueRetryConfigMaxDoublings(v interface{}, d *resource_cloud_tasks_queue_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := 
resource_cloud_tasks_queue_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenCloudTasksQueueStackdriverLoggingConfig(v interface{}, d *resource_cloud_tasks_queue_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["sampling_ratio"] = - flattenCloudTasksQueueStackdriverLoggingConfigSamplingRatio(original["samplingRatio"], d, config) - return []interface{}{transformed} -} - -func flattenCloudTasksQueueStackdriverLoggingConfigSamplingRatio(v interface{}, d *resource_cloud_tasks_queue_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudTasksQueueName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/locations/{{location}}/queues/{{name}}") -} - -func expandCloudTasksQueueAppEngineRoutingOverride(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedService, err := expandCloudTasksQueueAppEngineRoutingOverrideService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_tasks_queue_reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - transformedVersion, err := expandCloudTasksQueueAppEngineRoutingOverrideVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_tasks_queue_reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - 
transformed["version"] = transformedVersion - } - - transformedInstance, err := expandCloudTasksQueueAppEngineRoutingOverrideInstance(original["instance"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_tasks_queue_reflect.ValueOf(transformedInstance); val.IsValid() && !isEmptyValue(val) { - transformed["instance"] = transformedInstance - } - - transformedHost, err := expandCloudTasksQueueAppEngineRoutingOverrideHost(original["host"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_tasks_queue_reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { - transformed["host"] = transformedHost - } - - return transformed, nil -} - -func expandCloudTasksQueueAppEngineRoutingOverrideService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueAppEngineRoutingOverrideVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueAppEngineRoutingOverrideInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueAppEngineRoutingOverrideHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueRateLimits(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaxDispatchesPerSecond, err := expandCloudTasksQueueRateLimitsMaxDispatchesPerSecond(original["max_dispatches_per_second"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_tasks_queue_reflect.ValueOf(transformedMaxDispatchesPerSecond); val.IsValid() && !isEmptyValue(val) { - 
transformed["maxDispatchesPerSecond"] = transformedMaxDispatchesPerSecond - } - - transformedMaxConcurrentDispatches, err := expandCloudTasksQueueRateLimitsMaxConcurrentDispatches(original["max_concurrent_dispatches"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_tasks_queue_reflect.ValueOf(transformedMaxConcurrentDispatches); val.IsValid() && !isEmptyValue(val) { - transformed["maxConcurrentDispatches"] = transformedMaxConcurrentDispatches - } - - transformedMaxBurstSize, err := expandCloudTasksQueueRateLimitsMaxBurstSize(original["max_burst_size"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_tasks_queue_reflect.ValueOf(transformedMaxBurstSize); val.IsValid() && !isEmptyValue(val) { - transformed["maxBurstSize"] = transformedMaxBurstSize - } - - return transformed, nil -} - -func expandCloudTasksQueueRateLimitsMaxDispatchesPerSecond(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueRateLimitsMaxConcurrentDispatches(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueRateLimitsMaxBurstSize(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueRetryConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaxAttempts, err := expandCloudTasksQueueRetryConfigMaxAttempts(original["max_attempts"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_tasks_queue_reflect.ValueOf(transformedMaxAttempts); val.IsValid() && !isEmptyValue(val) { - transformed["maxAttempts"] = transformedMaxAttempts - } - - transformedMaxRetryDuration, 
err := expandCloudTasksQueueRetryConfigMaxRetryDuration(original["max_retry_duration"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_tasks_queue_reflect.ValueOf(transformedMaxRetryDuration); val.IsValid() && !isEmptyValue(val) { - transformed["maxRetryDuration"] = transformedMaxRetryDuration - } - - transformedMinBackoff, err := expandCloudTasksQueueRetryConfigMinBackoff(original["min_backoff"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_tasks_queue_reflect.ValueOf(transformedMinBackoff); val.IsValid() && !isEmptyValue(val) { - transformed["minBackoff"] = transformedMinBackoff - } - - transformedMaxBackoff, err := expandCloudTasksQueueRetryConfigMaxBackoff(original["max_backoff"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_tasks_queue_reflect.ValueOf(transformedMaxBackoff); val.IsValid() && !isEmptyValue(val) { - transformed["maxBackoff"] = transformedMaxBackoff - } - - transformedMaxDoublings, err := expandCloudTasksQueueRetryConfigMaxDoublings(original["max_doublings"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_tasks_queue_reflect.ValueOf(transformedMaxDoublings); val.IsValid() && !isEmptyValue(val) { - transformed["maxDoublings"] = transformedMaxDoublings - } - - return transformed, nil -} - -func expandCloudTasksQueueRetryConfigMaxAttempts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueRetryConfigMaxRetryDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueRetryConfigMinBackoff(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueRetryConfigMaxBackoff(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandCloudTasksQueueRetryConfigMaxDoublings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudTasksQueueStackdriverLoggingConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSamplingRatio, err := expandCloudTasksQueueStackdriverLoggingConfigSamplingRatio(original["sampling_ratio"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloud_tasks_queue_reflect.ValueOf(transformedSamplingRatio); val.IsValid() && !isEmptyValue(val) { - transformed["samplingRatio"] = transformedSamplingRatio - } - - return transformed, nil -} - -func expandCloudTasksQueueStackdriverLoggingConfigSamplingRatio(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func stepTimeoutCustomizeDiff(_ resource_cloudbuild_trigger_context.Context, diff *resource_cloudbuild_trigger_schema.ResourceDiff, v interface{}) error { - buildList := diff.Get("build").([]interface{}) - if len(buildList) == 0 || buildList[0] == nil { - return nil - } - build := buildList[0].(map[string]interface{}) - buildTimeoutString := build["timeout"].(string) - - buildTimeout, err := resource_cloudbuild_trigger_time.ParseDuration(buildTimeoutString) - if err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error parsing build timeout : %s", err) - } - - var stepTimeoutSum resource_cloudbuild_trigger_time.Duration = 0 - steps := build["step"].([]interface{}) - for _, rawstep := range steps { - if rawstep == nil { - continue - } - step := rawstep.(map[string]interface{}) - timeoutString := step["timeout"].(string) - if len(timeoutString) == 0 { - continue - } - - timeout, err := resource_cloudbuild_trigger_time.ParseDuration(timeoutString) - if err != 
nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error parsing build step timeout: %s", err) - } - stepTimeoutSum += timeout - } - if stepTimeoutSum > buildTimeout { - return resource_cloudbuild_trigger_fmt.Errorf("Step timeout sum (%v) cannot be greater than build timeout (%v)", stepTimeoutSum, buildTimeout) - } - return nil -} - -func resourceCloudBuildTrigger() *resource_cloudbuild_trigger_schema.Resource { - return &resource_cloudbuild_trigger_schema.Resource{ - Create: resourceCloudBuildTriggerCreate, - Read: resourceCloudBuildTriggerRead, - Update: resourceCloudBuildTriggerUpdate, - Delete: resourceCloudBuildTriggerDelete, - - Importer: &resource_cloudbuild_trigger_schema.ResourceImporter{ - State: resourceCloudBuildTriggerImport, - }, - - Timeouts: &resource_cloudbuild_trigger_schema.ResourceTimeout{ - Create: resource_cloudbuild_trigger_schema.DefaultTimeout(4 * resource_cloudbuild_trigger_time.Minute), - Update: resource_cloudbuild_trigger_schema.DefaultTimeout(4 * resource_cloudbuild_trigger_time.Minute), - Delete: resource_cloudbuild_trigger_schema.DefaultTimeout(4 * resource_cloudbuild_trigger_time.Minute), - }, - - SchemaVersion: 1, - CustomizeDiff: stepTimeoutCustomizeDiff, - - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "build": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `Contents of the build template. 
Either a filename or build template must be provided.`, - MaxItems: 1, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "step": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Required: true, - Description: `The operations to be performed on the workspace.`, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "name": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Required: true, - Description: `The name of the container image that will run this particular build step. - -If the image is available in the host's Docker daemon's cache, it will be -run directly. If not, the host will attempt to pull the image first, using -the builder service account's credentials if necessary. - -The Docker daemon's cache will already have the latest versions of all of -the officially supported build steps (see https://github.com/GoogleCloudPlatform/cloud-builders -for images and examples). -The Docker daemon will also have cached many of the layers for some popular -images, like "ubuntu", "debian", but they will be refreshed at the time -you attempt to use them. - -If you built an image in a previous build step, it will be stored in the -host's Docker daemon's cache and is available to use as the name for a -later build step.`, - }, - "args": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `A list of arguments that will be presented to the step when it is started. - -If the image used to run the step's container has an entrypoint, the args -are used as arguments to that entrypoint. 
If the image does not define an -entrypoint, the first element in args is used as the entrypoint, and the -remainder will be used as arguments.`, - Elem: &resource_cloudbuild_trigger_schema.Schema{ - Type: resource_cloudbuild_trigger_schema.TypeString, - }, - }, - "dir": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Working directory to use when running this step's container. - -If this value is a relative path, it is relative to the build's working -directory. If this value is absolute, it may be outside the build's working -directory, in which case the contents of the path may not be persisted -across build step executions, unless a 'volume' for that path is specified. - -If the build specifies a 'RepoSource' with 'dir' and a step with a -'dir', -which specifies an absolute path, the 'RepoSource' 'dir' is ignored -for the step's execution.`, - }, - "entrypoint": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Entrypoint to be used instead of the build step image's -default entrypoint. -If unset, the image's default entrypoint is used`, - }, - "env": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `A list of environment variable definitions to be used when -running a step. - -The elements are of the form "KEY=VALUE" for the environment variable -"KEY" being given the value "VALUE".`, - Elem: &resource_cloudbuild_trigger_schema.Schema{ - Type: resource_cloudbuild_trigger_schema.TypeString, - }, - }, - "id": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Unique identifier for this build step, used in 'wait_for' to -reference this build step as a dependency.`, - }, - "secret_env": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `A list of environment variables which are encrypted using -a Cloud Key -Management Service crypto key. 
These values must be specified in -the build's 'Secret'.`, - Elem: &resource_cloudbuild_trigger_schema.Schema{ - Type: resource_cloudbuild_trigger_schema.TypeString, - }, - }, - "timeout": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Time limit for executing this build step. If not defined, -the step has no -time limit and will be allowed to continue to run until either it -completes or the build itself times out.`, - }, - "timing": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Output only. Stores timing information for executing this -build step.`, - }, - "volumes": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `List of volumes to mount into the build step. - -Each volume is created as an empty volume prior to execution of the -build step. Upon completion of the build, volumes and their contents -are discarded. - -Using a named volume in only one step is not valid as it is -indicative of a build request with an incorrect configuration.`, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "name": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Required: true, - Description: `Name of the volume to mount. - -Volume names must be unique per build step and must be valid names for -Docker volumes. Each named volume must be used by at least two build steps.`, - }, - "path": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Required: true, - Description: `Path at which to mount the volume. - -Paths must be absolute and cannot conflict with other volume paths on -the same build step or with certain reserved volume paths.`, - }, - }, - }, - }, - "wait_for": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `The ID(s) of the step(s) that this build step depends on. 
- -This build step will not start until all the build steps in 'wait_for' -have completed successfully. If 'wait_for' is empty, this build step -will start when all previous build steps in the 'Build.Steps' list -have completed successfully.`, - Elem: &resource_cloudbuild_trigger_schema.Schema{ - Type: resource_cloudbuild_trigger_schema.TypeString, - }, - }, - }, - }, - }, - "artifacts": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `Artifacts produced by the build that should be uploaded upon successful completion of all build steps.`, - MaxItems: 1, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "images": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `A list of images to be pushed upon the successful completion of all build steps. - -The images will be pushed using the builder service account's credentials. - -The digests of the pushed images will be stored in the Build resource's results field. - -If any of the images fail to be pushed, the build is marked FAILURE.`, - Elem: &resource_cloudbuild_trigger_schema.Schema{ - Type: resource_cloudbuild_trigger_schema.TypeString, - }, - }, - "objects": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `A list of objects to be uploaded to Cloud Storage upon successful completion of all build steps. - -Files in the workspace matching specified paths globs will be uploaded to the -Cloud Storage location using the builder service account's credentials. - -The location and generation of the uploaded objects will be stored in the Build resource's results field. 
- -If any objects fail to be pushed, the build is marked FAILURE.`, - MaxItems: 1, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "location": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Cloud Storage bucket and optional object path, in the form "gs://bucket/path/to/somewhere/". - -Files in the workspace matching any path pattern will be uploaded to Cloud Storage with -this location as a prefix.`, - }, - "paths": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `Path globs used to match files in the build's workspace.`, - Elem: &resource_cloudbuild_trigger_schema.Schema{ - Type: resource_cloudbuild_trigger_schema.TypeString, - }, - }, - "timing": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Computed: true, - Description: `Output only. Stores timing information for pushing all artifact objects.`, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "end_time": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `End of time span. - -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to -nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "start_time": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Start of time span. - -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to -nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "images": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `A list of images to be pushed upon the successful completion of all build steps. 
-The images are pushed using the builder service account's credentials. -The digests of the pushed images will be stored in the Build resource's results field. -If any of the images fail to be pushed, the build status is marked FAILURE.`, - Elem: &resource_cloudbuild_trigger_schema.Schema{ - Type: resource_cloudbuild_trigger_schema.TypeString, - }, - }, - "logs_bucket": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Google Cloud Storage bucket where logs should be written. -Logs file names will be of the format ${logsBucket}/log-${build_id}.txt.`, - }, - "options": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `Special options for this build.`, - MaxItems: 1, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "disk_size_gb": { - Type: resource_cloudbuild_trigger_schema.TypeInt, - Optional: true, - Description: `Requested disk size for the VM that runs the build. Note that this is NOT "disk free"; -some of the space will be used by the operating system and build utilities. -Also note that this is the minimum disk size that will be allocated for the build -- -the build may run with a larger disk than requested. At present, the maximum disk size -is 1000GB; builds that request more than the maximum are rejected with an error.`, - }, - "dynamic_substitutions": { - Type: resource_cloudbuild_trigger_schema.TypeBool, - Optional: true, - Description: `Option to specify whether or not to apply bash style string operations to the substitutions. - -NOTE this is always enabled for triggered builds and cannot be overridden in the build configuration file.`, - }, - "env": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `A list of global environment variable definitions that will exist for all build steps -in this build. 
If a variable is defined in both globally and in a build step, -the variable will use the build step value. - -The elements are of the form "KEY=VALUE" for the environment variable "KEY" being given the value "VALUE".`, - Elem: &resource_cloudbuild_trigger_schema.Schema{ - Type: resource_cloudbuild_trigger_schema.TypeString, - }, - }, - "log_streaming_option": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - ValidateFunc: resource_cloudbuild_trigger_validation.StringInSlice([]string{"STREAM_DEFAULT", "STREAM_ON", "STREAM_OFF", ""}, false), - Description: `Option to define build log streaming behavior to Google Cloud Storage. Possible values: ["STREAM_DEFAULT", "STREAM_ON", "STREAM_OFF"]`, - }, - "logging": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - ValidateFunc: resource_cloudbuild_trigger_validation.StringInSlice([]string{"LOGGING_UNSPECIFIED", "LEGACY", "GCS_ONLY", "STACKDRIVER_ONLY", "NONE", ""}, false), - Description: `Option to specify the logging mode, which determines if and where build logs are stored. Possible values: ["LOGGING_UNSPECIFIED", "LEGACY", "GCS_ONLY", "STACKDRIVER_ONLY", "NONE"]`, - }, - "machine_type": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - ValidateFunc: resource_cloudbuild_trigger_validation.StringInSlice([]string{"UNSPECIFIED", "N1_HIGHCPU_8", "N1_HIGHCPU_32", "E2_HIGHCPU_8", "E2_HIGHCPU_32", ""}, false), - Description: `Compute Engine machine type on which to run the build. Possible values: ["UNSPECIFIED", "N1_HIGHCPU_8", "N1_HIGHCPU_32", "E2_HIGHCPU_8", "E2_HIGHCPU_32"]`, - }, - "requested_verify_option": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - ValidateFunc: resource_cloudbuild_trigger_validation.StringInSlice([]string{"NOT_VERIFIED", "VERIFIED", ""}, false), - Description: `Requested verifiability options. 
Possible values: ["NOT_VERIFIED", "VERIFIED"]`, - }, - "secret_env": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `A list of global environment variables, which are encrypted using a Cloud Key Management -Service crypto key. These values must be specified in the build's Secret. These variables -will be available to all build steps in this build.`, - Elem: &resource_cloudbuild_trigger_schema.Schema{ - Type: resource_cloudbuild_trigger_schema.TypeString, - }, - }, - "source_provenance_hash": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `Requested hash for SourceProvenance. Possible values: ["NONE", "SHA256", "MD5"]`, - Elem: &resource_cloudbuild_trigger_schema.Schema{ - Type: resource_cloudbuild_trigger_schema.TypeString, - ValidateFunc: resource_cloudbuild_trigger_validation.StringInSlice([]string{"NONE", "SHA256", "MD5"}, false), - }, - }, - "substitution_option": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - ValidateFunc: resource_cloudbuild_trigger_validation.StringInSlice([]string{"MUST_MATCH", "ALLOW_LOOSE", ""}, false), - Description: `Option to specify behavior when there is an error in the substitution checks. - -NOTE this is always set to ALLOW_LOOSE for triggered builds and cannot be overridden -in the build configuration file. Possible values: ["MUST_MATCH", "ALLOW_LOOSE"]`, - }, - "volumes": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `Global list of volumes to mount for ALL build steps - -Each volume is created as an empty volume prior to starting the build process. -Upon completion of the build, volumes and their contents are discarded. Global -volume names and paths cannot conflict with the volumes defined a build step. 
- -Using a global volume in a build with only one step is not valid as it is indicative -of a build request with an incorrect configuration.`, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "name": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Name of the volume to mount. - -Volume names must be unique per build step and must be valid names for Docker volumes. -Each named volume must be used by at least two build steps.`, - }, - "path": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Path at which to mount the volume. - -Paths must be absolute and cannot conflict with other volume paths on the same -build step or with certain reserved volume paths.`, - }, - }, - }, - }, - "worker_pool": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Option to specify a WorkerPool for the build. Format projects/{project}/workerPools/{workerPool} - -This field is experimental.`, - }, - }, - }, - }, - "queue_ttl": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `TTL in queue for this build. If provided and the build is enqueued longer than this value, -the build will expire and the build status will be EXPIRED. -The TTL starts ticking from createTime. -A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, - }, - "secret": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `Secrets to decrypt using Cloud Key Management Service.`, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "kms_key_name": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Required: true, - Description: `Cloud KMS key name to use to decrypt these envs.`, - }, - "secret_env": { - Type: resource_cloudbuild_trigger_schema.TypeMap, - Optional: true, - Description: `Map of environment variable name to its encrypted value. -Secret environment variables must be unique across all of a build's secrets, -and must be used by at least one build step. Values can be at most 64 KB in size. -There can be at most 100 secret values across all of a build's secrets.`, - Elem: &resource_cloudbuild_trigger_schema.Schema{Type: resource_cloudbuild_trigger_schema.TypeString}, - }, - }, - }, - }, - "source": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `The location of the source files to build. - -One of 'storageSource' or 'repoSource' must be provided.`, - MaxItems: 1, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "repo_source": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `Location of the source in a Google Cloud Source Repository.`, - MaxItems: 1, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "repo_name": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Required: true, - Description: `Name of the Cloud Source Repository.`, - }, - "branch_name": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Regex matching branches to build. Exactly one a of branch name, tag, or commit SHA must be provided. 
-The syntax of the regular expressions accepted is the syntax accepted by RE2 and -described at https://github.com/google/re2/wiki/Syntax`, - ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, - }, - "commit_sha": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Explicit commit SHA to build. Exactly one a of branch name, tag, or commit SHA must be provided.`, - ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, - }, - "dir": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Directory, relative to the source root, in which to run the build. -This must be a relative path. If a step's dir is specified and is an absolute path, -this value is ignored for that step's execution.`, - }, - "invert_regex": { - Type: resource_cloudbuild_trigger_schema.TypeBool, - Optional: true, - Description: `Only trigger a build if the revision regex does NOT match the revision regex.`, - }, - "project_id": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `ID of the project that owns the Cloud Source Repository. -If omitted, the project ID requesting the build is assumed.`, - }, - "substitutions": { - Type: resource_cloudbuild_trigger_schema.TypeMap, - Optional: true, - Description: `Substitutions to use in a triggered build. Should only be used with triggers.run`, - Elem: &resource_cloudbuild_trigger_schema.Schema{Type: resource_cloudbuild_trigger_schema.TypeString}, - }, - "tag_name": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Regex matching tags to build. Exactly one a of branch name, tag, or commit SHA must be provided. 
-The syntax of the regular expressions accepted is the syntax accepted by RE2 and -described at https://github.com/google/re2/wiki/Syntax`, - ExactlyOneOf: []string{"build.0.source.0.repo_source.0.branch_name", "build.0.source.0.repo_source.0.commit_sha", "build.0.source.0.repo_source.0.tag_name"}, - }, - }, - }, - }, - "storage_source": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `Location of the source in an archive file in Google Cloud Storage.`, - MaxItems: 1, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "bucket": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Required: true, - Description: `Google Cloud Storage bucket containing the source.`, - }, - "object": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Required: true, - Description: `Google Cloud Storage object containing the source. -This object must be a gzipped archive file (.tar.gz) containing source to build.`, - }, - "generation": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Google Cloud Storage generation for the object. -If the generation is omitted, the latest generation will be used`, - }, - }, - }, - }, - }, - }, - }, - "substitutions": { - Type: resource_cloudbuild_trigger_schema.TypeMap, - Optional: true, - Description: `Substitutions data for Build resource.`, - Elem: &resource_cloudbuild_trigger_schema.Schema{Type: resource_cloudbuild_trigger_schema.TypeString}, - }, - "tags": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `Tags for annotation of a Build. 
These are not docker tags.`, - Elem: &resource_cloudbuild_trigger_schema.Schema{ - Type: resource_cloudbuild_trigger_schema.TypeString, - }, - }, - "timeout": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Amount of time that this build should be allowed to run, to second granularity. -If this amount of time elapses, work on the build will cease and the build status will be TIMEOUT. -This timeout must be equal to or greater than the sum of the timeouts for build steps within the build. -The expected format is the number of seconds followed by s. -Default time is ten minutes (600s).`, - Default: "600s", - }, - }, - }, - ExactlyOneOf: []string{"filename", "build"}, - }, - "description": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Human-readable description of the trigger.`, - }, - "disabled": { - Type: resource_cloudbuild_trigger_schema.TypeBool, - Optional: true, - Description: `Whether the trigger is disabled or not. If true, the trigger will never result in a build.`, - }, - "filename": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Path, from the source root, to a file whose contents is used for the template. Either a filename or build template must be provided.`, - ExactlyOneOf: []string{"filename", "build"}, - }, - "github": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `Describes the configuration of a trigger that creates a build whenever a GitHub event is received. - -One of 'trigger_template', 'github', 'pubsub_config' or 'webhook_config' must be provided.`, - MaxItems: 1, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "name": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Name of the repository. 
For example: The name for -https://github.com/googlecloudplatform/cloud-builders is "cloud-builders".`, - }, - "owner": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Owner of the repository. For example: The owner for -https://github.com/googlecloudplatform/cloud-builders is "googlecloudplatform".`, - }, - "pull_request": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `filter to match changes in pull requests. Specify only one of pullRequest or push.`, - MaxItems: 1, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "branch": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Required: true, - Description: `Regex of branches to match.`, - }, - "comment_control": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - ValidateFunc: resource_cloudbuild_trigger_validation.StringInSlice([]string{"COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY", ""}, false), - Description: `Whether to block builds on a "/gcbrun" comment from a repository owner or collaborator. Possible values: ["COMMENTS_DISABLED", "COMMENTS_ENABLED", "COMMENTS_ENABLED_FOR_EXTERNAL_CONTRIBUTORS_ONLY"]`, - }, - "invert_regex": { - Type: resource_cloudbuild_trigger_schema.TypeBool, - Optional: true, - Description: `If true, branches that do NOT match the git_ref will trigger a build.`, - }, - }, - }, - ExactlyOneOf: []string{"github.0.pull_request", "github.0.push"}, - }, - "push": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `filter to match changes in refs, like branches or tags. 
Specify only one of pullRequest or push.`, - MaxItems: 1, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "branch": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Regex of branches to match. Specify only one of branch or tag.`, - ExactlyOneOf: []string{"github.0.push.0.branch", "github.0.push.0.tag"}, - }, - "invert_regex": { - Type: resource_cloudbuild_trigger_schema.TypeBool, - Optional: true, - Description: `When true, only trigger a build if the revision regex does NOT match the git_ref regex.`, - }, - "tag": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Regex of tags to match. Specify only one of branch or tag.`, - ExactlyOneOf: []string{"github.0.push.0.branch", "github.0.push.0.tag"}, - }, - }, - }, - ExactlyOneOf: []string{"github.0.pull_request", "github.0.push"}, - }, - }, - }, - ExactlyOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config"}, - }, - "ignored_files": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `ignoredFiles and includedFiles are file glob matches using https://golang.org/pkg/path/filepath/#Match -extended with support for '**'. - -If ignoredFiles and changed files are both empty, then they are not -used to determine whether or not to trigger a build. - -If ignoredFiles is not empty, then we ignore any files that match any -of the ignored_file globs. If the change has no files that are outside -of the ignoredFiles globs, then we do not trigger a build.`, - Elem: &resource_cloudbuild_trigger_schema.Schema{ - Type: resource_cloudbuild_trigger_schema.TypeString, - }, - }, - "included_files": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `ignoredFiles and includedFiles are file glob matches using https://golang.org/pkg/path/filepath/#Match -extended with support for '**'. 
- -If any of the files altered in the commit pass the ignoredFiles filter -and includedFiles is empty, then as far as this filter is concerned, we -should trigger the build. - -If any of the files altered in the commit pass the ignoredFiles filter -and includedFiles is not empty, then we make sure that at least one of -those files matches a includedFiles glob. If not, then we do not trigger -a build.`, - Elem: &resource_cloudbuild_trigger_schema.Schema{ - Type: resource_cloudbuild_trigger_schema.TypeString, - }, - }, - "name": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Computed: true, - Optional: true, - Description: `Name of the trigger. Must be unique within the project.`, - }, - "pubsub_config": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `PubsubConfig describes the configuration of a trigger that creates -a build whenever a Pub/Sub message is published. - -One of 'trigger_template', 'github', 'pubsub_config' or 'webhook_config' must be provided.`, - MaxItems: 1, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "topic": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Required: true, - Description: `The name of the topic from which this subscription is receiving messages.`, - }, - "service_account_email": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Service account that will make the push request.`, - }, - "state": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Computed: true, - Description: `Potential issues with the underlying Pub/Sub subscription configuration. -Only populated on get requests.`, - }, - "subscription": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Computed: true, - Description: `Output only. 
Name of the subscription.`, - }, - }, - }, - ExactlyOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config"}, - }, - "service_account": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `The service account used for all user-controlled operations including -triggers.patch, triggers.run, builds.create, and builds.cancel. - -If no service account is set, then the standard Cloud Build service account -([PROJECT_NUM]@system.gserviceaccount.com) will be used instead. - -Format: projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT_ID_OR_EMAIL}`, - }, - "substitutions": { - Type: resource_cloudbuild_trigger_schema.TypeMap, - Optional: true, - Description: `Substitutions data for Build resource.`, - Elem: &resource_cloudbuild_trigger_schema.Schema{Type: resource_cloudbuild_trigger_schema.TypeString}, - }, - "tags": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `Tags for annotation of a BuildTrigger`, - Elem: &resource_cloudbuild_trigger_schema.Schema{ - Type: resource_cloudbuild_trigger_schema.TypeString, - }, - }, - "trigger_template": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `Template describing the types of source changes to trigger a build. - -Branch and tag names in trigger templates are interpreted as regular -expressions. Any branch or tag change that matches that regular -expression will trigger a build. - -One of 'trigger_template', 'github', 'pubsub_config' or 'webhook_config' must be provided.`, - MaxItems: 1, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "branch_name": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Name of the branch to build. Exactly one a of branch name, tag, or commit SHA must be provided. 
-This field is a regular expression.`, - ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, - }, - "commit_sha": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Explicit commit SHA to build. Exactly one of a branch name, tag, or commit SHA must be provided.`, - ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, - }, - "dir": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Directory, relative to the source root, in which to run the build. - -This must be a relative path. If a step's dir is specified and -is an absolute path, this value is ignored for that step's -execution.`, - }, - "invert_regex": { - Type: resource_cloudbuild_trigger_schema.TypeBool, - Optional: true, - Description: `Only trigger a build if the revision regex does NOT match the revision regex.`, - }, - "project_id": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Computed: true, - Optional: true, - Description: `ID of the project that owns the Cloud Source Repository. If -omitted, the project ID requesting the build is assumed.`, - }, - "repo_name": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Name of the Cloud Source Repository. If omitted, the name "default" is assumed.`, - Default: "default", - }, - "tag_name": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Description: `Name of the tag to build. Exactly one of a branch name, tag, or commit SHA must be provided. 
-This field is a regular expression.`, - ExactlyOneOf: []string{"trigger_template.0.branch_name", "trigger_template.0.tag_name", "trigger_template.0.commit_sha"}, - }, - }, - }, - ExactlyOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config"}, - }, - "webhook_config": { - Type: resource_cloudbuild_trigger_schema.TypeList, - Optional: true, - Description: `WebhookConfig describes the configuration of a trigger that creates -a build whenever a webhook is sent to a trigger's webhook URL. - -One of 'trigger_template', 'github', 'pubsub_config' or 'webhook_config' must be provided.`, - MaxItems: 1, - Elem: &resource_cloudbuild_trigger_schema.Resource{ - Schema: map[string]*resource_cloudbuild_trigger_schema.Schema{ - "secret": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Required: true, - Description: `Resource name for the secret required as a URL parameter.`, - }, - "state": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Computed: true, - Description: `Potential issues with the underlying Pub/Sub subscription configuration. 
-Only populated on get requests.`, - }, - }, - }, - ExactlyOneOf: []string{"trigger_template", "github", "pubsub_config", "webhook_config"}, - }, - "create_time": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Computed: true, - Description: `Time when the trigger was created.`, - }, - "trigger_id": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Computed: true, - Description: `The unique identifier for the trigger.`, - }, - "project": { - Type: resource_cloudbuild_trigger_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudBuildTriggerCreate(d *resource_cloudbuild_trigger_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandCloudBuildTriggerName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(nameProp)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandCloudBuildTriggerDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(descriptionProp)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - tagsProp, err := expandCloudBuildTriggerTags(d.Get("tags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tags"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(tagsProp)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, tagsProp)) { - obj["tags"] = tagsProp - } - disabledProp, err := expandCloudBuildTriggerDisabled(d.Get("disabled"), d, config) - 
if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(disabledProp)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - substitutionsProp, err := expandCloudBuildTriggerSubstitutions(d.Get("substitutions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("substitutions"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(substitutionsProp)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, substitutionsProp)) { - obj["substitutions"] = substitutionsProp - } - serviceAccountProp, err := expandCloudBuildTriggerServiceAccount(d.Get("service_account"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_account"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(serviceAccountProp)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, serviceAccountProp)) { - obj["serviceAccount"] = serviceAccountProp - } - filenameProp, err := expandCloudBuildTriggerFilename(d.Get("filename"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filename"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(filenameProp)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, filenameProp)) { - obj["filename"] = filenameProp - } - ignoredFilesProp, err := expandCloudBuildTriggerIgnoredFiles(d.Get("ignored_files"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ignored_files"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(ignoredFilesProp)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, ignoredFilesProp)) { - obj["ignoredFiles"] = ignoredFilesProp - } - includedFilesProp, err := expandCloudBuildTriggerIncludedFiles(d.Get("included_files"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("included_files"); 
!isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(includedFilesProp)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, includedFilesProp)) { - obj["includedFiles"] = includedFilesProp - } - triggerTemplateProp, err := expandCloudBuildTriggerTriggerTemplate(d.Get("trigger_template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trigger_template"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(triggerTemplateProp)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, triggerTemplateProp)) { - obj["triggerTemplate"] = triggerTemplateProp - } - githubProp, err := expandCloudBuildTriggerGithub(d.Get("github"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("github"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(githubProp)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, githubProp)) { - obj["github"] = githubProp - } - pubsubConfigProp, err := expandCloudBuildTriggerPubsubConfig(d.Get("pubsub_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("pubsub_config"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(pubsubConfigProp)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, pubsubConfigProp)) { - obj["pubsubConfig"] = pubsubConfigProp - } - webhookConfigProp, err := expandCloudBuildTriggerWebhookConfig(d.Get("webhook_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("webhook_config"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(webhookConfigProp)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, webhookConfigProp)) { - obj["webhookConfig"] = webhookConfigProp - } - buildProp, err := expandCloudBuildTriggerBuild(d.Get("build"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("build"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(buildProp)) && (ok || 
!resource_cloudbuild_trigger_reflect.DeepEqual(v, buildProp)) { - obj["build"] = buildProp - } - - url, err := replaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/triggers") - if err != nil { - return err - } - - resource_cloudbuild_trigger_log.Printf("[DEBUG] Creating new Trigger: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error fetching project for Trigger: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_cloudbuild_trigger_schema.TimeoutCreate)) - if err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error creating Trigger: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/triggers/{{trigger_id}}") - if err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - triggerId, ok := res["id"] - if !ok { - return resource_cloudbuild_trigger_fmt.Errorf("Create response didn't contain id. 
Create may not have succeeded.") - } - if err := d.Set("trigger_id", triggerId.(string)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error setting trigger_id: %s", err) - } - - id, err = replaceVars(d, config, "projects/{{project}}/triggers/{{trigger_id}}") - if err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_cloudbuild_trigger_log.Printf("[DEBUG] Finished creating Trigger %q: %#v", d.Id(), res) - - return resourceCloudBuildTriggerRead(d, meta) -} - -func resourceCloudBuildTriggerRead(d *resource_cloudbuild_trigger_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/triggers/{{trigger_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error fetching project for Trigger: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_cloudbuild_trigger_fmt.Sprintf("CloudBuildTrigger %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - - if err := d.Set("trigger_id", flattenCloudBuildTriggerTriggerId(res["id"], d, config)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("name", flattenCloudBuildTriggerName(res["name"], d, config)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("description", 
flattenCloudBuildTriggerDescription(res["description"], d, config)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("tags", flattenCloudBuildTriggerTags(res["tags"], d, config)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("disabled", flattenCloudBuildTriggerDisabled(res["disabled"], d, config)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("create_time", flattenCloudBuildTriggerCreateTime(res["createTime"], d, config)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("substitutions", flattenCloudBuildTriggerSubstitutions(res["substitutions"], d, config)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("service_account", flattenCloudBuildTriggerServiceAccount(res["serviceAccount"], d, config)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("filename", flattenCloudBuildTriggerFilename(res["filename"], d, config)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("ignored_files", flattenCloudBuildTriggerIgnoredFiles(res["ignoredFiles"], d, config)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("included_files", flattenCloudBuildTriggerIncludedFiles(res["includedFiles"], d, config)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("trigger_template", flattenCloudBuildTriggerTriggerTemplate(res["triggerTemplate"], d, config)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - if err := 
d.Set("github", flattenCloudBuildTriggerGithub(res["github"], d, config)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("pubsub_config", flattenCloudBuildTriggerPubsubConfig(res["pubsubConfig"], d, config)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("webhook_config", flattenCloudBuildTriggerWebhookConfig(res["webhookConfig"], d, config)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - if err := d.Set("build", flattenCloudBuildTriggerBuild(res["build"], d, config)); err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error reading Trigger: %s", err) - } - - return nil -} - -func resourceCloudBuildTriggerUpdate(d *resource_cloudbuild_trigger_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error fetching project for Trigger: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := expandCloudBuildTriggerName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(v)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandCloudBuildTriggerDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(v)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - tagsProp, err := 
expandCloudBuildTriggerTags(d.Get("tags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tags"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(v)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, tagsProp)) { - obj["tags"] = tagsProp - } - disabledProp, err := expandCloudBuildTriggerDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(v)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, disabledProp)) { - obj["disabled"] = disabledProp - } - substitutionsProp, err := expandCloudBuildTriggerSubstitutions(d.Get("substitutions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("substitutions"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(v)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, substitutionsProp)) { - obj["substitutions"] = substitutionsProp - } - serviceAccountProp, err := expandCloudBuildTriggerServiceAccount(d.Get("service_account"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_account"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(v)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, serviceAccountProp)) { - obj["serviceAccount"] = serviceAccountProp - } - filenameProp, err := expandCloudBuildTriggerFilename(d.Get("filename"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filename"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(v)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, filenameProp)) { - obj["filename"] = filenameProp - } - ignoredFilesProp, err := expandCloudBuildTriggerIgnoredFiles(d.Get("ignored_files"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ignored_files"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(v)) && (ok || 
!resource_cloudbuild_trigger_reflect.DeepEqual(v, ignoredFilesProp)) { - obj["ignoredFiles"] = ignoredFilesProp - } - includedFilesProp, err := expandCloudBuildTriggerIncludedFiles(d.Get("included_files"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("included_files"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(v)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, includedFilesProp)) { - obj["includedFiles"] = includedFilesProp - } - triggerTemplateProp, err := expandCloudBuildTriggerTriggerTemplate(d.Get("trigger_template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("trigger_template"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(v)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, triggerTemplateProp)) { - obj["triggerTemplate"] = triggerTemplateProp - } - githubProp, err := expandCloudBuildTriggerGithub(d.Get("github"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("github"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(v)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, githubProp)) { - obj["github"] = githubProp - } - pubsubConfigProp, err := expandCloudBuildTriggerPubsubConfig(d.Get("pubsub_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("pubsub_config"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(v)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, pubsubConfigProp)) { - obj["pubsubConfig"] = pubsubConfigProp - } - webhookConfigProp, err := expandCloudBuildTriggerWebhookConfig(d.Get("webhook_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("webhook_config"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(v)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, webhookConfigProp)) { - obj["webhookConfig"] = webhookConfigProp - } - buildProp, err := 
expandCloudBuildTriggerBuild(d.Get("build"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("build"); !isEmptyValue(resource_cloudbuild_trigger_reflect.ValueOf(v)) && (ok || !resource_cloudbuild_trigger_reflect.DeepEqual(v, buildProp)) { - obj["build"] = buildProp - } - - url, err := replaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/triggers/{{trigger_id}}") - if err != nil { - return err - } - - resource_cloudbuild_trigger_log.Printf("[DEBUG] Updating Trigger %q: %#v", d.Id(), obj) - obj["id"] = d.Get("trigger_id") - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_cloudbuild_trigger_schema.TimeoutUpdate)) - - if err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error updating Trigger %q: %s", d.Id(), err) - } else { - resource_cloudbuild_trigger_log.Printf("[DEBUG] Finished updating Trigger %q: %#v", d.Id(), res) - } - - return resourceCloudBuildTriggerRead(d, meta) -} - -func resourceCloudBuildTriggerDelete(d *resource_cloudbuild_trigger_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloudbuild_trigger_fmt.Errorf("Error fetching project for Trigger: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CloudBuildBasePath}}projects/{{project}}/triggers/{{trigger_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_cloudbuild_trigger_log.Printf("[DEBUG] Deleting Trigger %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, 
d.Timeout(resource_cloudbuild_trigger_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Trigger") - } - - resource_cloudbuild_trigger_log.Printf("[DEBUG] Finished deleting Trigger %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudBuildTriggerImport(d *resource_cloudbuild_trigger_schema.ResourceData, meta interface{}) ([]*resource_cloudbuild_trigger_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/triggers/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/triggers/{{trigger_id}}") - if err != nil { - return nil, resource_cloudbuild_trigger_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_cloudbuild_trigger_schema.ResourceData{d}, nil -} - -func flattenCloudBuildTriggerTriggerId(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerName(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerDescription(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerTags(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerDisabled(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerCreateTime(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerSubstitutions(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudBuildTriggerServiceAccount(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerFilename(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerIgnoredFiles(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerIncludedFiles(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerTriggerTemplate(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = - flattenCloudBuildTriggerTriggerTemplateProjectId(original["projectId"], d, config) - transformed["repo_name"] = - flattenCloudBuildTriggerTriggerTemplateRepoName(original["repoName"], d, config) - transformed["dir"] = - flattenCloudBuildTriggerTriggerTemplateDir(original["dir"], d, config) - transformed["invert_regex"] = - flattenCloudBuildTriggerTriggerTemplateInvertRegex(original["invertRegex"], d, config) - transformed["branch_name"] = - flattenCloudBuildTriggerTriggerTemplateBranchName(original["branchName"], d, config) - transformed["tag_name"] = - flattenCloudBuildTriggerTriggerTemplateTagName(original["tagName"], d, config) - transformed["commit_sha"] = - flattenCloudBuildTriggerTriggerTemplateCommitSha(original["commitSha"], d, config) - return []interface{}{transformed} -} - -func flattenCloudBuildTriggerTriggerTemplateProjectId(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerTriggerTemplateRepoName(v interface{}, d 
*resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerTriggerTemplateDir(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerTriggerTemplateInvertRegex(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerTriggerTemplateBranchName(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerTriggerTemplateTagName(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerTriggerTemplateCommitSha(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGithub(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["owner"] = - flattenCloudBuildTriggerGithubOwner(original["owner"], d, config) - transformed["name"] = - flattenCloudBuildTriggerGithubName(original["name"], d, config) - transformed["pull_request"] = - flattenCloudBuildTriggerGithubPullRequest(original["pullRequest"], d, config) - transformed["push"] = - flattenCloudBuildTriggerGithubPush(original["push"], d, config) - return []interface{}{transformed} -} - -func flattenCloudBuildTriggerGithubOwner(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGithubName(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudBuildTriggerGithubPullRequest(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["branch"] = - flattenCloudBuildTriggerGithubPullRequestBranch(original["branch"], d, config) - transformed["comment_control"] = - flattenCloudBuildTriggerGithubPullRequestCommentControl(original["commentControl"], d, config) - transformed["invert_regex"] = - flattenCloudBuildTriggerGithubPullRequestInvertRegex(original["invertRegex"], d, config) - return []interface{}{transformed} -} - -func flattenCloudBuildTriggerGithubPullRequestBranch(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGithubPullRequestCommentControl(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGithubPullRequestInvertRegex(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGithubPush(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["invert_regex"] = - flattenCloudBuildTriggerGithubPushInvertRegex(original["invertRegex"], d, config) - transformed["branch"] = - flattenCloudBuildTriggerGithubPushBranch(original["branch"], d, config) - transformed["tag"] = - flattenCloudBuildTriggerGithubPushTag(original["tag"], d, config) - return []interface{}{transformed} -} - -func flattenCloudBuildTriggerGithubPushInvertRegex(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGithubPushBranch(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerGithubPushTag(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerPubsubConfig(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["subscription"] = - flattenCloudBuildTriggerPubsubConfigSubscription(original["subscription"], d, config) - transformed["topic"] = - flattenCloudBuildTriggerPubsubConfigTopic(original["topic"], d, config) - transformed["service_account_email"] = - flattenCloudBuildTriggerPubsubConfigServiceAccountEmail(original["service_account_email"], d, config) - transformed["state"] = - flattenCloudBuildTriggerPubsubConfigState(original["state"], d, config) - return []interface{}{transformed} -} - -func flattenCloudBuildTriggerPubsubConfigSubscription(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerPubsubConfigTopic(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerPubsubConfigServiceAccountEmail(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerPubsubConfigState(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerWebhookConfig(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - 
} - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["secret"] = - flattenCloudBuildTriggerWebhookConfigSecret(original["secret"], d, config) - transformed["state"] = - flattenCloudBuildTriggerWebhookConfigState(original["state"], d, config) - return []interface{}{transformed} -} - -func flattenCloudBuildTriggerWebhookConfigSecret(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerWebhookConfigState(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuild(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["source"] = - flattenCloudBuildTriggerBuildSource(original["source"], d, config) - transformed["tags"] = - flattenCloudBuildTriggerBuildTags(original["tags"], d, config) - transformed["images"] = - flattenCloudBuildTriggerBuildImages(original["images"], d, config) - transformed["substitutions"] = - flattenCloudBuildTriggerBuildSubstitutions(original["substitutions"], d, config) - transformed["queue_ttl"] = - flattenCloudBuildTriggerBuildQueueTtl(original["queueTtl"], d, config) - transformed["logs_bucket"] = - flattenCloudBuildTriggerBuildLogsBucket(original["logsBucket"], d, config) - transformed["timeout"] = - flattenCloudBuildTriggerBuildTimeout(original["timeout"], d, config) - transformed["secret"] = - flattenCloudBuildTriggerBuildSecret(original["secrets"], d, config) - transformed["step"] = - flattenCloudBuildTriggerBuildStep(original["steps"], d, config) - transformed["artifacts"] = - flattenCloudBuildTriggerBuildArtifacts(original["artifacts"], d, config) - 
transformed["options"] = - flattenCloudBuildTriggerBuildOptions(original["options"], d, config) - return []interface{}{transformed} -} - -func flattenCloudBuildTriggerBuildSource(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["storage_source"] = - flattenCloudBuildTriggerBuildSourceStorageSource(original["storageSource"], d, config) - transformed["repo_source"] = - flattenCloudBuildTriggerBuildSourceRepoSource(original["repoSource"], d, config) - return []interface{}{transformed} -} - -func flattenCloudBuildTriggerBuildSourceStorageSource(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["bucket"] = - flattenCloudBuildTriggerBuildSourceStorageSourceBucket(original["bucket"], d, config) - transformed["object"] = - flattenCloudBuildTriggerBuildSourceStorageSourceObject(original["object"], d, config) - transformed["generation"] = - flattenCloudBuildTriggerBuildSourceStorageSourceGeneration(original["generation"], d, config) - return []interface{}{transformed} -} - -func flattenCloudBuildTriggerBuildSourceStorageSourceBucket(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSourceStorageSourceObject(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSourceStorageSourceGeneration(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudBuildTriggerBuildSourceRepoSource(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = - flattenCloudBuildTriggerBuildSourceRepoSourceProjectId(original["projectId"], d, config) - transformed["repo_name"] = - flattenCloudBuildTriggerBuildSourceRepoSourceRepoName(original["repoName"], d, config) - transformed["dir"] = - flattenCloudBuildTriggerBuildSourceRepoSourceDir(original["dir"], d, config) - transformed["invert_regex"] = - flattenCloudBuildTriggerBuildSourceRepoSourceInvertRegex(original["invertRegex"], d, config) - transformed["substitutions"] = - flattenCloudBuildTriggerBuildSourceRepoSourceSubstitutions(original["substitutions"], d, config) - transformed["branch_name"] = - flattenCloudBuildTriggerBuildSourceRepoSourceBranchName(original["branchName"], d, config) - transformed["tag_name"] = - flattenCloudBuildTriggerBuildSourceRepoSourceTagName(original["tagName"], d, config) - transformed["commit_sha"] = - flattenCloudBuildTriggerBuildSourceRepoSourceCommitSha(original["commitSha"], d, config) - return []interface{}{transformed} -} - -func flattenCloudBuildTriggerBuildSourceRepoSourceProjectId(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSourceRepoSourceRepoName(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSourceRepoSourceDir(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSourceRepoSourceInvertRegex(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - 
-func flattenCloudBuildTriggerBuildSourceRepoSourceSubstitutions(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSourceRepoSourceBranchName(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSourceRepoSourceTagName(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSourceRepoSourceCommitSha(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildTags(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildImages(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSubstitutions(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildQueueTtl(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildLogsBucket(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildTimeout(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSecret(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - 
transformed = append(transformed, map[string]interface{}{ - "kms_key_name": flattenCloudBuildTriggerBuildSecretKmsKeyName(original["kmsKeyName"], d, config), - "secret_env": flattenCloudBuildTriggerBuildSecretSecretEnv(original["secretEnv"], d, config), - }) - } - return transformed -} - -func flattenCloudBuildTriggerBuildSecretKmsKeyName(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildSecretSecretEnv(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStep(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenCloudBuildTriggerBuildStepName(original["name"], d, config), - "args": flattenCloudBuildTriggerBuildStepArgs(original["args"], d, config), - "env": flattenCloudBuildTriggerBuildStepEnv(original["env"], d, config), - "id": flattenCloudBuildTriggerBuildStepId(original["id"], d, config), - "entrypoint": flattenCloudBuildTriggerBuildStepEntrypoint(original["entrypoint"], d, config), - "dir": flattenCloudBuildTriggerBuildStepDir(original["dir"], d, config), - "secret_env": flattenCloudBuildTriggerBuildStepSecretEnv(original["secretEnv"], d, config), - "timeout": flattenCloudBuildTriggerBuildStepTimeout(original["timeout"], d, config), - "timing": flattenCloudBuildTriggerBuildStepTiming(original["timing"], d, config), - "volumes": flattenCloudBuildTriggerBuildStepVolumes(original["volumes"], d, config), - "wait_for": flattenCloudBuildTriggerBuildStepWaitFor(original["waitFor"], d, config), - }) - } - return transformed -} - -func 
flattenCloudBuildTriggerBuildStepName(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepArgs(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepEnv(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepId(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepEntrypoint(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepDir(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepSecretEnv(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepTimeout(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepTiming(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepVolumes(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenCloudBuildTriggerBuildStepVolumesName(original["name"], d, config), - "path": flattenCloudBuildTriggerBuildStepVolumesPath(original["path"], d, 
config), - }) - } - return transformed -} - -func flattenCloudBuildTriggerBuildStepVolumesName(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepVolumesPath(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildStepWaitFor(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildArtifacts(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["images"] = - flattenCloudBuildTriggerBuildArtifactsImages(original["images"], d, config) - transformed["objects"] = - flattenCloudBuildTriggerBuildArtifactsObjects(original["objects"], d, config) - return []interface{}{transformed} -} - -func flattenCloudBuildTriggerBuildArtifactsImages(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildArtifactsObjects(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["location"] = - flattenCloudBuildTriggerBuildArtifactsObjectsLocation(original["location"], d, config) - transformed["paths"] = - flattenCloudBuildTriggerBuildArtifactsObjectsPaths(original["paths"], d, config) - transformed["timing"] = - flattenCloudBuildTriggerBuildArtifactsObjectsTiming(original["timing"], d, config) - return []interface{}{transformed} -} - -func 
flattenCloudBuildTriggerBuildArtifactsObjectsLocation(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildArtifactsObjectsPaths(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildArtifactsObjectsTiming(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["start_time"] = - flattenCloudBuildTriggerBuildArtifactsObjectsTimingStartTime(original["startTime"], d, config) - transformed["end_time"] = - flattenCloudBuildTriggerBuildArtifactsObjectsTimingEndTime(original["endTime"], d, config) - return []interface{}{transformed} -} - -func flattenCloudBuildTriggerBuildArtifactsObjectsTimingStartTime(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildArtifactsObjectsTimingEndTime(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptions(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["source_provenance_hash"] = - flattenCloudBuildTriggerBuildOptionsSourceProvenanceHash(original["sourceProvenanceHash"], d, config) - transformed["requested_verify_option"] = - flattenCloudBuildTriggerBuildOptionsRequestedVerifyOption(original["requestedVerifyOption"], d, config) - transformed["machine_type"] = - 
flattenCloudBuildTriggerBuildOptionsMachineType(original["machineType"], d, config) - transformed["disk_size_gb"] = - flattenCloudBuildTriggerBuildOptionsDiskSizeGb(original["diskSizeGb"], d, config) - transformed["substitution_option"] = - flattenCloudBuildTriggerBuildOptionsSubstitutionOption(original["substitutionOption"], d, config) - transformed["dynamic_substitutions"] = - flattenCloudBuildTriggerBuildOptionsDynamicSubstitutions(original["dynamicSubstitutions"], d, config) - transformed["log_streaming_option"] = - flattenCloudBuildTriggerBuildOptionsLogStreamingOption(original["logStreamingOption"], d, config) - transformed["worker_pool"] = - flattenCloudBuildTriggerBuildOptionsWorkerPool(original["workerPool"], d, config) - transformed["logging"] = - flattenCloudBuildTriggerBuildOptionsLogging(original["logging"], d, config) - transformed["env"] = - flattenCloudBuildTriggerBuildOptionsEnv(original["env"], d, config) - transformed["secret_env"] = - flattenCloudBuildTriggerBuildOptionsSecretEnv(original["secretEnv"], d, config) - transformed["volumes"] = - flattenCloudBuildTriggerBuildOptionsVolumes(original["volumes"], d, config) - return []interface{}{transformed} -} - -func flattenCloudBuildTriggerBuildOptionsSourceProvenanceHash(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsRequestedVerifyOption(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsMachineType(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsDiskSizeGb(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_cloudbuild_trigger_strconv.ParseInt(strVal, 10, 64); err == nil { - 
return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenCloudBuildTriggerBuildOptionsSubstitutionOption(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsDynamicSubstitutions(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsLogStreamingOption(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsWorkerPool(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsLogging(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsEnv(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsSecretEnv(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsVolumes(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenCloudBuildTriggerBuildOptionsVolumesName(original["name"], d, config), - "path": flattenCloudBuildTriggerBuildOptionsVolumesPath(original["path"], d, config), - }) - } - return transformed -} - -func flattenCloudBuildTriggerBuildOptionsVolumesName(v 
interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudBuildTriggerBuildOptionsVolumesPath(v interface{}, d *resource_cloudbuild_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudBuildTriggerName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerTags(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerSubstitutions(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudBuildTriggerServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerFilename(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerIgnoredFiles(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerIncludedFiles(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerTriggerTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedProjectId, err := expandCloudBuildTriggerTriggerTemplateProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedRepoName, err := expandCloudBuildTriggerTriggerTemplateRepoName(original["repo_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedRepoName); val.IsValid() && !isEmptyValue(val) { - transformed["repoName"] = transformedRepoName - } - - transformedDir, err := expandCloudBuildTriggerTriggerTemplateDir(original["dir"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedDir); val.IsValid() && !isEmptyValue(val) { - transformed["dir"] = transformedDir - } - - transformedInvertRegex, err := expandCloudBuildTriggerTriggerTemplateInvertRegex(original["invert_regex"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedInvertRegex); val.IsValid() && !isEmptyValue(val) { - transformed["invertRegex"] = transformedInvertRegex - } - - transformedBranchName, err := expandCloudBuildTriggerTriggerTemplateBranchName(original["branch_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedBranchName); val.IsValid() && !isEmptyValue(val) { - transformed["branchName"] = transformedBranchName - } - - transformedTagName, err := expandCloudBuildTriggerTriggerTemplateTagName(original["tag_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedTagName); val.IsValid() && !isEmptyValue(val) { - transformed["tagName"] = transformedTagName - } - - transformedCommitSha, 
err := expandCloudBuildTriggerTriggerTemplateCommitSha(original["commit_sha"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedCommitSha); val.IsValid() && !isEmptyValue(val) { - transformed["commitSha"] = transformedCommitSha - } - - return transformed, nil -} - -func expandCloudBuildTriggerTriggerTemplateProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerTriggerTemplateRepoName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerTriggerTemplateDir(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerTriggerTemplateInvertRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerTriggerTemplateBranchName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerTriggerTemplateTagName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerTriggerTemplateCommitSha(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGithub(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedOwner, err := expandCloudBuildTriggerGithubOwner(original["owner"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedOwner); val.IsValid() && !isEmptyValue(val) { - transformed["owner"] = transformedOwner 
- } - - transformedName, err := expandCloudBuildTriggerGithubName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedPullRequest, err := expandCloudBuildTriggerGithubPullRequest(original["pull_request"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedPullRequest); val.IsValid() && !isEmptyValue(val) { - transformed["pullRequest"] = transformedPullRequest - } - - transformedPush, err := expandCloudBuildTriggerGithubPush(original["push"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedPush); val.IsValid() && !isEmptyValue(val) { - transformed["push"] = transformedPush - } - - return transformed, nil -} - -func expandCloudBuildTriggerGithubOwner(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGithubName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGithubPullRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBranch, err := expandCloudBuildTriggerGithubPullRequestBranch(original["branch"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedBranch); val.IsValid() && !isEmptyValue(val) { - transformed["branch"] = transformedBranch - } - - transformedCommentControl, err := expandCloudBuildTriggerGithubPullRequestCommentControl(original["comment_control"], d, config) - if err != 
nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedCommentControl); val.IsValid() && !isEmptyValue(val) { - transformed["commentControl"] = transformedCommentControl - } - - transformedInvertRegex, err := expandCloudBuildTriggerGithubPullRequestInvertRegex(original["invert_regex"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedInvertRegex); val.IsValid() && !isEmptyValue(val) { - transformed["invertRegex"] = transformedInvertRegex - } - - return transformed, nil -} - -func expandCloudBuildTriggerGithubPullRequestBranch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGithubPullRequestCommentControl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGithubPullRequestInvertRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGithubPush(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInvertRegex, err := expandCloudBuildTriggerGithubPushInvertRegex(original["invert_regex"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedInvertRegex); val.IsValid() && !isEmptyValue(val) { - transformed["invertRegex"] = transformedInvertRegex - } - - transformedBranch, err := expandCloudBuildTriggerGithubPushBranch(original["branch"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedBranch); val.IsValid() && !isEmptyValue(val) { - transformed["branch"] = 
transformedBranch - } - - transformedTag, err := expandCloudBuildTriggerGithubPushTag(original["tag"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - return transformed, nil -} - -func expandCloudBuildTriggerGithubPushInvertRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGithubPushBranch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerGithubPushTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerPubsubConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSubscription, err := expandCloudBuildTriggerPubsubConfigSubscription(original["subscription"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedSubscription); val.IsValid() && !isEmptyValue(val) { - transformed["subscription"] = transformedSubscription - } - - transformedTopic, err := expandCloudBuildTriggerPubsubConfigTopic(original["topic"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedTopic); val.IsValid() && !isEmptyValue(val) { - transformed["topic"] = transformedTopic - } - - transformedServiceAccountEmail, err := expandCloudBuildTriggerPubsubConfigServiceAccountEmail(original["service_account_email"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_cloudbuild_trigger_reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !isEmptyValue(val) { - transformed["service_account_email"] = transformedServiceAccountEmail - } - - transformedState, err := expandCloudBuildTriggerPubsubConfigState(original["state"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedState); val.IsValid() && !isEmptyValue(val) { - transformed["state"] = transformedState - } - - return transformed, nil -} - -func expandCloudBuildTriggerPubsubConfigSubscription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerPubsubConfigTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerPubsubConfigServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerPubsubConfigState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerWebhookConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSecret, err := expandCloudBuildTriggerWebhookConfigSecret(original["secret"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedSecret); val.IsValid() && !isEmptyValue(val) { - transformed["secret"] = transformedSecret - } - - transformedState, err := expandCloudBuildTriggerWebhookConfigState(original["state"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedState); val.IsValid() && 
!isEmptyValue(val) { - transformed["state"] = transformedState - } - - return transformed, nil -} - -func expandCloudBuildTriggerWebhookConfigSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerWebhookConfigState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuild(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSource, err := expandCloudBuildTriggerBuildSource(original["source"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedSource); val.IsValid() && !isEmptyValue(val) { - transformed["source"] = transformedSource - } - - transformedTags, err := expandCloudBuildTriggerBuildTags(original["tags"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedTags); val.IsValid() && !isEmptyValue(val) { - transformed["tags"] = transformedTags - } - - transformedImages, err := expandCloudBuildTriggerBuildImages(original["images"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedImages); val.IsValid() && !isEmptyValue(val) { - transformed["images"] = transformedImages - } - - transformedSubstitutions, err := expandCloudBuildTriggerBuildSubstitutions(original["substitutions"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedSubstitutions); val.IsValid() && !isEmptyValue(val) { - transformed["substitutions"] = transformedSubstitutions - } - - transformedQueueTtl, err := 
expandCloudBuildTriggerBuildQueueTtl(original["queue_ttl"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedQueueTtl); val.IsValid() && !isEmptyValue(val) { - transformed["queueTtl"] = transformedQueueTtl - } - - transformedLogsBucket, err := expandCloudBuildTriggerBuildLogsBucket(original["logs_bucket"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedLogsBucket); val.IsValid() && !isEmptyValue(val) { - transformed["logsBucket"] = transformedLogsBucket - } - - transformedTimeout, err := expandCloudBuildTriggerBuildTimeout(original["timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["timeout"] = transformedTimeout - } - - transformedSecret, err := expandCloudBuildTriggerBuildSecret(original["secret"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedSecret); val.IsValid() && !isEmptyValue(val) { - transformed["secrets"] = transformedSecret - } - - transformedStep, err := expandCloudBuildTriggerBuildStep(original["step"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedStep); val.IsValid() && !isEmptyValue(val) { - transformed["steps"] = transformedStep - } - - transformedArtifacts, err := expandCloudBuildTriggerBuildArtifacts(original["artifacts"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedArtifacts); val.IsValid() && !isEmptyValue(val) { - transformed["artifacts"] = transformedArtifacts - } - - transformedOptions, err := expandCloudBuildTriggerBuildOptions(original["options"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_cloudbuild_trigger_reflect.ValueOf(transformedOptions); val.IsValid() && !isEmptyValue(val) { - transformed["options"] = transformedOptions - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedStorageSource, err := expandCloudBuildTriggerBuildSourceStorageSource(original["storage_source"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedStorageSource); val.IsValid() && !isEmptyValue(val) { - transformed["storageSource"] = transformedStorageSource - } - - transformedRepoSource, err := expandCloudBuildTriggerBuildSourceRepoSource(original["repo_source"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedRepoSource); val.IsValid() && !isEmptyValue(val) { - transformed["repoSource"] = transformedRepoSource - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildSourceStorageSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBucket, err := expandCloudBuildTriggerBuildSourceStorageSourceBucket(original["bucket"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedBucket); val.IsValid() && !isEmptyValue(val) { - transformed["bucket"] = transformedBucket - } - - transformedObject, err := expandCloudBuildTriggerBuildSourceStorageSourceObject(original["object"], d, config) - if err != nil { - return nil, err - } 
else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedObject); val.IsValid() && !isEmptyValue(val) { - transformed["object"] = transformedObject - } - - transformedGeneration, err := expandCloudBuildTriggerBuildSourceStorageSourceGeneration(original["generation"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedGeneration); val.IsValid() && !isEmptyValue(val) { - transformed["generation"] = transformedGeneration - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildSourceStorageSourceBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceStorageSourceObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceStorageSourceGeneration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId, err := expandCloudBuildTriggerBuildSourceRepoSourceProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedRepoName, err := expandCloudBuildTriggerBuildSourceRepoSourceRepoName(original["repo_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedRepoName); val.IsValid() && !isEmptyValue(val) { - transformed["repoName"] = 
transformedRepoName - } - - transformedDir, err := expandCloudBuildTriggerBuildSourceRepoSourceDir(original["dir"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedDir); val.IsValid() && !isEmptyValue(val) { - transformed["dir"] = transformedDir - } - - transformedInvertRegex, err := expandCloudBuildTriggerBuildSourceRepoSourceInvertRegex(original["invert_regex"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedInvertRegex); val.IsValid() && !isEmptyValue(val) { - transformed["invertRegex"] = transformedInvertRegex - } - - transformedSubstitutions, err := expandCloudBuildTriggerBuildSourceRepoSourceSubstitutions(original["substitutions"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedSubstitutions); val.IsValid() && !isEmptyValue(val) { - transformed["substitutions"] = transformedSubstitutions - } - - transformedBranchName, err := expandCloudBuildTriggerBuildSourceRepoSourceBranchName(original["branch_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedBranchName); val.IsValid() && !isEmptyValue(val) { - transformed["branchName"] = transformedBranchName - } - - transformedTagName, err := expandCloudBuildTriggerBuildSourceRepoSourceTagName(original["tag_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedTagName); val.IsValid() && !isEmptyValue(val) { - transformed["tagName"] = transformedTagName - } - - transformedCommitSha, err := expandCloudBuildTriggerBuildSourceRepoSourceCommitSha(original["commit_sha"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedCommitSha); val.IsValid() && !isEmptyValue(val) { - 
transformed["commitSha"] = transformedCommitSha - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSourceProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSourceRepoName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSourceDir(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSourceInvertRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSourceSubstitutions(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSourceBranchName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSourceTagName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSourceRepoSourceCommitSha(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildTags(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildImages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSubstitutions(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - 
m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudBuildTriggerBuildQueueTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildLogsBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKeyName, err := expandCloudBuildTriggerBuildSecretKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName - } - - transformedSecretEnv, err := expandCloudBuildTriggerBuildSecretSecretEnv(original["secret_env"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedSecretEnv); val.IsValid() && !isEmptyValue(val) { - transformed["secretEnv"] = transformedSecretEnv - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudBuildTriggerBuildSecretKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildSecretSecretEnv(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := 
range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudBuildTriggerBuildStep(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudBuildTriggerBuildStepName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedArgs, err := expandCloudBuildTriggerBuildStepArgs(original["args"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedArgs); val.IsValid() && !isEmptyValue(val) { - transformed["args"] = transformedArgs - } - - transformedEnv, err := expandCloudBuildTriggerBuildStepEnv(original["env"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedEnv); val.IsValid() && !isEmptyValue(val) { - transformed["env"] = transformedEnv - } - - transformedId, err := expandCloudBuildTriggerBuildStepId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedEntrypoint, err := expandCloudBuildTriggerBuildStepEntrypoint(original["entrypoint"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedEntrypoint); val.IsValid() && !isEmptyValue(val) { - transformed["entrypoint"] = transformedEntrypoint - } - - transformedDir, err := expandCloudBuildTriggerBuildStepDir(original["dir"], d, config) - if err != 
nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedDir); val.IsValid() && !isEmptyValue(val) { - transformed["dir"] = transformedDir - } - - transformedSecretEnv, err := expandCloudBuildTriggerBuildStepSecretEnv(original["secret_env"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedSecretEnv); val.IsValid() && !isEmptyValue(val) { - transformed["secretEnv"] = transformedSecretEnv - } - - transformedTimeout, err := expandCloudBuildTriggerBuildStepTimeout(original["timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["timeout"] = transformedTimeout - } - - transformedTiming, err := expandCloudBuildTriggerBuildStepTiming(original["timing"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedTiming); val.IsValid() && !isEmptyValue(val) { - transformed["timing"] = transformedTiming - } - - transformedVolumes, err := expandCloudBuildTriggerBuildStepVolumes(original["volumes"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedVolumes); val.IsValid() && !isEmptyValue(val) { - transformed["volumes"] = transformedVolumes - } - - transformedWaitFor, err := expandCloudBuildTriggerBuildStepWaitFor(original["wait_for"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedWaitFor); val.IsValid() && !isEmptyValue(val) { - transformed["waitFor"] = transformedWaitFor - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudBuildTriggerBuildStepName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepArgs(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepEnv(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepEntrypoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepDir(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepSecretEnv(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepTiming(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepVolumes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudBuildTriggerBuildStepVolumesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedPath, err := expandCloudBuildTriggerBuildStepVolumesPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedPath); val.IsValid() && 
!isEmptyValue(val) { - transformed["path"] = transformedPath - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudBuildTriggerBuildStepVolumesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepVolumesPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildStepWaitFor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildArtifacts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedImages, err := expandCloudBuildTriggerBuildArtifactsImages(original["images"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedImages); val.IsValid() && !isEmptyValue(val) { - transformed["images"] = transformedImages - } - - transformedObjects, err := expandCloudBuildTriggerBuildArtifactsObjects(original["objects"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedObjects); val.IsValid() && !isEmptyValue(val) { - transformed["objects"] = transformedObjects - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildArtifactsImages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildArtifactsObjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedLocation, err := expandCloudBuildTriggerBuildArtifactsObjectsLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - transformedPaths, err := expandCloudBuildTriggerBuildArtifactsObjectsPaths(original["paths"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedPaths); val.IsValid() && !isEmptyValue(val) { - transformed["paths"] = transformedPaths - } - - transformedTiming, err := expandCloudBuildTriggerBuildArtifactsObjectsTiming(original["timing"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedTiming); val.IsValid() && !isEmptyValue(val) { - transformed["timing"] = transformedTiming - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildArtifactsObjectsLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildArtifactsObjectsPaths(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildArtifactsObjectsTiming(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedStartTime, err := expandCloudBuildTriggerBuildArtifactsObjectsTimingStartTime(original["start_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { - transformed["startTime"] = transformedStartTime - } - - 
transformedEndTime, err := expandCloudBuildTriggerBuildArtifactsObjectsTimingEndTime(original["end_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedEndTime); val.IsValid() && !isEmptyValue(val) { - transformed["endTime"] = transformedEndTime - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildArtifactsObjectsTimingStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildArtifactsObjectsTimingEndTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSourceProvenanceHash, err := expandCloudBuildTriggerBuildOptionsSourceProvenanceHash(original["source_provenance_hash"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedSourceProvenanceHash); val.IsValid() && !isEmptyValue(val) { - transformed["sourceProvenanceHash"] = transformedSourceProvenanceHash - } - - transformedRequestedVerifyOption, err := expandCloudBuildTriggerBuildOptionsRequestedVerifyOption(original["requested_verify_option"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedRequestedVerifyOption); val.IsValid() && !isEmptyValue(val) { - transformed["requestedVerifyOption"] = transformedRequestedVerifyOption - } - - transformedMachineType, err := expandCloudBuildTriggerBuildOptionsMachineType(original["machine_type"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_cloudbuild_trigger_reflect.ValueOf(transformedMachineType); val.IsValid() && !isEmptyValue(val) { - transformed["machineType"] = transformedMachineType - } - - transformedDiskSizeGb, err := expandCloudBuildTriggerBuildOptionsDiskSizeGb(original["disk_size_gb"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedDiskSizeGb); val.IsValid() && !isEmptyValue(val) { - transformed["diskSizeGb"] = transformedDiskSizeGb - } - - transformedSubstitutionOption, err := expandCloudBuildTriggerBuildOptionsSubstitutionOption(original["substitution_option"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedSubstitutionOption); val.IsValid() && !isEmptyValue(val) { - transformed["substitutionOption"] = transformedSubstitutionOption - } - - transformedDynamicSubstitutions, err := expandCloudBuildTriggerBuildOptionsDynamicSubstitutions(original["dynamic_substitutions"], d, config) - if err != nil { - return nil, err - } else { - transformed["dynamicSubstitutions"] = transformedDynamicSubstitutions - } - - transformedLogStreamingOption, err := expandCloudBuildTriggerBuildOptionsLogStreamingOption(original["log_streaming_option"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedLogStreamingOption); val.IsValid() && !isEmptyValue(val) { - transformed["logStreamingOption"] = transformedLogStreamingOption - } - - transformedWorkerPool, err := expandCloudBuildTriggerBuildOptionsWorkerPool(original["worker_pool"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedWorkerPool); val.IsValid() && !isEmptyValue(val) { - transformed["workerPool"] = transformedWorkerPool - } - - transformedLogging, err := expandCloudBuildTriggerBuildOptionsLogging(original["logging"], d, config) - if err != nil { - return 
nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedLogging); val.IsValid() && !isEmptyValue(val) { - transformed["logging"] = transformedLogging - } - - transformedEnv, err := expandCloudBuildTriggerBuildOptionsEnv(original["env"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedEnv); val.IsValid() && !isEmptyValue(val) { - transformed["env"] = transformedEnv - } - - transformedSecretEnv, err := expandCloudBuildTriggerBuildOptionsSecretEnv(original["secret_env"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedSecretEnv); val.IsValid() && !isEmptyValue(val) { - transformed["secretEnv"] = transformedSecretEnv - } - - transformedVolumes, err := expandCloudBuildTriggerBuildOptionsVolumes(original["volumes"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedVolumes); val.IsValid() && !isEmptyValue(val) { - transformed["volumes"] = transformedVolumes - } - - return transformed, nil -} - -func expandCloudBuildTriggerBuildOptionsSourceProvenanceHash(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsRequestedVerifyOption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsMachineType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsDiskSizeGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsSubstitutionOption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsDynamicSubstitutions(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsLogStreamingOption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsWorkerPool(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsLogging(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsEnv(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsSecretEnv(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudBuildTriggerBuildOptionsVolumes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandCloudBuildTriggerBuildOptionsVolumesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedPath, err := expandCloudBuildTriggerBuildOptionsVolumesPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudbuild_trigger_reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudBuildTriggerBuildOptionsVolumesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
return v, nil -} - -func expandCloudBuildTriggerBuildOptionsVolumesPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -var allowedIngressSettings = []string{ - "ALLOW_ALL", - "ALLOW_INTERNAL_AND_GCLB", - "ALLOW_INTERNAL_ONLY", -} - -var allowedVpcConnectorEgressSettings = []string{ - "ALL_TRAFFIC", - "PRIVATE_RANGES_ONLY", -} - -type cloudFunctionId struct { - Project string - Region string - Name string -} - -func (s *cloudFunctionId) cloudFunctionId() string { - return resource_cloudfunctions_function_fmt.Sprintf("projects/%s/locations/%s/functions/%s", s.Project, s.Region, s.Name) -} - -var labelKeyRegex = resource_cloudfunctions_function_regexp.MustCompile(`^[\p{Ll}0-9_-]+$`) - -func labelKeyValidator(val interface{}, key string) (warns []string, errs []error) { - if val == nil { - return - } - - m := val.(map[string]interface{}) - for k := range m { - if !labelKeyRegex.MatchString(k) { - errs = append(errs, resource_cloudfunctions_function_fmt.Errorf("%q is an invalid label key. 
See https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements", k)) - } - } - return -} - -func (s *cloudFunctionId) locationId() string { - return resource_cloudfunctions_function_fmt.Sprintf("projects/%s/locations/%s", s.Project, s.Region) -} - -func parseCloudFunctionId(d *resource_cloudfunctions_function_schema.ResourceData, config *Config) (*cloudFunctionId, error) { - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/functions/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - return &cloudFunctionId{ - Project: d.Get("project").(string), - Region: d.Get("region").(string), - Name: d.Get("name").(string), - }, nil -} - -func validateResourceCloudFunctionsFunctionName(v interface{}, k string) (ws []string, errors []error) { - re := `^(?:[a-zA-Z](?:[-_a-zA-Z0-9]{0,61}[a-zA-Z0-9])?)$` - return validateRegexp(re)(v, k) -} - -func compareSelfLinkOrResourceNameWithMultipleParts(_, old, new string, _ *resource_cloudfunctions_function_schema.ResourceData) bool { - return resource_cloudfunctions_function_strings.HasSuffix(old, new) -} - -func resourceCloudFunctionsFunction() *resource_cloudfunctions_function_schema.Resource { - return &resource_cloudfunctions_function_schema.Resource{ - Create: resourceCloudFunctionsCreate, - Read: resourceCloudFunctionsRead, - Update: resourceCloudFunctionsUpdate, - Delete: resourceCloudFunctionsDestroy, - - Importer: &resource_cloudfunctions_function_schema.ResourceImporter{ - State: resource_cloudfunctions_function_schema.ImportStatePassthrough, - }, - - Timeouts: &resource_cloudfunctions_function_schema.ResourceTimeout{ - Create: resource_cloudfunctions_function_schema.DefaultTimeout(5 * resource_cloudfunctions_function_time.Minute), - Read: resource_cloudfunctions_function_schema.DefaultTimeout(5 * resource_cloudfunctions_function_time.Minute), - Update: resource_cloudfunctions_function_schema.DefaultTimeout(5 * 
resource_cloudfunctions_function_time.Minute), - Delete: resource_cloudfunctions_function_schema.DefaultTimeout(5 * resource_cloudfunctions_function_time.Minute), - }, - - Schema: map[string]*resource_cloudfunctions_function_schema.Schema{ - "name": { - Type: resource_cloudfunctions_function_schema.TypeString, - Required: true, - ForceNew: true, - Description: `A user-defined name of the function. Function names must be unique globally.`, - ValidateFunc: validateResourceCloudFunctionsFunctionName, - }, - - "source_archive_bucket": { - Type: resource_cloudfunctions_function_schema.TypeString, - Optional: true, - Description: `The GCS bucket containing the zip archive which contains the function.`, - }, - - "source_archive_object": { - Type: resource_cloudfunctions_function_schema.TypeString, - Optional: true, - Description: `The source archive object (file) in archive bucket.`, - }, - - "source_repository": { - Type: resource_cloudfunctions_function_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Represents parameters related to source repository where a function is hosted. 
Cannot be set alongside source_archive_bucket or source_archive_object.`, - ConflictsWith: []string{"source_archive_bucket", "source_archive_object"}, - Elem: &resource_cloudfunctions_function_schema.Resource{ - Schema: map[string]*resource_cloudfunctions_function_schema.Schema{ - "url": { - Type: resource_cloudfunctions_function_schema.TypeString, - Required: true, - Description: `The URL pointing to the hosted repository where the function is defined.`, - }, - "deployed_url": { - Type: resource_cloudfunctions_function_schema.TypeString, - Computed: true, - Description: `The URL pointing to the hosted repository where the function was defined at the time of deployment.`, - }, - }, - }, - }, - - "description": { - Type: resource_cloudfunctions_function_schema.TypeString, - Optional: true, - Description: `Description of the function.`, - }, - - "available_memory_mb": { - Type: resource_cloudfunctions_function_schema.TypeInt, - Optional: true, - Default: 256, - Description: `Memory (in MB), available to the function. Default value is 256. Possible values include 128, 256, 512, 1024, etc.`, - }, - - "timeout": { - Type: resource_cloudfunctions_function_schema.TypeInt, - Optional: true, - Default: 60, - ValidateFunc: resource_cloudfunctions_function_validation.IntBetween(1, 540), - Description: `Timeout (in seconds) for the function. Default value is 60 seconds. Cannot be more than 540 seconds.`, - }, - - "entry_point": { - Type: resource_cloudfunctions_function_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Name of the function that will be executed when the Google Cloud Function is triggered.`, - }, - - "ingress_settings": { - Type: resource_cloudfunctions_function_schema.TypeString, - Optional: true, - Default: "ALLOW_ALL", - ValidateFunc: resource_cloudfunctions_function_validation.StringInSlice(allowedIngressSettings, true), - Description: `String value that controls what traffic can reach the function. 
Allowed values are ALLOW_ALL and ALLOW_INTERNAL_ONLY. Changes to this field will recreate the cloud function.`, - }, - - "vpc_connector_egress_settings": { - Type: resource_cloudfunctions_function_schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: resource_cloudfunctions_function_validation.StringInSlice(allowedVpcConnectorEgressSettings, true), - Description: `The egress settings for the connector, controlling what traffic is diverted through it. Allowed values are ALL_TRAFFIC and PRIVATE_RANGES_ONLY. Defaults to PRIVATE_RANGES_ONLY. If unset, this field preserves the previously set value.`, - }, - - "labels": { - Type: resource_cloudfunctions_function_schema.TypeMap, - ValidateFunc: labelKeyValidator, - Optional: true, - Description: `A set of key/value label pairs to assign to the function. Label keys must follow the requirements at https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements.`, - }, - - "runtime": { - Type: resource_cloudfunctions_function_schema.TypeString, - Required: true, - Description: `The runtime in which the function is going to run. Eg. "nodejs8", "nodejs10", "python37", "go111".`, - }, - - "service_account_email": { - Type: resource_cloudfunctions_function_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: ` If provided, the self-provided service account to run the function with.`, - }, - - "vpc_connector": { - Type: resource_cloudfunctions_function_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The VPC Network Connector that this cloud function can connect to. It can be either the fully-qualified URI, or the short name of the network connector resource. 
The format of this field is projects/*/locations/*/connectors/*.`, - }, - - "environment_variables": { - Type: resource_cloudfunctions_function_schema.TypeMap, - Optional: true, - Description: `A set of key/value environment variable pairs to assign to the function.`, - }, - - "build_environment_variables": { - Type: resource_cloudfunctions_function_schema.TypeMap, - Optional: true, - Description: ` A set of key/value environment variable pairs available during build time.`, - }, - - "trigger_http": { - Type: resource_cloudfunctions_function_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Boolean variable. Any HTTP request (of a supported type) to the endpoint will trigger function execution. Supported HTTP request types are: POST, PUT, GET, DELETE, and OPTIONS. Endpoint is returned as https_trigger_url. Cannot be used with trigger_bucket and trigger_topic.`, - }, - - "event_trigger": { - Type: resource_cloudfunctions_function_schema.TypeList, - Optional: true, - Computed: true, - ConflictsWith: []string{"trigger_http"}, - MaxItems: 1, - Description: `A source that fires events in response to a condition in another service. Cannot be used with trigger_http.`, - Elem: &resource_cloudfunctions_function_schema.Resource{ - Schema: map[string]*resource_cloudfunctions_function_schema.Schema{ - "event_type": { - Type: resource_cloudfunctions_function_schema.TypeString, - ForceNew: true, - Required: true, - Description: `The type of event to observe. For example: "google.storage.object.finalize". See the documentation on calling Cloud Functions for a full reference of accepted triggers.`, - }, - "resource": { - Type: resource_cloudfunctions_function_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceNameWithMultipleParts, - Description: `The name or partial URI of the resource from which to observe events. 
For example, "myBucket" or "projects/my-project/topics/my-topic"`, - }, - "failure_policy": { - Type: resource_cloudfunctions_function_schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: `Specifies policy for failed executions`, - Elem: &resource_cloudfunctions_function_schema.Resource{ - Schema: map[string]*resource_cloudfunctions_function_schema.Schema{ - "retry": { - Type: resource_cloudfunctions_function_schema.TypeBool, - - Required: true, - Description: `Whether the function should be retried on failure. Defaults to false.`, - }, - }}, - }, - }, - }, - }, - - "https_trigger_url": { - Type: resource_cloudfunctions_function_schema.TypeString, - Optional: true, - Computed: true, - Description: `URL which triggers function execution. Returned only if trigger_http is used.`, - }, - - "max_instances": { - Type: resource_cloudfunctions_function_schema.TypeInt, - Optional: true, - Default: 0, - ValidateFunc: resource_cloudfunctions_function_validation.IntAtLeast(0), - Description: `The limit on the maximum number of function instances that may coexist at a given time.`, - }, - - "project": { - Type: resource_cloudfunctions_function_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `Project of the function. If it is not provided, the provider project is used.`, - }, - - "region": { - Type: resource_cloudfunctions_function_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `Region of function. 
If it is not provided, the provider region is used.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudFunctionsCreate(d *resource_cloudfunctions_function_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - cloudFuncId := &cloudFunctionId{ - Project: project, - Region: region, - Name: d.Get("name").(string), - } - - function := &resource_cloudfunctions_function_cloudfunctions.CloudFunction{ - Name: cloudFuncId.cloudFunctionId(), - Runtime: d.Get("runtime").(string), - ServiceAccountEmail: d.Get("service_account_email").(string), - ForceSendFields: []string{}, - } - - sourceRepos := d.Get("source_repository").([]interface{}) - if len(sourceRepos) > 0 { - function.SourceRepository = expandSourceRepository(sourceRepos) - } else { - sourceArchiveBucket := d.Get("source_archive_bucket").(string) - sourceArchiveObj := d.Get("source_archive_object").(string) - if sourceArchiveBucket == "" || sourceArchiveObj == "" { - return resource_cloudfunctions_function_fmt.Errorf("either source_repository or both of source_archive_bucket+source_archive_object must be set") - } - function.SourceArchiveUrl = resource_cloudfunctions_function_fmt.Sprintf("gs://%v/%v", sourceArchiveBucket, sourceArchiveObj) - } - - if v, ok := d.GetOk("available_memory_mb"); ok { - availableMemoryMb := v.(int) - function.AvailableMemoryMb = int64(availableMemoryMb) - } - - if v, ok := d.GetOk("description"); ok { - function.Description = v.(string) - } - - if v, ok := d.GetOk("entry_point"); ok { - function.EntryPoint = v.(string) - } - - if v, ok := d.GetOk("timeout"); ok { - function.Timeout = resource_cloudfunctions_function_fmt.Sprintf("%vs", v.(int)) - } - - if v, ok := d.GetOk("event_trigger"); ok { - 
function.EventTrigger = expandEventTrigger(v.([]interface{}), project) - } else if v, ok := d.GetOk("trigger_http"); ok && v.(bool) { - function.HttpsTrigger = &resource_cloudfunctions_function_cloudfunctions.HttpsTrigger{} - } else { - return resource_cloudfunctions_function_fmt.Errorf("One of `event_trigger` or `trigger_http` is required: " + - "You must specify a trigger when deploying a new function.") - } - - if v, ok := d.GetOk("ingress_settings"); ok { - function.IngressSettings = v.(string) - } - - if _, ok := d.GetOk("labels"); ok { - function.Labels = expandLabels(d) - } - - if _, ok := d.GetOk("environment_variables"); ok { - function.EnvironmentVariables = expandEnvironmentVariables(d) - } - - if _, ok := d.GetOk("build_environment_variables"); ok { - function.BuildEnvironmentVariables = expandBuildEnvironmentVariables(d) - } - - if v, ok := d.GetOk("vpc_connector"); ok { - function.VpcConnector = v.(string) - } - - if v, ok := d.GetOk("vpc_connector_egress_settings"); ok { - function.VpcConnectorEgressSettings = v.(string) - } - - if v, ok := d.GetOk("max_instances"); ok { - function.MaxInstances = int64(v.(int)) - } - - resource_cloudfunctions_function_log.Printf("[DEBUG] Creating cloud function: %s", function.Name) - - rerr := retryTimeDuration(func() error { - op, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Create( - cloudFuncId.locationId(), function).Do() - if err != nil { - return err - } - - d.SetId(cloudFuncId.cloudFunctionId()) - - return cloudFunctionsOperationWait(config, op, "Creating CloudFunctions Function", userAgent, - d.Timeout(resource_cloudfunctions_function_schema.TimeoutCreate)) - }, d.Timeout(resource_cloudfunctions_function_schema.TimeoutCreate), isCloudFunctionsSourceCodeError) - if rerr != nil { - return rerr - } - resource_cloudfunctions_function_log.Printf("[DEBUG] Finished creating cloud function: %s", function.Name) - return resourceCloudFunctionsRead(d, meta) -} - -func 
resourceCloudFunctionsRead(d *resource_cloudfunctions_function_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - cloudFuncId, err := parseCloudFunctionId(d, config) - if err != nil { - return err - } - - function, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Get(cloudFuncId.cloudFunctionId()).Do() - if err != nil { - return handleNotFoundError(err, d, resource_cloudfunctions_function_fmt.Sprintf("Target CloudFunctions Function %q", cloudFuncId.Name)) - } - - if err := d.Set("name", cloudFuncId.Name); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("description", function.Description); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("entry_point", function.EntryPoint); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting entry_point: %s", err) - } - if err := d.Set("available_memory_mb", function.AvailableMemoryMb); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting available_memory_mb: %s", err) - } - sRemoved := resource_cloudfunctions_function_strings.Replace(function.Timeout, "s", "", -1) - timeout, err := resource_cloudfunctions_function_strconv.Atoi(sRemoved) - if err != nil { - return err - } - if err := d.Set("timeout", timeout); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting timeout: %s", err) - } - if err := d.Set("ingress_settings", function.IngressSettings); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting ingress_settings: %s", err) - } - if err := d.Set("labels", function.Labels); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting labels: %s", err) - } - if err := d.Set("runtime", 
function.Runtime); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting runtime: %s", err) - } - if err := d.Set("service_account_email", function.ServiceAccountEmail); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting service_account_email: %s", err) - } - if err := d.Set("environment_variables", function.EnvironmentVariables); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting environment_variables: %s", err) - } - if err := d.Set("vpc_connector", function.VpcConnector); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting vpc_connector: %s", err) - } - if err := d.Set("vpc_connector_egress_settings", function.VpcConnectorEgressSettings); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting vpc_connector_egress_settings: %s", err) - } - if function.SourceArchiveUrl != "" { - - sourceURL, err := resource_cloudfunctions_function_url.Parse(function.SourceArchiveUrl) - if err != nil { - return err - } - bucket := sourceURL.Host - object := resource_cloudfunctions_function_strings.TrimLeft(sourceURL.Path, "/") - if err := d.Set("source_archive_bucket", bucket); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting source_archive_bucket: %s", err) - } - if err := d.Set("source_archive_object", object); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting source_archive_object: %s", err) - } - } - if err := d.Set("source_repository", flattenSourceRepository(function.SourceRepository)); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting source_repository: %s", err) - } - - if function.HttpsTrigger != nil { - if err := d.Set("trigger_http", true); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting trigger_http: %s", err) - } - if err := d.Set("https_trigger_url", function.HttpsTrigger.Url); err != nil { - return 
resource_cloudfunctions_function_fmt.Errorf("Error setting https_trigger_url: %s", err) - } - } - - if err := d.Set("event_trigger", flattenEventTrigger(function.EventTrigger)); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting event_trigger: %s", err) - } - if err := d.Set("max_instances", function.MaxInstances); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting max_instances: %s", err) - } - if err := d.Set("region", cloudFuncId.Region); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("project", cloudFuncId.Project); err != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error setting project: %s", err) - } - - return nil -} - -func resourceCloudFunctionsUpdate(d *resource_cloudfunctions_function_schema.ResourceData, meta interface{}) error { - resource_cloudfunctions_function_log.Printf("[DEBUG]: Updating google_cloudfunctions_function") - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - cloudFuncId, err := parseCloudFunctionId(d, config) - if err != nil { - return err - } - - function, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Get(cloudFuncId.cloudFunctionId()).Do() - if err != nil { - return handleNotFoundError(err, d, resource_cloudfunctions_function_fmt.Sprintf("Target CloudFunctions Function %q", cloudFuncId.Name)) - } - - function.SourceUploadUrl = "" - - d.Partial(true) - - var updateMaskArr []string - if d.HasChange("available_memory_mb") { - availableMemoryMb := d.Get("available_memory_mb").(int) - function.AvailableMemoryMb = int64(availableMemoryMb) - updateMaskArr = append(updateMaskArr, "availableMemoryMb") - } - - if d.HasChange("source_archive_bucket") || d.HasChange("source_archive_object") { - 
sourceArchiveBucket := d.Get("source_archive_bucket").(string) - sourceArchiveObj := d.Get("source_archive_object").(string) - function.SourceArchiveUrl = resource_cloudfunctions_function_fmt.Sprintf("gs://%v/%v", sourceArchiveBucket, sourceArchiveObj) - updateMaskArr = append(updateMaskArr, "sourceArchiveUrl") - } - - if d.HasChange("source_repository") { - function.SourceRepository = expandSourceRepository(d.Get("source_repository").([]interface{})) - updateMaskArr = append(updateMaskArr, "sourceRepository") - } - - if d.HasChange("description") { - function.Description = d.Get("description").(string) - updateMaskArr = append(updateMaskArr, "description") - } - - if d.HasChange("timeout") { - function.Timeout = resource_cloudfunctions_function_fmt.Sprintf("%vs", d.Get("timeout").(int)) - updateMaskArr = append(updateMaskArr, "timeout") - } - - if d.HasChange("ingress_settings") { - function.IngressSettings = d.Get("ingress_settings").(string) - updateMaskArr = append(updateMaskArr, "ingressSettings") - } - - if d.HasChange("labels") { - function.Labels = expandLabels(d) - updateMaskArr = append(updateMaskArr, "labels") - } - - if d.HasChange("runtime") { - function.Runtime = d.Get("runtime").(string) - updateMaskArr = append(updateMaskArr, "runtime") - } - - if d.HasChange("environment_variables") { - function.EnvironmentVariables = expandEnvironmentVariables(d) - updateMaskArr = append(updateMaskArr, "environmentVariables") - } - - if d.HasChange("build_environment_variables") { - function.BuildEnvironmentVariables = expandBuildEnvironmentVariables(d) - updateMaskArr = append(updateMaskArr, "buildEnvironmentVariables") - } - - if d.HasChange("vpc_connector") { - function.VpcConnector = d.Get("vpc_connector").(string) - updateMaskArr = append(updateMaskArr, "vpcConnector") - } - - if d.HasChange("vpc_connector_egress_settings") { - function.VpcConnectorEgressSettings = d.Get("vpc_connector_egress_settings").(string) - updateMaskArr = append(updateMaskArr, 
"vpcConnectorEgressSettings") - } - - if d.HasChange("event_trigger") { - function.EventTrigger = expandEventTrigger(d.Get("event_trigger").([]interface{}), project) - updateMaskArr = append(updateMaskArr, "eventTrigger", "eventTrigger.failurePolicy.retry") - } - - if d.HasChange("max_instances") { - function.MaxInstances = int64(d.Get("max_instances").(int)) - updateMaskArr = append(updateMaskArr, "maxInstances") - } - - if len(updateMaskArr) > 0 { - resource_cloudfunctions_function_log.Printf("[DEBUG] Send Patch CloudFunction Configuration request: %#v", function) - updateMask := resource_cloudfunctions_function_strings.Join(updateMaskArr, ",") - rerr := retryTimeDuration(func() error { - op, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Patch(function.Name, function). - UpdateMask(updateMask).Do() - if err != nil { - return err - } - - return cloudFunctionsOperationWait(config, op, "Updating CloudFunctions Function", userAgent, - d.Timeout(resource_cloudfunctions_function_schema.TimeoutUpdate)) - }, d.Timeout(resource_cloudfunctions_function_schema.TimeoutUpdate)) - if rerr != nil { - return resource_cloudfunctions_function_fmt.Errorf("Error while updating cloudfunction configuration: %s", rerr) - } - } - d.Partial(false) - - return resourceCloudFunctionsRead(d, meta) -} - -func resourceCloudFunctionsDestroy(d *resource_cloudfunctions_function_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - cloudFuncId, err := parseCloudFunctionId(d, config) - if err != nil { - return err - } - - op, err := config.NewCloudFunctionsClient(userAgent).Projects.Locations.Functions.Delete(cloudFuncId.cloudFunctionId()).Do() - if err != nil { - return err - } - err = cloudFunctionsOperationWait(config, op, "Deleting CloudFunctions Function", userAgent, - d.Timeout(resource_cloudfunctions_function_schema.TimeoutDelete)) - if 
err != nil { - return err - } - - d.SetId("") - - return nil -} - -func expandEventTrigger(configured []interface{}, project string) *resource_cloudfunctions_function_cloudfunctions.EventTrigger { - if len(configured) == 0 || configured[0] == nil { - return nil - } - - data := configured[0].(map[string]interface{}) - eventType := data["event_type"].(string) - resource := data["resource"].(string) - - if !resource_cloudfunctions_function_strings.HasPrefix(resource, "projects/") { - shape := "" - switch { - case resource_cloudfunctions_function_strings.HasPrefix(eventType, "google.storage.object."): - shape = "projects/%s/buckets/%s" - case resource_cloudfunctions_function_strings.HasPrefix(eventType, "google.pubsub.topic."): - shape = "projects/%s/topics/%s" - - case resource_cloudfunctions_function_strings.HasPrefix(eventType, "providers/cloud.storage/eventTypes/"): - - shape = "projects/%s/buckets/%s" - case resource_cloudfunctions_function_strings.HasPrefix(eventType, "providers/cloud.pubsub/eventTypes/"): - shape = "projects/%s/topics/%s" - case resource_cloudfunctions_function_strings.HasPrefix(eventType, "providers/cloud.firestore/eventTypes/"): - - shape = "projects/%s/databases/(default)/documents/%s" - } - - resource = resource_cloudfunctions_function_fmt.Sprintf(shape, project, resource) - } - - return &resource_cloudfunctions_function_cloudfunctions.EventTrigger{ - EventType: eventType, - Resource: resource, - FailurePolicy: expandFailurePolicy(data["failure_policy"].([]interface{})), - } -} - -func flattenEventTrigger(eventTrigger *resource_cloudfunctions_function_cloudfunctions.EventTrigger) []map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) - if eventTrigger == nil { - return result - } - - result = append(result, map[string]interface{}{ - "event_type": eventTrigger.EventType, - "resource": eventTrigger.Resource, - "failure_policy": flattenFailurePolicy(eventTrigger.FailurePolicy), - }) - - return result -} - -func 
expandFailurePolicy(configured []interface{}) *resource_cloudfunctions_function_cloudfunctions.FailurePolicy { - if len(configured) == 0 || configured[0] == nil { - return &resource_cloudfunctions_function_cloudfunctions.FailurePolicy{} - } - - if data := configured[0].(map[string]interface{}); data["retry"].(bool) { - return &resource_cloudfunctions_function_cloudfunctions.FailurePolicy{ - Retry: &resource_cloudfunctions_function_cloudfunctions.Retry{}, - } - } - - return nil -} - -func flattenFailurePolicy(failurePolicy *resource_cloudfunctions_function_cloudfunctions.FailurePolicy) []map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) - if failurePolicy == nil { - return nil - } - - result = append(result, map[string]interface{}{ - "retry": failurePolicy.Retry != nil, - }) - - return result -} - -func expandSourceRepository(configured []interface{}) *resource_cloudfunctions_function_cloudfunctions.SourceRepository { - if len(configured) == 0 || configured[0] == nil { - return &resource_cloudfunctions_function_cloudfunctions.SourceRepository{} - } - - data := configured[0].(map[string]interface{}) - return &resource_cloudfunctions_function_cloudfunctions.SourceRepository{ - Url: data["url"].(string), - } -} - -func flattenSourceRepository(sourceRepo *resource_cloudfunctions_function_cloudfunctions.SourceRepository) []map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) - if sourceRepo == nil { - return nil - } - - result = append(result, map[string]interface{}{ - "url": sourceRepo.Url, - "deployed_url": sourceRepo.DeployedUrl, - }) - - return result -} - -func resourceCloudIotDevice() *resource_cloudiot_device_schema.Resource { - return &resource_cloudiot_device_schema.Resource{ - Create: resourceCloudIotDeviceCreate, - Read: resourceCloudIotDeviceRead, - Update: resourceCloudIotDeviceUpdate, - Delete: resourceCloudIotDeviceDelete, - - Importer: &resource_cloudiot_device_schema.ResourceImporter{ - State: 
resourceCloudIotDeviceImport, - }, - - Timeouts: &resource_cloudiot_device_schema.ResourceTimeout{ - Create: resource_cloudiot_device_schema.DefaultTimeout(4 * resource_cloudiot_device_time.Minute), - Update: resource_cloudiot_device_schema.DefaultTimeout(4 * resource_cloudiot_device_time.Minute), - Delete: resource_cloudiot_device_schema.DefaultTimeout(4 * resource_cloudiot_device_time.Minute), - }, - - Schema: map[string]*resource_cloudiot_device_schema.Schema{ - "name": { - Type: resource_cloudiot_device_schema.TypeString, - Required: true, - ForceNew: true, - Description: `A unique name for the resource.`, - }, - "registry": { - Type: resource_cloudiot_device_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the device registry where this device should be created.`, - }, - "blocked": { - Type: resource_cloudiot_device_schema.TypeBool, - Optional: true, - Description: `If a device is blocked, connections or requests from this device will fail.`, - }, - "credentials": { - Type: resource_cloudiot_device_schema.TypeList, - Optional: true, - Description: `The credentials used to authenticate this device.`, - MaxItems: 3, - Elem: &resource_cloudiot_device_schema.Resource{ - Schema: map[string]*resource_cloudiot_device_schema.Schema{ - "public_key": { - Type: resource_cloudiot_device_schema.TypeList, - Required: true, - Description: `A public key used to verify the signature of JSON Web Tokens (JWTs).`, - MaxItems: 1, - Elem: &resource_cloudiot_device_schema.Resource{ - Schema: map[string]*resource_cloudiot_device_schema.Schema{ - "format": { - Type: resource_cloudiot_device_schema.TypeString, - Required: true, - ValidateFunc: resource_cloudiot_device_validation.StringInSlice([]string{"RSA_PEM", "RSA_X509_PEM", "ES256_PEM", "ES256_X509_PEM"}, false), - Description: `The format of the key. 
Possible values: ["RSA_PEM", "RSA_X509_PEM", "ES256_PEM", "ES256_X509_PEM"]`, - }, - "key": { - Type: resource_cloudiot_device_schema.TypeString, - Required: true, - Description: `The key data.`, - }, - }, - }, - }, - "expiration_time": { - Type: resource_cloudiot_device_schema.TypeString, - Computed: true, - Optional: true, - Description: `The time at which this credential becomes invalid.`, - }, - }, - }, - }, - "gateway_config": { - Type: resource_cloudiot_device_schema.TypeList, - Optional: true, - Description: `Gateway-related configuration and state.`, - MaxItems: 1, - Elem: &resource_cloudiot_device_schema.Resource{ - Schema: map[string]*resource_cloudiot_device_schema.Schema{ - "gateway_auth_method": { - Type: resource_cloudiot_device_schema.TypeString, - Optional: true, - ValidateFunc: resource_cloudiot_device_validation.StringInSlice([]string{"ASSOCIATION_ONLY", "DEVICE_AUTH_TOKEN_ONLY", "ASSOCIATION_AND_DEVICE_AUTH_TOKEN", ""}, false), - Description: `Indicates whether the device is a gateway. Possible values: ["ASSOCIATION_ONLY", "DEVICE_AUTH_TOKEN_ONLY", "ASSOCIATION_AND_DEVICE_AUTH_TOKEN"]`, - }, - "gateway_type": { - Type: resource_cloudiot_device_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_cloudiot_device_validation.StringInSlice([]string{"GATEWAY", "NON_GATEWAY", ""}, false), - Description: `Indicates whether the device is a gateway. 
Default value: "NON_GATEWAY" Possible values: ["GATEWAY", "NON_GATEWAY"]`, - Default: "NON_GATEWAY", - }, - "last_accessed_gateway_id": { - Type: resource_cloudiot_device_schema.TypeString, - Computed: true, - Description: `The ID of the gateway the device accessed most recently.`, - }, - "last_accessed_gateway_time": { - Type: resource_cloudiot_device_schema.TypeString, - Computed: true, - Description: `The most recent time at which the device accessed the gateway specified in last_accessed_gateway.`, - }, - }, - }, - }, - "log_level": { - Type: resource_cloudiot_device_schema.TypeString, - Optional: true, - ValidateFunc: resource_cloudiot_device_validation.StringInSlice([]string{"NONE", "ERROR", "INFO", "DEBUG", ""}, false), - Description: `The logging verbosity for device activity. Possible values: ["NONE", "ERROR", "INFO", "DEBUG"]`, - }, - "metadata": { - Type: resource_cloudiot_device_schema.TypeMap, - Optional: true, - Description: `The metadata key-value pairs assigned to the device.`, - Elem: &resource_cloudiot_device_schema.Schema{Type: resource_cloudiot_device_schema.TypeString}, - }, - "config": { - Type: resource_cloudiot_device_schema.TypeList, - Computed: true, - Description: `The most recent device configuration, which is eventually sent from Cloud IoT Core to the device.`, - Elem: &resource_cloudiot_device_schema.Resource{ - Schema: map[string]*resource_cloudiot_device_schema.Schema{ - "binary_data": { - Type: resource_cloudiot_device_schema.TypeString, - Optional: true, - Description: `The device configuration data.`, - }, - "cloud_update_time": { - Type: resource_cloudiot_device_schema.TypeString, - Computed: true, - Description: `The time at which this configuration version was updated in Cloud IoT Core.`, - }, - "device_ack_time": { - Type: resource_cloudiot_device_schema.TypeString, - Computed: true, - Description: `The time at which Cloud IoT Core received the acknowledgment from the device, -indicating that the device has received this 
configuration version.`, - }, - "version": { - Type: resource_cloudiot_device_schema.TypeString, - Computed: true, - Description: `The version of this update.`, - }, - }, - }, - }, - "last_config_ack_time": { - Type: resource_cloudiot_device_schema.TypeString, - Computed: true, - Description: `The last time a cloud-to-device config version acknowledgment was received from the device.`, - }, - "last_config_send_time": { - Type: resource_cloudiot_device_schema.TypeString, - Computed: true, - Description: `The last time a cloud-to-device config version was sent to the device.`, - }, - "last_error_status": { - Type: resource_cloudiot_device_schema.TypeList, - Computed: true, - Description: `The error message of the most recent error, such as a failure to publish to Cloud Pub/Sub.`, - Elem: &resource_cloudiot_device_schema.Resource{ - Schema: map[string]*resource_cloudiot_device_schema.Schema{ - "details": { - Type: resource_cloudiot_device_schema.TypeList, - Optional: true, - Description: `A list of messages that carry the error details.`, - Elem: &resource_cloudiot_device_schema.Schema{ - Type: resource_cloudiot_device_schema.TypeMap, - }, - }, - "message": { - Type: resource_cloudiot_device_schema.TypeString, - Optional: true, - Description: `A developer-facing error message, which should be in English.`, - }, - "number": { - Type: resource_cloudiot_device_schema.TypeInt, - Optional: true, - Description: `The status code, which should be an enum value of google.rpc.Code.`, - }, - }, - }, - }, - "last_error_time": { - Type: resource_cloudiot_device_schema.TypeString, - Computed: true, - Description: `The time the most recent error occurred, such as a failure to publish to Cloud Pub/Sub.`, - }, - "last_event_time": { - Type: resource_cloudiot_device_schema.TypeString, - Computed: true, - Description: `The last time a telemetry event was received.`, - }, - "last_heartbeat_time": { - Type: resource_cloudiot_device_schema.TypeString, - Computed: true, - Description: `The 
last time an MQTT PINGREQ was received.`, - }, - "last_state_time": { - Type: resource_cloudiot_device_schema.TypeString, - Computed: true, - Description: `The last time a state event was received.`, - }, - "num_id": { - Type: resource_cloudiot_device_schema.TypeString, - Computed: true, - Description: `A server-defined unique numeric ID for the device. -This is a more compact way to identify devices, and it is globally unique.`, - }, - "state": { - Type: resource_cloudiot_device_schema.TypeList, - Computed: true, - Description: `The state most recently received from the device.`, - Elem: &resource_cloudiot_device_schema.Resource{ - Schema: map[string]*resource_cloudiot_device_schema.Schema{ - "binary_data": { - Type: resource_cloudiot_device_schema.TypeString, - Optional: true, - Description: `The device state data.`, - }, - "update_time": { - Type: resource_cloudiot_device_schema.TypeString, - Optional: true, - Description: `The time at which this state version was updated in Cloud IoT Core.`, - }, - }, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudIotDeviceCreate(d *resource_cloudiot_device_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - idProp, err := expandCloudIotDeviceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_cloudiot_device_reflect.ValueOf(idProp)) && (ok || !resource_cloudiot_device_reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - credentialsProp, err := expandCloudIotDeviceCredentials(d.Get("credentials"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("credentials"); !isEmptyValue(resource_cloudiot_device_reflect.ValueOf(credentialsProp)) && (ok || !resource_cloudiot_device_reflect.DeepEqual(v, credentialsProp)) { - obj["credentials"] = 
credentialsProp - } - blockedProp, err := expandCloudIotDeviceBlocked(d.Get("blocked"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("blocked"); !isEmptyValue(resource_cloudiot_device_reflect.ValueOf(blockedProp)) && (ok || !resource_cloudiot_device_reflect.DeepEqual(v, blockedProp)) { - obj["blocked"] = blockedProp - } - logLevelProp, err := expandCloudIotDeviceLogLevel(d.Get("log_level"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_level"); !isEmptyValue(resource_cloudiot_device_reflect.ValueOf(logLevelProp)) && (ok || !resource_cloudiot_device_reflect.DeepEqual(v, logLevelProp)) { - obj["logLevel"] = logLevelProp - } - metadataProp, err := expandCloudIotDeviceMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(resource_cloudiot_device_reflect.ValueOf(metadataProp)) && (ok || !resource_cloudiot_device_reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - gatewayConfigProp, err := expandCloudIotDeviceGatewayConfig(d.Get("gateway_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("gateway_config"); !isEmptyValue(resource_cloudiot_device_reflect.ValueOf(gatewayConfigProp)) && (ok || !resource_cloudiot_device_reflect.DeepEqual(v, gatewayConfigProp)) { - obj["gatewayConfig"] = gatewayConfigProp - } - - url, err := replaceVars(d, config, "{{CloudIotBasePath}}{{registry}}/devices") - if err != nil { - return err - } - - resource_cloudiot_device_log.Printf("[DEBUG] Creating new Device: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_cloudiot_device_schema.TimeoutCreate)) - if err != nil { - return resource_cloudiot_device_fmt.Errorf("Error creating Device: %s", err) - } - - id, err := 
replaceVars(d, config, "{{registry}}/devices/{{name}}") - if err != nil { - return resource_cloudiot_device_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_cloudiot_device_log.Printf("[DEBUG] Finished creating Device %q: %#v", d.Id(), res) - - return resourceCloudIotDeviceRead(d, meta) -} - -func resourceCloudIotDeviceRead(d *resource_cloudiot_device_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudIotBasePath}}{{registry}}/devices/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_cloudiot_device_fmt.Sprintf("CloudIotDevice %q", d.Id())) - } - - if err := d.Set("name", flattenCloudIotDeviceName(res["id"], d, config)); err != nil { - return resource_cloudiot_device_fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("num_id", flattenCloudIotDeviceNumId(res["numId"], d, config)); err != nil { - return resource_cloudiot_device_fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("credentials", flattenCloudIotDeviceCredentials(res["credentials"], d, config)); err != nil { - return resource_cloudiot_device_fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("last_heartbeat_time", flattenCloudIotDeviceLastHeartbeatTime(res["lastHeartbeatTime"], d, config)); err != nil { - return resource_cloudiot_device_fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("last_event_time", flattenCloudIotDeviceLastEventTime(res["lastEventTime"], d, config)); err != nil { - return resource_cloudiot_device_fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("last_state_time", 
flattenCloudIotDeviceLastStateTime(res["lastStateTime"], d, config)); err != nil { - return resource_cloudiot_device_fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("last_config_ack_time", flattenCloudIotDeviceLastConfigAckTime(res["lastConfigAckTime"], d, config)); err != nil { - return resource_cloudiot_device_fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("last_config_send_time", flattenCloudIotDeviceLastConfigSendTime(res["lastConfigSendTime"], d, config)); err != nil { - return resource_cloudiot_device_fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("blocked", flattenCloudIotDeviceBlocked(res["blocked"], d, config)); err != nil { - return resource_cloudiot_device_fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("last_error_time", flattenCloudIotDeviceLastErrorTime(res["lastErrorTime"], d, config)); err != nil { - return resource_cloudiot_device_fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("last_error_status", flattenCloudIotDeviceLastErrorStatus(res["lastErrorStatus"], d, config)); err != nil { - return resource_cloudiot_device_fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("config", flattenCloudIotDeviceConfig(res["config"], d, config)); err != nil { - return resource_cloudiot_device_fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("state", flattenCloudIotDeviceState(res["state"], d, config)); err != nil { - return resource_cloudiot_device_fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("log_level", flattenCloudIotDeviceLogLevel(res["logLevel"], d, config)); err != nil { - return resource_cloudiot_device_fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("metadata", flattenCloudIotDeviceMetadata(res["metadata"], d, config)); err != nil { - return resource_cloudiot_device_fmt.Errorf("Error reading Device: %s", err) - } - if err := d.Set("gateway_config", 
flattenCloudIotDeviceGatewayConfig(res["gatewayConfig"], d, config)); err != nil { - return resource_cloudiot_device_fmt.Errorf("Error reading Device: %s", err) - } - - return nil -} - -func resourceCloudIotDeviceUpdate(d *resource_cloudiot_device_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - credentialsProp, err := expandCloudIotDeviceCredentials(d.Get("credentials"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("credentials"); !isEmptyValue(resource_cloudiot_device_reflect.ValueOf(v)) && (ok || !resource_cloudiot_device_reflect.DeepEqual(v, credentialsProp)) { - obj["credentials"] = credentialsProp - } - blockedProp, err := expandCloudIotDeviceBlocked(d.Get("blocked"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("blocked"); !isEmptyValue(resource_cloudiot_device_reflect.ValueOf(v)) && (ok || !resource_cloudiot_device_reflect.DeepEqual(v, blockedProp)) { - obj["blocked"] = blockedProp - } - logLevelProp, err := expandCloudIotDeviceLogLevel(d.Get("log_level"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_level"); !isEmptyValue(resource_cloudiot_device_reflect.ValueOf(v)) && (ok || !resource_cloudiot_device_reflect.DeepEqual(v, logLevelProp)) { - obj["logLevel"] = logLevelProp - } - metadataProp, err := expandCloudIotDeviceMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(resource_cloudiot_device_reflect.ValueOf(v)) && (ok || !resource_cloudiot_device_reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - gatewayConfigProp, err := expandCloudIotDeviceGatewayConfig(d.Get("gateway_config"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("gateway_config"); !isEmptyValue(resource_cloudiot_device_reflect.ValueOf(v)) && (ok || !resource_cloudiot_device_reflect.DeepEqual(v, gatewayConfigProp)) { - obj["gatewayConfig"] = gatewayConfigProp - } - - url, err := replaceVars(d, config, "{{CloudIotBasePath}}{{registry}}/devices/{{name}}") - if err != nil { - return err - } - - resource_cloudiot_device_log.Printf("[DEBUG] Updating Device %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("credentials") { - updateMask = append(updateMask, "credentials") - } - - if d.HasChange("blocked") { - updateMask = append(updateMask, "blocked") - } - - if d.HasChange("log_level") { - updateMask = append(updateMask, "logLevel") - } - - if d.HasChange("metadata") { - updateMask = append(updateMask, "metadata") - } - - if d.HasChange("gateway_config") { - updateMask = append(updateMask, "gateway_config.gateway_auth_method") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_cloudiot_device_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_cloudiot_device_schema.TimeoutUpdate)) - - if err != nil { - return resource_cloudiot_device_fmt.Errorf("Error updating Device %q: %s", d.Id(), err) - } else { - resource_cloudiot_device_log.Printf("[DEBUG] Finished updating Device %q: %#v", d.Id(), res) - } - - return resourceCloudIotDeviceRead(d, meta) -} - -func resourceCloudIotDeviceDelete(d *resource_cloudiot_device_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{CloudIotBasePath}}{{registry}}/devices/{{name}}") - if err != nil { - return err - } - - var obj 
map[string]interface{} - resource_cloudiot_device_log.Printf("[DEBUG] Deleting Device %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_cloudiot_device_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Device") - } - - resource_cloudiot_device_log.Printf("[DEBUG] Finished deleting Device %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudIotDeviceImport(d *resource_cloudiot_device_schema.ResourceData, meta interface{}) ([]*resource_cloudiot_device_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P.+)/devices/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{registry}}/devices/{{name}}") - if err != nil { - return nil, resource_cloudiot_device_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_cloudiot_device_schema.ResourceData{d}, nil -} - -func flattenCloudIotDeviceName(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceNumId(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceCredentials(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "expiration_time": flattenCloudIotDeviceCredentialsExpirationTime(original["expirationTime"], d, config), - "public_key": flattenCloudIotDeviceCredentialsPublicKey(original["publicKey"], d, config), - }) - } - 
return transformed -} - -func flattenCloudIotDeviceCredentialsExpirationTime(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceCredentialsPublicKey(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["format"] = - flattenCloudIotDeviceCredentialsPublicKeyFormat(original["format"], d, config) - transformed["key"] = - flattenCloudIotDeviceCredentialsPublicKeyKey(original["key"], d, config) - return []interface{}{transformed} -} - -func flattenCloudIotDeviceCredentialsPublicKeyFormat(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceCredentialsPublicKeyKey(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLastHeartbeatTime(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLastEventTime(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLastStateTime(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLastConfigAckTime(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLastConfigSendTime(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceBlocked(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudIotDeviceLastErrorTime(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLastErrorStatus(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["number"] = - flattenCloudIotDeviceLastErrorStatusNumber(original["number"], d, config) - transformed["message"] = - flattenCloudIotDeviceLastErrorStatusMessage(original["message"], d, config) - transformed["details"] = - flattenCloudIotDeviceLastErrorStatusDetails(original["details"], d, config) - return []interface{}{transformed} -} - -func flattenCloudIotDeviceLastErrorStatusNumber(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_cloudiot_device_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenCloudIotDeviceLastErrorStatusMessage(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLastErrorStatusDetails(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceConfig(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["version"] = - flattenCloudIotDeviceConfigVersion(original["version"], d, config) - transformed["cloud_update_time"] = - 
flattenCloudIotDeviceConfigCloudUpdateTime(original["cloudUpdateTime"], d, config) - transformed["device_ack_time"] = - flattenCloudIotDeviceConfigDeviceAckTime(original["deviceAckTime"], d, config) - transformed["binary_data"] = - flattenCloudIotDeviceConfigBinaryData(original["binaryData"], d, config) - return []interface{}{transformed} -} - -func flattenCloudIotDeviceConfigVersion(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceConfigCloudUpdateTime(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceConfigDeviceAckTime(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceConfigBinaryData(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceState(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["update_time"] = - flattenCloudIotDeviceStateUpdateTime(original["updateTime"], d, config) - transformed["binary_data"] = - flattenCloudIotDeviceStateBinaryData(original["binaryData"], d, config) - return []interface{}{transformed} -} - -func flattenCloudIotDeviceStateUpdateTime(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceStateBinaryData(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceLogLevel(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceMetadata(v 
interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceGatewayConfig(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["gateway_type"] = - flattenCloudIotDeviceGatewayConfigGatewayType(original["gatewayType"], d, config) - transformed["gateway_auth_method"] = - flattenCloudIotDeviceGatewayConfigGatewayAuthMethod(original["gatewayAuthMethod"], d, config) - transformed["last_accessed_gateway_id"] = - flattenCloudIotDeviceGatewayConfigLastAccessedGatewayId(original["lastAccessedGatewayId"], d, config) - transformed["last_accessed_gateway_time"] = - flattenCloudIotDeviceGatewayConfigLastAccessedGatewayTime(original["lastAccessedGatewayTime"], d, config) - return []interface{}{transformed} -} - -func flattenCloudIotDeviceGatewayConfigGatewayType(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceGatewayConfigGatewayAuthMethod(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceGatewayConfigLastAccessedGatewayId(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceGatewayConfigLastAccessedGatewayTime(v interface{}, d *resource_cloudiot_device_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudIotDeviceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, 
raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExpirationTime, err := expandCloudIotDeviceCredentialsExpirationTime(original["expiration_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudiot_device_reflect.ValueOf(transformedExpirationTime); val.IsValid() && !isEmptyValue(val) { - transformed["expirationTime"] = transformedExpirationTime - } - - transformedPublicKey, err := expandCloudIotDeviceCredentialsPublicKey(original["public_key"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudiot_device_reflect.ValueOf(transformedPublicKey); val.IsValid() && !isEmptyValue(val) { - transformed["publicKey"] = transformedPublicKey - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudIotDeviceCredentialsExpirationTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceCredentialsPublicKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFormat, err := expandCloudIotDeviceCredentialsPublicKeyFormat(original["format"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudiot_device_reflect.ValueOf(transformedFormat); val.IsValid() && !isEmptyValue(val) { - transformed["format"] = transformedFormat - } - - transformedKey, err := expandCloudIotDeviceCredentialsPublicKeyKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudiot_device_reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - return transformed, nil -} - -func 
expandCloudIotDeviceCredentialsPublicKeyFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceCredentialsPublicKeyKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceBlocked(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceLogLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceMetadata(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandCloudIotDeviceGatewayConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedGatewayType, err := expandCloudIotDeviceGatewayConfigGatewayType(original["gateway_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudiot_device_reflect.ValueOf(transformedGatewayType); val.IsValid() && !isEmptyValue(val) { - transformed["gatewayType"] = transformedGatewayType - } - - transformedGatewayAuthMethod, err := expandCloudIotDeviceGatewayConfigGatewayAuthMethod(original["gateway_auth_method"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudiot_device_reflect.ValueOf(transformedGatewayAuthMethod); val.IsValid() && !isEmptyValue(val) { - transformed["gatewayAuthMethod"] = transformedGatewayAuthMethod - } - - transformedLastAccessedGatewayId, err := 
expandCloudIotDeviceGatewayConfigLastAccessedGatewayId(original["last_accessed_gateway_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudiot_device_reflect.ValueOf(transformedLastAccessedGatewayId); val.IsValid() && !isEmptyValue(val) { - transformed["lastAccessedGatewayId"] = transformedLastAccessedGatewayId - } - - transformedLastAccessedGatewayTime, err := expandCloudIotDeviceGatewayConfigLastAccessedGatewayTime(original["last_accessed_gateway_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudiot_device_reflect.ValueOf(transformedLastAccessedGatewayTime); val.IsValid() && !isEmptyValue(val) { - transformed["lastAccessedGatewayTime"] = transformedLastAccessedGatewayTime - } - - return transformed, nil -} - -func expandCloudIotDeviceGatewayConfigGatewayType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceGatewayConfigGatewayAuthMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceGatewayConfigLastAccessedGatewayId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceGatewayConfigLastAccessedGatewayTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceRegistryHTTPConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHTTPEnabledState, err := expandCloudIotDeviceRegistryHTTPEnabledState(original["http_enabled_state"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudiot_registry_reflect.ValueOf(transformedHTTPEnabledState); val.IsValid() && !isEmptyValue(val) { - transformed["httpEnabledState"] = transformedHTTPEnabledState - } - 
- return transformed, nil -} - -func expandCloudIotDeviceRegistryHTTPEnabledState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceRegistryMqttConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMqttEnabledState, err := expandCloudIotDeviceRegistryMqttEnabledState(original["mqtt_enabled_state"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudiot_registry_reflect.ValueOf(transformedMqttEnabledState); val.IsValid() && !isEmptyValue(val) { - transformed["mqttEnabledState"] = transformedMqttEnabledState - } - - return transformed, nil -} - -func expandCloudIotDeviceRegistryMqttEnabledState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceRegistryStateNotificationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubTopicName, err := expandCloudIotDeviceRegistryStateNotificationConfigPubsubTopicName(original["pubsub_topic_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudiot_registry_reflect.ValueOf(transformedPubsubTopicName); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubTopicName"] = transformedPubsubTopicName - } - - return transformed, nil -} - -func expandCloudIotDeviceRegistryStateNotificationConfigPubsubTopicName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceRegistryCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - - for _, raw := range l { - if raw == nil { - continue - } 
- original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPublicKeyCertificate, err := expandCloudIotDeviceRegistryCredentialsPublicKeyCertificate(original["public_key_certificate"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudiot_registry_reflect.ValueOf(transformedPublicKeyCertificate); val.IsValid() && !isEmptyValue(val) { - transformed["publicKeyCertificate"] = transformedPublicKeyCertificate - } - - req = append(req, transformed) - } - - return req, nil -} - -func expandCloudIotDeviceRegistryCredentialsPublicKeyCertificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFormat, err := expandCloudIotDeviceRegistryPublicKeyCertificateFormat(original["format"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudiot_registry_reflect.ValueOf(transformedFormat); val.IsValid() && !isEmptyValue(val) { - transformed["format"] = transformedFormat - } - - transformedCertificate, err := expandCloudIotDeviceRegistryPublicKeyCertificateCertificate(original["certificate"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudiot_registry_reflect.ValueOf(transformedCertificate); val.IsValid() && !isEmptyValue(val) { - transformed["certificate"] = transformedCertificate - } - - return transformed, nil -} - -func expandCloudIotDeviceRegistryPublicKeyCertificateFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceRegistryPublicKeyCertificateCertificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func flattenCloudIotDeviceRegistryCredentials(v interface{}, d *resource_cloudiot_registry_schema.ResourceData, config *Config) interface{} { - 
resource_cloudiot_registry_log.Printf("[DEBUG] Flattening device resitry credentials: %q", d.Id()) - if v == nil { - resource_cloudiot_registry_log.Printf("[DEBUG] The credentials array is nil: %q", d.Id()) - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - resource_cloudiot_registry_log.Printf("[DEBUG] Original credential: %+v", original) - if len(original) < 1 { - resource_cloudiot_registry_log.Printf("[DEBUG] Excluding empty credential that the API returned. %q", d.Id()) - continue - } - resource_cloudiot_registry_log.Printf("[DEBUG] Credentials array before appending a new credential: %+v", transformed) - transformed = append(transformed, map[string]interface{}{ - "public_key_certificate": flattenCloudIotDeviceRegistryCredentialsPublicKeyCertificate(original["publicKeyCertificate"], d, config), - }) - resource_cloudiot_registry_log.Printf("[DEBUG] Credentials array after appending a new credential: %+v", transformed) - } - return transformed -} - -func flattenCloudIotDeviceRegistryCredentialsPublicKeyCertificate(v interface{}, d *resource_cloudiot_registry_schema.ResourceData, config *Config) interface{} { - resource_cloudiot_registry_log.Printf("[DEBUG] Flattening device resitry credentials public key certificate: %q", d.Id()) - if v == nil { - resource_cloudiot_registry_log.Printf("[DEBUG] The public key certificate is nil: %q", d.Id()) - return v - } - - original := v.(map[string]interface{}) - resource_cloudiot_registry_log.Printf("[DEBUG] Original public key certificate: %+v", original) - transformed := make(map[string]interface{}) - - transformedPublicKeyCertificateFormat := flattenCloudIotDeviceRegistryPublicKeyCertificateFormat(original["format"], d, config) - transformed["format"] = transformedPublicKeyCertificateFormat - - transformedPublicKeyCertificateCertificate := 
flattenCloudIotDeviceRegistryPublicKeyCertificateCertificate(original["certificate"], d, config) - transformed["certificate"] = transformedPublicKeyCertificateCertificate - - resource_cloudiot_registry_log.Printf("[DEBUG] Transformed public key certificate: %+v", transformed) - - return transformed -} - -func flattenCloudIotDeviceRegistryPublicKeyCertificateFormat(v interface{}, d *resource_cloudiot_registry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceRegistryPublicKeyCertificateCertificate(v interface{}, d *resource_cloudiot_registry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceRegistryHTTPConfig(v interface{}, d *resource_cloudiot_registry_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHTTPEnabledState := flattenCloudIotDeviceRegistryHTTPConfigHTTPEnabledState(original["httpEnabledState"], d, config) - transformed["http_enabled_state"] = transformedHTTPEnabledState - - return transformed -} - -func flattenCloudIotDeviceRegistryHTTPConfigHTTPEnabledState(v interface{}, d *resource_cloudiot_registry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceRegistryMqttConfig(v interface{}, d *resource_cloudiot_registry_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMqttEnabledState := flattenCloudIotDeviceRegistryMqttConfigMqttEnabledState(original["mqttEnabledState"], d, config) - transformed["mqtt_enabled_state"] = transformedMqttEnabledState - - return transformed -} - -func flattenCloudIotDeviceRegistryMqttConfigMqttEnabledState(v interface{}, d *resource_cloudiot_registry_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenCloudIotDeviceRegistryStateNotificationConfig(v interface{}, d *resource_cloudiot_registry_schema.ResourceData, config *Config) interface{} { - resource_cloudiot_registry_log.Printf("[DEBUG] Flattening state notification config: %+v", v) - if v == nil { - return v - } - - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubTopicName := flattenCloudIotDeviceRegistryStateNotificationConfigPubsubTopicName(original["pubsubTopicName"], d, config) - if val := resource_cloudiot_registry_reflect.ValueOf(transformedPubsubTopicName); val.IsValid() && !isEmptyValue(val) { - resource_cloudiot_registry_log.Printf("[DEBUG] pubsub topic name is not null: %v", d.Get("pubsub_topic_name")) - transformed["pubsub_topic_name"] = transformedPubsubTopicName - } - - return transformed -} - -func flattenCloudIotDeviceRegistryStateNotificationConfigPubsubTopicName(v interface{}, d *resource_cloudiot_registry_schema.ResourceData, config *Config) interface{} { - return v -} - -func validateCloudIotDeviceRegistryID(v interface{}, k string) (warnings []string, errors []error) { - value := v.(string) - if resource_cloudiot_registry_strings.HasPrefix(value, "goog") { - errors = append(errors, resource_cloudiot_registry_fmt.Errorf( - "%q (%q) can not start with \"goog\"", k, value)) - } - if !resource_cloudiot_registry_regexp.MustCompile(CloudIoTIdRegex).MatchString(value) { - errors = append(errors, resource_cloudiot_registry_fmt.Errorf( - "%q (%q) doesn't match regexp %q", k, value, CloudIoTIdRegex)) - } - return -} - -func validateCloudIotDeviceRegistrySubfolderMatch(v interface{}, k string) (warnings []string, errors []error) { - value := v.(string) - if resource_cloudiot_registry_strings.HasPrefix(value, "/") { - errors = append(errors, resource_cloudiot_registry_fmt.Errorf( - "%q (%q) can not start with '/'", k, value)) - } - return -} - -func resourceCloudIotDeviceRegistry() *resource_cloudiot_registry_schema.Resource { - return 
&resource_cloudiot_registry_schema.Resource{ - Create: resourceCloudIotDeviceRegistryCreate, - Read: resourceCloudIotDeviceRegistryRead, - Update: resourceCloudIotDeviceRegistryUpdate, - Delete: resourceCloudIotDeviceRegistryDelete, - - Importer: &resource_cloudiot_registry_schema.ResourceImporter{ - State: resourceCloudIotDeviceRegistryImport, - }, - - Timeouts: &resource_cloudiot_registry_schema.ResourceTimeout{ - Create: resource_cloudiot_registry_schema.DefaultTimeout(4 * resource_cloudiot_registry_time.Minute), - Update: resource_cloudiot_registry_schema.DefaultTimeout(4 * resource_cloudiot_registry_time.Minute), - Delete: resource_cloudiot_registry_schema.DefaultTimeout(4 * resource_cloudiot_registry_time.Minute), - }, - - Schema: map[string]*resource_cloudiot_registry_schema.Schema{ - "name": { - Type: resource_cloudiot_registry_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateCloudIotDeviceRegistryID, - Description: `A unique name for the resource, required by device registry.`, - }, - "event_notification_configs": { - Type: resource_cloudiot_registry_schema.TypeList, - Computed: true, - Optional: true, - Description: `List of configurations for event notifications, such as PubSub topics -to publish device events to.`, - MaxItems: 10, - Elem: &resource_cloudiot_registry_schema.Resource{ - Schema: map[string]*resource_cloudiot_registry_schema.Schema{ - "pubsub_topic_name": { - Type: resource_cloudiot_registry_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `PubSub topic name to publish device events.`, - }, - "subfolder_matches": { - Type: resource_cloudiot_registry_schema.TypeString, - Optional: true, - ValidateFunc: validateCloudIotDeviceRegistrySubfolderMatch, - Description: `If the subfolder name matches this string exactly, this -configuration will be used. The string must not include the -leading '/' character. If empty, all strings are matched. 
Empty -value can only be used for the last 'event_notification_configs' -item.`, - }, - }, - }, - }, - "log_level": { - Type: resource_cloudiot_registry_schema.TypeString, - Optional: true, - ValidateFunc: resource_cloudiot_registry_validation.StringInSlice([]string{"NONE", "ERROR", "INFO", "DEBUG", ""}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("NONE"), - Description: `The default logging verbosity for activity from devices in this -registry. Specifies which events should be written to logs. For -example, if the LogLevel is ERROR, only events that terminate in -errors will be logged. LogLevel is inclusive; enabling INFO logging -will also enable ERROR logging. Default value: "NONE" Possible values: ["NONE", "ERROR", "INFO", "DEBUG"]`, - Default: "NONE", - }, - "region": { - Type: resource_cloudiot_registry_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The region in which the created registry should reside. -If it is not provided, the provider region is used.`, - }, - "state_notification_config": { - Type: resource_cloudiot_registry_schema.TypeMap, - Description: `A PubSub topic to publish device state updates.`, - Optional: true, - }, - "mqtt_config": { - Type: resource_cloudiot_registry_schema.TypeMap, - Description: `Activate or deactivate MQTT.`, - Computed: true, - Optional: true, - }, - "http_config": { - Type: resource_cloudiot_registry_schema.TypeMap, - Description: `Activate or deactivate HTTP.`, - Computed: true, - Optional: true, - }, - "credentials": { - Type: resource_cloudiot_registry_schema.TypeList, - Description: `List of public key certificates to authenticate devices.`, - Optional: true, - MaxItems: 10, - Elem: &resource_cloudiot_registry_schema.Resource{ - Schema: map[string]*resource_cloudiot_registry_schema.Schema{ - "public_key_certificate": { - Type: resource_cloudiot_registry_schema.TypeMap, - Description: `A public key certificate format and data.`, - Required: true, - }, - }, - }, 
- }, - "project": { - Type: resource_cloudiot_registry_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceCloudIotDeviceRegistryCreate(d *resource_cloudiot_registry_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - idProp, err := expandCloudIotDeviceRegistryName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_cloudiot_registry_reflect.ValueOf(idProp)) && (ok || !resource_cloudiot_registry_reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - eventNotificationConfigsProp, err := expandCloudIotDeviceRegistryEventNotificationConfigs(d.Get("event_notification_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("event_notification_configs"); !isEmptyValue(resource_cloudiot_registry_reflect.ValueOf(eventNotificationConfigsProp)) && (ok || !resource_cloudiot_registry_reflect.DeepEqual(v, eventNotificationConfigsProp)) { - obj["eventNotificationConfigs"] = eventNotificationConfigsProp - } - logLevelProp, err := expandCloudIotDeviceRegistryLogLevel(d.Get("log_level"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_level"); !isEmptyValue(resource_cloudiot_registry_reflect.ValueOf(logLevelProp)) && (ok || !resource_cloudiot_registry_reflect.DeepEqual(v, logLevelProp)) { - obj["logLevel"] = logLevelProp - } - - obj, err = resourceCloudIotDeviceRegistryEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudIotBasePath}}projects/{{project}}/locations/{{region}}/registries") - if err != nil { - return err - } - - resource_cloudiot_registry_log.Printf("[DEBUG] Creating new DeviceRegistry: %#v", obj) - billingProject := "" - - project, err := 
getProject(d, config) - if err != nil { - return resource_cloudiot_registry_fmt.Errorf("Error fetching project for DeviceRegistry: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_cloudiot_registry_schema.TimeoutCreate)) - if err != nil { - return resource_cloudiot_registry_fmt.Errorf("Error creating DeviceRegistry: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/registries/{{name}}") - if err != nil { - return resource_cloudiot_registry_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_cloudiot_registry_log.Printf("[DEBUG] Finished creating DeviceRegistry %q: %#v", d.Id(), res) - - return resourceCloudIotDeviceRegistryRead(d, meta) -} - -func resourceCloudIotDeviceRegistryRead(d *resource_cloudiot_registry_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudIotBasePath}}projects/{{project}}/locations/{{region}}/registries/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloudiot_registry_fmt.Errorf("Error fetching project for DeviceRegistry: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_cloudiot_registry_fmt.Sprintf("CloudIotDeviceRegistry %q", d.Id())) - } - - res, err = resourceCloudIotDeviceRegistryDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - 
resource_cloudiot_registry_log.Printf("[DEBUG] Removing CloudIotDeviceRegistry because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_cloudiot_registry_fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - if err := d.Set("region", region); err != nil { - return resource_cloudiot_registry_fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - - if err := d.Set("name", flattenCloudIotDeviceRegistryName(res["id"], d, config)); err != nil { - return resource_cloudiot_registry_fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - if err := d.Set("event_notification_configs", flattenCloudIotDeviceRegistryEventNotificationConfigs(res["eventNotificationConfigs"], d, config)); err != nil { - return resource_cloudiot_registry_fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - if err := d.Set("log_level", flattenCloudIotDeviceRegistryLogLevel(res["logLevel"], d, config)); err != nil { - return resource_cloudiot_registry_fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - - return nil -} - -func resourceCloudIotDeviceRegistryUpdate(d *resource_cloudiot_registry_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_cloudiot_registry_fmt.Errorf("Error fetching project for DeviceRegistry: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - eventNotificationConfigsProp, err := expandCloudIotDeviceRegistryEventNotificationConfigs(d.Get("event_notification_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("event_notification_configs"); !isEmptyValue(resource_cloudiot_registry_reflect.ValueOf(v)) && (ok || 
!resource_cloudiot_registry_reflect.DeepEqual(v, eventNotificationConfigsProp)) { - obj["eventNotificationConfigs"] = eventNotificationConfigsProp - } - logLevelProp, err := expandCloudIotDeviceRegistryLogLevel(d.Get("log_level"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_level"); !isEmptyValue(resource_cloudiot_registry_reflect.ValueOf(v)) && (ok || !resource_cloudiot_registry_reflect.DeepEqual(v, logLevelProp)) { - obj["logLevel"] = logLevelProp - } - - obj, err = resourceCloudIotDeviceRegistryEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{CloudIotBasePath}}projects/{{project}}/locations/{{region}}/registries/{{name}}") - if err != nil { - return err - } - - resource_cloudiot_registry_log.Printf("[DEBUG] Updating DeviceRegistry %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("event_notification_configs") { - updateMask = append(updateMask, "eventNotificationConfigs") - } - - if d.HasChange("log_level") { - updateMask = append(updateMask, "logLevel") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_cloudiot_registry_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - resource_cloudiot_registry_log.Printf("[DEBUG] updateMask before adding extra schema entries %q: %v", d.Id(), updateMask) - - resource_cloudiot_registry_log.Printf("[DEBUG] Pre-update on state notification config: %q", d.Id()) - if d.HasChange("state_notification_config") { - resource_cloudiot_registry_log.Printf("[DEBUG] %q stateNotificationConfig.pubsubTopicName has a change. Adding it to the update mask", d.Id()) - updateMask = append(updateMask, "stateNotificationConfig.pubsubTopicName") - } - - resource_cloudiot_registry_log.Printf("[DEBUG] Pre-update on MQTT config: %q", d.Id()) - if d.HasChange("mqtt_config") { - resource_cloudiot_registry_log.Printf("[DEBUG] %q mqttConfig.mqttEnabledState has a change. 
Adding it to the update mask", d.Id()) - updateMask = append(updateMask, "mqttConfig.mqttEnabledState") - } - - resource_cloudiot_registry_log.Printf("[DEBUG] Pre-update on HTTP config: %q", d.Id()) - if d.HasChange("http_config") { - resource_cloudiot_registry_log.Printf("[DEBUG] %q httpConfig.httpEnabledState has a change. Adding it to the update mask", d.Id()) - updateMask = append(updateMask, "httpConfig.httpEnabledState") - } - - resource_cloudiot_registry_log.Printf("[DEBUG] Pre-update on credentials: %q", d.Id()) - if d.HasChange("credentials") { - resource_cloudiot_registry_log.Printf("[DEBUG] %q credentials has a change. Adding it to the update mask", d.Id()) - updateMask = append(updateMask, "credentials") - } - - resource_cloudiot_registry_log.Printf("[DEBUG] updateMask after adding extra schema entries %q: %v", d.Id(), updateMask) - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_cloudiot_registry_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - resource_cloudiot_registry_log.Printf("[DEBUG] Update URL %q: %v", d.Id(), url) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_cloudiot_registry_schema.TimeoutUpdate)) - - if err != nil { - return resource_cloudiot_registry_fmt.Errorf("Error updating DeviceRegistry %q: %s", d.Id(), err) - } else { - resource_cloudiot_registry_log.Printf("[DEBUG] Finished updating DeviceRegistry %q: %#v", d.Id(), res) - } - - return resourceCloudIotDeviceRegistryRead(d, meta) -} - -func resourceCloudIotDeviceRegistryDelete(d *resource_cloudiot_registry_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return 
resource_cloudiot_registry_fmt.Errorf("Error fetching project for DeviceRegistry: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{CloudIotBasePath}}projects/{{project}}/locations/{{region}}/registries/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_cloudiot_registry_log.Printf("[DEBUG] Deleting DeviceRegistry %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_cloudiot_registry_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DeviceRegistry") - } - - resource_cloudiot_registry_log.Printf("[DEBUG] Finished deleting DeviceRegistry %q: %#v", d.Id(), res) - return nil -} - -func resourceCloudIotDeviceRegistryImport(d *resource_cloudiot_registry_schema.ResourceData, meta interface{}) ([]*resource_cloudiot_registry_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P[^/]+)/locations/(?P[^/]+)/registries/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/registries/{{name}}") - if err != nil { - return nil, resource_cloudiot_registry_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_cloudiot_registry_schema.ResourceData{d}, nil -} - -func flattenCloudIotDeviceRegistryName(v interface{}, d *resource_cloudiot_registry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceRegistryEventNotificationConfigs(v interface{}, d *resource_cloudiot_registry_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l 
{ - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "subfolder_matches": flattenCloudIotDeviceRegistryEventNotificationConfigsSubfolderMatches(original["subfolderMatches"], d, config), - "pubsub_topic_name": flattenCloudIotDeviceRegistryEventNotificationConfigsPubsubTopicName(original["pubsubTopicName"], d, config), - }) - } - return transformed -} - -func flattenCloudIotDeviceRegistryEventNotificationConfigsSubfolderMatches(v interface{}, d *resource_cloudiot_registry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceRegistryEventNotificationConfigsPubsubTopicName(v interface{}, d *resource_cloudiot_registry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenCloudIotDeviceRegistryLogLevel(v interface{}, d *resource_cloudiot_registry_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandCloudIotDeviceRegistryName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceRegistryEventNotificationConfigs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSubfolderMatches, err := expandCloudIotDeviceRegistryEventNotificationConfigsSubfolderMatches(original["subfolder_matches"], d, config) - if err != nil { - return nil, err - } else if val := resource_cloudiot_registry_reflect.ValueOf(transformedSubfolderMatches); val.IsValid() && !isEmptyValue(val) { - transformed["subfolderMatches"] = transformedSubfolderMatches - } - - transformedPubsubTopicName, err := expandCloudIotDeviceRegistryEventNotificationConfigsPubsubTopicName(original["pubsub_topic_name"], d, 
config) - if err != nil { - return nil, err - } else if val := resource_cloudiot_registry_reflect.ValueOf(transformedPubsubTopicName); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubTopicName"] = transformedPubsubTopicName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandCloudIotDeviceRegistryEventNotificationConfigsSubfolderMatches(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceRegistryEventNotificationConfigsPubsubTopicName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandCloudIotDeviceRegistryLogLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceCloudIotDeviceRegistryEncoder(d *resource_cloudiot_registry_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - - resource_cloudiot_registry_log.Printf("[DEBUG] Resource data before encoding extra schema entries %q: %#v", d.Id(), obj) - - resource_cloudiot_registry_log.Printf("[DEBUG] Encoding state notification config: %q", d.Id()) - stateNotificationConfigProp, err := expandCloudIotDeviceRegistryStateNotificationConfig(d.Get("state_notification_config"), d, config) - if err != nil { - return nil, err - } else if v, ok := d.GetOkExists("state_notification_config"); !isEmptyValue(resource_cloudiot_registry_reflect.ValueOf(stateNotificationConfigProp)) && (ok || !resource_cloudiot_registry_reflect.DeepEqual(v, stateNotificationConfigProp)) { - resource_cloudiot_registry_log.Printf("[DEBUG] Encoding %q. 
Setting stateNotificationConfig: %#v", d.Id(), stateNotificationConfigProp) - obj["stateNotificationConfig"] = stateNotificationConfigProp - } - - resource_cloudiot_registry_log.Printf("[DEBUG] Encoding HTTP config: %q", d.Id()) - httpConfigProp, err := expandCloudIotDeviceRegistryHTTPConfig(d.Get("http_config"), d, config) - if err != nil { - return nil, err - } else if v, ok := d.GetOkExists("http_config"); !isEmptyValue(resource_cloudiot_registry_reflect.ValueOf(httpConfigProp)) && (ok || !resource_cloudiot_registry_reflect.DeepEqual(v, httpConfigProp)) { - resource_cloudiot_registry_log.Printf("[DEBUG] Encoding %q. Setting httpConfig: %#v", d.Id(), httpConfigProp) - obj["httpConfig"] = httpConfigProp - } - - resource_cloudiot_registry_log.Printf("[DEBUG] Encoding MQTT config: %q", d.Id()) - mqttConfigProp, err := expandCloudIotDeviceRegistryMqttConfig(d.Get("mqtt_config"), d, config) - if err != nil { - return nil, err - } else if v, ok := d.GetOkExists("mqtt_config"); !isEmptyValue(resource_cloudiot_registry_reflect.ValueOf(mqttConfigProp)) && (ok || !resource_cloudiot_registry_reflect.DeepEqual(v, mqttConfigProp)) { - resource_cloudiot_registry_log.Printf("[DEBUG] Encoding %q. Setting mqttConfig: %#v", d.Id(), mqttConfigProp) - obj["mqttConfig"] = mqttConfigProp - } - - resource_cloudiot_registry_log.Printf("[DEBUG] Encoding credentials: %q", d.Id()) - credentialsProp, err := expandCloudIotDeviceRegistryCredentials(d.Get("credentials"), d, config) - if err != nil { - return nil, err - } else if v, ok := d.GetOkExists("credentials"); !isEmptyValue(resource_cloudiot_registry_reflect.ValueOf(credentialsProp)) && (ok || !resource_cloudiot_registry_reflect.DeepEqual(v, credentialsProp)) { - resource_cloudiot_registry_log.Printf("[DEBUG] Encoding %q. 
Setting credentials: %#v", d.Id(), credentialsProp) - obj["credentials"] = credentialsProp - } - - resource_cloudiot_registry_log.Printf("[DEBUG] Resource data after encoding extra schema entries %q: %#v", d.Id(), obj) - - return obj, nil -} - -func resourceCloudIotDeviceRegistryDecoder(d *resource_cloudiot_registry_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - - resource_cloudiot_registry_log.Printf("[DEBUG] Decoding state notification config: %q", d.Id()) - resource_cloudiot_registry_log.Printf("[DEBUG] State notification config before decoding: %v", d.Get("state_notification_config")) - if err := d.Set("state_notification_config", flattenCloudIotDeviceRegistryStateNotificationConfig(res["stateNotificationConfig"], d, config)); err != nil { - return nil, resource_cloudiot_registry_fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - resource_cloudiot_registry_log.Printf("[DEBUG] State notification config after decoding: %v", d.Get("state_notification_config")) - - resource_cloudiot_registry_log.Printf("[DEBUG] Decoding HTTP config: %q", d.Id()) - resource_cloudiot_registry_log.Printf("[DEBUG] HTTP config before decoding: %v", d.Get("http_config")) - if err := d.Set("http_config", flattenCloudIotDeviceRegistryHTTPConfig(res["httpConfig"], d, config)); err != nil { - return nil, resource_cloudiot_registry_fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - resource_cloudiot_registry_log.Printf("[DEBUG] HTTP config after decoding: %v", d.Get("http_config")) - - resource_cloudiot_registry_log.Printf("[DEBUG] Decoding MQTT config: %q", d.Id()) - resource_cloudiot_registry_log.Printf("[DEBUG] MQTT config before decoding: %v", d.Get("mqtt_config")) - if err := d.Set("mqtt_config", flattenCloudIotDeviceRegistryMqttConfig(res["mqttConfig"], d, config)); err != nil { - return nil, resource_cloudiot_registry_fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - 
resource_cloudiot_registry_log.Printf("[DEBUG] MQTT config after decoding: %v", d.Get("mqtt_config")) - - resource_cloudiot_registry_log.Printf("[DEBUG] Decoding credentials: %q", d.Id()) - resource_cloudiot_registry_log.Printf("[DEBUG] credentials before decoding: %v", d.Get("credentials")) - if err := d.Set("credentials", flattenCloudIotDeviceRegistryCredentials(res["credentials"], d, config)); err != nil { - return nil, resource_cloudiot_registry_fmt.Errorf("Error reading DeviceRegistry: %s", err) - } - resource_cloudiot_registry_log.Printf("[DEBUG] credentials after decoding: %v", d.Get("credentials")) - - return res, nil -} - -const ( - composerEnvironmentEnvVariablesRegexp = "[a-zA-Z_][a-zA-Z0-9_]*." - composerEnvironmentReservedAirflowEnvVarRegexp = "AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+" - composerEnvironmentVersionRegexp = `composer-([0-9]+\.[0-9]+\.[0-9]+(-preview\.[0-9]+)?|latest)-airflow-([0-9]+\.[0-9]+(\.[0-9]+.*)?)` -) - -var composerEnvironmentReservedEnvVar = map[string]struct{}{ - "AIRFLOW_HOME": {}, - "C_FORCE_ROOT": {}, - "CONTAINER_NAME": {}, - "DAGS_FOLDER": {}, - "GCP_PROJECT": {}, - "GCS_BUCKET": {}, - "GKE_CLUSTER_NAME": {}, - "SQL_DATABASE": {}, - "SQL_INSTANCE": {}, - "SQL_PASSWORD": {}, - "SQL_PROJECT": {}, - "SQL_REGION": {}, - "SQL_USER": {}, -} - -var ( - composerSoftwareConfigKeys = []string{ - "config.0.software_config.0.airflow_config_overrides", - "config.0.software_config.0.pypi_packages", - "config.0.software_config.0.env_variables", - "config.0.software_config.0.image_version", - "config.0.software_config.0.python_version", - "config.0.software_config.0.scheduler_count", - } - - composerConfigKeys = []string{ - "config.0.node_count", - "config.0.node_config", - "config.0.software_config", - "config.0.private_environment_config", - } - - composerIpAllocationPolicyKeys = []string{ - "config.0.node_config.0.ip_allocation_policy.0.use_ip_aliases", - "config.0.node_config.0.ip_allocation_policy.0.cluster_secondary_range_name", - 
"config.0.node_config.0.ip_allocation_policy.0.services_secondary_range_name", - "config.0.node_config.0.ip_allocation_policy.0.cluster_ipv4_cidr_block", - "config.0.node_config.0.ip_allocation_policy.0.services_ipv4_cidr_block", - } -) - -func resourceComposerEnvironment() *resource_composer_environment_schema.Resource { - return &resource_composer_environment_schema.Resource{ - Create: resourceComposerEnvironmentCreate, - Read: resourceComposerEnvironmentRead, - Update: resourceComposerEnvironmentUpdate, - Delete: resourceComposerEnvironmentDelete, - - Importer: &resource_composer_environment_schema.ResourceImporter{ - State: resourceComposerEnvironmentImport, - }, - - Timeouts: &resource_composer_environment_schema.ResourceTimeout{ - - Create: resource_composer_environment_schema.DefaultTimeout(120 * resource_composer_environment_time.Minute), - Update: resource_composer_environment_schema.DefaultTimeout(120 * resource_composer_environment_time.Minute), - Delete: resource_composer_environment_schema.DefaultTimeout(30 * resource_composer_environment_time.Minute), - }, - - Schema: map[string]*resource_composer_environment_schema.Schema{ - "name": { - Type: resource_composer_environment_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCPName, - Description: `Name of the environment.`, - }, - "region": { - Type: resource_composer_environment_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The location or Compute Engine region for the environment.`, - }, - "project": { - Type: resource_composer_environment_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, - }, - "config": { - Type: resource_composer_environment_schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: `Configuration parameters for this environment.`, - Elem: &resource_composer_environment_schema.Resource{ - Schema: map[string]*resource_composer_environment_schema.Schema{ - "node_count": { - Type: resource_composer_environment_schema.TypeInt, - Computed: true, - Optional: true, - AtLeastOneOf: composerConfigKeys, - ValidateFunc: resource_composer_environment_validation.IntAtLeast(3), - Description: `The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, - }, - "node_config": { - Type: resource_composer_environment_schema.TypeList, - Computed: true, - Optional: true, - AtLeastOneOf: composerConfigKeys, - MaxItems: 1, - Description: `The configuration used for the Kubernetes Engine cluster.`, - Elem: &resource_composer_environment_schema.Resource{ - Schema: map[string]*resource_composer_environment_schema.Schema{ - "zone": { - Type: resource_composer_environment_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Compute Engine zone in which to deploy the VMs running the Apache Airflow software, specified as the zone name or relative resource name (e.g. "projects/{project}/zones/{zone}"). Must belong to the enclosing environment's project and region. 
This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, - }, - "machine_type": { - Type: resource_composer_environment_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Compute Engine machine type used for cluster instances, specified as a name or relative resource name. For example: "projects/{project}/zones/{zone}/machineTypes/{machineType}". Must belong to the enclosing environment's project and region/zone. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, - }, - "network": { - Type: resource_composer_environment_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Compute Engine machine type used for cluster instances, specified as a name or relative resource name. For example: "projects/{project}/zones/{zone}/machineTypes/{machineType}". Must belong to the enclosing environment's project and region/zone. The network must belong to the environment's project. If unspecified, the "default" network ID in the environment's project is used. If a Custom Subnet Network is provided, subnetwork must also be provided.`, - }, - "subnetwork": { - Type: resource_composer_environment_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Compute Engine subnetwork to be used for machine communications, , specified as a self-link, relative resource name (e.g. "projects/{project}/regions/{region}/subnetworks/{subnetwork}"), or by name. 
If subnetwork is provided, network must also be provided and the subnetwork must belong to the enclosing environment's project and region.`, - }, - "disk_size_gb": { - Type: resource_composer_environment_schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, - }, - "oauth_scopes": { - Type: resource_composer_environment_schema.TypeSet, - Computed: true, - Optional: true, - ForceNew: true, - Elem: &resource_composer_environment_schema.Schema{ - Type: resource_composer_environment_schema.TypeString, - }, - Set: resource_composer_environment_schema.HashString, - Description: `The set of Google API scopes to be made available on all node VMs. Cannot be updated. If empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, - }, - "service_account": { - Type: resource_composer_environment_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateServiceAccountRelativeNameOrEmail, - DiffSuppressFunc: compareServiceAccountEmailToLink, - Description: `The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated. 
If given, note that the service account must have roles/composer.worker for any GCP resources created under the Cloud Composer Environment.`, - }, - "tags": { - Type: resource_composer_environment_schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &resource_composer_environment_schema.Schema{ - Type: resource_composer_environment_schema.TypeString, - }, - Set: resource_composer_environment_schema.HashString, - Description: `The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with RFC1035. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, - }, - "ip_allocation_policy": { - Type: resource_composer_environment_schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - ConfigMode: resource_composer_environment_schema.SchemaConfigModeAttr, - MaxItems: 1, - Description: `Configuration for controlling how IPs are allocated in the GKE cluster. Cannot be updated.`, - Elem: &resource_composer_environment_schema.Resource{ - Schema: map[string]*resource_composer_environment_schema.Schema{ - "use_ip_aliases": { - Type: resource_composer_environment_schema.TypeBool, - Optional: true, - ForceNew: true, - AtLeastOneOf: composerIpAllocationPolicyKeys, - Description: `Whether or not to enable Alias IPs in the GKE cluster. If true, a VPC-native cluster is created. Defaults to true if the ip_allocation_policy block is present in config. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters.`, - }, - "cluster_secondary_range_name": { - Type: resource_composer_environment_schema.TypeString, - Optional: true, - ForceNew: true, - AtLeastOneOf: composerIpAllocationPolicyKeys, - Description: `The name of the cluster's secondary range used to allocate IP addresses to pods. 
Specify either cluster_secondary_range_name or cluster_ipv4_cidr_block but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when use_ip_aliases is true.`, - ConflictsWith: []string{"config.0.node_config.0.ip_allocation_policy.0.cluster_ipv4_cidr_block"}, - }, - "services_secondary_range_name": { - Type: resource_composer_environment_schema.TypeString, - Optional: true, - ForceNew: true, - AtLeastOneOf: composerIpAllocationPolicyKeys, - Description: `The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either services_secondary_range_name or services_ipv4_cidr_block but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when use_ip_aliases is true.`, - ConflictsWith: []string{"config.0.node_config.0.ip_allocation_policy.0.services_ipv4_cidr_block"}, - }, - "cluster_ipv4_cidr_block": { - Type: resource_composer_environment_schema.TypeString, - Optional: true, - ForceNew: true, - AtLeastOneOf: composerIpAllocationPolicyKeys, - Description: `The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when use_ip_aliases is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. /14) to have GKE choose a range with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. 
Specify either cluster_secondary_range_name or cluster_ipv4_cidr_block but not both.`, - DiffSuppressFunc: cidrOrSizeDiffSuppress, - ConflictsWith: []string{"config.0.node_config.0.ip_allocation_policy.0.cluster_secondary_range_name"}, - }, - "services_ipv4_cidr_block": { - Type: resource_composer_environment_schema.TypeString, - Optional: true, - ForceNew: true, - AtLeastOneOf: composerIpAllocationPolicyKeys, - Description: `The IP address range used to allocate IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when use_ip_aliases is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. /14) to have GKE choose a range with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. Specify either services_secondary_range_name or services_ipv4_cidr_block but not both.`, - DiffSuppressFunc: cidrOrSizeDiffSuppress, - ConflictsWith: []string{"config.0.node_config.0.ip_allocation_policy.0.services_secondary_range_name"}, - }, - }, - }, - }, - }, - }, - }, - "software_config": { - Type: resource_composer_environment_schema.TypeList, - Optional: true, - Computed: true, - AtLeastOneOf: composerConfigKeys, - MaxItems: 1, - Description: `The configuration settings for software inside the environment.`, - Elem: &resource_composer_environment_schema.Resource{ - Schema: map[string]*resource_composer_environment_schema.Schema{ - "airflow_config_overrides": { - Type: resource_composer_environment_schema.TypeMap, - Optional: true, - AtLeastOneOf: composerSoftwareConfigKeys, - Elem: &resource_composer_environment_schema.Schema{Type: resource_composer_environment_schema.TypeString}, - Description: `Apache Airflow configuration properties to override. 
Property keys contain the section and property names, separated by a hyphen, for example "core-dags_are_paused_at_creation". Section names must not contain hyphens ("-"), opening square brackets ("["), or closing square brackets ("]"). The property name must not be empty and cannot contain "=" or ";". Section and property names cannot contain characters: "." Apache Airflow configuration property names must be written in snake_case. Property values can contain any character, and can be written in any lower/upper case format. Certain Apache Airflow configuration property values are blacklisted, and cannot be overridden.`, - }, - "pypi_packages": { - Type: resource_composer_environment_schema.TypeMap, - Optional: true, - AtLeastOneOf: composerSoftwareConfigKeys, - Elem: &resource_composer_environment_schema.Schema{Type: resource_composer_environment_schema.TypeString}, - ValidateFunc: validateComposerEnvironmentPypiPackages, - Description: `Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name (e.g. "numpy"). Values are the lowercase extras and version specifier (e.g. "==1.12.0", "[devel,gcp_api]", "[devel]>=1.8.2, <1.9.2"). To specify a package without pinning it to a version specifier, use the empty string as the value.`, - }, - "env_variables": { - Type: resource_composer_environment_schema.TypeMap, - Optional: true, - AtLeastOneOf: composerSoftwareConfigKeys, - Elem: &resource_composer_environment_schema.Schema{Type: resource_composer_environment_schema.TypeString}, - ValidateFunc: validateComposerEnvironmentEnvVariables, - Description: `Additional environment variables to provide to the Apache Airflow schedulerf, worker, and webserver processes. Environment variable names must match the regular expression [a-zA-Z_][a-zA-Z0-9_]*. 
They cannot specify Apache Airflow software configuration overrides (they cannot match the regular expression AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+), and they cannot match any of the following reserved names: AIRFLOW_HOME C_FORCE_ROOT CONTAINER_NAME DAGS_FOLDER GCP_PROJECT GCS_BUCKET GKE_CLUSTER_NAME SQL_DATABASE SQL_INSTANCE SQL_PASSWORD SQL_PROJECT SQL_REGION SQL_USER.`, - }, - "image_version": { - Type: resource_composer_environment_schema.TypeString, - Computed: true, - Optional: true, - AtLeastOneOf: composerSoftwareConfigKeys, - ValidateFunc: validateRegexp(composerEnvironmentVersionRegexp), - DiffSuppressFunc: composerImageVersionDiffSuppress, - Description: `The version of the software running in the environment. This encapsulates both the version of Cloud Composer functionality and the version of Apache Airflow. It must match the regular expression composer-[0-9]+\.[0-9]+(\.[0-9]+)?-airflow-[0-9]+\.[0-9]+(\.[0-9]+.*)?. The Cloud Composer portion of the version is a semantic version. The portion of the image version following 'airflow-' is an official Apache Airflow repository release name. See documentation for allowed release names.`, - }, - "python_version": { - Type: resource_composer_environment_schema.TypeString, - Optional: true, - AtLeastOneOf: composerSoftwareConfigKeys, - Computed: true, - ForceNew: true, - Description: `The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '2'. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3.`, - }, - "scheduler_count": { - Type: resource_composer_environment_schema.TypeInt, - Optional: true, - AtLeastOneOf: composerSoftwareConfigKeys, - Computed: true, - Description: `The number of schedulers for Airflow. 
This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*.`, - }, - }, - }, - }, - "private_environment_config": { - Type: resource_composer_environment_schema.TypeList, - Optional: true, - Computed: true, - AtLeastOneOf: composerConfigKeys, - MaxItems: 1, - ForceNew: true, - Description: `The configuration used for the Private IP Cloud Composer environment.`, - Elem: &resource_composer_environment_schema.Resource{ - Schema: map[string]*resource_composer_environment_schema.Schema{ - "enable_private_endpoint": { - Type: resource_composer_environment_schema.TypeBool, - Optional: true, - Default: true, - AtLeastOneOf: []string{ - "config.0.private_environment_config.0.enable_private_endpoint", - "config.0.private_environment_config.0.master_ipv4_cidr_block", - "config.0.private_environment_config.0.cloud_sql_ipv4_cidr_block", - "config.0.private_environment_config.0.web_server_ipv4_cidr_block", - }, - ForceNew: true, - Description: `If true, access to the public endpoint of the GKE cluster is denied. If this field is set to true, ip_allocation_policy.use_ip_aliases must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, - }, - "master_ipv4_cidr_block": { - Type: resource_composer_environment_schema.TypeString, - Optional: true, - Computed: true, - AtLeastOneOf: []string{ - "config.0.private_environment_config.0.enable_private_endpoint", - "config.0.private_environment_config.0.master_ipv4_cidr_block", - "config.0.private_environment_config.0.cloud_sql_ipv4_cidr_block", - "config.0.private_environment_config.0.web_server_ipv4_cidr_block", - }, - ForceNew: true, - Description: `The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the cluster master or set of masters and to the internal load balancer virtual IP. This range must not overlap with any other ranges in use within the cluster's network. 
If left blank, the default value of '172.16.0.0/28' is used.`, - }, - "web_server_ipv4_cidr_block": { - Type: resource_composer_environment_schema.TypeString, - Optional: true, - Computed: true, - AtLeastOneOf: []string{ - "config.0.private_environment_config.0.enable_private_endpoint", - "config.0.private_environment_config.0.master_ipv4_cidr_block", - "config.0.private_environment_config.0.cloud_sql_ipv4_cidr_block", - "config.0.private_environment_config.0.web_server_ipv4_cidr_block", - }, - ForceNew: true, - Description: `The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from master_ipv4_cidr_block and cloud_sql_ipv4_cidr_block. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, - }, - "cloud_sql_ipv4_cidr_block": { - Type: resource_composer_environment_schema.TypeString, - Optional: true, - Computed: true, - AtLeastOneOf: []string{ - "config.0.private_environment_config.0.enable_private_endpoint", - "config.0.private_environment_config.0.master_ipv4_cidr_block", - "config.0.private_environment_config.0.cloud_sql_ipv4_cidr_block", - "config.0.private_environment_config.0.web_server_ipv4_cidr_block", - }, - ForceNew: true, - Description: `The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from web_server_ipv4_cidr_block.`, - }, - }, - }, - }, - "airflow_uri": { - Type: resource_composer_environment_schema.TypeString, - Computed: true, - Description: `The URI of the Apache Airflow Web UI hosted within this environment.`, - }, - "dag_gcs_prefix": { - Type: resource_composer_environment_schema.TypeString, - Computed: true, - Description: `The Cloud Storage prefix of the DAGs for this environment. Although Cloud Storage objects reside in a flat namespace, a hierarchical file tree can be simulated using '/'-delimited object name prefixes. 
DAG objects for this environment reside in a simulated directory with this prefix.`, - }, - "gke_cluster": { - Type: resource_composer_environment_schema.TypeString, - Computed: true, - Description: `The Kubernetes Engine cluster used to run this environment.`, - }, - }, - }, - }, - "labels": { - Type: resource_composer_environment_schema.TypeMap, - Optional: true, - Elem: &resource_composer_environment_schema.Schema{Type: resource_composer_environment_schema.TypeString}, - Description: `User-defined labels for this environment. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: Label keys must be between 1 and 63 characters long and must conform to the following regular expression: [a-z]([-a-z0-9]*[a-z0-9])?. Label values must be between 0 and 63 characters long and must conform to the regular expression ([a-z]([-a-z0-9]*[a-z0-9])?)?. No more than 64 labels can be associated with a given environment. 
Both keys and values must be <= 128 bytes in size.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComposerEnvironmentCreate(d *resource_composer_environment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - envName, err := resourceComposerEnvironmentName(d, config) - if err != nil { - return err - } - - transformedConfig, err := expandComposerEnvironmentConfig(d.Get("config"), d, config) - if err != nil { - return err - } - - env := &resource_composer_environment_composer.Environment{ - Name: envName.resourceName(), - Labels: expandLabels(d), - Config: transformedConfig, - } - - updateOnlyEnv := getComposerEnvironmentPostCreateUpdateObj(env) - - resource_composer_environment_log.Printf("[DEBUG] Creating new Environment %q", envName.parentName()) - op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Create(envName.parentName(), env).Do() - if err != nil { - return err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/environments/{{name}}") - if err != nil { - return resource_composer_environment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - waitErr := composerOperationWaitTime( - config, op, envName.Project, "Creating Environment", userAgent, - d.Timeout(resource_composer_environment_schema.TimeoutCreate)) - - if waitErr != nil { - - d.SetId("") - - errMsg := resource_composer_environment_fmt.Sprintf("Error waiting to create Environment: %s", waitErr) - if err := handleComposerEnvironmentCreationOpFailure(id, envName, d, config); err != nil { - return resource_composer_environment_fmt.Errorf("Error waiting to create Environment: %s. 
An initial "+ - "environment was or is still being created, and clean up failed with "+ - "error: %s.", errMsg, err) - } - - return resource_composer_environment_fmt.Errorf("Error waiting to create Environment: %s", waitErr) - } - - resource_composer_environment_log.Printf("[DEBUG] Finished creating Environment %q: %#v", d.Id(), op) - - if err := resourceComposerEnvironmentPostCreateUpdate(updateOnlyEnv, d, config, userAgent); err != nil { - return err - } - - return resourceComposerEnvironmentRead(d, meta) -} - -func resourceComposerEnvironmentRead(d *resource_composer_environment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - envName, err := resourceComposerEnvironmentName(d, config) - if err != nil { - return err - } - - res, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Get(envName.resourceName()).Do() - if err != nil { - return handleNotFoundError(err, d, resource_composer_environment_fmt.Sprintf("ComposerEnvironment %q", d.Id())) - } - - if err := d.Set("project", envName.Project); err != nil { - return resource_composer_environment_fmt.Errorf("Error setting Environment: %s", err) - } - - if err := d.Set("region", envName.Region); err != nil { - return resource_composer_environment_fmt.Errorf("Error setting Environment: %s", err) - } - if err := d.Set("name", GetResourceNameFromSelfLink(res.Name)); err != nil { - return resource_composer_environment_fmt.Errorf("Error setting Environment: %s", err) - } - if err := d.Set("config", flattenComposerEnvironmentConfig(res.Config)); err != nil { - return resource_composer_environment_fmt.Errorf("Error setting Environment: %s", err) - } - if err := d.Set("labels", res.Labels); err != nil { - return resource_composer_environment_fmt.Errorf("Error setting Environment: %s", err) - } - return nil -} - -func resourceComposerEnvironmentUpdate(d 
*resource_composer_environment_schema.ResourceData, meta interface{}) error { - tfConfig := meta.(*Config) - userAgent, err := generateUserAgentString(d, tfConfig.userAgent) - if err != nil { - return err - } - - d.Partial(true) - - if d.HasChange("config") { - config, err := expandComposerEnvironmentConfig(d.Get("config"), d, tfConfig) - if err != nil { - return err - } - - if d.HasChange("config.0.software_config.0.image_version") { - patchObj := &resource_composer_environment_composer.Environment{ - Config: &resource_composer_environment_composer.EnvironmentConfig{ - SoftwareConfig: &resource_composer_environment_composer.SoftwareConfig{}, - }, - } - if config != nil && config.SoftwareConfig != nil { - patchObj.Config.SoftwareConfig.ImageVersion = config.SoftwareConfig.ImageVersion - } - err = resourceComposerEnvironmentPatchField("config.softwareConfig.imageVersion", userAgent, patchObj, d, tfConfig) - if err != nil { - return err - } - } - - if d.HasChange("config.0.software_config.0.airflow_config_overrides") { - patchObj := &resource_composer_environment_composer.Environment{ - Config: &resource_composer_environment_composer.EnvironmentConfig{ - SoftwareConfig: &resource_composer_environment_composer.SoftwareConfig{ - AirflowConfigOverrides: make(map[string]string), - }, - }, - } - - if config != nil && config.SoftwareConfig != nil && len(config.SoftwareConfig.AirflowConfigOverrides) > 0 { - patchObj.Config.SoftwareConfig.AirflowConfigOverrides = config.SoftwareConfig.AirflowConfigOverrides - } - - err = resourceComposerEnvironmentPatchField("config.softwareConfig.airflowConfigOverrides", userAgent, patchObj, d, tfConfig) - if err != nil { - return err - } - } - - if d.HasChange("config.0.software_config.0.env_variables") { - patchObj := &resource_composer_environment_composer.Environment{ - Config: &resource_composer_environment_composer.EnvironmentConfig{ - SoftwareConfig: &resource_composer_environment_composer.SoftwareConfig{ - EnvVariables: 
make(map[string]string), - }, - }, - } - if config != nil && config.SoftwareConfig != nil && len(config.SoftwareConfig.EnvVariables) > 0 { - patchObj.Config.SoftwareConfig.EnvVariables = config.SoftwareConfig.EnvVariables - } - - err = resourceComposerEnvironmentPatchField("config.softwareConfig.envVariables", userAgent, patchObj, d, tfConfig) - if err != nil { - return err - } - } - - if d.HasChange("config.0.software_config.0.pypi_packages") { - patchObj := &resource_composer_environment_composer.Environment{ - Config: &resource_composer_environment_composer.EnvironmentConfig{ - SoftwareConfig: &resource_composer_environment_composer.SoftwareConfig{ - PypiPackages: make(map[string]string), - }, - }, - } - if config != nil && config.SoftwareConfig != nil && config.SoftwareConfig.PypiPackages != nil { - patchObj.Config.SoftwareConfig.PypiPackages = config.SoftwareConfig.PypiPackages - } - - err = resourceComposerEnvironmentPatchField("config.softwareConfig.pypiPackages", userAgent, patchObj, d, tfConfig) - if err != nil { - return err - } - } - - if d.HasChange("config.0.node_count") { - patchObj := &resource_composer_environment_composer.Environment{Config: &resource_composer_environment_composer.EnvironmentConfig{}} - if config != nil { - patchObj.Config.NodeCount = config.NodeCount - } - err = resourceComposerEnvironmentPatchField("config.nodeCount", userAgent, patchObj, d, tfConfig) - if err != nil { - return err - } - } - - if d.HasChange("config.0.web_server_network_access_control.0.allowed_ip_range") { - patchObj := &resource_composer_environment_composer.Environment{Config: &resource_composer_environment_composer.EnvironmentConfig{}} - if config != nil { - patchObj.Config.WebServerNetworkAccessControl = config.WebServerNetworkAccessControl - } - err = resourceComposerEnvironmentPatchField("config.webServerNetworkAccessControl", userAgent, patchObj, d, tfConfig) - if err != nil { - return err - } - } - - } - - if d.HasChange("labels") { - patchEnv := 
&resource_composer_environment_composer.Environment{Labels: expandLabels(d)} - err := resourceComposerEnvironmentPatchField("labels", userAgent, patchEnv, d, tfConfig) - if err != nil { - return err - } - } - - d.Partial(false) - return resourceComposerEnvironmentRead(d, tfConfig) -} - -func resourceComposerEnvironmentPostCreateUpdate(updateEnv *resource_composer_environment_composer.Environment, d *resource_composer_environment_schema.ResourceData, cfg *Config, userAgent string) error { - if updateEnv == nil { - return nil - } - - d.Partial(true) - - if updateEnv.Config != nil && updateEnv.Config.SoftwareConfig != nil && len(updateEnv.Config.SoftwareConfig.PypiPackages) > 0 { - resource_composer_environment_log.Printf("[DEBUG] Running post-create update for Environment %q", d.Id()) - err := resourceComposerEnvironmentPatchField("config.softwareConfig.pypiPackages", userAgent, updateEnv, d, cfg) - if err != nil { - return err - } - - resource_composer_environment_log.Printf("[DEBUG] Finish update to Environment %q post create for update only fields", d.Id()) - } - d.Partial(false) - return resourceComposerEnvironmentRead(d, cfg) -} - -func resourceComposerEnvironmentPatchField(updateMask, userAgent string, env *resource_composer_environment_composer.Environment, d *resource_composer_environment_schema.ResourceData, config *Config) error { - envJson, _ := env.MarshalJSON() - resource_composer_environment_log.Printf("[DEBUG] Updating Environment %q (updateMask = %q): %s", d.Id(), updateMask, string(envJson)) - envName, err := resourceComposerEnvironmentName(d, config) - if err != nil { - return err - } - - op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments. - Patch(envName.resourceName(), env). 
- UpdateMask(updateMask).Do() - if err != nil { - return err - } - - waitErr := composerOperationWaitTime( - config, op, envName.Project, "Updating newly created Environment", userAgent, - d.Timeout(resource_composer_environment_schema.TimeoutCreate)) - if waitErr != nil { - - return resource_composer_environment_fmt.Errorf("Error waiting to update Environment: %s", waitErr) - } - - resource_composer_environment_log.Printf("[DEBUG] Finished updating Environment %q (updateMask = %q)", d.Id(), updateMask) - return nil -} - -func resourceComposerEnvironmentDelete(d *resource_composer_environment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - envName, err := resourceComposerEnvironmentName(d, config) - if err != nil { - return err - } - - resource_composer_environment_log.Printf("[DEBUG] Deleting Environment %q", d.Id()) - op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Delete(envName.resourceName()).Do() - if err != nil { - return err - } - - err = composerOperationWaitTime( - config, op, envName.Project, "Deleting Environment", userAgent, - d.Timeout(resource_composer_environment_schema.TimeoutDelete)) - if err != nil { - return err - } - - resource_composer_environment_log.Printf("[DEBUG] Finished deleting Environment %q: %#v", d.Id(), op) - return nil -} - -func resourceComposerEnvironmentImport(d *resource_composer_environment_schema.ResourceData, meta interface{}) ([]*resource_composer_environment_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/environments/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/environments/{{name}}") - if err != nil { - return nil, 
resource_composer_environment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_composer_environment_schema.ResourceData{d}, nil -} - -func flattenComposerEnvironmentConfig(envCfg *resource_composer_environment_composer.EnvironmentConfig) interface{} { - if envCfg == nil { - return nil - } - transformed := make(map[string]interface{}) - transformed["gke_cluster"] = envCfg.GkeCluster - transformed["dag_gcs_prefix"] = envCfg.DagGcsPrefix - transformed["node_count"] = envCfg.NodeCount - transformed["airflow_uri"] = envCfg.AirflowUri - transformed["node_config"] = flattenComposerEnvironmentConfigNodeConfig(envCfg.NodeConfig) - transformed["software_config"] = flattenComposerEnvironmentConfigSoftwareConfig(envCfg.SoftwareConfig) - transformed["private_environment_config"] = flattenComposerEnvironmentConfigPrivateEnvironmentConfig(envCfg.PrivateEnvironmentConfig) - - return []interface{}{transformed} -} - -func flattenComposerEnvironmentConfigPrivateEnvironmentConfig(envCfg *resource_composer_environment_composer.PrivateEnvironmentConfig) interface{} { - if envCfg == nil { - return nil - } - - transformed := make(map[string]interface{}) - transformed["enable_private_endpoint"] = envCfg.PrivateClusterConfig.EnablePrivateEndpoint - transformed["master_ipv4_cidr_block"] = envCfg.PrivateClusterConfig.MasterIpv4CidrBlock - transformed["cloud_sql_ipv4_cidr_block"] = envCfg.CloudSqlIpv4CidrBlock - transformed["web_server_ipv4_cidr_block"] = envCfg.WebServerIpv4CidrBlock - - return []interface{}{transformed} -} - -func flattenComposerEnvironmentConfigNodeConfig(nodeCfg *resource_composer_environment_composer.NodeConfig) interface{} { - if nodeCfg == nil { - return nil - } - transformed := make(map[string]interface{}) - transformed["zone"] = nodeCfg.Location - transformed["machine_type"] = nodeCfg.MachineType - transformed["network"] = nodeCfg.Network - transformed["subnetwork"] = nodeCfg.Subnetwork - transformed["disk_size_gb"] = 
nodeCfg.DiskSizeGb - transformed["service_account"] = nodeCfg.ServiceAccount - transformed["oauth_scopes"] = flattenComposerEnvironmentConfigNodeConfigOauthScopes(nodeCfg.OauthScopes) - transformed["tags"] = flattenComposerEnvironmentConfigNodeConfigTags(nodeCfg.Tags) - transformed["ip_allocation_policy"] = flattenComposerEnvironmentConfigNodeConfigIPAllocationPolicy(nodeCfg.IpAllocationPolicy) - return []interface{}{transformed} -} - -func flattenComposerEnvironmentConfigNodeConfigIPAllocationPolicy(ipPolicy *resource_composer_environment_composer.IPAllocationPolicy) interface{} { - if ipPolicy == nil { - return nil - } - transformed := make(map[string]interface{}) - transformed["use_ip_aliases"] = ipPolicy.UseIpAliases - transformed["cluster_ipv4_cidr_block"] = ipPolicy.ClusterIpv4CidrBlock - transformed["cluster_secondary_range_name"] = ipPolicy.ClusterSecondaryRangeName - transformed["services_ipv4_cidr_block"] = ipPolicy.ServicesIpv4CidrBlock - transformed["services_secondary_range_name"] = ipPolicy.ServicesSecondaryRangeName - - return []interface{}{transformed} -} - -func flattenComposerEnvironmentConfigNodeConfigOauthScopes(v interface{}) interface{} { - if v == nil { - return v - } - return resource_composer_environment_schema.NewSet(resource_composer_environment_schema.HashString, convertStringArrToInterface(v.([]string))) -} - -func flattenComposerEnvironmentConfigNodeConfigTags(v interface{}) interface{} { - if v == nil { - return v - } - return resource_composer_environment_schema.NewSet(resource_composer_environment_schema.HashString, convertStringArrToInterface(v.([]string))) -} - -func flattenComposerEnvironmentConfigSoftwareConfig(softwareCfg *resource_composer_environment_composer.SoftwareConfig) interface{} { - if softwareCfg == nil { - return nil - } - transformed := make(map[string]interface{}) - transformed["image_version"] = softwareCfg.ImageVersion - transformed["python_version"] = softwareCfg.PythonVersion - 
transformed["airflow_config_overrides"] = softwareCfg.AirflowConfigOverrides - transformed["pypi_packages"] = softwareCfg.PypiPackages - transformed["env_variables"] = softwareCfg.EnvVariables - transformed["scheduler_count"] = softwareCfg.SchedulerCount - return []interface{}{transformed} -} - -func expandComposerEnvironmentConfig(v interface{}, d *resource_composer_environment_schema.ResourceData, config *Config) (*resource_composer_environment_composer.EnvironmentConfig, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - original := l[0].(map[string]interface{}) - transformed := &resource_composer_environment_composer.EnvironmentConfig{} - - if nodeCountRaw, ok := original["node_count"]; ok { - transformedNodeCount, err := expandComposerEnvironmentConfigNodeCount(nodeCountRaw, d, config) - if err != nil { - return nil, err - } - transformed.NodeCount = transformedNodeCount - } - - transformedNodeConfig, err := expandComposerEnvironmentConfigNodeConfig(original["node_config"], d, config) - if err != nil { - return nil, err - } - transformed.NodeConfig = transformedNodeConfig - - transformedSoftwareConfig, err := expandComposerEnvironmentConfigSoftwareConfig(original["software_config"], d, config) - if err != nil { - return nil, err - } - transformed.SoftwareConfig = transformedSoftwareConfig - - transformedPrivateEnvironmentConfig, err := expandComposerEnvironmentConfigPrivateEnvironmentConfig(original["private_environment_config"], d, config) - if err != nil { - return nil, err - } - transformed.PrivateEnvironmentConfig = transformedPrivateEnvironmentConfig - - return transformed, nil -} - -func expandComposerEnvironmentConfigNodeCount(v interface{}, d *resource_composer_environment_schema.ResourceData, config *Config) (int64, error) { - if v == nil { - return 0, nil - } - return int64(v.(int)), nil -} - -func expandComposerEnvironmentConfigPrivateEnvironmentConfig(v interface{}, d *resource_composer_environment_schema.ResourceData, 
config *Config) (*resource_composer_environment_composer.PrivateEnvironmentConfig, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := &resource_composer_environment_composer.PrivateEnvironmentConfig{ - EnablePrivateEnvironment: true, - } - - subBlock := &resource_composer_environment_composer.PrivateClusterConfig{} - - if v, ok := original["enable_private_endpoint"]; ok { - subBlock.EnablePrivateEndpoint = v.(bool) - } - - if v, ok := original["master_ipv4_cidr_block"]; ok { - subBlock.MasterIpv4CidrBlock = v.(string) - } - - if v, ok := original["cloud_sql_ipv4_cidr_block"]; ok { - transformed.CloudSqlIpv4CidrBlock = v.(string) - } - - if v, ok := original["web_server_ipv4_cidr_block"]; ok { - transformed.WebServerIpv4CidrBlock = v.(string) - } - - transformed.PrivateClusterConfig = subBlock - - return transformed, nil -} - -func expandComposerEnvironmentConfigNodeConfig(v interface{}, d *resource_composer_environment_schema.ResourceData, config *Config) (*resource_composer_environment_composer.NodeConfig, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := &resource_composer_environment_composer.NodeConfig{} - - if transformedDiskSizeGb, ok := original["disk_size_gb"]; ok { - transformed.DiskSizeGb = int64(transformedDiskSizeGb.(int)) - } - - if v, ok := original["service_account"]; ok { - transformedServiceAccount, err := expandComposerEnvironmentServiceAccount(v, d, config) - if err != nil { - return nil, err - } - transformed.ServiceAccount = transformedServiceAccount - } - - var nodeConfigZone string - if v, ok := original["zone"]; ok { - transformedZone, err := expandComposerEnvironmentZone(v, d, config) - if err != nil { - return nil, err - } - transformed.Location = transformedZone - nodeConfigZone = transformedZone - } - - if v, ok := original["machine_type"]; ok 
{ - transformedMachineType, err := expandComposerEnvironmentMachineType(v, d, config, nodeConfigZone) - if err != nil { - return nil, err - } - transformed.MachineType = transformedMachineType - } - - if v, ok := original["network"]; ok { - transformedNetwork, err := expandComposerEnvironmentNetwork(v, d, config) - if err != nil { - return nil, err - } - transformed.Network = transformedNetwork - } - - if v, ok := original["subnetwork"]; ok { - transformedSubnetwork, err := expandComposerEnvironmentSubnetwork(v, d, config) - if err != nil { - return nil, err - } - transformed.Subnetwork = transformedSubnetwork - } - transformedIPAllocationPolicy, err := expandComposerEnvironmentIPAllocationPolicy(original["ip_allocation_policy"], d, config) - if err != nil { - return nil, err - } - transformed.IpAllocationPolicy = transformedIPAllocationPolicy - - transformedOauthScopes, err := expandComposerEnvironmentSetList(original["oauth_scopes"], d, config) - if err != nil { - return nil, err - } - transformed.OauthScopes = transformedOauthScopes - - transformedTags, err := expandComposerEnvironmentSetList(original["tags"], d, config) - if err != nil { - return nil, err - } - transformed.Tags = transformedTags - - return transformed, nil -} - -func expandComposerEnvironmentIPAllocationPolicy(v interface{}, d *resource_composer_environment_schema.ResourceData, config *Config) (*resource_composer_environment_composer.IPAllocationPolicy, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := &resource_composer_environment_composer.IPAllocationPolicy{} - - if v, ok := original["use_ip_aliases"]; ok { - transformed.UseIpAliases = v.(bool) - } - - if v, ok := original["cluster_ipv4_cidr_block"]; ok { - transformed.ClusterIpv4CidrBlock = v.(string) - } - - if v, ok := original["cluster_secondary_range_name"]; ok { - transformed.ClusterSecondaryRangeName = v.(string) - } - - if v, ok := 
original["services_ipv4_cidr_block"]; ok { - transformed.ServicesIpv4CidrBlock = v.(string) - } - - if v, ok := original["services_secondary_range_name"]; ok { - transformed.ServicesSecondaryRangeName = v.(string) - } - return transformed, nil - -} - -func expandComposerEnvironmentServiceAccount(v interface{}, d *resource_composer_environment_schema.ResourceData, config *Config) (string, error) { - serviceAccount := v.(string) - if len(serviceAccount) == 0 { - return "", nil - } - - return GetResourceNameFromSelfLink(serviceAccount), nil -} - -func expandComposerEnvironmentZone(v interface{}, d *resource_composer_environment_schema.ResourceData, config *Config) (string, error) { - zone := v.(string) - if len(zone) == 0 { - return zone, nil - } - if !resource_composer_environment_strings.Contains(zone, "/") { - project, err := getProject(d, config) - if err != nil { - return "", err - } - return resource_composer_environment_fmt.Sprintf("projects/%s/zones/%s", project, zone), nil - } - - return getRelativePath(zone) -} - -func expandComposerEnvironmentMachineType(v interface{}, d *resource_composer_environment_schema.ResourceData, config *Config, nodeCfgZone string) (string, error) { - machineType := v.(string) - requiredZone := GetResourceNameFromSelfLink(nodeCfgZone) - - fv, err := ParseMachineTypesFieldValue(v.(string), d, config) - if err != nil { - - project, err := getProject(d, config) - if err != nil { - return "", err - } - - fv = &ZonalFieldValue{ - Project: project, - Zone: requiredZone, - Name: GetResourceNameFromSelfLink(machineType), - resourceType: "machineTypes", - } - } - - if requiredZone != "" && fv.Zone != requiredZone { - return "", resource_composer_environment_fmt.Errorf("node_config machine_type %q must be in node_config zone %q", machineType, requiredZone) - } - return fv.RelativeLink(), nil -} - -func expandComposerEnvironmentNetwork(v interface{}, d *resource_composer_environment_schema.ResourceData, config *Config) (string, error) { - fv, 
err := ParseNetworkFieldValue(v.(string), d, config) - if err != nil { - return "", err - } - return fv.RelativeLink(), nil -} - -func expandComposerEnvironmentSubnetwork(v interface{}, d *resource_composer_environment_schema.ResourceData, config *Config) (string, error) { - fv, err := ParseSubnetworkFieldValue(v.(string), d, config) - if err != nil { - return "", err - } - return fv.RelativeLink(), nil -} - -func expandComposerEnvironmentSetList(v interface{}, d *resource_composer_environment_schema.ResourceData, config *Config) ([]string, error) { - if v == nil { - return nil, nil - } - return convertStringArr(v.(*resource_composer_environment_schema.Set).List()), nil -} - -func expandComposerEnvironmentConfigSoftwareConfig(v interface{}, d *resource_composer_environment_schema.ResourceData, config *Config) (*resource_composer_environment_composer.SoftwareConfig, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := &resource_composer_environment_composer.SoftwareConfig{} - - transformed.ImageVersion = original["image_version"].(string) - transformed.PythonVersion = original["python_version"].(string) - transformed.AirflowConfigOverrides = expandComposerEnvironmentConfigSoftwareConfigStringMap(original, "airflow_config_overrides") - transformed.PypiPackages = expandComposerEnvironmentConfigSoftwareConfigStringMap(original, "pypi_packages") - transformed.EnvVariables = expandComposerEnvironmentConfigSoftwareConfigStringMap(original, "env_variables") - transformed.SchedulerCount = int64(original["scheduler_count"].(int)) - return transformed, nil -} - -func expandComposerEnvironmentConfigSoftwareConfigStringMap(softwareConfig map[string]interface{}, k string) map[string]string { - v, ok := softwareConfig[k] - if ok && v != nil { - return convertStringMap(v.(map[string]interface{})) - } - return map[string]string{} -} - -func validateComposerEnvironmentPypiPackages(v 
interface{}, k string) (ws []string, errors []error) { - if v == nil { - return ws, errors - } - for pkgName := range v.(map[string]interface{}) { - if pkgName != resource_composer_environment_strings.ToLower(pkgName) { - errors = append(errors, - resource_composer_environment_fmt.Errorf("PYPI package %q can only contain lowercase characters", pkgName)) - } - } - - return ws, errors -} - -func validateComposerEnvironmentEnvVariables(v interface{}, k string) (ws []string, errors []error) { - if v == nil { - return ws, errors - } - - reEnvVarName := resource_composer_environment_regexp.MustCompile(composerEnvironmentEnvVariablesRegexp) - reAirflowReserved := resource_composer_environment_regexp.MustCompile(composerEnvironmentReservedAirflowEnvVarRegexp) - - for envVarName := range v.(map[string]interface{}) { - if !reEnvVarName.MatchString(envVarName) { - errors = append(errors, - resource_composer_environment_fmt.Errorf("env_variable %q must match regexp %q", envVarName, composerEnvironmentEnvVariablesRegexp)) - } else if _, ok := composerEnvironmentReservedEnvVar[envVarName]; ok { - errors = append(errors, - resource_composer_environment_fmt.Errorf("env_variable %q is a reserved name and cannot be used", envVarName)) - } else if reAirflowReserved.MatchString(envVarName) { - errors = append(errors, - resource_composer_environment_fmt.Errorf("env_variable %q cannot match reserved Airflow variable names with regexp %q", - envVarName, composerEnvironmentReservedAirflowEnvVarRegexp)) - } - } - - return ws, errors -} - -func handleComposerEnvironmentCreationOpFailure(id string, envName *composerEnvironmentName, d *resource_composer_environment_schema.ResourceData, config *Config) error { - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - resource_composer_environment_log.Printf("[WARNING] Creation operation for Composer Environment %q failed, check Environment isn't still running", id) - - env, err := 
config.NewComposerClient(userAgent).Projects.Locations.Environments.Get(envName.resourceName()).Do() - if err != nil { - - return handleNotFoundError(err, d, resource_composer_environment_fmt.Sprintf("Composer Environment %q", envName.resourceName())) - } - - if env.State == "CREATING" { - return resource_composer_environment_fmt.Errorf( - "Getting creation operation state failed while waiting for environment to finish creating, "+ - "but environment seems to still be in 'CREATING' state. Wait for operation to finish and either "+ - "manually delete environment or import %q into your state", id) - } - - resource_composer_environment_log.Printf("[WARNING] Environment %q from failed creation operation was created, deleting.", id) - op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Delete(envName.resourceName()).Do() - if err != nil { - return resource_composer_environment_fmt.Errorf("Could not delete the invalid created environment with state %q: %s", env.State, err) - } - - waitErr := composerOperationWaitTime( - config, op, envName.Project, - resource_composer_environment_fmt.Sprintf("Deleting invalid created Environment with state %q", env.State), userAgent, - d.Timeout(resource_composer_environment_schema.TimeoutCreate)) - if waitErr != nil { - return resource_composer_environment_fmt.Errorf("Error waiting to delete invalid Environment with state %q: %s", env.State, waitErr) - } - - return nil -} - -func getComposerEnvironmentPostCreateUpdateObj(env *resource_composer_environment_composer.Environment) (updateEnv *resource_composer_environment_composer.Environment) { - - if env != nil && env.Config != nil && env.Config.SoftwareConfig != nil { - if len(env.Config.SoftwareConfig.PypiPackages) > 0 { - updateEnv = &resource_composer_environment_composer.Environment{ - Config: &resource_composer_environment_composer.EnvironmentConfig{ - SoftwareConfig: &resource_composer_environment_composer.SoftwareConfig{ - PypiPackages: 
env.Config.SoftwareConfig.PypiPackages, - }, - }, - } - - env.Config.SoftwareConfig.PypiPackages = make(map[string]string) - } - } - - return updateEnv -} - -func resourceComposerEnvironmentName(d *resource_composer_environment_schema.ResourceData, config *Config) (*composerEnvironmentName, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - region, err := getRegion(d, config) - if err != nil { - return nil, err - } - - return &composerEnvironmentName{ - Project: project, - Region: region, - Environment: d.Get("name").(string), - }, nil -} - -type composerEnvironmentName struct { - Project string - Region string - Environment string -} - -func (n *composerEnvironmentName) resourceName() string { - return resource_composer_environment_fmt.Sprintf("projects/%s/locations/%s/environments/%s", n.Project, n.Region, n.Environment) -} - -func (n *composerEnvironmentName) parentName() string { - return resource_composer_environment_fmt.Sprintf("projects/%s/locations/%s", n.Project, n.Region) -} - -func compareServiceAccountEmailToLink(_, old, new string, _ *resource_composer_environment_schema.ResourceData) bool { - - if !resource_composer_environment_strings.HasPrefix("projects/", old) { - return old == GetResourceNameFromSelfLink(new) - } - return compareSelfLinkRelativePaths("", old, new, nil) -} - -func validateServiceAccountRelativeNameOrEmail(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - serviceAccountRe := "(" + resource_composer_environment_strings.Join(PossibleServiceAccountNames, "|") + ")" - if resource_composer_environment_strings.HasPrefix(value, "projects/") { - serviceAccountRe = resource_composer_environment_fmt.Sprintf("projects/(.+)/serviceAccounts/%s", serviceAccountRe) - } - r := resource_composer_environment_regexp.MustCompile(serviceAccountRe) - if !r.MatchString(value) { - errors = append(errors, resource_composer_environment_fmt.Errorf( - "%q (%q) doesn't match regexp %q", 
k, value, serviceAccountRe)) - } - - return -} - -func composerImageVersionDiffSuppress(_, old, new string, _ *resource_composer_environment_schema.ResourceData) bool { - versionRe := resource_composer_environment_regexp.MustCompile(composerEnvironmentVersionRegexp) - oldVersions := versionRe.FindStringSubmatch(old) - newVersions := versionRe.FindStringSubmatch(new) - if oldVersions == nil || len(oldVersions) < 4 { - - if old != "" { - resource_composer_environment_log.Printf("[WARN] Composer version didn't match regexp: %s", old) - } - return old == new - } - if newVersions == nil || len(newVersions) < 3 { - - if new != "" { - resource_composer_environment_log.Printf("[WARN] Composer version didn't match regexp: %s", new) - } - return old == new - } - - eq, err := versionsEqual(oldVersions[3], newVersions[3]) - if err != nil { - resource_composer_environment_log.Printf("[WARN] Could not parse airflow version, %s", err) - } - if !eq { - return false - } - - if oldVersions[1] == "latest" || newVersions[1] == "latest" { - return true - } - - eq, err = versionsEqual(oldVersions[1], newVersions[1]) - if err != nil { - resource_composer_environment_log.Printf("[WARN] Could not parse composer version, %s", err) - } - return eq -} - -func versionsEqual(old, new string) (bool, error) { - o, err := resource_composer_environment_version.NewVersion(old) - if err != nil { - return false, err - } - n, err := resource_composer_environment_version.NewVersion(new) - if err != nil { - return false, err - } - return o.Equal(n), nil -} - -func resourceComputeAddress() *resource_compute_address_schema.Resource { - return &resource_compute_address_schema.Resource{ - Create: resourceComputeAddressCreate, - Read: resourceComputeAddressRead, - Delete: resourceComputeAddressDelete, - - Importer: &resource_compute_address_schema.ResourceImporter{ - State: resourceComputeAddressImport, - }, - - Timeouts: &resource_compute_address_schema.ResourceTimeout{ - Create: 
resource_compute_address_schema.DefaultTimeout(4 * resource_compute_address_time.Minute), - Delete: resource_compute_address_schema.DefaultTimeout(4 * resource_compute_address_time.Minute), - }, - - Schema: map[string]*resource_compute_address_schema.Schema{ - "name": { - Type: resource_compute_address_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`), - Description: `Name of the resource. The name must be 1-63 characters long, and -comply with RFC1035. Specifically, the name must be 1-63 characters -long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' -which means the first character must be a lowercase letter, and all -following characters must be a dash, lowercase letter, or digit, -except the last character, which cannot be a dash.`, - }, - "address": { - Type: resource_compute_address_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The static external IP address represented by this resource. Only -IPv4 is supported. An address may only be specified for INTERNAL -address types. The IP address must be inside the specified subnetwork, -if any.`, - }, - "address_type": { - Type: resource_compute_address_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_address_validation.StringInSlice([]string{"INTERNAL", "EXTERNAL", ""}, false), - Description: `The type of address to reserve. Default value: "EXTERNAL" Possible values: ["INTERNAL", "EXTERNAL"]`, - Default: "EXTERNAL", - }, - "description": { - Type: resource_compute_address_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "network": { - Type: resource_compute_address_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of the network in which to reserve the address. 
This field -can only be used with INTERNAL type with the VPC_PEERING and -IPSEC_INTERCONNECT purposes.`, - }, - "network_tier": { - Type: resource_compute_address_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_address_validation.StringInSlice([]string{"PREMIUM", "STANDARD", ""}, false), - Description: `The networking tier used for configuring this address. If this field is not -specified, it is assumed to be PREMIUM. Possible values: ["PREMIUM", "STANDARD"]`, - }, - "prefix_length": { - Type: resource_compute_address_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The prefix length if the resource represents an IP range.`, - }, - "purpose": { - Type: resource_compute_address_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The purpose of this resource, which can be one of the following values: - -* GCE_ENDPOINT for addresses that are used by VM instances, alias IP - ranges, internal load balancers, and similar resources. - -* SHARED_LOADBALANCER_VIP for an address that can be used by multiple - internal load balancers. - -* VPC_PEERING for addresses that are reserved for VPC peer networks. - -* IPSEC_INTERCONNECT for addresses created from a private IP range - that are reserved for a VLAN attachment in an IPsec-encrypted Cloud - Interconnect configuration. These addresses are regional resources. - -* PRIVATE_SERVICE_CONNECT for a private network address that is used -to configure Private Service Connect. Only global internal addresses -can use this purpose. - -This should only be set when using an Internal address.`, - }, - "region": { - Type: resource_compute_address_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Region in which the created address should reside. 
-If it is not provided, the provider region is used.`, - }, - "subnetwork": { - Type: resource_compute_address_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of the subnetwork in which to reserve the address. If an IP -address is specified, it must be within the subnetwork's IP range. -This field can only be used with INTERNAL type with -GCE_ENDPOINT/DNS_RESOLVER purposes.`, - }, - "creation_timestamp": { - Type: resource_compute_address_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "users": { - Type: resource_compute_address_schema.TypeList, - Computed: true, - Description: `The URLs of the resources that are using this address.`, - Elem: &resource_compute_address_schema.Schema{ - Type: resource_compute_address_schema.TypeString, - }, - }, - "project": { - Type: resource_compute_address_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_address_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeAddressCreate(d *resource_compute_address_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - addressProp, err := expandComputeAddressAddress(d.Get("address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("address"); !isEmptyValue(resource_compute_address_reflect.ValueOf(addressProp)) && (ok || !resource_compute_address_reflect.DeepEqual(v, addressProp)) { - obj["address"] = addressProp - } - addressTypeProp, err := expandComputeAddressAddressType(d.Get("address_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("address_type"); 
!isEmptyValue(resource_compute_address_reflect.ValueOf(addressTypeProp)) && (ok || !resource_compute_address_reflect.DeepEqual(v, addressTypeProp)) { - obj["addressType"] = addressTypeProp - } - descriptionProp, err := expandComputeAddressDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_address_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_address_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeAddressName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_address_reflect.ValueOf(nameProp)) && (ok || !resource_compute_address_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - purposeProp, err := expandComputeAddressPurpose(d.Get("purpose"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("purpose"); !isEmptyValue(resource_compute_address_reflect.ValueOf(purposeProp)) && (ok || !resource_compute_address_reflect.DeepEqual(v, purposeProp)) { - obj["purpose"] = purposeProp - } - networkTierProp, err := expandComputeAddressNetworkTier(d.Get("network_tier"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network_tier"); !isEmptyValue(resource_compute_address_reflect.ValueOf(networkTierProp)) && (ok || !resource_compute_address_reflect.DeepEqual(v, networkTierProp)) { - obj["networkTier"] = networkTierProp - } - subnetworkProp, err := expandComputeAddressSubnetwork(d.Get("subnetwork"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("subnetwork"); !isEmptyValue(resource_compute_address_reflect.ValueOf(subnetworkProp)) && (ok || !resource_compute_address_reflect.DeepEqual(v, subnetworkProp)) { - obj["subnetwork"] = subnetworkProp - } - networkProp, err := expandComputeAddressNetwork(d.Get("network"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_compute_address_reflect.ValueOf(networkProp)) && (ok || !resource_compute_address_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - prefixLengthProp, err := expandComputeAddressPrefixLength(d.Get("prefix_length"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("prefix_length"); !isEmptyValue(resource_compute_address_reflect.ValueOf(prefixLengthProp)) && (ok || !resource_compute_address_reflect.DeepEqual(v, prefixLengthProp)) { - obj["prefixLength"] = prefixLengthProp - } - regionProp, err := expandComputeAddressRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_address_reflect.ValueOf(regionProp)) && (ok || !resource_compute_address_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/addresses") - if err != nil { - return err - } - - resource_compute_address_log.Printf("[DEBUG] Creating new Address: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_address_fmt.Errorf("Error fetching project for Address: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_address_schema.TimeoutCreate)) - if err != nil { - return resource_compute_address_fmt.Errorf("Error creating Address: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/addresses/{{name}}") - if err != nil { - return resource_compute_address_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, 
res, project, "Creating Address", userAgent, - d.Timeout(resource_compute_address_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_address_fmt.Errorf("Error waiting to create Address: %s", err) - } - - resource_compute_address_log.Printf("[DEBUG] Finished creating Address %q: %#v", d.Id(), res) - - return resourceComputeAddressRead(d, meta) -} - -func resourceComputeAddressRead(d *resource_compute_address_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/addresses/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_address_fmt.Errorf("Error fetching project for Address: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_address_fmt.Sprintf("ComputeAddress %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_address_fmt.Errorf("Error reading Address: %s", err) - } - - if err := d.Set("address", flattenComputeAddressAddress(res["address"], d, config)); err != nil { - return resource_compute_address_fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("address_type", flattenComputeAddressAddressType(res["addressType"], d, config)); err != nil { - return resource_compute_address_fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeAddressCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_address_fmt.Errorf("Error reading Address: %s", 
err) - } - if err := d.Set("description", flattenComputeAddressDescription(res["description"], d, config)); err != nil { - return resource_compute_address_fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("name", flattenComputeAddressName(res["name"], d, config)); err != nil { - return resource_compute_address_fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("purpose", flattenComputeAddressPurpose(res["purpose"], d, config)); err != nil { - return resource_compute_address_fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("network_tier", flattenComputeAddressNetworkTier(res["networkTier"], d, config)); err != nil { - return resource_compute_address_fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("subnetwork", flattenComputeAddressSubnetwork(res["subnetwork"], d, config)); err != nil { - return resource_compute_address_fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("users", flattenComputeAddressUsers(res["users"], d, config)); err != nil { - return resource_compute_address_fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("network", flattenComputeAddressNetwork(res["network"], d, config)); err != nil { - return resource_compute_address_fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("prefix_length", flattenComputeAddressPrefixLength(res["prefixLength"], d, config)); err != nil { - return resource_compute_address_fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("region", flattenComputeAddressRegion(res["region"], d, config)); err != nil { - return resource_compute_address_fmt.Errorf("Error reading Address: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_address_fmt.Errorf("Error reading Address: %s", err) - } - - return nil -} - -func resourceComputeAddressDelete(d *resource_compute_address_schema.ResourceData, meta interface{}) error { - 
config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_address_fmt.Errorf("Error fetching project for Address: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/addresses/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_address_log.Printf("[DEBUG] Deleting Address %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_address_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Address") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting Address", userAgent, - d.Timeout(resource_compute_address_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_address_log.Printf("[DEBUG] Finished deleting Address %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeAddressImport(d *resource_compute_address_schema.ResourceData, meta interface{}) ([]*resource_compute_address_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/addresses/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/addresses/{{name}}") - if err != nil { - return nil, resource_compute_address_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_address_schema.ResourceData{d}, nil -} - -func flattenComputeAddressAddress(v interface{}, d 
*resource_compute_address_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAddressAddressType(v interface{}, d *resource_compute_address_schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(resource_compute_address_reflect.ValueOf(v)) { - return "EXTERNAL" - } - - return v -} - -func flattenComputeAddressCreationTimestamp(v interface{}, d *resource_compute_address_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAddressDescription(v interface{}, d *resource_compute_address_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAddressName(v interface{}, d *resource_compute_address_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAddressPurpose(v interface{}, d *resource_compute_address_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAddressNetworkTier(v interface{}, d *resource_compute_address_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAddressSubnetwork(v interface{}, d *resource_compute_address_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeAddressUsers(v interface{}, d *resource_compute_address_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAddressNetwork(v interface{}, d *resource_compute_address_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeAddressPrefixLength(v interface{}, d *resource_compute_address_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_address_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - 
return intVal - } - - return v -} - -func flattenComputeAddressRegion(v interface{}, d *resource_compute_address_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeAddressAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAddressAddressType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAddressDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAddressName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAddressPurpose(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAddressNetworkTier(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAddressSubnetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_address_fmt.Errorf("Invalid value for subnetwork: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeAddressNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_address_fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeAddressPrefixLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAddressRegion(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_address_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeAttachedDisk() *resource_compute_attached_disk_schema.Resource { - return &resource_compute_attached_disk_schema.Resource{ - Create: resourceAttachedDiskCreate, - Read: resourceAttachedDiskRead, - Delete: resourceAttachedDiskDelete, - - Importer: &resource_compute_attached_disk_schema.ResourceImporter{ - State: resourceAttachedDiskImport, - }, - - Timeouts: &resource_compute_attached_disk_schema.ResourceTimeout{ - Create: resource_compute_attached_disk_schema.DefaultTimeout(300 * resource_compute_attached_disk_time.Second), - Delete: resource_compute_attached_disk_schema.DefaultTimeout(300 * resource_compute_attached_disk_time.Second), - }, - - Schema: map[string]*resource_compute_attached_disk_schema.Schema{ - "disk": { - Type: resource_compute_attached_disk_schema.TypeString, - Required: true, - ForceNew: true, - Description: `name or self_link of the disk that will be attached.`, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - "instance": { - Type: resource_compute_attached_disk_schema.TypeString, - Required: true, - ForceNew: true, - Description: `name or self_link of the compute instance that the disk will be attached to. If the self_link is provided then zone and project are extracted from the self link. If only the name is used then zone and project must be defined as properties on the resource or provider.`, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - "project": { - Type: resource_compute_attached_disk_schema.TypeString, - ForceNew: true, - Computed: true, - Optional: true, - Description: `The project that the referenced compute instance is a part of. 
If instance is referenced by its self_link the project defined in the link will take precedence.`, - }, - "zone": { - Type: resource_compute_attached_disk_schema.TypeString, - ForceNew: true, - Computed: true, - Optional: true, - Description: `The zone that the referenced compute instance is located within. If instance is referenced by its self_link the zone defined in the link will take precedence.`, - }, - "device_name": { - Type: resource_compute_attached_disk_schema.TypeString, - ForceNew: true, - Optional: true, - Computed: true, - Description: `Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disks-x, where x is a number assigned by Google Compute Engine.`, - }, - "mode": { - Type: resource_compute_attached_disk_schema.TypeString, - ForceNew: true, - Optional: true, - Default: "READ_WRITE", - Description: `The mode in which to attach this disk, either READ_WRITE or READ_ONLY. 
If not specified, the default is to attach the disk in READ_WRITE mode.`, - ValidateFunc: resource_compute_attached_disk_validation.StringInSlice([]string{"READ_ONLY", "READ_WRITE"}, false), - }, - }, - UseJSONNumber: true, - } -} - -func resourceAttachedDiskCreate(d *resource_compute_attached_disk_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - zv, err := parseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) - if err != nil { - return err - } - - disk := d.Get("disk").(string) - diskName := GetResourceNameFromSelfLink(disk) - diskSrc := resource_compute_attached_disk_fmt.Sprintf("projects/%s/zones/%s/disks/%s", zv.Project, zv.Zone, diskName) - - if resource_compute_attached_disk_strings.Contains(disk, "regions") { - rv, err := ParseRegionDiskFieldValue(disk, d, config) - if err != nil { - return err - } - diskSrc = rv.RelativeLink() - } - - attachedDisk := resource_compute_attached_disk_compute.AttachedDisk{ - Source: diskSrc, - Mode: d.Get("mode").(string), - DeviceName: d.Get("device_name").(string), - } - - op, err := config.NewComputeClient(userAgent).Instances.AttachDisk(zv.Project, zv.Zone, zv.Name, &attachedDisk).Do() - if err != nil { - return err - } - - d.SetId(resource_compute_attached_disk_fmt.Sprintf("projects/%s/zones/%s/instances/%s/%s", zv.Project, zv.Zone, zv.Name, diskName)) - - waitErr := computeOperationWaitTime(config, op, zv.Project, - "disk to attach", userAgent, d.Timeout(resource_compute_attached_disk_schema.TimeoutCreate)) - if waitErr != nil { - d.SetId("") - return waitErr - } - - return resourceAttachedDiskRead(d, meta) -} - -func resourceAttachedDiskRead(d *resource_compute_attached_disk_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - 
zv, err := parseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) - if err != nil { - return err - } - if err := d.Set("project", zv.Project); err != nil { - return resource_compute_attached_disk_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("zone", zv.Zone); err != nil { - return resource_compute_attached_disk_fmt.Errorf("Error setting zone: %s", err) - } - - diskName := GetResourceNameFromSelfLink(d.Get("disk").(string)) - - instance, err := config.NewComputeClient(userAgent).Instances.Get(zv.Project, zv.Zone, zv.Name).Do() - if err != nil { - return handleNotFoundError(err, d, resource_compute_attached_disk_fmt.Sprintf("AttachedDisk %q", d.Id())) - } - - ad := findDiskByName(instance.Disks, diskName) - if ad == nil { - resource_compute_attached_disk_log.Printf("[WARN] Referenced disk wasn't found attached to this compute instance. Removing from state.") - d.SetId("") - return nil - } - - if err := d.Set("device_name", ad.DeviceName); err != nil { - return resource_compute_attached_disk_fmt.Errorf("Error setting device_name: %s", err) - } - if err := d.Set("mode", ad.Mode); err != nil { - return resource_compute_attached_disk_fmt.Errorf("Error setting mode: %s", err) - } - - instancePath, err := getRelativePath(instance.SelfLink) - if err != nil { - return err - } - if err := d.Set("instance", instancePath); err != nil { - return resource_compute_attached_disk_fmt.Errorf("Error setting instance: %s", err) - } - diskPath, err := getRelativePath(ad.Source) - if err != nil { - return err - } - if err := d.Set("disk", diskPath); err != nil { - return resource_compute_attached_disk_fmt.Errorf("Error setting disk: %s", err) - } - - return nil -} - -func resourceAttachedDiskDelete(d *resource_compute_attached_disk_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - zv, err := 
parseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) - if err != nil { - return err - } - - diskName := GetResourceNameFromSelfLink(d.Get("disk").(string)) - - instance, err := config.NewComputeClient(userAgent).Instances.Get(zv.Project, zv.Zone, zv.Name).Do() - if err != nil { - return err - } - - ad := findDiskByName(instance.Disks, diskName) - if ad == nil { - return nil - } - - op, err := config.NewComputeClient(userAgent).Instances.DetachDisk(zv.Project, zv.Zone, zv.Name, ad.DeviceName).Do() - if err != nil { - return err - } - - waitErr := computeOperationWaitTime(config, op, zv.Project, - resource_compute_attached_disk_fmt.Sprintf("Detaching disk from %s", zv.Name), userAgent, d.Timeout(resource_compute_attached_disk_schema.TimeoutDelete)) - if waitErr != nil { - return waitErr - } - - return nil -} - -func resourceAttachedDiskImport(d *resource_compute_attached_disk_schema.ResourceData, meta interface{}) ([]*resource_compute_attached_disk_schema.ResourceData, error) { - config := meta.(*Config) - - err := parseImportId( - []string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)"}, d, config) - if err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instances/{{instance}}/{{disk}}") - if err != nil { - return nil, err - } - d.SetId(id) - - return []*resource_compute_attached_disk_schema.ResourceData{d}, nil -} - -func findDiskByName(disks []*resource_compute_attached_disk_compute.AttachedDisk, id string) *resource_compute_attached_disk_compute.AttachedDisk { - for _, disk := range disks { - if compareSelfLinkOrResourceName("", disk.Source, id, nil) { - return disk - } - } - - return nil -} - -func resourceComputeAutoscaler() *resource_compute_autoscaler_schema.Resource { - return &resource_compute_autoscaler_schema.Resource{ - Create: resourceComputeAutoscalerCreate, - Read: 
resourceComputeAutoscalerRead, - Update: resourceComputeAutoscalerUpdate, - Delete: resourceComputeAutoscalerDelete, - - Importer: &resource_compute_autoscaler_schema.ResourceImporter{ - State: resourceComputeAutoscalerImport, - }, - - Timeouts: &resource_compute_autoscaler_schema.ResourceTimeout{ - Create: resource_compute_autoscaler_schema.DefaultTimeout(4 * resource_compute_autoscaler_time.Minute), - Update: resource_compute_autoscaler_schema.DefaultTimeout(4 * resource_compute_autoscaler_time.Minute), - Delete: resource_compute_autoscaler_schema.DefaultTimeout(4 * resource_compute_autoscaler_time.Minute), - }, - - Schema: map[string]*resource_compute_autoscaler_schema.Schema{ - "autoscaling_policy": { - Type: resource_compute_autoscaler_schema.TypeList, - Required: true, - Description: `The configuration parameters for the autoscaling algorithm. You can -define one or more of the policies for an autoscaler: cpuUtilization, -customMetricUtilizations, and loadBalancingUtilization. - -If none of these are specified, the default will be to autoscale based -on cpuUtilization to 0.6 or 60%.`, - MaxItems: 1, - Elem: &resource_compute_autoscaler_schema.Resource{ - Schema: map[string]*resource_compute_autoscaler_schema.Schema{ - "max_replicas": { - Type: resource_compute_autoscaler_schema.TypeInt, - Required: true, - Description: `The maximum number of instances that the autoscaler can scale up -to. This is required when creating or updating an autoscaler. The -maximum number of replicas should not be lower than minimal number -of replicas.`, - }, - "min_replicas": { - Type: resource_compute_autoscaler_schema.TypeInt, - Required: true, - Description: `The minimum number of replicas that the autoscaler can scale down -to. This cannot be less than 0. 
If not provided, autoscaler will -choose a default value depending on maximum number of instances -allowed.`, - }, - "cooldown_period": { - Type: resource_compute_autoscaler_schema.TypeInt, - Optional: true, - Description: `The number of seconds that the autoscaler should wait before it -starts collecting information from a new instance. This prevents -the autoscaler from collecting information when the instance is -initializing, during which the collected usage would not be -reliable. The default time autoscaler waits is 60 seconds. - -Virtual machine initialization times might vary because of -numerous factors. We recommend that you test how long an -instance may take to initialize. To do this, create an instance -and time the startup process.`, - Default: 60, - }, - "cpu_utilization": { - Type: resource_compute_autoscaler_schema.TypeList, - Computed: true, - Optional: true, - Description: `Defines the CPU utilization policy that allows the autoscaler to -scale based on the average CPU utilization of a managed instance -group.`, - MaxItems: 1, - Elem: &resource_compute_autoscaler_schema.Resource{ - Schema: map[string]*resource_compute_autoscaler_schema.Schema{ - "target": { - Type: resource_compute_autoscaler_schema.TypeFloat, - Required: true, - Description: `The target CPU utilization that the autoscaler should maintain. -Must be a float value in the range (0, 1]. If not specified, the -default is 0.6. - -If the CPU level is below the target utilization, the autoscaler -scales down the number of instances until it reaches the minimum -number of instances you specified or until the average CPU of -your instances reaches the target utilization. 
- -If the average CPU is above the target utilization, the autoscaler -scales up until it reaches the maximum number of instances you -specified or until the average utilization reaches the target -utilization.`, - }, - "predictive_method": { - Type: resource_compute_autoscaler_schema.TypeString, - Optional: true, - Description: `Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are: - -- NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics. - -- OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.`, - Default: "NONE", - }, - }, - }, - }, - "load_balancing_utilization": { - Type: resource_compute_autoscaler_schema.TypeList, - Optional: true, - Description: `Configuration parameters of autoscaling based on a load balancer.`, - MaxItems: 1, - Elem: &resource_compute_autoscaler_schema.Resource{ - Schema: map[string]*resource_compute_autoscaler_schema.Schema{ - "target": { - Type: resource_compute_autoscaler_schema.TypeFloat, - Required: true, - Description: `Fraction of backend capacity utilization (set in HTTP(s) load -balancing configuration) that autoscaler should maintain. Must -be a positive float value. If not defined, the default is 0.8.`, - }, - }, - }, - }, - "metric": { - Type: resource_compute_autoscaler_schema.TypeList, - Optional: true, - Description: `Configuration parameters of autoscaling based on a custom metric.`, - Elem: &resource_compute_autoscaler_schema.Resource{ - Schema: map[string]*resource_compute_autoscaler_schema.Schema{ - "name": { - Type: resource_compute_autoscaler_schema.TypeString, - Required: true, - Description: `The identifier (type) of the Stackdriver Monitoring metric. -The metric cannot have negative values. 
- -The metric must have a value type of INT64 or DOUBLE.`, - }, - "target": { - Type: resource_compute_autoscaler_schema.TypeFloat, - Optional: true, - Description: `The target value of the metric that autoscaler should -maintain. This must be a positive value. A utilization -metric scales number of virtual machines handling requests -to increase or decrease proportionally to the metric. - -For example, a good metric to use as a utilizationTarget is -www.googleapis.com/compute/instance/network/received_bytes_count. -The autoscaler will work to keep this value constant for each -of the instances.`, - }, - "type": { - Type: resource_compute_autoscaler_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_autoscaler_validation.StringInSlice([]string{"GAUGE", "DELTA_PER_SECOND", "DELTA_PER_MINUTE", ""}, false), - Description: `Defines how target utilization value is expressed for a -Stackdriver Monitoring metric. Possible values: ["GAUGE", "DELTA_PER_SECOND", "DELTA_PER_MINUTE"]`, - }, - }, - }, - }, - "mode": { - Type: resource_compute_autoscaler_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_autoscaler_validation.StringInSlice([]string{"OFF", "ONLY_UP", "ON", ""}, false), - Description: `Defines operating mode for this policy. 
Default value: "ON" Possible values: ["OFF", "ONLY_UP", "ON"]`, - Default: "ON", - }, - "scale_in_control": { - Type: resource_compute_autoscaler_schema.TypeList, - Optional: true, - Description: `Defines scale in controls to reduce the risk of response latency -and outages due to abrupt scale-in events`, - MaxItems: 1, - Elem: &resource_compute_autoscaler_schema.Resource{ - Schema: map[string]*resource_compute_autoscaler_schema.Schema{ - "max_scaled_in_replicas": { - Type: resource_compute_autoscaler_schema.TypeList, - Optional: true, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_compute_autoscaler_schema.Resource{ - Schema: map[string]*resource_compute_autoscaler_schema.Schema{ - "fixed": { - Type: resource_compute_autoscaler_schema.TypeInt, - Optional: true, - Description: `Specifies a fixed number of VM instances. This must be a positive -integer.`, - AtLeastOneOf: []string{"autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas.0.fixed", "autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas.0.percent"}, - }, - "percent": { - Type: resource_compute_autoscaler_schema.TypeInt, - Optional: true, - Description: `Specifies a percentage of instances between 0 to 100%, inclusive. 
-For example, specify 80 for 80%.`, - AtLeastOneOf: []string{"autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas.0.fixed", "autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas.0.percent"}, - }, - }, - }, - AtLeastOneOf: []string{"autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas", "autoscaling_policy.0.scale_in_control.0.time_window_sec"}, - }, - "time_window_sec": { - Type: resource_compute_autoscaler_schema.TypeInt, - Optional: true, - Description: `How long back autoscaling should look when computing recommendations -to include directives regarding slower scale down, as described above.`, - AtLeastOneOf: []string{"autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas", "autoscaling_policy.0.scale_in_control.0.time_window_sec"}, - }, - }, - }, - }, - "scaling_schedules": { - Type: resource_compute_autoscaler_schema.TypeSet, - Optional: true, - Description: `Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap.`, - Elem: &resource_compute_autoscaler_schema.Resource{ - Schema: map[string]*resource_compute_autoscaler_schema.Schema{ - "name": { - Type: resource_compute_autoscaler_schema.TypeString, - Required: true, - }, - "duration_sec": { - Type: resource_compute_autoscaler_schema.TypeInt, - Required: true, - Description: `The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.`, - }, - "min_required_replicas": { - Type: resource_compute_autoscaler_schema.TypeInt, - Required: true, - Description: `Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.`, - }, - "schedule": { - Type: resource_compute_autoscaler_schema.TypeString, - Required: true, - Description: `The start timestamps of time intervals when this scaling schedule should provide a scaling signal. 
This field uses the extended cron format (with an optional year field).`, - }, - "description": { - Type: resource_compute_autoscaler_schema.TypeString, - Optional: true, - Description: `A description of a scaling schedule.`, - }, - "disabled": { - Type: resource_compute_autoscaler_schema.TypeBool, - Optional: true, - Description: `A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.`, - Default: false, - }, - "time_zone": { - Type: resource_compute_autoscaler_schema.TypeString, - Optional: true, - Description: `The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.`, - Default: "UTC", - }, - }, - }, - }, - }, - }, - }, - "name": { - Type: resource_compute_autoscaler_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCPName, - Description: `Name of the resource. The name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "target": { - Type: resource_compute_autoscaler_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the managed instance group that this autoscaler will scale.`, - }, - "description": { - Type: resource_compute_autoscaler_schema.TypeString, - Optional: true, - Description: `An optional description of this resource.`, - }, - "zone": { - Type: resource_compute_autoscaler_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the zone where the instance group resides.`, - }, - "creation_timestamp": { - Type: resource_compute_autoscaler_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: resource_compute_autoscaler_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_autoscaler_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeAutoscalerCreate(d *resource_compute_autoscaler_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeAutoscalerName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_autoscaler_reflect.ValueOf(nameProp)) && (ok || !resource_compute_autoscaler_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeAutoscalerDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("description"); !isEmptyValue(resource_compute_autoscaler_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_autoscaler_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - autoscalingPolicyProp, err := expandComputeAutoscalerAutoscalingPolicy(d.Get("autoscaling_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("autoscaling_policy"); !isEmptyValue(resource_compute_autoscaler_reflect.ValueOf(autoscalingPolicyProp)) && (ok || !resource_compute_autoscaler_reflect.DeepEqual(v, autoscalingPolicyProp)) { - obj["autoscalingPolicy"] = autoscalingPolicyProp - } - targetProp, err := expandComputeAutoscalerTarget(d.Get("target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target"); !isEmptyValue(resource_compute_autoscaler_reflect.ValueOf(targetProp)) && (ok || !resource_compute_autoscaler_reflect.DeepEqual(v, targetProp)) { - obj["target"] = targetProp - } - zoneProp, err := expandComputeAutoscalerZone(d.Get("zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(resource_compute_autoscaler_reflect.ValueOf(zoneProp)) && (ok || !resource_compute_autoscaler_reflect.DeepEqual(v, zoneProp)) { - obj["zone"] = zoneProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/autoscalers") - if err != nil { - return err - } - - resource_compute_autoscaler_log.Printf("[DEBUG] Creating new Autoscaler: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_autoscaler_fmt.Errorf("Error fetching project for Autoscaler: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_autoscaler_schema.TimeoutCreate)) - if 
err != nil { - return resource_compute_autoscaler_fmt.Errorf("Error creating Autoscaler: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}") - if err != nil { - return resource_compute_autoscaler_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating Autoscaler", userAgent, - d.Timeout(resource_compute_autoscaler_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_autoscaler_fmt.Errorf("Error waiting to create Autoscaler: %s", err) - } - - resource_compute_autoscaler_log.Printf("[DEBUG] Finished creating Autoscaler %q: %#v", d.Id(), res) - - return resourceComputeAutoscalerRead(d, meta) -} - -func resourceComputeAutoscalerRead(d *resource_compute_autoscaler_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_autoscaler_fmt.Errorf("Error fetching project for Autoscaler: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_autoscaler_fmt.Sprintf("ComputeAutoscaler %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_autoscaler_fmt.Errorf("Error reading Autoscaler: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeAutoscalerCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return 
resource_compute_autoscaler_fmt.Errorf("Error reading Autoscaler: %s", err) - } - if err := d.Set("name", flattenComputeAutoscalerName(res["name"], d, config)); err != nil { - return resource_compute_autoscaler_fmt.Errorf("Error reading Autoscaler: %s", err) - } - if err := d.Set("description", flattenComputeAutoscalerDescription(res["description"], d, config)); err != nil { - return resource_compute_autoscaler_fmt.Errorf("Error reading Autoscaler: %s", err) - } - if err := d.Set("autoscaling_policy", flattenComputeAutoscalerAutoscalingPolicy(res["autoscalingPolicy"], d, config)); err != nil { - return resource_compute_autoscaler_fmt.Errorf("Error reading Autoscaler: %s", err) - } - if err := d.Set("target", flattenComputeAutoscalerTarget(res["target"], d, config)); err != nil { - return resource_compute_autoscaler_fmt.Errorf("Error reading Autoscaler: %s", err) - } - if err := d.Set("zone", flattenComputeAutoscalerZone(res["zone"], d, config)); err != nil { - return resource_compute_autoscaler_fmt.Errorf("Error reading Autoscaler: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_autoscaler_fmt.Errorf("Error reading Autoscaler: %s", err) - } - - return nil -} - -func resourceComputeAutoscalerUpdate(d *resource_compute_autoscaler_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_autoscaler_fmt.Errorf("Error fetching project for Autoscaler: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := expandComputeAutoscalerName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_autoscaler_reflect.ValueOf(v)) && (ok || 
!resource_compute_autoscaler_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeAutoscalerDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_autoscaler_reflect.ValueOf(v)) && (ok || !resource_compute_autoscaler_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - autoscalingPolicyProp, err := expandComputeAutoscalerAutoscalingPolicy(d.Get("autoscaling_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("autoscaling_policy"); !isEmptyValue(resource_compute_autoscaler_reflect.ValueOf(v)) && (ok || !resource_compute_autoscaler_reflect.DeepEqual(v, autoscalingPolicyProp)) { - obj["autoscalingPolicy"] = autoscalingPolicyProp - } - targetProp, err := expandComputeAutoscalerTarget(d.Get("target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target"); !isEmptyValue(resource_compute_autoscaler_reflect.ValueOf(v)) && (ok || !resource_compute_autoscaler_reflect.DeepEqual(v, targetProp)) { - obj["target"] = targetProp - } - zoneProp, err := expandComputeAutoscalerZone(d.Get("zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(resource_compute_autoscaler_reflect.ValueOf(v)) && (ok || !resource_compute_autoscaler_reflect.DeepEqual(v, zoneProp)) { - obj["zone"] = zoneProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/autoscalers?autoscaler={{name}}") - if err != nil { - return err - } - - resource_compute_autoscaler_log.Printf("[DEBUG] Updating Autoscaler %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_compute_autoscaler_schema.TimeoutUpdate)) - - 
if err != nil { - return resource_compute_autoscaler_fmt.Errorf("Error updating Autoscaler %q: %s", d.Id(), err) - } else { - resource_compute_autoscaler_log.Printf("[DEBUG] Finished updating Autoscaler %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating Autoscaler", userAgent, - d.Timeout(resource_compute_autoscaler_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeAutoscalerRead(d, meta) -} - -func resourceComputeAutoscalerDelete(d *resource_compute_autoscaler_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_autoscaler_fmt.Errorf("Error fetching project for Autoscaler: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_autoscaler_log.Printf("[DEBUG] Deleting Autoscaler %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_autoscaler_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Autoscaler") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting Autoscaler", userAgent, - d.Timeout(resource_compute_autoscaler_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_autoscaler_log.Printf("[DEBUG] Finished deleting Autoscaler %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeAutoscalerImport(d *resource_compute_autoscaler_schema.ResourceData, meta interface{}) ([]*resource_compute_autoscaler_schema.ResourceData, 
error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/autoscalers/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}") - if err != nil { - return nil, resource_compute_autoscaler_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_autoscaler_schema.ResourceData{d}, nil -} - -func flattenComputeAutoscalerCreationTimestamp(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAutoscalerName(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAutoscalerDescription(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAutoscalerAutoscalingPolicy(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["min_replicas"] = - flattenComputeAutoscalerAutoscalingPolicyMinReplicas(original["minNumReplicas"], d, config) - transformed["max_replicas"] = - flattenComputeAutoscalerAutoscalingPolicyMaxReplicas(original["maxNumReplicas"], d, config) - transformed["cooldown_period"] = - flattenComputeAutoscalerAutoscalingPolicyCooldownPeriod(original["coolDownPeriodSec"], d, config) - transformed["mode"] = - flattenComputeAutoscalerAutoscalingPolicyMode(original["mode"], d, config) - transformed["scale_in_control"] = - flattenComputeAutoscalerAutoscalingPolicyScaleInControl(original["scaleInControl"], d, config) - transformed["cpu_utilization"] = - 
flattenComputeAutoscalerAutoscalingPolicyCpuUtilization(original["cpuUtilization"], d, config) - transformed["metric"] = - flattenComputeAutoscalerAutoscalingPolicyMetric(original["customMetricUtilizations"], d, config) - transformed["load_balancing_utilization"] = - flattenComputeAutoscalerAutoscalingPolicyLoadBalancingUtilization(original["loadBalancingUtilization"], d, config) - transformed["scaling_schedules"] = - flattenComputeAutoscalerAutoscalingPolicyScalingSchedules(original["scalingSchedules"], d, config) - return []interface{}{transformed} -} - -func flattenComputeAutoscalerAutoscalingPolicyMinReplicas(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_autoscaler_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyMaxReplicas(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_autoscaler_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_autoscaler_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyMode(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeAutoscalerAutoscalingPolicyScaleInControl(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["max_scaled_in_replicas"] = - flattenComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(original["maxScaledInReplicas"], d, config) - transformed["time_window_sec"] = - flattenComputeAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(original["timeWindowSec"], d, config) - return []interface{}{transformed} -} - -func flattenComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["fixed"] = - flattenComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(original["fixed"], d, config) - transformed["percent"] = - flattenComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(original["percent"], d, config) - return []interface{}{transformed} -} - -func flattenComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_autoscaler_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if 
intVal, err := resource_compute_autoscaler_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_autoscaler_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["target"] = - flattenComputeAutoscalerAutoscalingPolicyCpuUtilizationTarget(original["utilizationTarget"], d, config) - transformed["predictive_method"] = - flattenComputeAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(original["predictiveMethod"], d, config) - return []interface{}{transformed} -} - -func flattenComputeAutoscalerAutoscalingPolicyCpuUtilizationTarget(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(resource_compute_autoscaler_reflect.ValueOf(v)) { - return "NONE" - } - - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyMetric(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := 
make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenComputeAutoscalerAutoscalingPolicyMetricName(original["metric"], d, config), - "target": flattenComputeAutoscalerAutoscalingPolicyMetricTarget(original["utilizationTarget"], d, config), - "type": flattenComputeAutoscalerAutoscalingPolicyMetricType(original["utilizationTargetType"], d, config), - }) - } - return transformed -} - -func flattenComputeAutoscalerAutoscalingPolicyMetricName(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyMetricTarget(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyMetricType(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["target"] = - flattenComputeAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(original["utilizationTarget"], d, config) - return []interface{}{transformed} -} - -func flattenComputeAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyScalingSchedules(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.(map[string]interface{}) - 
transformed := make([]interface{}, 0, len(l)) - for k, raw := range l { - original := raw.(map[string]interface{}) - transformed = append(transformed, map[string]interface{}{ - "name": k, - "min_required_replicas": flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplicas(original["minRequiredReplicas"], d, config), - "schedule": flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesSchedule(original["schedule"], d, config), - "time_zone": flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(original["timeZone"], d, config), - "duration_sec": flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(original["durationSec"], d, config), - "disabled": flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesDisabled(original["disabled"], d, config), - "description": flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesDescription(original["description"], d, config), - }) - } - return transformed -} - -func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplicas(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_autoscaler_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesSchedule(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, 
err := resource_compute_autoscaler_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesDisabled(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAutoscalerAutoscalingPolicyScalingSchedulesDescription(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeAutoscalerTarget(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeAutoscalerZone(v interface{}, d *resource_compute_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeAutoscalerName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMinReplicas, err := expandComputeAutoscalerAutoscalingPolicyMinReplicas(original["min_replicas"], d, config) - if err != nil { - return nil, err - } else { - transformed["minNumReplicas"] = transformedMinReplicas - } - - transformedMaxReplicas, err := expandComputeAutoscalerAutoscalingPolicyMaxReplicas(original["max_replicas"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_autoscaler_reflect.ValueOf(transformedMaxReplicas); val.IsValid() && !isEmptyValue(val) { - transformed["maxNumReplicas"] = transformedMaxReplicas - } - - transformedCooldownPeriod, err := expandComputeAutoscalerAutoscalingPolicyCooldownPeriod(original["cooldown_period"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedCooldownPeriod); val.IsValid() && !isEmptyValue(val) { - transformed["coolDownPeriodSec"] = transformedCooldownPeriod - } - - transformedMode, err := expandComputeAutoscalerAutoscalingPolicyMode(original["mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedMode); val.IsValid() && !isEmptyValue(val) { - transformed["mode"] = transformedMode - } - - transformedScaleInControl, err := expandComputeAutoscalerAutoscalingPolicyScaleInControl(original["scale_in_control"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedScaleInControl); val.IsValid() && !isEmptyValue(val) { - transformed["scaleInControl"] = transformedScaleInControl - } - - transformedCpuUtilization, err := expandComputeAutoscalerAutoscalingPolicyCpuUtilization(original["cpu_utilization"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedCpuUtilization); val.IsValid() && !isEmptyValue(val) { - transformed["cpuUtilization"] = transformedCpuUtilization - } - - transformedMetric, err := expandComputeAutoscalerAutoscalingPolicyMetric(original["metric"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedMetric); val.IsValid() && !isEmptyValue(val) { - transformed["customMetricUtilizations"] = transformedMetric - } - - transformedLoadBalancingUtilization, err := 
expandComputeAutoscalerAutoscalingPolicyLoadBalancingUtilization(original["load_balancing_utilization"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedLoadBalancingUtilization); val.IsValid() && !isEmptyValue(val) { - transformed["loadBalancingUtilization"] = transformedLoadBalancingUtilization - } - - transformedScalingSchedules, err := expandComputeAutoscalerAutoscalingPolicyScalingSchedules(original["scaling_schedules"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedScalingSchedules); val.IsValid() && !isEmptyValue(val) { - transformed["scalingSchedules"] = transformedScalingSchedules - } - - return transformed, nil -} - -func expandComputeAutoscalerAutoscalingPolicyMinReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyMaxReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyScaleInControl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaxScaledInReplicas, err := expandComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(original["max_scaled_in_replicas"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_autoscaler_reflect.ValueOf(transformedMaxScaledInReplicas); val.IsValid() && !isEmptyValue(val) { - transformed["maxScaledInReplicas"] = transformedMaxScaledInReplicas - } - - transformedTimeWindowSec, err := expandComputeAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(original["time_window_sec"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedTimeWindowSec); val.IsValid() && !isEmptyValue(val) { - transformed["timeWindowSec"] = transformedTimeWindowSec - } - - return transformed, nil -} - -func expandComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFixed, err := expandComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(original["fixed"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedFixed); val.IsValid() && !isEmptyValue(val) { - transformed["fixed"] = transformedFixed - } - - transformedPercent, err := expandComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(original["percent"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedPercent); val.IsValid() && !isEmptyValue(val) { - transformed["percent"] = transformedPercent - } - - return transformed, nil -} - -func expandComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTarget, err := expandComputeAutoscalerAutoscalingPolicyCpuUtilizationTarget(original["target"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedTarget); val.IsValid() && !isEmptyValue(val) { - transformed["utilizationTarget"] = transformedTarget - } - - transformedPredictiveMethod, err := expandComputeAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(original["predictive_method"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedPredictiveMethod); val.IsValid() && !isEmptyValue(val) { - transformed["predictiveMethod"] = transformedPredictiveMethod - } - - return transformed, nil -} - -func expandComputeAutoscalerAutoscalingPolicyCpuUtilizationTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyMetric(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - 
transformedName, err := expandComputeAutoscalerAutoscalingPolicyMetricName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["metric"] = transformedName - } - - transformedTarget, err := expandComputeAutoscalerAutoscalingPolicyMetricTarget(original["target"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedTarget); val.IsValid() && !isEmptyValue(val) { - transformed["utilizationTarget"] = transformedTarget - } - - transformedType, err := expandComputeAutoscalerAutoscalingPolicyMetricType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["utilizationTargetType"] = transformedType - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeAutoscalerAutoscalingPolicyMetricName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyMetricTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyMetricType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTarget, err := expandComputeAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(original["target"], d, config) - if err != nil { - return nil, err - } 
else if val := resource_compute_autoscaler_reflect.ValueOf(transformedTarget); val.IsValid() && !isEmptyValue(val) { - transformed["utilizationTarget"] = transformedTarget - } - - return transformed, nil -} - -func expandComputeAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyScalingSchedules(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - m := make(map[string]interface{}) - for _, raw := range v.(*resource_compute_autoscaler_schema.Set).List() { - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMinRequiredReplicas, err := expandComputeAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplicas(original["min_required_replicas"], d, config) - if err != nil { - return nil, err - } else { - transformed["minRequiredReplicas"] = transformedMinRequiredReplicas - } - - transformedSchedule, err := expandComputeAutoscalerAutoscalingPolicyScalingSchedulesSchedule(original["schedule"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedSchedule); val.IsValid() && !isEmptyValue(val) { - transformed["schedule"] = transformedSchedule - } - - transformedTimeZone, err := expandComputeAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(original["time_zone"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedTimeZone); val.IsValid() && !isEmptyValue(val) { - transformed["timeZone"] = transformedTimeZone - } - - transformedDurationSec, err := expandComputeAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(original["duration_sec"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_autoscaler_reflect.ValueOf(transformedDurationSec); val.IsValid() && !isEmptyValue(val) { - transformed["durationSec"] = transformedDurationSec - } - - transformedDisabled, err := expandComputeAutoscalerAutoscalingPolicyScalingSchedulesDisabled(original["disabled"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { - transformed["disabled"] = transformedDisabled - } - - transformedDescription, err := expandComputeAutoscalerAutoscalingPolicyScalingSchedulesDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_autoscaler_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedName, err := expandString(original["name"], d, config) - if err != nil { - return nil, err - } - m[transformedName] = transformed - } - return m, nil -} - -func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerAutoscalingPolicyScalingSchedulesDescription(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeAutoscalerTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil || v.(string) == "" { - return "", nil - } - f, err := parseZonalFieldValue("instanceGroupManagers", v.(string), "project", "zone", d, config, true) - if err != nil { - return nil, resource_compute_autoscaler_fmt.Errorf("Invalid value for target: %s", err) - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+f.RelativeLink()) - if err != nil { - return nil, err - } - - return url, nil -} - -func expandComputeAutoscalerZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_autoscaler_fmt.Errorf("Invalid value for zone: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeBackendBucket() *resource_compute_backend_bucket_schema.Resource { - return &resource_compute_backend_bucket_schema.Resource{ - Create: resourceComputeBackendBucketCreate, - Read: resourceComputeBackendBucketRead, - Update: resourceComputeBackendBucketUpdate, - Delete: resourceComputeBackendBucketDelete, - - Importer: &resource_compute_backend_bucket_schema.ResourceImporter{ - State: resourceComputeBackendBucketImport, - }, - - Timeouts: &resource_compute_backend_bucket_schema.ResourceTimeout{ - Create: resource_compute_backend_bucket_schema.DefaultTimeout(4 * resource_compute_backend_bucket_time.Minute), - Update: resource_compute_backend_bucket_schema.DefaultTimeout(4 * resource_compute_backend_bucket_time.Minute), - Delete: resource_compute_backend_bucket_schema.DefaultTimeout(4 * resource_compute_backend_bucket_time.Minute), - }, - - Schema: map[string]*resource_compute_backend_bucket_schema.Schema{ - "bucket_name": { - Type: resource_compute_backend_bucket_schema.TypeString, - Required: true, - 
Description: `Cloud Storage bucket name.`, - }, - "name": { - Type: resource_compute_backend_bucket_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`), - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the -last character, which cannot be a dash.`, - }, - "cdn_policy": { - Type: resource_compute_backend_bucket_schema.TypeList, - Computed: true, - Optional: true, - Description: `Cloud CDN configuration for this Backend Bucket.`, - MaxItems: 1, - Elem: &resource_compute_backend_bucket_schema.Resource{ - Schema: map[string]*resource_compute_backend_bucket_schema.Schema{ - "cache_mode": { - Type: resource_compute_backend_bucket_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_compute_backend_bucket_validation.StringInSlice([]string{"USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC", ""}, false), - Description: `Specifies the cache setting for all responses from this backend. 
-The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC Possible values: ["USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC"]`, - }, - "client_ttl": { - Type: resource_compute_backend_bucket_schema.TypeInt, - Computed: true, - Optional: true, - Description: `Specifies the maximum allowed TTL for cached content served by this origin.`, - }, - "default_ttl": { - Type: resource_compute_backend_bucket_schema.TypeInt, - Computed: true, - Optional: true, - Description: `Specifies the default TTL for cached content served by this origin for responses -that do not have an existing valid TTL (max-age or s-max-age).`, - }, - "max_ttl": { - Type: resource_compute_backend_bucket_schema.TypeInt, - Computed: true, - Optional: true, - Description: `Specifies the maximum allowed TTL for cached content served by this origin.`, - }, - "negative_caching": { - Type: resource_compute_backend_bucket_schema.TypeBool, - Computed: true, - Optional: true, - Description: `Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects.`, - }, - "negative_caching_policy": { - Type: resource_compute_backend_bucket_schema.TypeList, - Optional: true, - Description: `Sets a cache TTL for the specified HTTP status code. negativeCaching must be enabled to configure negativeCachingPolicy. -Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's default cache TTLs.`, - Elem: &resource_compute_backend_bucket_schema.Resource{ - Schema: map[string]*resource_compute_backend_bucket_schema.Schema{ - "code": { - Type: resource_compute_backend_bucket_schema.TypeInt, - Optional: true, - Description: `The HTTP status code to define a TTL against. 
Only HTTP status codes 300, 301, 308, 404, 405, 410, 421, 451 and 501 -can be specified as values, and you cannot specify a status code more than once.`, - }, - "ttl": { - Type: resource_compute_backend_bucket_schema.TypeInt, - Optional: true, - Description: `The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s -(30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.`, - }, - }, - }, - }, - "serve_while_stale": { - Type: resource_compute_backend_bucket_schema.TypeInt, - Computed: true, - Optional: true, - Description: `Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache.`, - }, - "signed_url_cache_max_age_sec": { - Type: resource_compute_backend_bucket_schema.TypeInt, - Optional: true, - Description: `Maximum number of seconds the response to a signed URL request will -be considered fresh. After this time period, -the response will be revalidated before being served. -When serving responses to signed URL requests, -Cloud CDN will internally behave as though -all responses from this backend had a "Cache-Control: public, -max-age=[TTL]" header, regardless of any existing Cache-Control -header. 
The actual headers served in responses will not be altered.`, - }, - }, - }, - }, - "custom_response_headers": { - Type: resource_compute_backend_bucket_schema.TypeList, - Optional: true, - Description: `Headers that the HTTP/S load balancer should add to proxied responses.`, - Elem: &resource_compute_backend_bucket_schema.Schema{ - Type: resource_compute_backend_bucket_schema.TypeString, - }, - }, - "description": { - Type: resource_compute_backend_bucket_schema.TypeString, - Optional: true, - Description: `An optional textual description of the resource; provided by the -client when the resource is created.`, - }, - "enable_cdn": { - Type: resource_compute_backend_bucket_schema.TypeBool, - Optional: true, - Description: `If true, enable Cloud CDN for this BackendBucket.`, - }, - "creation_timestamp": { - Type: resource_compute_backend_bucket_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: resource_compute_backend_bucket_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_backend_bucket_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeBackendBucketCreate(d *resource_compute_backend_bucket_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - bucketNameProp, err := expandComputeBackendBucketBucketName(d.Get("bucket_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket_name"); !isEmptyValue(resource_compute_backend_bucket_reflect.ValueOf(bucketNameProp)) && (ok || !resource_compute_backend_bucket_reflect.DeepEqual(v, bucketNameProp)) { - obj["bucketName"] = bucketNameProp - } - cdnPolicyProp, err := expandComputeBackendBucketCdnPolicy(d.Get("cdn_policy"), d, config) - if err 
!= nil { - return err - } else if v, ok := d.GetOkExists("cdn_policy"); !isEmptyValue(resource_compute_backend_bucket_reflect.ValueOf(cdnPolicyProp)) && (ok || !resource_compute_backend_bucket_reflect.DeepEqual(v, cdnPolicyProp)) { - obj["cdnPolicy"] = cdnPolicyProp - } - customResponseHeadersProp, err := expandComputeBackendBucketCustomResponseHeaders(d.Get("custom_response_headers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("custom_response_headers"); !isEmptyValue(resource_compute_backend_bucket_reflect.ValueOf(customResponseHeadersProp)) && (ok || !resource_compute_backend_bucket_reflect.DeepEqual(v, customResponseHeadersProp)) { - obj["customResponseHeaders"] = customResponseHeadersProp - } - descriptionProp, err := expandComputeBackendBucketDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_backend_bucket_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_backend_bucket_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - enableCdnProp, err := expandComputeBackendBucketEnableCdn(d.Get("enable_cdn"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_cdn"); !isEmptyValue(resource_compute_backend_bucket_reflect.ValueOf(enableCdnProp)) && (ok || !resource_compute_backend_bucket_reflect.DeepEqual(v, enableCdnProp)) { - obj["enableCdn"] = enableCdnProp - } - nameProp, err := expandComputeBackendBucketName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_backend_bucket_reflect.ValueOf(nameProp)) && (ok || !resource_compute_backend_bucket_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets") - if err != nil { - return err - } - - 
resource_compute_backend_bucket_log.Printf("[DEBUG] Creating new BackendBucket: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_backend_bucket_fmt.Errorf("Error fetching project for BackendBucket: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_backend_bucket_schema.TimeoutCreate)) - if err != nil { - return resource_compute_backend_bucket_fmt.Errorf("Error creating BackendBucket: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/backendBuckets/{{name}}") - if err != nil { - return resource_compute_backend_bucket_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating BackendBucket", userAgent, - d.Timeout(resource_compute_backend_bucket_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_backend_bucket_fmt.Errorf("Error waiting to create BackendBucket: %s", err) - } - - resource_compute_backend_bucket_log.Printf("[DEBUG] Finished creating BackendBucket %q: %#v", d.Id(), res) - - return resourceComputeBackendBucketRead(d, meta) -} - -func resourceComputeBackendBucketRead(d *resource_compute_backend_bucket_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_backend_bucket_fmt.Errorf("Error fetching project for BackendBucket: %s", err) - } - billingProject = project - - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_backend_bucket_fmt.Sprintf("ComputeBackendBucket %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_backend_bucket_fmt.Errorf("Error reading BackendBucket: %s", err) - } - - if err := d.Set("bucket_name", flattenComputeBackendBucketBucketName(res["bucketName"], d, config)); err != nil { - return resource_compute_backend_bucket_fmt.Errorf("Error reading BackendBucket: %s", err) - } - if err := d.Set("cdn_policy", flattenComputeBackendBucketCdnPolicy(res["cdnPolicy"], d, config)); err != nil { - return resource_compute_backend_bucket_fmt.Errorf("Error reading BackendBucket: %s", err) - } - if err := d.Set("custom_response_headers", flattenComputeBackendBucketCustomResponseHeaders(res["customResponseHeaders"], d, config)); err != nil { - return resource_compute_backend_bucket_fmt.Errorf("Error reading BackendBucket: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeBackendBucketCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_backend_bucket_fmt.Errorf("Error reading BackendBucket: %s", err) - } - if err := d.Set("description", flattenComputeBackendBucketDescription(res["description"], d, config)); err != nil { - return resource_compute_backend_bucket_fmt.Errorf("Error reading BackendBucket: %s", err) - } - if err := d.Set("enable_cdn", flattenComputeBackendBucketEnableCdn(res["enableCdn"], d, config)); err != nil { - return resource_compute_backend_bucket_fmt.Errorf("Error reading BackendBucket: %s", err) - } - if err := d.Set("name", flattenComputeBackendBucketName(res["name"], d, config)); err != nil { - return resource_compute_backend_bucket_fmt.Errorf("Error reading BackendBucket: %s", err) - } - if err := d.Set("self_link", 
ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_backend_bucket_fmt.Errorf("Error reading BackendBucket: %s", err) - } - - return nil -} - -func resourceComputeBackendBucketUpdate(d *resource_compute_backend_bucket_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_backend_bucket_fmt.Errorf("Error fetching project for BackendBucket: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - bucketNameProp, err := expandComputeBackendBucketBucketName(d.Get("bucket_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket_name"); !isEmptyValue(resource_compute_backend_bucket_reflect.ValueOf(v)) && (ok || !resource_compute_backend_bucket_reflect.DeepEqual(v, bucketNameProp)) { - obj["bucketName"] = bucketNameProp - } - cdnPolicyProp, err := expandComputeBackendBucketCdnPolicy(d.Get("cdn_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cdn_policy"); !isEmptyValue(resource_compute_backend_bucket_reflect.ValueOf(v)) && (ok || !resource_compute_backend_bucket_reflect.DeepEqual(v, cdnPolicyProp)) { - obj["cdnPolicy"] = cdnPolicyProp - } - customResponseHeadersProp, err := expandComputeBackendBucketCustomResponseHeaders(d.Get("custom_response_headers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("custom_response_headers"); !isEmptyValue(resource_compute_backend_bucket_reflect.ValueOf(v)) && (ok || !resource_compute_backend_bucket_reflect.DeepEqual(v, customResponseHeadersProp)) { - obj["customResponseHeaders"] = customResponseHeadersProp - } - descriptionProp, err := expandComputeBackendBucketDescription(d.Get("description"), d, config) - if err != nil { - return err - } else 
if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_backend_bucket_reflect.ValueOf(v)) && (ok || !resource_compute_backend_bucket_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - enableCdnProp, err := expandComputeBackendBucketEnableCdn(d.Get("enable_cdn"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_cdn"); !isEmptyValue(resource_compute_backend_bucket_reflect.ValueOf(v)) && (ok || !resource_compute_backend_bucket_reflect.DeepEqual(v, enableCdnProp)) { - obj["enableCdn"] = enableCdnProp - } - nameProp, err := expandComputeBackendBucketName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_backend_bucket_reflect.ValueOf(v)) && (ok || !resource_compute_backend_bucket_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{name}}") - if err != nil { - return err - } - - resource_compute_backend_bucket_log.Printf("[DEBUG] Updating BackendBucket %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_compute_backend_bucket_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_backend_bucket_fmt.Errorf("Error updating BackendBucket %q: %s", d.Id(), err) - } else { - resource_compute_backend_bucket_log.Printf("[DEBUG] Finished updating BackendBucket %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating BackendBucket", userAgent, - d.Timeout(resource_compute_backend_bucket_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeBackendBucketRead(d, meta) -} - -func resourceComputeBackendBucketDelete(d 
*resource_compute_backend_bucket_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_backend_bucket_fmt.Errorf("Error fetching project for BackendBucket: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_backend_bucket_log.Printf("[DEBUG] Deleting BackendBucket %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_backend_bucket_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "BackendBucket") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting BackendBucket", userAgent, - d.Timeout(resource_compute_backend_bucket_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_backend_bucket_log.Printf("[DEBUG] Finished deleting BackendBucket %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeBackendBucketImport(d *resource_compute_backend_bucket_schema.ResourceData, meta interface{}) ([]*resource_compute_backend_bucket_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/backendBuckets/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/backendBuckets/{{name}}") - if err != nil { - return nil, resource_compute_backend_bucket_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return 
[]*resource_compute_backend_bucket_schema.ResourceData{d}, nil -} - -func flattenComputeBackendBucketBucketName(v interface{}, d *resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketCdnPolicy(v interface{}, d *resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["signed_url_cache_max_age_sec"] = - flattenComputeBackendBucketCdnPolicySignedUrlCacheMaxAgeSec(original["signedUrlCacheMaxAgeSec"], d, config) - transformed["default_ttl"] = - flattenComputeBackendBucketCdnPolicyDefaultTtl(original["defaultTtl"], d, config) - transformed["max_ttl"] = - flattenComputeBackendBucketCdnPolicyMaxTtl(original["maxTtl"], d, config) - transformed["client_ttl"] = - flattenComputeBackendBucketCdnPolicyClientTtl(original["clientTtl"], d, config) - transformed["negative_caching"] = - flattenComputeBackendBucketCdnPolicyNegativeCaching(original["negativeCaching"], d, config) - transformed["negative_caching_policy"] = - flattenComputeBackendBucketCdnPolicyNegativeCachingPolicy(original["negativeCachingPolicy"], d, config) - transformed["cache_mode"] = - flattenComputeBackendBucketCdnPolicyCacheMode(original["cacheMode"], d, config) - transformed["serve_while_stale"] = - flattenComputeBackendBucketCdnPolicyServeWhileStale(original["serveWhileStale"], d, config) - return []interface{}{transformed} -} - -func flattenComputeBackendBucketCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d *resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_bucket_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - 
} - - return v -} - -func flattenComputeBackendBucketCdnPolicyDefaultTtl(v interface{}, d *resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_bucket_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendBucketCdnPolicyMaxTtl(v interface{}, d *resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_bucket_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendBucketCdnPolicyClientTtl(v interface{}, d *resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_bucket_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendBucketCdnPolicyNegativeCaching(v interface{}, d *resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketCdnPolicyNegativeCachingPolicy(v interface{}, d *resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "code": flattenComputeBackendBucketCdnPolicyNegativeCachingPolicyCode(original["code"], d, config), - 
"ttl": flattenComputeBackendBucketCdnPolicyNegativeCachingPolicyTtl(original["ttl"], d, config), - }) - } - return transformed -} - -func flattenComputeBackendBucketCdnPolicyNegativeCachingPolicyCode(v interface{}, d *resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_bucket_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendBucketCdnPolicyNegativeCachingPolicyTtl(v interface{}, d *resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_bucket_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendBucketCdnPolicyCacheMode(v interface{}, d *resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketCdnPolicyServeWhileStale(v interface{}, d *resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_bucket_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendBucketCustomResponseHeaders(v interface{}, d *resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketCreationTimestamp(v interface{}, d *resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketDescription(v interface{}, d 
*resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketEnableCdn(v interface{}, d *resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendBucketName(v interface{}, d *resource_compute_backend_bucket_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeBackendBucketBucketName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSignedUrlCacheMaxAgeSec, err := expandComputeBackendBucketCdnPolicySignedUrlCacheMaxAgeSec(original["signed_url_cache_max_age_sec"], d, config) - if err != nil { - return nil, err - } else { - transformed["signedUrlCacheMaxAgeSec"] = transformedSignedUrlCacheMaxAgeSec - } - - transformedDefaultTtl, err := expandComputeBackendBucketCdnPolicyDefaultTtl(original["default_ttl"], d, config) - if err != nil { - return nil, err - } else { - transformed["defaultTtl"] = transformedDefaultTtl - } - - transformedMaxTtl, err := expandComputeBackendBucketCdnPolicyMaxTtl(original["max_ttl"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_bucket_reflect.ValueOf(transformedMaxTtl); val.IsValid() && !isEmptyValue(val) { - transformed["maxTtl"] = transformedMaxTtl - } - - transformedClientTtl, err := expandComputeBackendBucketCdnPolicyClientTtl(original["client_ttl"], d, config) - if err != nil { - return nil, err - } else { - transformed["clientTtl"] = transformedClientTtl - } - - transformedNegativeCaching, err := 
expandComputeBackendBucketCdnPolicyNegativeCaching(original["negative_caching"], d, config) - if err != nil { - return nil, err - } else { - transformed["negativeCaching"] = transformedNegativeCaching - } - - transformedNegativeCachingPolicy, err := expandComputeBackendBucketCdnPolicyNegativeCachingPolicy(original["negative_caching_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_bucket_reflect.ValueOf(transformedNegativeCachingPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["negativeCachingPolicy"] = transformedNegativeCachingPolicy - } - - transformedCacheMode, err := expandComputeBackendBucketCdnPolicyCacheMode(original["cache_mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_bucket_reflect.ValueOf(transformedCacheMode); val.IsValid() && !isEmptyValue(val) { - transformed["cacheMode"] = transformedCacheMode - } - - transformedServeWhileStale, err := expandComputeBackendBucketCdnPolicyServeWhileStale(original["serve_while_stale"], d, config) - if err != nil { - return nil, err - } else { - transformed["serveWhileStale"] = transformedServeWhileStale - } - - return transformed, nil -} - -func expandComputeBackendBucketCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyDefaultTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyMaxTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyClientTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyNegativeCaching(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeBackendBucketCdnPolicyNegativeCachingPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCode, err := expandComputeBackendBucketCdnPolicyNegativeCachingPolicyCode(original["code"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_bucket_reflect.ValueOf(transformedCode); val.IsValid() && !isEmptyValue(val) { - transformed["code"] = transformedCode - } - - transformedTtl, err := expandComputeBackendBucketCdnPolicyNegativeCachingPolicyTtl(original["ttl"], d, config) - if err != nil { - return nil, err - } else { - transformed["ttl"] = transformedTtl - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeBackendBucketCdnPolicyNegativeCachingPolicyCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyNegativeCachingPolicyTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyCacheMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCdnPolicyServeWhileStale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketCustomResponseHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendBucketEnableCdn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
return v, nil -} - -func expandComputeBackendBucketName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeBackendBucketSignedUrlKey() *resource_compute_backend_bucket_signed_url_key_schema.Resource { - return &resource_compute_backend_bucket_signed_url_key_schema.Resource{ - Create: resourceComputeBackendBucketSignedUrlKeyCreate, - Read: resourceComputeBackendBucketSignedUrlKeyRead, - Delete: resourceComputeBackendBucketSignedUrlKeyDelete, - - Timeouts: &resource_compute_backend_bucket_signed_url_key_schema.ResourceTimeout{ - Create: resource_compute_backend_bucket_signed_url_key_schema.DefaultTimeout(4 * resource_compute_backend_bucket_signed_url_key_time.Minute), - Delete: resource_compute_backend_bucket_signed_url_key_schema.DefaultTimeout(4 * resource_compute_backend_bucket_signed_url_key_time.Minute), - }, - - Schema: map[string]*resource_compute_backend_bucket_signed_url_key_schema.Schema{ - "backend_bucket": { - Type: resource_compute_backend_bucket_signed_url_key_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The backend bucket this signed URL key belongs.`, - }, - "key_value": { - Type: resource_compute_backend_bucket_signed_url_key_schema.TypeString, - Required: true, - ForceNew: true, - Description: `128-bit key value used for signing the URL. 
The key value must be a -valid RFC 4648 Section 5 base64url encoded string.`, - Sensitive: true, - }, - "name": { - Type: resource_compute_backend_bucket_signed_url_key_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`), - Description: `Name of the signed URL key.`, - }, - "project": { - Type: resource_compute_backend_bucket_signed_url_key_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeBackendBucketSignedUrlKeyCreate(d *resource_compute_backend_bucket_signed_url_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - keyNameProp, err := expandNestedComputeBackendBucketSignedUrlKeyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_backend_bucket_signed_url_key_reflect.ValueOf(keyNameProp)) && (ok || !resource_compute_backend_bucket_signed_url_key_reflect.DeepEqual(v, keyNameProp)) { - obj["keyName"] = keyNameProp - } - keyValueProp, err := expandNestedComputeBackendBucketSignedUrlKeyKeyValue(d.Get("key_value"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("key_value"); !isEmptyValue(resource_compute_backend_bucket_signed_url_key_reflect.ValueOf(keyValueProp)) && (ok || !resource_compute_backend_bucket_signed_url_key_reflect.DeepEqual(v, keyValueProp)) { - obj["keyValue"] = keyValueProp - } - backendBucketProp, err := expandNestedComputeBackendBucketSignedUrlKeyBackendBucket(d.Get("backend_bucket"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend_bucket"); !isEmptyValue(resource_compute_backend_bucket_signed_url_key_reflect.ValueOf(backendBucketProp)) && (ok || 
!resource_compute_backend_bucket_signed_url_key_reflect.DeepEqual(v, backendBucketProp)) { - obj["backendBucket"] = backendBucketProp - } - - lockName, err := replaceVars(d, config, "signedUrlKey/{{project}}/backendBuckets/{{backend_bucket}}/") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{backend_bucket}}/addSignedUrlKey") - if err != nil { - return err - } - - resource_compute_backend_bucket_signed_url_key_log.Printf("[DEBUG] Creating new BackendBucketSignedUrlKey: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_backend_bucket_signed_url_key_fmt.Errorf("Error fetching project for BackendBucketSignedUrlKey: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_backend_bucket_signed_url_key_schema.TimeoutCreate)) - if err != nil { - return resource_compute_backend_bucket_signed_url_key_fmt.Errorf("Error creating BackendBucketSignedUrlKey: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/backendBuckets/{{backend_bucket}}") - if err != nil { - return resource_compute_backend_bucket_signed_url_key_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating BackendBucketSignedUrlKey", userAgent, - d.Timeout(resource_compute_backend_bucket_signed_url_key_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_backend_bucket_signed_url_key_fmt.Errorf("Error waiting to create BackendBucketSignedUrlKey: %s", err) - } - - resource_compute_backend_bucket_signed_url_key_log.Printf("[DEBUG] Finished creating BackendBucketSignedUrlKey %q: 
%#v", d.Id(), res) - - return resourceComputeBackendBucketSignedUrlKeyRead(d, meta) -} - -func resourceComputeBackendBucketSignedUrlKeyRead(d *resource_compute_backend_bucket_signed_url_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{backend_bucket}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_backend_bucket_signed_url_key_fmt.Errorf("Error fetching project for BackendBucketSignedUrlKey: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_backend_bucket_signed_url_key_fmt.Sprintf("ComputeBackendBucketSignedUrlKey %q", d.Id())) - } - - res, err = flattenNestedComputeBackendBucketSignedUrlKey(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_backend_bucket_signed_url_key_log.Printf("[DEBUG] Removing ComputeBackendBucketSignedUrlKey because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_compute_backend_bucket_signed_url_key_fmt.Errorf("Error reading BackendBucketSignedUrlKey: %s", err) - } - - if err := d.Set("name", flattenNestedComputeBackendBucketSignedUrlKeyName(res["keyName"], d, config)); err != nil { - return resource_compute_backend_bucket_signed_url_key_fmt.Errorf("Error reading BackendBucketSignedUrlKey: %s", err) - } - - return nil -} - -func resourceComputeBackendBucketSignedUrlKeyDelete(d *resource_compute_backend_bucket_signed_url_key_schema.ResourceData, meta interface{}) error 
{ - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_backend_bucket_signed_url_key_fmt.Errorf("Error fetching project for BackendBucketSignedUrlKey: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "signedUrlKey/{{project}}/backendBuckets/{{backend_bucket}}/") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendBuckets/{{backend_bucket}}/deleteSignedUrlKey?keyName={{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_backend_bucket_signed_url_key_log.Printf("[DEBUG] Deleting BackendBucketSignedUrlKey %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_backend_bucket_signed_url_key_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "BackendBucketSignedUrlKey") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting BackendBucketSignedUrlKey", userAgent, - d.Timeout(resource_compute_backend_bucket_signed_url_key_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_backend_bucket_signed_url_key_log.Printf("[DEBUG] Finished deleting BackendBucketSignedUrlKey %q: %#v", d.Id(), res) - return nil -} - -func flattenNestedComputeBackendBucketSignedUrlKeyName(v interface{}, d *resource_compute_backend_bucket_signed_url_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedComputeBackendBucketSignedUrlKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - 
-func expandNestedComputeBackendBucketSignedUrlKeyKeyValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeBackendBucketSignedUrlKeyBackendBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("backendBuckets", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_backend_bucket_signed_url_key_fmt.Errorf("Invalid value for backend_bucket: %s", err) - } - return f.RelativeLink(), nil -} - -func flattenNestedComputeBackendBucketSignedUrlKey(d *resource_compute_backend_bucket_signed_url_key_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["cdnPolicy"] - if !ok || v == nil { - return nil, nil - } - res = v.(map[string]interface{}) - - v, ok = res["signedUrlKeyNames"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_compute_backend_bucket_signed_url_key_fmt.Errorf("expected list or map for value cdnPolicy.signedUrlKeyNames. 
Actual value: %v", v) - } - - _, item, err := resourceComputeBackendBucketSignedUrlKeyFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeBackendBucketSignedUrlKeyFindNestedObjectInList(d *resource_compute_backend_bucket_signed_url_key_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName, err := expandNestedComputeBackendBucketSignedUrlKeyName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputeBackendBucketSignedUrlKeyName(expectedName, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - - item := map[string]interface{}{ - "keyName": itemRaw, - } - - itemName := flattenNestedComputeBackendBucketSignedUrlKeyName(item["keyName"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_backend_bucket_signed_url_key_reflect.ValueOf(itemName)) && isEmptyValue(resource_compute_backend_bucket_signed_url_key_reflect.ValueOf(expectedFlattenedName))) && !resource_compute_backend_bucket_signed_url_key_reflect.DeepEqual(itemName, expectedFlattenedName) { - resource_compute_backend_bucket_signed_url_key_log.Printf("[DEBUG] Skipping item with keyName= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - resource_compute_backend_bucket_signed_url_key_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func suppressWhenDisabled(k, old, new string, d *resource_compute_backend_service_schema.ResourceData) bool { - _, n := d.GetChange("log_config.0.enable") - if isEmptyValue(resource_compute_backend_service_reflect.ValueOf(n)) { - return true - } - return false -} - -func isNegBackend(backend map[string]interface{}) bool { - backendGroup, ok := backend["group"] - if !ok { - return false - } - - match, err := 
resource_compute_backend_service_regexp.MatchString("(?:global|regions/[^/]+)/networkEndpointGroups", backendGroup.(string)) - if err != nil { - - return false - } - return match -} - -func resourceGoogleComputeBackendServiceBackendHash(v interface{}) int { - if v == nil { - return 0 - } - - var buf resource_compute_backend_service_bytes.Buffer - m := v.(map[string]interface{}) - resource_compute_backend_service_log.Printf("[DEBUG] hashing %v", m) - - if group, err := getRelativePath(m["group"].(string)); err != nil { - resource_compute_backend_service_log.Printf("[WARN] Error on retrieving relative path of instance group: %s", err) - buf.WriteString(resource_compute_backend_service_fmt.Sprintf("%s-", m["group"].(string))) - } else { - buf.WriteString(resource_compute_backend_service_fmt.Sprintf("%s-", group)) - } - - if v, ok := m["balancing_mode"]; ok { - if v == nil { - v = "" - } - - buf.WriteString(resource_compute_backend_service_fmt.Sprintf("%v-", v)) - } - if v, ok := m["capacity_scaler"]; ok { - if v == nil { - v = 0.0 - } - - buf.WriteString(resource_compute_backend_service_fmt.Sprintf("%f-", v.(float64))) - } - if v, ok := m["description"]; ok { - if v == nil { - v = "" - } - - resource_compute_backend_service_log.Printf("[DEBUG] writing description %s", v) - buf.WriteString(resource_compute_backend_service_fmt.Sprintf("%v-", v)) - } - if v, ok := m["max_rate"]; ok { - if v == nil { - v = 0 - } - - buf.WriteString(resource_compute_backend_service_fmt.Sprintf("%v-", v)) - } - if v, ok := m["max_rate_per_instance"]; ok { - if v == nil { - v = 0.0 - } - - buf.WriteString(resource_compute_backend_service_fmt.Sprintf("%f-", v.(float64))) - } - if v, ok := m["max_connections"]; ok { - if v == nil { - v = 0 - } - - buf.WriteString(resource_compute_backend_service_fmt.Sprintf("%v-", v)) - } - if v, ok := m["max_connections_per_instance"]; ok { - if v == nil { - v = 0 - } - - buf.WriteString(resource_compute_backend_service_fmt.Sprintf("%v-", v)) - } - if v, ok 
:= m["max_rate_per_instance"]; ok { - if v == nil { - v = 0.0 - } - - buf.WriteString(resource_compute_backend_service_fmt.Sprintf("%f-", v.(float64))) - } - if v, ok := m["max_connections_per_endpoint"]; ok { - if v == nil { - v = 0 - } - - buf.WriteString(resource_compute_backend_service_fmt.Sprintf("%v-", v)) - } - if v, ok := m["max_rate_per_endpoint"]; ok { - if v == nil { - v = 0.0 - } - - buf.WriteString(resource_compute_backend_service_fmt.Sprintf("%f-", v.(float64))) - } - if v, ok := m["max_utilization"]; ok && !isNegBackend(m) { - if v == nil { - v = 0.0 - } - - buf.WriteString(resource_compute_backend_service_fmt.Sprintf("%f-", v.(float64))) - } - - if v, ok := m["failover"]; ok { - if v == nil { - v = false - } - buf.WriteString(resource_compute_backend_service_fmt.Sprintf("%v-", v.(bool))) - } - - resource_compute_backend_service_log.Printf("[DEBUG] computed hash value of %v from %v", hashcode(buf.String()), buf.String()) - return hashcode(buf.String()) -} - -func resourceComputeBackendService() *resource_compute_backend_service_schema.Resource { - return &resource_compute_backend_service_schema.Resource{ - Create: resourceComputeBackendServiceCreate, - Read: resourceComputeBackendServiceRead, - Update: resourceComputeBackendServiceUpdate, - Delete: resourceComputeBackendServiceDelete, - - Importer: &resource_compute_backend_service_schema.ResourceImporter{ - State: resourceComputeBackendServiceImport, - }, - - Timeouts: &resource_compute_backend_service_schema.ResourceTimeout{ - Create: resource_compute_backend_service_schema.DefaultTimeout(4 * resource_compute_backend_service_time.Minute), - Update: resource_compute_backend_service_schema.DefaultTimeout(4 * resource_compute_backend_service_time.Minute), - Delete: resource_compute_backend_service_schema.DefaultTimeout(4 * resource_compute_backend_service_time.Minute), - }, - - SchemaVersion: 1, - - Schema: map[string]*resource_compute_backend_service_schema.Schema{ - "name": { - Type: 
resource_compute_backend_service_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "affinity_cookie_ttl_sec": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `Lifetime of cookies in seconds if session_affinity is -GENERATED_COOKIE. If set to 0, the cookie is non-persistent and lasts -only until the end of the browser session (or equivalent). The -maximum allowed value for TTL is one day. - -When the load balancing scheme is INTERNAL, this field is not used.`, - }, - "backend": { - Type: resource_compute_backend_service_schema.TypeSet, - Optional: true, - Description: `The set of backends that serve this BackendService.`, - Elem: computeBackendServiceBackendSchema(), - Set: resourceGoogleComputeBackendServiceBackendHash, - }, - "cdn_policy": { - Type: resource_compute_backend_service_schema.TypeList, - Computed: true, - Optional: true, - Description: `Cloud CDN configuration for this BackendService.`, - MaxItems: 1, - Elem: &resource_compute_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_backend_service_schema.Schema{ - "cache_key_policy": { - Type: resource_compute_backend_service_schema.TypeList, - Optional: true, - Description: `The CacheKeyPolicy for this CdnPolicy.`, - MaxItems: 1, - Elem: &resource_compute_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_backend_service_schema.Schema{ - "include_host": { - Type: resource_compute_backend_service_schema.TypeBool, - Optional: true, - Description: `If true 
requests to different hosts will be cached separately.`, - AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.include_host", "cdn_policy.0.cache_key_policy.0.include_protocol", "cdn_policy.0.cache_key_policy.0.include_query_string", "cdn_policy.0.cache_key_policy.0.query_string_blacklist", "cdn_policy.0.cache_key_policy.0.query_string_whitelist"}, - }, - "include_protocol": { - Type: resource_compute_backend_service_schema.TypeBool, - Optional: true, - Description: `If true, http and https requests will be cached separately.`, - AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.include_host", "cdn_policy.0.cache_key_policy.0.include_protocol", "cdn_policy.0.cache_key_policy.0.include_query_string", "cdn_policy.0.cache_key_policy.0.query_string_blacklist", "cdn_policy.0.cache_key_policy.0.query_string_whitelist"}, - }, - "include_query_string": { - Type: resource_compute_backend_service_schema.TypeBool, - Optional: true, - Description: `If true, include query string parameters in the cache key -according to query_string_whitelist and -query_string_blacklist. If neither is set, the entire query -string will be included. - -If false, the query string will be excluded from the cache -key entirely.`, - AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.include_host", "cdn_policy.0.cache_key_policy.0.include_protocol", "cdn_policy.0.cache_key_policy.0.include_query_string", "cdn_policy.0.cache_key_policy.0.query_string_blacklist", "cdn_policy.0.cache_key_policy.0.query_string_whitelist"}, - }, - "query_string_blacklist": { - Type: resource_compute_backend_service_schema.TypeSet, - Optional: true, - Description: `Names of query string parameters to exclude in cache keys. - -All other parameters will be included. Either specify -query_string_whitelist or query_string_blacklist, not both. 
-'&' and '=' will be percent encoded and not treated as -delimiters.`, - Elem: &resource_compute_backend_service_schema.Schema{ - Type: resource_compute_backend_service_schema.TypeString, - }, - Set: resource_compute_backend_service_schema.HashString, - AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.include_host", "cdn_policy.0.cache_key_policy.0.include_protocol", "cdn_policy.0.cache_key_policy.0.include_query_string", "cdn_policy.0.cache_key_policy.0.query_string_blacklist", "cdn_policy.0.cache_key_policy.0.query_string_whitelist"}, - }, - "query_string_whitelist": { - Type: resource_compute_backend_service_schema.TypeSet, - Optional: true, - Description: `Names of query string parameters to include in cache keys. - -All other parameters will be excluded. Either specify -query_string_whitelist or query_string_blacklist, not both. -'&' and '=' will be percent encoded and not treated as -delimiters.`, - Elem: &resource_compute_backend_service_schema.Schema{ - Type: resource_compute_backend_service_schema.TypeString, - }, - Set: resource_compute_backend_service_schema.HashString, - AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.include_host", "cdn_policy.0.cache_key_policy.0.include_protocol", "cdn_policy.0.cache_key_policy.0.include_query_string", "cdn_policy.0.cache_key_policy.0.query_string_blacklist", "cdn_policy.0.cache_key_policy.0.query_string_whitelist"}, - }, - }, - }, - AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy", "cdn_policy.0.signed_url_cache_max_age_sec"}, - }, - "cache_mode": { - Type: resource_compute_backend_service_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_compute_backend_service_validation.StringInSlice([]string{"USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC", ""}, false), - Description: `Specifies the cache setting for all responses from this backend. 
-The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC Possible values: ["USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC"]`, - }, - "client_ttl": { - Type: resource_compute_backend_service_schema.TypeInt, - Computed: true, - Optional: true, - Description: `Specifies the maximum allowed TTL for cached content served by this origin.`, - }, - "default_ttl": { - Type: resource_compute_backend_service_schema.TypeInt, - Computed: true, - Optional: true, - Description: `Specifies the default TTL for cached content served by this origin for responses -that do not have an existing valid TTL (max-age or s-max-age).`, - }, - "max_ttl": { - Type: resource_compute_backend_service_schema.TypeInt, - Computed: true, - Optional: true, - Description: `Specifies the maximum allowed TTL for cached content served by this origin.`, - }, - "negative_caching": { - Type: resource_compute_backend_service_schema.TypeBool, - Computed: true, - Optional: true, - Description: `Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects.`, - }, - "negative_caching_policy": { - Type: resource_compute_backend_service_schema.TypeList, - Optional: true, - Description: `Sets a cache TTL for the specified HTTP status code. negativeCaching must be enabled to configure negativeCachingPolicy. -Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's default cache TTLs.`, - Elem: &resource_compute_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_backend_service_schema.Schema{ - "code": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `The HTTP status code to define a TTL against. 
Only HTTP status codes 300, 301, 308, 404, 405, 410, 421, 451 and 501 -can be specified as values, and you cannot specify a status code more than once.`, - }, - "ttl": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s -(30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.`, - }, - }, - }, - }, - "serve_while_stale": { - Type: resource_compute_backend_service_schema.TypeInt, - Computed: true, - Optional: true, - Description: `Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache.`, - }, - "signed_url_cache_max_age_sec": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `Maximum number of seconds the response to a signed URL request -will be considered fresh, defaults to 1hr (3600s). After this -time period, the response will be revalidated before -being served. - -When serving responses to signed URL requests, Cloud CDN will -internally behave as though all responses from this backend had a -"Cache-Control: public, max-age=[TTL]" header, regardless of any -existing Cache-Control header. The actual headers served in -responses will not be altered.`, - Default: 3600, - AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy", "cdn_policy.0.signed_url_cache_max_age_sec"}, - }, - }, - }, - }, - "circuit_breakers": { - Type: resource_compute_backend_service_schema.TypeList, - Optional: true, - Description: `Settings controlling the volume of connections to a backend service. 
This field -is applicable only when the load_balancing_scheme is set to INTERNAL_SELF_MANAGED.`, - MaxItems: 1, - Elem: &resource_compute_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_backend_service_schema.Schema{ - "max_connections": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `The maximum number of connections to the backend cluster. -Defaults to 1024.`, - Default: 1024, - AtLeastOneOf: []string{"circuit_breakers.0.max_requests_per_connection", "circuit_breakers.0.max_connections", "circuit_breakers.0.max_pending_requests", "circuit_breakers.0.max_requests", "circuit_breakers.0.max_retries"}, - }, - "max_pending_requests": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `The maximum number of pending requests to the backend cluster. -Defaults to 1024.`, - Default: 1024, - AtLeastOneOf: []string{"circuit_breakers.0.max_requests_per_connection", "circuit_breakers.0.max_connections", "circuit_breakers.0.max_pending_requests", "circuit_breakers.0.max_requests", "circuit_breakers.0.max_retries"}, - }, - "max_requests": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `The maximum number of parallel requests to the backend cluster. -Defaults to 1024.`, - Default: 1024, - AtLeastOneOf: []string{"circuit_breakers.0.max_requests_per_connection", "circuit_breakers.0.max_connections", "circuit_breakers.0.max_pending_requests", "circuit_breakers.0.max_requests", "circuit_breakers.0.max_retries"}, - }, - "max_requests_per_connection": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `Maximum requests for a single backend connection. This parameter -is respected by both the HTTP/1.1 and HTTP/2 implementations. If -not specified, there is no limit. 
Setting this parameter to 1 -will effectively disable keep alive.`, - AtLeastOneOf: []string{"circuit_breakers.0.max_requests_per_connection", "circuit_breakers.0.max_connections", "circuit_breakers.0.max_pending_requests", "circuit_breakers.0.max_requests", "circuit_breakers.0.max_retries"}, - }, - "max_retries": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `The maximum number of parallel retries to the backend cluster. -Defaults to 3.`, - Default: 3, - AtLeastOneOf: []string{"circuit_breakers.0.max_requests_per_connection", "circuit_breakers.0.max_connections", "circuit_breakers.0.max_pending_requests", "circuit_breakers.0.max_requests", "circuit_breakers.0.max_retries"}, - }, - }, - }, - }, - "connection_draining_timeout_sec": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `Time for which instance will be drained (not accept new -connections, but still work to finish started).`, - Default: 300, - }, - - "consistent_hash": { - Type: resource_compute_backend_service_schema.TypeList, - Optional: true, - Description: `Consistent Hash-based load balancing can be used to provide soft session -affinity based on HTTP headers, cookies or other properties. This load balancing -policy is applicable only for HTTP connections. The affinity to a particular -destination host will be lost when one or more hosts are added/removed from the -destination service. This field specifies parameters that control consistent -hashing. This field only applies if the load_balancing_scheme is set to -INTERNAL_SELF_MANAGED. This field is only applicable when locality_lb_policy is -set to MAGLEV or RING_HASH.`, - MaxItems: 1, - Elem: &resource_compute_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_backend_service_schema.Schema{ - "http_cookie": { - Type: resource_compute_backend_service_schema.TypeList, - Optional: true, - Description: `Hash is based on HTTP Cookie. 
This field describes a HTTP cookie -that will be used as the hash key for the consistent hash load -balancer. If the cookie is not present, it will be generated. -This field is applicable if the sessionAffinity is set to HTTP_COOKIE.`, - MaxItems: 1, - Elem: &resource_compute_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_backend_service_schema.Schema{ - "name": { - Type: resource_compute_backend_service_schema.TypeString, - Optional: true, - Description: `Name of the cookie.`, - AtLeastOneOf: []string{"consistent_hash.0.http_cookie.0.ttl", "consistent_hash.0.http_cookie.0.name", "consistent_hash.0.http_cookie.0.path"}, - }, - "path": { - Type: resource_compute_backend_service_schema.TypeString, - Optional: true, - Description: `Path to set for the cookie.`, - AtLeastOneOf: []string{"consistent_hash.0.http_cookie.0.ttl", "consistent_hash.0.http_cookie.0.name", "consistent_hash.0.http_cookie.0.path"}, - }, - "ttl": { - Type: resource_compute_backend_service_schema.TypeList, - Optional: true, - Description: `Lifetime of the cookie.`, - MaxItems: 1, - Elem: &resource_compute_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_backend_service_schema.Schema{ - "seconds": { - Type: resource_compute_backend_service_schema.TypeInt, - Required: true, - Description: `Span of time at a resolution of a second. -Must be from 0 to 315,576,000,000 inclusive.`, - }, - "nanos": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond -resolution. Durations less than one second are represented -with a 0 seconds field and a positive nanos field. 
Must -be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - AtLeastOneOf: []string{"consistent_hash.0.http_cookie.0.ttl", "consistent_hash.0.http_cookie.0.name", "consistent_hash.0.http_cookie.0.path"}, - }, - }, - }, - AtLeastOneOf: []string{"consistent_hash.0.http_cookie", "consistent_hash.0.http_header_name", "consistent_hash.0.minimum_ring_size"}, - }, - "http_header_name": { - Type: resource_compute_backend_service_schema.TypeString, - Optional: true, - Description: `The hash based on the value of the specified header field. -This field is applicable if the sessionAffinity is set to HEADER_FIELD.`, - AtLeastOneOf: []string{"consistent_hash.0.http_cookie", "consistent_hash.0.http_header_name", "consistent_hash.0.minimum_ring_size"}, - }, - "minimum_ring_size": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `The minimum number of virtual nodes to use for the hash ring. -Larger ring sizes result in more granular load -distributions. If the number of hosts in the load balancing pool -is larger than the ring size, each host will be assigned a single -virtual node. 
-Defaults to 1024.`, - Default: 1024, - AtLeastOneOf: []string{"consistent_hash.0.http_cookie", "consistent_hash.0.http_header_name", "consistent_hash.0.minimum_ring_size"}, - }, - }, - }, - }, - "custom_request_headers": { - Type: resource_compute_backend_service_schema.TypeSet, - Optional: true, - Description: `Headers that the HTTP/S load balancer should add to proxied -requests.`, - Elem: &resource_compute_backend_service_schema.Schema{ - Type: resource_compute_backend_service_schema.TypeString, - }, - Set: resource_compute_backend_service_schema.HashString, - }, - "custom_response_headers": { - Type: resource_compute_backend_service_schema.TypeSet, - Optional: true, - Description: `Headers that the HTTP/S load balancer should add to proxied -responses.`, - Elem: &resource_compute_backend_service_schema.Schema{ - Type: resource_compute_backend_service_schema.TypeString, - }, - Set: resource_compute_backend_service_schema.HashString, - }, - "description": { - Type: resource_compute_backend_service_schema.TypeString, - Optional: true, - Description: `An optional description of this resource.`, - }, - "enable_cdn": { - Type: resource_compute_backend_service_schema.TypeBool, - Optional: true, - Description: `If true, enable Cloud CDN for this BackendService.`, - }, - "health_checks": { - Type: resource_compute_backend_service_schema.TypeSet, - Optional: true, - Description: `The set of URLs to the HttpHealthCheck or HttpsHealthCheck resource -for health checking this BackendService. Currently at most one health -check can be specified. - -A health check must be specified unless the backend service uses an internet -or serverless NEG as a backend. 
- -For internal load balancing, a URL to a HealthCheck resource must be specified instead.`, - MinItems: 1, - MaxItems: 1, - Elem: &resource_compute_backend_service_schema.Schema{ - Type: resource_compute_backend_service_schema.TypeString, - }, - Set: selfLinkRelativePathHash, - }, - "iap": { - Type: resource_compute_backend_service_schema.TypeList, - Optional: true, - Description: `Settings for enabling Cloud Identity Aware Proxy`, - MaxItems: 1, - Elem: &resource_compute_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_backend_service_schema.Schema{ - "oauth2_client_id": { - Type: resource_compute_backend_service_schema.TypeString, - Required: true, - Description: `OAuth2 Client ID for IAP`, - }, - "oauth2_client_secret": { - Type: resource_compute_backend_service_schema.TypeString, - Required: true, - Description: `OAuth2 Client Secret for IAP`, - Sensitive: true, - }, - "oauth2_client_secret_sha256": { - Type: resource_compute_backend_service_schema.TypeString, - Computed: true, - Description: `OAuth2 Client Secret SHA-256 for IAP`, - Sensitive: true, - }, - }, - }, - }, - "load_balancing_scheme": { - Type: resource_compute_backend_service_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_backend_service_validation.StringInSlice([]string{"EXTERNAL", "INTERNAL_SELF_MANAGED", ""}, false), - Description: `Indicates whether the backend service will be used with internal or -external load balancing. A backend service created for one type of -load balancing cannot be used with the other. 
Default value: "EXTERNAL" Possible values: ["EXTERNAL", "INTERNAL_SELF_MANAGED"]`, - Default: "EXTERNAL", - }, - "locality_lb_policy": { - Type: resource_compute_backend_service_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_backend_service_validation.StringInSlice([]string{"ROUND_ROBIN", "LEAST_REQUEST", "RING_HASH", "RANDOM", "ORIGINAL_DESTINATION", "MAGLEV", ""}, false), - Description: `The load balancing algorithm used within the scope of the locality. -The possible values are - - -* ROUND_ROBIN - This is a simple policy in which each healthy backend - is selected in round robin order. - -* LEAST_REQUEST - An O(1) algorithm which selects two random healthy - hosts and picks the host which has fewer active requests. - -* RING_HASH - The ring/modulo hash load balancer implements consistent - hashing to backends. The algorithm has the property that the - addition/removal of a host from a set of N hosts only affects - 1/N of the requests. - -* RANDOM - The load balancer selects a random healthy host. - -* ORIGINAL_DESTINATION - Backend host is selected based on the client - connection metadata, i.e., connections are opened - to the same address as the destination address of - the incoming connection before the connection - was redirected to the load balancer. - -* MAGLEV - used as a drop in replacement for the ring hash load balancer. - Maglev is not as stable as ring hash but has faster table lookup - build times and host selection times. For more information about - Maglev, refer to https://ai.google/research/pubs/pub44824 - -This field is applicable only when the load_balancing_scheme is set to -INTERNAL_SELF_MANAGED. 
Possible values: ["ROUND_ROBIN", "LEAST_REQUEST", "RING_HASH", "RANDOM", "ORIGINAL_DESTINATION", "MAGLEV"]`, - }, - "log_config": { - Type: resource_compute_backend_service_schema.TypeList, - Computed: true, - Optional: true, - Description: `This field denotes the logging options for the load balancer traffic served by this backend service. -If logging is enabled, logs will be exported to Stackdriver.`, - MaxItems: 1, - Elem: &resource_compute_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_backend_service_schema.Schema{ - "enable": { - Type: resource_compute_backend_service_schema.TypeBool, - Optional: true, - Description: `Whether to enable logging for the load balancer traffic served by this backend service.`, - AtLeastOneOf: []string{"log_config.0.enable", "log_config.0.sample_rate"}, - }, - "sample_rate": { - Type: resource_compute_backend_service_schema.TypeFloat, - Optional: true, - DiffSuppressFunc: suppressWhenDisabled, - Description: `This field can only be specified if logging is enabled for this backend service. The value of -the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer -where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. -The default value is 1.0.`, - AtLeastOneOf: []string{"log_config.0.enable", "log_config.0.sample_rate"}, - }, - }, - }, - }, - "outlier_detection": { - Type: resource_compute_backend_service_schema.TypeList, - Optional: true, - Description: `Settings controlling eviction of unhealthy hosts from the load balancing pool. -This field is applicable only when the load_balancing_scheme is set -to INTERNAL_SELF_MANAGED.`, - MaxItems: 1, - Elem: &resource_compute_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_backend_service_schema.Schema{ - "base_ejection_time": { - Type: resource_compute_backend_service_schema.TypeList, - Optional: true, - Description: `The base time that a host is ejected for. 
The real time is equal to the base -time multiplied by the number of times the host has been ejected. Defaults to -30000ms or 30s.`, - MaxItems: 1, - Elem: &resource_compute_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_backend_service_schema.Schema{ - "seconds": { - Type: resource_compute_backend_service_schema.TypeInt, - Required: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 -inclusive.`, - }, - "nanos": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations -less than one second are represented with a 0 'seconds' field and a positive -'nanos' field. Must be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "consecutive_errors": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `Number of errors before a host is ejected from the connection pool. When the -backend host is accessed over HTTP, a 5xx return code qualifies as an error. 
-Defaults to 5.`, - Default: 5, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "consecutive_gateway_failure": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `The number of consecutive gateway failures (502, 503, 504 status or connection -errors that are mapped to one of those status codes) before a consecutive -gateway failure ejection occurs. Defaults to 5.`, - Default: 5, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "enforcing_consecutive_errors": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `The percentage chance that a host will be actually ejected when an outlier -status is detected through consecutive 5xx. This setting can be used to disable -ejection or to ramp it up slowly. 
Defaults to 100.`, - Default: 100, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "enforcing_consecutive_gateway_failure": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `The percentage chance that a host will be actually ejected when an outlier -status is detected through consecutive gateway failures. This setting can be -used to disable ejection or to ramp it up slowly. Defaults to 0.`, - Default: 0, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "enforcing_success_rate": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `The percentage chance that a host will be actually ejected when an outlier -status is detected through success rate statistics. This setting can be used to -disable ejection or to ramp it up slowly. 
Defaults to 100.`, - Default: 100, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "interval": { - Type: resource_compute_backend_service_schema.TypeList, - Optional: true, - Description: `Time interval between ejection sweep analysis. This can result in both new -ejections as well as hosts being returned to service. Defaults to 10 seconds.`, - MaxItems: 1, - Elem: &resource_compute_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_backend_service_schema.Schema{ - "seconds": { - Type: resource_compute_backend_service_schema.TypeInt, - Required: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 -inclusive.`, - }, - "nanos": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations -less than one second are represented with a 0 'seconds' field and a positive -'nanos' field. 
Must be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "max_ejection_percent": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `Maximum percentage of hosts in the load balancing pool for the backend service -that can be ejected. Defaults to 10%.`, - Default: 10, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "success_rate_minimum_hosts": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `The number of hosts in a cluster that must have enough request volume to detect -success rate outliers. If the number of hosts is less than this setting, outlier -detection via success rate statistics is not performed for any host in the -cluster. 
Defaults to 5.`, - Default: 5, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "success_rate_request_volume": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `The minimum number of total requests that must be collected in one interval (as -defined by the interval duration above) to include this host in success rate -based outlier detection. If the volume is lower than this setting, outlier -detection via success rate statistics is not performed for that host. Defaults -to 100.`, - Default: 100, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "success_rate_stdev_factor": { - Type: resource_compute_backend_service_schema.TypeInt, - Optional: true, - Description: `This factor is used to determine the ejection threshold for success rate outlier -ejection. 
The ejection threshold is the difference between the mean success -rate, and the product of this factor and the standard deviation of the mean -success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided -by a thousand to get a double. That is, if the desired factor is 1.9, the -runtime value should be 1900. Defaults to 1900.`, - Default: 1900, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - }, - }, - }, - "port_name": { - Type: resource_compute_backend_service_schema.TypeString, - Computed: true, - Optional: true, - Description: `Name of backend port. The same name should appear in the instance -groups referenced by this service. Required when the load balancing -scheme is EXTERNAL.`, - }, - "protocol": { - Type: resource_compute_backend_service_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_compute_backend_service_validation.StringInSlice([]string{"HTTP", "HTTPS", "HTTP2", "TCP", "SSL", "GRPC", ""}, false), - Description: `The protocol this BackendService uses to communicate with backends. -The default is HTTP. **NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer -types and may result in errors if used with the GA API. 
Possible values: ["HTTP", "HTTPS", "HTTP2", "TCP", "SSL", "GRPC"]`, - }, - "security_policy": { - Type: resource_compute_backend_service_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The security policy associated with this backend service.`, - }, - "session_affinity": { - Type: resource_compute_backend_service_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_compute_backend_service_validation.StringInSlice([]string{"NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE", ""}, false), - Description: `Type of session affinity to use. The default is NONE. Session affinity is -not applicable if the protocol is UDP. Possible values: ["NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE"]`, - }, - "timeout_sec": { - Type: resource_compute_backend_service_schema.TypeInt, - Computed: true, - Optional: true, - Description: `How many seconds to wait for the backend before considering it a -failed request. Default is 30 seconds. Valid range is [1, 86400].`, - }, - "creation_timestamp": { - Type: resource_compute_backend_service_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "fingerprint": { - Type: resource_compute_backend_service_schema.TypeString, - Computed: true, - Description: `Fingerprint of this resource. A hash of the contents stored in this -object. 
This field is used in optimistic locking.`, - }, - "project": { - Type: resource_compute_backend_service_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_backend_service_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func computeBackendServiceBackendSchema() *resource_compute_backend_service_schema.Resource { - return &resource_compute_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_backend_service_schema.Schema{ - "group": { - Type: resource_compute_backend_service_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The fully-qualified URL of an Instance Group or Network Endpoint -Group resource. In case of instance group this defines the list -of instances that serve traffic. Member virtual machine -instances from each instance group must live in the same zone as -the instance group itself. No two backends in a backend service -are allowed to use same Instance Group resource. - -For Network Endpoint Groups this defines list of endpoints. All -endpoints of Network Endpoint Group must be hosted on instances -located in the same zone as the Network Endpoint Group. - -Backend services cannot mix Instance Group and -Network Endpoint Group backends. - -Note that you must specify an Instance Group or Network Endpoint -Group resource using the fully-qualified URL, rather than a -partial URL.`, - }, - "balancing_mode": { - Type: resource_compute_backend_service_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_backend_service_validation.StringInSlice([]string{"UTILIZATION", "RATE", "CONNECTION", ""}, false), - Description: `Specifies the balancing mode for this backend. - -For global HTTP(S) or TCP/SSL load balancing, the default is -UTILIZATION. Valid values are UTILIZATION, RATE (for HTTP(S)) -and CONNECTION (for TCP/SSL). 
Default value: "UTILIZATION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]`, - Default: "UTILIZATION", - }, - "capacity_scaler": { - Type: resource_compute_backend_service_schema.TypeFloat, - Optional: true, - Description: `A multiplier applied to the group's maximum servicing capacity -(based on UTILIZATION, RATE or CONNECTION). - -Default value is 1, which means the group will serve up to 100% -of its configured capacity (depending on balancingMode). A -setting of 0 means the group is completely drained, offering -0% of its available Capacity. Valid range is [0.0,1.0].`, - Default: 1.0, - }, - "description": { - Type: resource_compute_backend_service_schema.TypeString, - Optional: true, - Description: `An optional description of this resource. -Provide this property when you create the resource.`, - }, - "max_connections": { - Type: resource_compute_backend_service_schema.TypeInt, - Computed: true, - Optional: true, - Description: `The max number of simultaneous connections for the group. Can -be used with either CONNECTION or UTILIZATION balancing modes. - -For CONNECTION mode, either maxConnections or one -of maxConnectionsPerInstance or maxConnectionsPerEndpoint, -as appropriate for group type, must be set.`, - }, - "max_connections_per_endpoint": { - Type: resource_compute_backend_service_schema.TypeInt, - Computed: true, - Optional: true, - Description: `The max number of simultaneous connections that a single backend -network endpoint can handle. This is used to calculate the -capacity of the group. Can be used in either CONNECTION or -UTILIZATION balancing modes. - -For CONNECTION mode, either -maxConnections or maxConnectionsPerEndpoint must be set.`, - }, - "max_connections_per_instance": { - Type: resource_compute_backend_service_schema.TypeInt, - Computed: true, - Optional: true, - Description: `The max number of simultaneous connections that a single -backend instance can handle. This is used to calculate the -capacity of the group. 
Can be used in either CONNECTION or -UTILIZATION balancing modes. - -For CONNECTION mode, either maxConnections or -maxConnectionsPerInstance must be set.`, - }, - "max_rate": { - Type: resource_compute_backend_service_schema.TypeInt, - Computed: true, - Optional: true, - Description: `The max requests per second (RPS) of the group. - -Can be used with either RATE or UTILIZATION balancing modes, -but required if RATE mode. For RATE mode, either maxRate or one -of maxRatePerInstance or maxRatePerEndpoint, as appropriate for -group type, must be set.`, - }, - "max_rate_per_endpoint": { - Type: resource_compute_backend_service_schema.TypeFloat, - Computed: true, - Optional: true, - Description: `The max requests per second (RPS) that a single backend network -endpoint can handle. This is used to calculate the capacity of -the group. Can be used in either balancing mode. For RATE mode, -either maxRate or maxRatePerEndpoint must be set.`, - }, - "max_rate_per_instance": { - Type: resource_compute_backend_service_schema.TypeFloat, - Computed: true, - Optional: true, - Description: `The max requests per second (RPS) that a single backend -instance can handle. This is used to calculate the capacity of -the group. Can be used in either balancing mode. For RATE mode, -either maxRate or maxRatePerInstance must be set.`, - }, - "max_utilization": { - Type: resource_compute_backend_service_schema.TypeFloat, - Computed: true, - Optional: true, - Description: `Used when balancingMode is UTILIZATION. This ratio defines the -CPU utilization target for the group. 
Valid range is [0.0, 1.0].`, - }, - }, - } -} - -func resourceComputeBackendServiceCreate(d *resource_compute_backend_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - affinityCookieTtlSecProp, err := expandComputeBackendServiceAffinityCookieTtlSec(d.Get("affinity_cookie_ttl_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("affinity_cookie_ttl_sec"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(affinityCookieTtlSecProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, affinityCookieTtlSecProp)) { - obj["affinityCookieTtlSec"] = affinityCookieTtlSecProp - } - backendsProp, err := expandComputeBackendServiceBackend(d.Get("backend"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(backendsProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, backendsProp)) { - obj["backends"] = backendsProp - } - circuitBreakersProp, err := expandComputeBackendServiceCircuitBreakers(d.Get("circuit_breakers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("circuit_breakers"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(circuitBreakersProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, circuitBreakersProp)) { - obj["circuitBreakers"] = circuitBreakersProp - } - consistentHashProp, err := expandComputeBackendServiceConsistentHash(d.Get("consistent_hash"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("consistent_hash"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(consistentHashProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, consistentHashProp)) { - obj["consistentHash"] = 
consistentHashProp - } - cdnPolicyProp, err := expandComputeBackendServiceCdnPolicy(d.Get("cdn_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cdn_policy"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(cdnPolicyProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, cdnPolicyProp)) { - obj["cdnPolicy"] = cdnPolicyProp - } - connectionDrainingProp, err := expandComputeBackendServiceConnectionDraining(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(connectionDrainingProp)) { - obj["connectionDraining"] = connectionDrainingProp - } - customRequestHeadersProp, err := expandComputeBackendServiceCustomRequestHeaders(d.Get("custom_request_headers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("custom_request_headers"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(customRequestHeadersProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, customRequestHeadersProp)) { - obj["customRequestHeaders"] = customRequestHeadersProp - } - customResponseHeadersProp, err := expandComputeBackendServiceCustomResponseHeaders(d.Get("custom_response_headers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("custom_response_headers"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(customResponseHeadersProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, customResponseHeadersProp)) { - obj["customResponseHeaders"] = customResponseHeadersProp - } - fingerprintProp, err := expandComputeBackendServiceFingerprint(d.Get("fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(fingerprintProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, fingerprintProp)) { - obj["fingerprint"] = 
fingerprintProp - } - descriptionProp, err := expandComputeBackendServiceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - enableCDNProp, err := expandComputeBackendServiceEnableCDN(d.Get("enable_cdn"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_cdn"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(enableCDNProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, enableCDNProp)) { - obj["enableCDN"] = enableCDNProp - } - healthChecksProp, err := expandComputeBackendServiceHealthChecks(d.Get("health_checks"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("health_checks"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(healthChecksProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, healthChecksProp)) { - obj["healthChecks"] = healthChecksProp - } - iapProp, err := expandComputeBackendServiceIap(d.Get("iap"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("iap"); ok || !resource_compute_backend_service_reflect.DeepEqual(v, iapProp) { - obj["iap"] = iapProp - } - loadBalancingSchemeProp, err := expandComputeBackendServiceLoadBalancingScheme(d.Get("load_balancing_scheme"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("load_balancing_scheme"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(loadBalancingSchemeProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, loadBalancingSchemeProp)) { - obj["loadBalancingScheme"] = loadBalancingSchemeProp - } - localityLbPolicyProp, err := expandComputeBackendServiceLocalityLbPolicy(d.Get("locality_lb_policy"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("locality_lb_policy"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(localityLbPolicyProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, localityLbPolicyProp)) { - obj["localityLbPolicy"] = localityLbPolicyProp - } - nameProp, err := expandComputeBackendServiceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(nameProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - outlierDetectionProp, err := expandComputeBackendServiceOutlierDetection(d.Get("outlier_detection"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("outlier_detection"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(outlierDetectionProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, outlierDetectionProp)) { - obj["outlierDetection"] = outlierDetectionProp - } - portNameProp, err := expandComputeBackendServicePortName(d.Get("port_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port_name"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(portNameProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, portNameProp)) { - obj["portName"] = portNameProp - } - protocolProp, err := expandComputeBackendServiceProtocol(d.Get("protocol"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("protocol"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(protocolProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, protocolProp)) { - obj["protocol"] = protocolProp - } - securityPolicyProp, err := expandComputeBackendServiceSecurityPolicy(d.Get("security_policy"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("security_policy"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(securityPolicyProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, securityPolicyProp)) { - obj["securityPolicy"] = securityPolicyProp - } - sessionAffinityProp, err := expandComputeBackendServiceSessionAffinity(d.Get("session_affinity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("session_affinity"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(sessionAffinityProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, sessionAffinityProp)) { - obj["sessionAffinity"] = sessionAffinityProp - } - timeoutSecProp, err := expandComputeBackendServiceTimeoutSec(d.Get("timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(timeoutSecProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, timeoutSecProp)) { - obj["timeoutSec"] = timeoutSecProp - } - logConfigProp, err := expandComputeBackendServiceLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(logConfigProp)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, logConfigProp)) { - obj["logConfig"] = logConfigProp - } - - obj, err = resourceComputeBackendServiceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices") - if err != nil { - return err - } - - resource_compute_backend_service_log.Printf("[DEBUG] Creating new BackendService: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_backend_service_fmt.Errorf("Error fetching project for BackendService: %s", err) - } - billingProject = project - - 
if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_backend_service_schema.TimeoutCreate)) - if err != nil { - return resource_compute_backend_service_fmt.Errorf("Error creating BackendService: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/backendServices/{{name}}") - if err != nil { - return resource_compute_backend_service_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating BackendService", userAgent, - d.Timeout(resource_compute_backend_service_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_backend_service_fmt.Errorf("Error waiting to create BackendService: %s", err) - } - - if o, n := d.GetChange("security_policy"); o.(string) != n.(string) { - pol, err := ParseSecurityPolicyFieldValue(n.(string), d, config) - if err != nil { - return resource_compute_backend_service_errwrap.Wrapf("Error parsing Backend Service security policy: {{err}}", err) - } - - spr := emptySecurityPolicyReference() - spr.SecurityPolicy = pol.RelativeLink() - op, err := config.NewComputeClient(userAgent).BackendServices.SetSecurityPolicy(project, obj["name"].(string), spr).Do() - if err != nil { - return resource_compute_backend_service_errwrap.Wrapf("Error setting Backend Service security policy: {{err}}", err) - } - - waitErr := computeOperationWaitTime(config, op, project, "Setting Backend Service Security Policy", userAgent, d.Timeout(resource_compute_backend_service_schema.TimeoutCreate)) - if waitErr != nil { - return waitErr - } - } - - resource_compute_backend_service_log.Printf("[DEBUG] Finished creating BackendService %q: %#v", d.Id(), res) - - return resourceComputeBackendServiceRead(d, meta) -} - -func resourceComputeBackendServiceRead(d 
*resource_compute_backend_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_backend_service_fmt.Errorf("Error fetching project for BackendService: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_backend_service_fmt.Sprintf("ComputeBackendService %q", d.Id())) - } - - res, err = resourceComputeBackendServiceDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_backend_service_log.Printf("[DEBUG] Removing ComputeBackendService because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - - if err := d.Set("affinity_cookie_ttl_sec", flattenComputeBackendServiceAffinityCookieTtlSec(res["affinityCookieTtlSec"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("backend", flattenComputeBackendServiceBackend(res["backends"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("circuit_breakers", flattenComputeBackendServiceCircuitBreakers(res["circuitBreakers"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - 
if err := d.Set("consistent_hash", flattenComputeBackendServiceConsistentHash(res["consistentHash"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("cdn_policy", flattenComputeBackendServiceCdnPolicy(res["cdnPolicy"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - - if flattenedProp := flattenComputeBackendServiceConnectionDraining(res["connectionDraining"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*resource_compute_backend_service_googleapi.Error); ok { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error setting %s: %s", k, err) - } - } - } - } - if err := d.Set("creation_timestamp", flattenComputeBackendServiceCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("custom_request_headers", flattenComputeBackendServiceCustomRequestHeaders(res["customRequestHeaders"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("custom_response_headers", flattenComputeBackendServiceCustomResponseHeaders(res["customResponseHeaders"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("fingerprint", flattenComputeBackendServiceFingerprint(res["fingerprint"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("description", 
flattenComputeBackendServiceDescription(res["description"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("enable_cdn", flattenComputeBackendServiceEnableCDN(res["enableCDN"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("health_checks", flattenComputeBackendServiceHealthChecks(res["healthChecks"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("iap", flattenComputeBackendServiceIap(res["iap"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("load_balancing_scheme", flattenComputeBackendServiceLoadBalancingScheme(res["loadBalancingScheme"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("locality_lb_policy", flattenComputeBackendServiceLocalityLbPolicy(res["localityLbPolicy"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("name", flattenComputeBackendServiceName(res["name"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("outlier_detection", flattenComputeBackendServiceOutlierDetection(res["outlierDetection"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("port_name", flattenComputeBackendServicePortName(res["portName"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("protocol", 
flattenComputeBackendServiceProtocol(res["protocol"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("security_policy", flattenComputeBackendServiceSecurityPolicy(res["securityPolicy"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("session_affinity", flattenComputeBackendServiceSessionAffinity(res["sessionAffinity"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("timeout_sec", flattenComputeBackendServiceTimeoutSec(res["timeoutSec"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("log_config", flattenComputeBackendServiceLogConfig(res["logConfig"], d, config)); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_backend_service_fmt.Errorf("Error reading BackendService: %s", err) - } - - return nil -} - -func resourceComputeBackendServiceUpdate(d *resource_compute_backend_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_backend_service_fmt.Errorf("Error fetching project for BackendService: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - affinityCookieTtlSecProp, err := expandComputeBackendServiceAffinityCookieTtlSec(d.Get("affinity_cookie_ttl_sec"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("affinity_cookie_ttl_sec"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, affinityCookieTtlSecProp)) { - obj["affinityCookieTtlSec"] = affinityCookieTtlSecProp - } - backendsProp, err := expandComputeBackendServiceBackend(d.Get("backend"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, backendsProp)) { - obj["backends"] = backendsProp - } - circuitBreakersProp, err := expandComputeBackendServiceCircuitBreakers(d.Get("circuit_breakers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("circuit_breakers"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, circuitBreakersProp)) { - obj["circuitBreakers"] = circuitBreakersProp - } - consistentHashProp, err := expandComputeBackendServiceConsistentHash(d.Get("consistent_hash"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("consistent_hash"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, consistentHashProp)) { - obj["consistentHash"] = consistentHashProp - } - cdnPolicyProp, err := expandComputeBackendServiceCdnPolicy(d.Get("cdn_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cdn_policy"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, cdnPolicyProp)) { - obj["cdnPolicy"] = cdnPolicyProp - } - connectionDrainingProp, err := expandComputeBackendServiceConnectionDraining(nil, d, config) - if err != nil { - return err - } else if 
!isEmptyValue(resource_compute_backend_service_reflect.ValueOf(connectionDrainingProp)) { - obj["connectionDraining"] = connectionDrainingProp - } - customRequestHeadersProp, err := expandComputeBackendServiceCustomRequestHeaders(d.Get("custom_request_headers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("custom_request_headers"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, customRequestHeadersProp)) { - obj["customRequestHeaders"] = customRequestHeadersProp - } - customResponseHeadersProp, err := expandComputeBackendServiceCustomResponseHeaders(d.Get("custom_response_headers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("custom_response_headers"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, customResponseHeadersProp)) { - obj["customResponseHeaders"] = customResponseHeadersProp - } - fingerprintProp, err := expandComputeBackendServiceFingerprint(d.Get("fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, fingerprintProp)) { - obj["fingerprint"] = fingerprintProp - } - descriptionProp, err := expandComputeBackendServiceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - enableCDNProp, err := expandComputeBackendServiceEnableCDN(d.Get("enable_cdn"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_cdn"); 
!isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, enableCDNProp)) { - obj["enableCDN"] = enableCDNProp - } - healthChecksProp, err := expandComputeBackendServiceHealthChecks(d.Get("health_checks"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("health_checks"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, healthChecksProp)) { - obj["healthChecks"] = healthChecksProp - } - iapProp, err := expandComputeBackendServiceIap(d.Get("iap"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("iap"); ok || !resource_compute_backend_service_reflect.DeepEqual(v, iapProp) { - obj["iap"] = iapProp - } - loadBalancingSchemeProp, err := expandComputeBackendServiceLoadBalancingScheme(d.Get("load_balancing_scheme"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("load_balancing_scheme"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, loadBalancingSchemeProp)) { - obj["loadBalancingScheme"] = loadBalancingSchemeProp - } - localityLbPolicyProp, err := expandComputeBackendServiceLocalityLbPolicy(d.Get("locality_lb_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("locality_lb_policy"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, localityLbPolicyProp)) { - obj["localityLbPolicy"] = localityLbPolicyProp - } - nameProp, err := expandComputeBackendServiceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - 
outlierDetectionProp, err := expandComputeBackendServiceOutlierDetection(d.Get("outlier_detection"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("outlier_detection"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, outlierDetectionProp)) { - obj["outlierDetection"] = outlierDetectionProp - } - portNameProp, err := expandComputeBackendServicePortName(d.Get("port_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port_name"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, portNameProp)) { - obj["portName"] = portNameProp - } - protocolProp, err := expandComputeBackendServiceProtocol(d.Get("protocol"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("protocol"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, protocolProp)) { - obj["protocol"] = protocolProp - } - securityPolicyProp, err := expandComputeBackendServiceSecurityPolicy(d.Get("security_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("security_policy"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, securityPolicyProp)) { - obj["securityPolicy"] = securityPolicyProp - } - sessionAffinityProp, err := expandComputeBackendServiceSessionAffinity(d.Get("session_affinity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("session_affinity"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, sessionAffinityProp)) { - obj["sessionAffinity"] = sessionAffinityProp - } - timeoutSecProp, err := 
expandComputeBackendServiceTimeoutSec(d.Get("timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, timeoutSecProp)) { - obj["timeoutSec"] = timeoutSecProp - } - logConfigProp, err := expandComputeBackendServiceLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(resource_compute_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_backend_service_reflect.DeepEqual(v, logConfigProp)) { - obj["logConfig"] = logConfigProp - } - - obj, err = resourceComputeBackendServiceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{name}}") - if err != nil { - return err - } - - resource_compute_backend_service_log.Printf("[DEBUG] Updating BackendService %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_compute_backend_service_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_backend_service_fmt.Errorf("Error updating BackendService %q: %s", d.Id(), err) - } else { - resource_compute_backend_service_log.Printf("[DEBUG] Finished updating BackendService %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating BackendService", userAgent, - d.Timeout(resource_compute_backend_service_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - if o, n := d.GetChange("security_policy"); o.(string) != n.(string) { - pol, err := ParseSecurityPolicyFieldValue(n.(string), d, config) - if err != nil { - return resource_compute_backend_service_errwrap.Wrapf("Error parsing Backend 
Service security policy: {{err}}", err) - } - - spr := emptySecurityPolicyReference() - spr.SecurityPolicy = pol.RelativeLink() - op, err := config.NewComputeClient(userAgent).BackendServices.SetSecurityPolicy(project, obj["name"].(string), spr).Do() - if err != nil { - return resource_compute_backend_service_errwrap.Wrapf("Error setting Backend Service security policy: {{err}}", err) - } - - waitErr := computeOperationWaitTime(config, op, project, "Setting Backend Service Security Policy", userAgent, d.Timeout(resource_compute_backend_service_schema.TimeoutCreate)) - if waitErr != nil { - return waitErr - } - } - return resourceComputeBackendServiceRead(d, meta) -} - -func resourceComputeBackendServiceDelete(d *resource_compute_backend_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_backend_service_fmt.Errorf("Error fetching project for BackendService: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_backend_service_log.Printf("[DEBUG] Deleting BackendService %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_backend_service_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "BackendService") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting BackendService", userAgent, - d.Timeout(resource_compute_backend_service_schema.TimeoutDelete)) - - if err != nil { - return err - } - - 
resource_compute_backend_service_log.Printf("[DEBUG] Finished deleting BackendService %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeBackendServiceImport(d *resource_compute_backend_service_schema.ResourceData, meta interface{}) ([]*resource_compute_backend_service_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/backendServices/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/backendServices/{{name}}") - if err != nil { - return nil, resource_compute_backend_service_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_backend_service_schema.ResourceData{d}, nil -} - -func flattenComputeBackendServiceAffinityCookieTtlSec(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceBackend(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_compute_backend_service_schema.NewSet(resourceGoogleComputeBackendServiceBackendHash, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "balancing_mode": flattenComputeBackendServiceBackendBalancingMode(original["balancingMode"], d, config), - "capacity_scaler": flattenComputeBackendServiceBackendCapacityScaler(original["capacityScaler"], d, config), - "description": 
flattenComputeBackendServiceBackendDescription(original["description"], d, config), - "group": flattenComputeBackendServiceBackendGroup(original["group"], d, config), - "max_connections": flattenComputeBackendServiceBackendMaxConnections(original["maxConnections"], d, config), - "max_connections_per_instance": flattenComputeBackendServiceBackendMaxConnectionsPerInstance(original["maxConnectionsPerInstance"], d, config), - "max_connections_per_endpoint": flattenComputeBackendServiceBackendMaxConnectionsPerEndpoint(original["maxConnectionsPerEndpoint"], d, config), - "max_rate": flattenComputeBackendServiceBackendMaxRate(original["maxRate"], d, config), - "max_rate_per_instance": flattenComputeBackendServiceBackendMaxRatePerInstance(original["maxRatePerInstance"], d, config), - "max_rate_per_endpoint": flattenComputeBackendServiceBackendMaxRatePerEndpoint(original["maxRatePerEndpoint"], d, config), - "max_utilization": flattenComputeBackendServiceBackendMaxUtilization(original["maxUtilization"], d, config), - }) - } - return transformed -} - -func flattenComputeBackendServiceBackendBalancingMode(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceBackendCapacityScaler(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceBackendDescription(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceBackendGroup(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeBackendServiceBackendMaxConnections(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - 
if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceBackendMaxConnectionsPerInstance(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceBackendMaxConnectionsPerEndpoint(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceBackendMaxRate(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceBackendMaxRatePerInstance(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceBackendMaxRatePerEndpoint(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceBackendMaxUtilization(v interface{}, d 
*resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceCircuitBreakers(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["max_requests_per_connection"] = - flattenComputeBackendServiceCircuitBreakersMaxRequestsPerConnection(original["maxRequestsPerConnection"], d, config) - transformed["max_connections"] = - flattenComputeBackendServiceCircuitBreakersMaxConnections(original["maxConnections"], d, config) - transformed["max_pending_requests"] = - flattenComputeBackendServiceCircuitBreakersMaxPendingRequests(original["maxPendingRequests"], d, config) - transformed["max_requests"] = - flattenComputeBackendServiceCircuitBreakersMaxRequests(original["maxRequests"], d, config) - transformed["max_retries"] = - flattenComputeBackendServiceCircuitBreakersMaxRetries(original["maxRetries"], d, config) - return []interface{}{transformed} -} - -func flattenComputeBackendServiceCircuitBreakersMaxRequestsPerConnection(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceCircuitBreakersMaxConnections(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return 
intVal - } - - return v -} - -func flattenComputeBackendServiceCircuitBreakersMaxPendingRequests(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceCircuitBreakersMaxRequests(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceCircuitBreakersMaxRetries(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceConsistentHash(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["http_cookie"] = - flattenComputeBackendServiceConsistentHashHttpCookie(original["httpCookie"], d, config) - transformed["http_header_name"] = - flattenComputeBackendServiceConsistentHashHttpHeaderName(original["httpHeaderName"], d, config) - transformed["minimum_ring_size"] = - 
flattenComputeBackendServiceConsistentHashMinimumRingSize(original["minimumRingSize"], d, config) - return []interface{}{transformed} -} - -func flattenComputeBackendServiceConsistentHashHttpCookie(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["ttl"] = - flattenComputeBackendServiceConsistentHashHttpCookieTtl(original["ttl"], d, config) - transformed["name"] = - flattenComputeBackendServiceConsistentHashHttpCookieName(original["name"], d, config) - transformed["path"] = - flattenComputeBackendServiceConsistentHashHttpCookiePath(original["path"], d, config) - return []interface{}{transformed} -} - -func flattenComputeBackendServiceConsistentHashHttpCookieTtl(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["seconds"] = - flattenComputeBackendServiceConsistentHashHttpCookieTtlSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenComputeBackendServiceConsistentHashHttpCookieTtlNanos(original["nanos"], d, config) - return []interface{}{transformed} -} - -func flattenComputeBackendServiceConsistentHashHttpCookieTtlSeconds(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceConsistentHashHttpCookieTtlNanos(v interface{}, d 
*resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceConsistentHashHttpCookieName(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceConsistentHashHttpCookiePath(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceConsistentHashHttpHeaderName(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceConsistentHashMinimumRingSize(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceCdnPolicy(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cache_key_policy"] = - flattenComputeBackendServiceCdnPolicyCacheKeyPolicy(original["cacheKeyPolicy"], d, config) - transformed["signed_url_cache_max_age_sec"] = - flattenComputeBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(original["signedUrlCacheMaxAgeSec"], d, config) - transformed["default_ttl"] = - 
flattenComputeBackendServiceCdnPolicyDefaultTtl(original["defaultTtl"], d, config) - transformed["max_ttl"] = - flattenComputeBackendServiceCdnPolicyMaxTtl(original["maxTtl"], d, config) - transformed["client_ttl"] = - flattenComputeBackendServiceCdnPolicyClientTtl(original["clientTtl"], d, config) - transformed["negative_caching"] = - flattenComputeBackendServiceCdnPolicyNegativeCaching(original["negativeCaching"], d, config) - transformed["negative_caching_policy"] = - flattenComputeBackendServiceCdnPolicyNegativeCachingPolicy(original["negativeCachingPolicy"], d, config) - transformed["cache_mode"] = - flattenComputeBackendServiceCdnPolicyCacheMode(original["cacheMode"], d, config) - transformed["serve_while_stale"] = - flattenComputeBackendServiceCdnPolicyServeWhileStale(original["serveWhileStale"], d, config) - return []interface{}{transformed} -} - -func flattenComputeBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["include_host"] = - flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(original["includeHost"], d, config) - transformed["include_protocol"] = - flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(original["includeProtocol"], d, config) - transformed["include_query_string"] = - flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(original["includeQueryString"], d, config) - transformed["query_string_blacklist"] = - flattenComputeBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(original["queryStringBlacklist"], d, config) - transformed["query_string_whitelist"] = - flattenComputeBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(original["queryStringWhitelist"], d, config) - return []interface{}{transformed} -} - -func 
flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_backend_service_schema.NewSet(resource_compute_backend_service_schema.HashString, v.([]interface{})) -} - -func flattenComputeBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_backend_service_schema.NewSet(resource_compute_backend_service_schema.HashString, v.([]interface{})) -} - -func flattenComputeBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceCdnPolicyDefaultTtl(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - 
} - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceCdnPolicyMaxTtl(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceCdnPolicyClientTtl(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceCdnPolicyNegativeCaching(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceCdnPolicyNegativeCachingPolicy(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "code": flattenComputeBackendServiceCdnPolicyNegativeCachingPolicyCode(original["code"], d, config), - "ttl": flattenComputeBackendServiceCdnPolicyNegativeCachingPolicyTtl(original["ttl"], d, config), - }) - } - return transformed -} - -func flattenComputeBackendServiceCdnPolicyNegativeCachingPolicyCode(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok 
:= v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceCdnPolicyNegativeCachingPolicyTtl(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceCdnPolicyCacheMode(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceCdnPolicyServeWhileStale(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceConnectionDraining(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["connection_draining_timeout_sec"] = - flattenComputeBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(original["drainingTimeoutSec"], d, config) - return []interface{}{transformed} -} - -func flattenComputeBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - 
- if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceCreationTimestamp(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceCustomRequestHeaders(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_backend_service_schema.NewSet(resource_compute_backend_service_schema.HashString, v.([]interface{})) -} - -func flattenComputeBackendServiceCustomResponseHeaders(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_backend_service_schema.NewSet(resource_compute_backend_service_schema.HashString, v.([]interface{})) -} - -func flattenComputeBackendServiceFingerprint(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceDescription(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceEnableCDN(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceHealthChecks(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeBackendServiceIap(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - 
original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["oauth2_client_id"] = - flattenComputeBackendServiceIapOauth2ClientId(original["oauth2ClientId"], d, config) - transformed["oauth2_client_secret"] = - flattenComputeBackendServiceIapOauth2ClientSecret(original["oauth2ClientSecret"], d, config) - transformed["oauth2_client_secret_sha256"] = - flattenComputeBackendServiceIapOauth2ClientSecretSha256(original["oauth2ClientSecretSha256"], d, config) - return []interface{}{transformed} -} - -func flattenComputeBackendServiceIapOauth2ClientId(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceIapOauth2ClientSecret(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return d.Get("iap.0.oauth2_client_secret") -} - -func flattenComputeBackendServiceIapOauth2ClientSecretSha256(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceLoadBalancingScheme(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceLocalityLbPolicy(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceName(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceOutlierDetection(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["base_ejection_time"] = - 
flattenComputeBackendServiceOutlierDetectionBaseEjectionTime(original["baseEjectionTime"], d, config) - transformed["consecutive_errors"] = - flattenComputeBackendServiceOutlierDetectionConsecutiveErrors(original["consecutiveErrors"], d, config) - transformed["consecutive_gateway_failure"] = - flattenComputeBackendServiceOutlierDetectionConsecutiveGatewayFailure(original["consecutiveGatewayFailure"], d, config) - transformed["enforcing_consecutive_errors"] = - flattenComputeBackendServiceOutlierDetectionEnforcingConsecutiveErrors(original["enforcingConsecutiveErrors"], d, config) - transformed["enforcing_consecutive_gateway_failure"] = - flattenComputeBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(original["enforcingConsecutiveGatewayFailure"], d, config) - transformed["enforcing_success_rate"] = - flattenComputeBackendServiceOutlierDetectionEnforcingSuccessRate(original["enforcingSuccessRate"], d, config) - transformed["interval"] = - flattenComputeBackendServiceOutlierDetectionInterval(original["interval"], d, config) - transformed["max_ejection_percent"] = - flattenComputeBackendServiceOutlierDetectionMaxEjectionPercent(original["maxEjectionPercent"], d, config) - transformed["success_rate_minimum_hosts"] = - flattenComputeBackendServiceOutlierDetectionSuccessRateMinimumHosts(original["successRateMinimumHosts"], d, config) - transformed["success_rate_request_volume"] = - flattenComputeBackendServiceOutlierDetectionSuccessRateRequestVolume(original["successRateRequestVolume"], d, config) - transformed["success_rate_stdev_factor"] = - flattenComputeBackendServiceOutlierDetectionSuccessRateStdevFactor(original["successRateStdevFactor"], d, config) - return []interface{}{transformed} -} - -func flattenComputeBackendServiceOutlierDetectionBaseEjectionTime(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 
0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["seconds"] = - flattenComputeBackendServiceOutlierDetectionBaseEjectionTimeSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenComputeBackendServiceOutlierDetectionBaseEjectionTimeNanos(original["nanos"], d, config) - return []interface{}{transformed} -} - -func flattenComputeBackendServiceOutlierDetectionBaseEjectionTimeSeconds(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceOutlierDetectionBaseEjectionTimeNanos(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceOutlierDetectionConsecutiveErrors(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceOutlierDetectionConsecutiveGatewayFailure(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - 
return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceOutlierDetectionEnforcingConsecutiveErrors(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceOutlierDetectionEnforcingSuccessRate(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceOutlierDetectionInterval(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["seconds"] = - flattenComputeBackendServiceOutlierDetectionIntervalSeconds(original["seconds"], d, config) - transformed["nanos"] = - 
flattenComputeBackendServiceOutlierDetectionIntervalNanos(original["nanos"], d, config) - return []interface{}{transformed} -} - -func flattenComputeBackendServiceOutlierDetectionIntervalSeconds(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceOutlierDetectionIntervalNanos(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceOutlierDetectionMaxEjectionPercent(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceOutlierDetectionSuccessRateMinimumHosts(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceOutlierDetectionSuccessRateRequestVolume(v interface{}, d 
*resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceOutlierDetectionSuccessRateStdevFactor(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServicePortName(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceProtocol(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceSecurityPolicy(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceSessionAffinity(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceTimeoutSec(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeBackendServiceLogConfig(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) 
interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enable"] = - flattenComputeBackendServiceLogConfigEnable(original["enable"], d, config) - transformed["sample_rate"] = - flattenComputeBackendServiceLogConfigSampleRate(original["sampleRate"], d, config) - return []interface{}{transformed} -} - -func flattenComputeBackendServiceLogConfigEnable(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeBackendServiceLogConfigSampleRate(v interface{}, d *resource_compute_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeBackendServiceAffinityCookieTtlSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceBackend(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_backend_service_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBalancingMode, err := expandComputeBackendServiceBackendBalancingMode(original["balancing_mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedBalancingMode); val.IsValid() && !isEmptyValue(val) { - transformed["balancingMode"] = transformedBalancingMode - } - - transformedCapacityScaler, err := expandComputeBackendServiceBackendCapacityScaler(original["capacity_scaler"], d, config) - if err != nil { - return nil, err - } else { - transformed["capacityScaler"] = transformedCapacityScaler - } - - transformedDescription, err := 
expandComputeBackendServiceBackendDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedGroup, err := expandComputeBackendServiceBackendGroup(original["group"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedGroup); val.IsValid() && !isEmptyValue(val) { - transformed["group"] = transformedGroup - } - - transformedMaxConnections, err := expandComputeBackendServiceBackendMaxConnections(original["max_connections"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedMaxConnections); val.IsValid() && !isEmptyValue(val) { - transformed["maxConnections"] = transformedMaxConnections - } - - transformedMaxConnectionsPerInstance, err := expandComputeBackendServiceBackendMaxConnectionsPerInstance(original["max_connections_per_instance"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedMaxConnectionsPerInstance); val.IsValid() && !isEmptyValue(val) { - transformed["maxConnectionsPerInstance"] = transformedMaxConnectionsPerInstance - } - - transformedMaxConnectionsPerEndpoint, err := expandComputeBackendServiceBackendMaxConnectionsPerEndpoint(original["max_connections_per_endpoint"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedMaxConnectionsPerEndpoint); val.IsValid() && !isEmptyValue(val) { - transformed["maxConnectionsPerEndpoint"] = transformedMaxConnectionsPerEndpoint - } - - transformedMaxRate, err := expandComputeBackendServiceBackendMaxRate(original["max_rate"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_backend_service_reflect.ValueOf(transformedMaxRate); val.IsValid() && !isEmptyValue(val) { - transformed["maxRate"] = transformedMaxRate - } - - transformedMaxRatePerInstance, err := expandComputeBackendServiceBackendMaxRatePerInstance(original["max_rate_per_instance"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedMaxRatePerInstance); val.IsValid() && !isEmptyValue(val) { - transformed["maxRatePerInstance"] = transformedMaxRatePerInstance - } - - transformedMaxRatePerEndpoint, err := expandComputeBackendServiceBackendMaxRatePerEndpoint(original["max_rate_per_endpoint"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedMaxRatePerEndpoint); val.IsValid() && !isEmptyValue(val) { - transformed["maxRatePerEndpoint"] = transformedMaxRatePerEndpoint - } - - transformedMaxUtilization, err := expandComputeBackendServiceBackendMaxUtilization(original["max_utilization"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedMaxUtilization); val.IsValid() && !isEmptyValue(val) { - transformed["maxUtilization"] = transformedMaxUtilization - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeBackendServiceBackendBalancingMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceBackendCapacityScaler(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceBackendDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceBackendGroup(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeBackendServiceBackendMaxConnections(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceBackendMaxConnectionsPerInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceBackendMaxConnectionsPerEndpoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceBackendMaxRate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceBackendMaxRatePerInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceBackendMaxRatePerEndpoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceBackendMaxUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCircuitBreakers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaxRequestsPerConnection, err := expandComputeBackendServiceCircuitBreakersMaxRequestsPerConnection(original["max_requests_per_connection"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedMaxRequestsPerConnection); val.IsValid() && !isEmptyValue(val) { - transformed["maxRequestsPerConnection"] = transformedMaxRequestsPerConnection - } - - transformedMaxConnections, err := expandComputeBackendServiceCircuitBreakersMaxConnections(original["max_connections"], d, config) - if err != nil { - 
return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedMaxConnections); val.IsValid() && !isEmptyValue(val) { - transformed["maxConnections"] = transformedMaxConnections - } - - transformedMaxPendingRequests, err := expandComputeBackendServiceCircuitBreakersMaxPendingRequests(original["max_pending_requests"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedMaxPendingRequests); val.IsValid() && !isEmptyValue(val) { - transformed["maxPendingRequests"] = transformedMaxPendingRequests - } - - transformedMaxRequests, err := expandComputeBackendServiceCircuitBreakersMaxRequests(original["max_requests"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedMaxRequests); val.IsValid() && !isEmptyValue(val) { - transformed["maxRequests"] = transformedMaxRequests - } - - transformedMaxRetries, err := expandComputeBackendServiceCircuitBreakersMaxRetries(original["max_retries"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedMaxRetries); val.IsValid() && !isEmptyValue(val) { - transformed["maxRetries"] = transformedMaxRetries - } - - return transformed, nil -} - -func expandComputeBackendServiceCircuitBreakersMaxRequestsPerConnection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCircuitBreakersMaxConnections(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCircuitBreakersMaxPendingRequests(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCircuitBreakersMaxRequests(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - 
-func expandComputeBackendServiceCircuitBreakersMaxRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceConsistentHash(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHttpCookie, err := expandComputeBackendServiceConsistentHashHttpCookie(original["http_cookie"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedHttpCookie); val.IsValid() && !isEmptyValue(val) { - transformed["httpCookie"] = transformedHttpCookie - } - - transformedHttpHeaderName, err := expandComputeBackendServiceConsistentHashHttpHeaderName(original["http_header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedHttpHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["httpHeaderName"] = transformedHttpHeaderName - } - - transformedMinimumRingSize, err := expandComputeBackendServiceConsistentHashMinimumRingSize(original["minimum_ring_size"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedMinimumRingSize); val.IsValid() && !isEmptyValue(val) { - transformed["minimumRingSize"] = transformedMinimumRingSize - } - - return transformed, nil -} - -func expandComputeBackendServiceConsistentHashHttpCookie(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTtl, err := 
expandComputeBackendServiceConsistentHashHttpCookieTtl(original["ttl"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedTtl); val.IsValid() && !isEmptyValue(val) { - transformed["ttl"] = transformedTtl - } - - transformedName, err := expandComputeBackendServiceConsistentHashHttpCookieName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedPath, err := expandComputeBackendServiceConsistentHashHttpCookiePath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - return transformed, nil -} - -func expandComputeBackendServiceConsistentHashHttpCookieTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSeconds, err := expandComputeBackendServiceConsistentHashHttpCookieTtlSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandComputeBackendServiceConsistentHashHttpCookieTtlNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func 
expandComputeBackendServiceConsistentHashHttpCookieTtlSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceConsistentHashHttpCookieTtlNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceConsistentHashHttpCookieName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceConsistentHashHttpCookiePath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceConsistentHashHttpHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceConsistentHashMinimumRingSize(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCdnPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCacheKeyPolicy, err := expandComputeBackendServiceCdnPolicyCacheKeyPolicy(original["cache_key_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedCacheKeyPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["cacheKeyPolicy"] = transformedCacheKeyPolicy - } - - transformedSignedUrlCacheMaxAgeSec, err := expandComputeBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(original["signed_url_cache_max_age_sec"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedSignedUrlCacheMaxAgeSec); val.IsValid() && !isEmptyValue(val) { - 
transformed["signedUrlCacheMaxAgeSec"] = transformedSignedUrlCacheMaxAgeSec - } - - transformedDefaultTtl, err := expandComputeBackendServiceCdnPolicyDefaultTtl(original["default_ttl"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedDefaultTtl); val.IsValid() && !isEmptyValue(val) { - transformed["defaultTtl"] = transformedDefaultTtl - } - - transformedMaxTtl, err := expandComputeBackendServiceCdnPolicyMaxTtl(original["max_ttl"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedMaxTtl); val.IsValid() && !isEmptyValue(val) { - transformed["maxTtl"] = transformedMaxTtl - } - - transformedClientTtl, err := expandComputeBackendServiceCdnPolicyClientTtl(original["client_ttl"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedClientTtl); val.IsValid() && !isEmptyValue(val) { - transformed["clientTtl"] = transformedClientTtl - } - - transformedNegativeCaching, err := expandComputeBackendServiceCdnPolicyNegativeCaching(original["negative_caching"], d, config) - if err != nil { - return nil, err - } else { - transformed["negativeCaching"] = transformedNegativeCaching - } - - transformedNegativeCachingPolicy, err := expandComputeBackendServiceCdnPolicyNegativeCachingPolicy(original["negative_caching_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedNegativeCachingPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["negativeCachingPolicy"] = transformedNegativeCachingPolicy - } - - transformedCacheMode, err := expandComputeBackendServiceCdnPolicyCacheMode(original["cache_mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedCacheMode); val.IsValid() && !isEmptyValue(val) { - 
transformed["cacheMode"] = transformedCacheMode - } - - transformedServeWhileStale, err := expandComputeBackendServiceCdnPolicyServeWhileStale(original["serve_while_stale"], d, config) - if err != nil { - return nil, err - } else { - transformed["serveWhileStale"] = transformedServeWhileStale - } - - return transformed, nil -} - -func expandComputeBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIncludeHost, err := expandComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(original["include_host"], d, config) - if err != nil { - return nil, err - } else { - transformed["includeHost"] = transformedIncludeHost - } - - transformedIncludeProtocol, err := expandComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(original["include_protocol"], d, config) - if err != nil { - return nil, err - } else { - transformed["includeProtocol"] = transformedIncludeProtocol - } - - transformedIncludeQueryString, err := expandComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(original["include_query_string"], d, config) - if err != nil { - return nil, err - } else { - transformed["includeQueryString"] = transformedIncludeQueryString - } - - transformedQueryStringBlacklist, err := expandComputeBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(original["query_string_blacklist"], d, config) - if err != nil { - return nil, err - } else { - transformed["queryStringBlacklist"] = transformedQueryStringBlacklist - } - - transformedQueryStringWhitelist, err := expandComputeBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(original["query_string_whitelist"], d, config) - if err != nil { - return nil, err - } else { - transformed["queryStringWhitelist"] = transformedQueryStringWhitelist - } - - 
return transformed, nil -} - -func expandComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_backend_service_schema.Set).List() - return v, nil -} - -func expandComputeBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_backend_service_schema.Set).List() - return v, nil -} - -func expandComputeBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCdnPolicyDefaultTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCdnPolicyMaxTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCdnPolicyClientTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCdnPolicyNegativeCaching(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCdnPolicyNegativeCachingPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, 
len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCode, err := expandComputeBackendServiceCdnPolicyNegativeCachingPolicyCode(original["code"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedCode); val.IsValid() && !isEmptyValue(val) { - transformed["code"] = transformedCode - } - - transformedTtl, err := expandComputeBackendServiceCdnPolicyNegativeCachingPolicyTtl(original["ttl"], d, config) - if err != nil { - return nil, err - } else { - transformed["ttl"] = transformedTtl - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeBackendServiceCdnPolicyNegativeCachingPolicyCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCdnPolicyNegativeCachingPolicyTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCdnPolicyCacheMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCdnPolicyServeWhileStale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceConnectionDraining(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - transformed := make(map[string]interface{}) - transformedConnectionDrainingTimeoutSec, err := expandComputeBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(d.Get("connection_draining_timeout_sec"), d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedConnectionDrainingTimeoutSec); val.IsValid() && !isEmptyValue(val) { - transformed["drainingTimeoutSec"] = 
transformedConnectionDrainingTimeoutSec - } - - return transformed, nil -} - -func expandComputeBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceCustomRequestHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_backend_service_schema.Set).List() - return v, nil -} - -func expandComputeBackendServiceCustomResponseHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_backend_service_schema.Set).List() - return v, nil -} - -func expandComputeBackendServiceFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceEnableCDN(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceHealthChecks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_backend_service_schema.Set).List() - return v, nil -} - -func expandComputeBackendServiceIap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedOauth2ClientId, err := expandComputeBackendServiceIapOauth2ClientId(original["oauth2_client_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedOauth2ClientId); val.IsValid() && !isEmptyValue(val) { - transformed["oauth2ClientId"] = transformedOauth2ClientId - } - - 
transformedOauth2ClientSecret, err := expandComputeBackendServiceIapOauth2ClientSecret(original["oauth2_client_secret"], d, config) - if err != nil { - return nil, err - } else { - transformed["oauth2ClientSecret"] = transformedOauth2ClientSecret - } - - transformedOauth2ClientSecretSha256, err := expandComputeBackendServiceIapOauth2ClientSecretSha256(original["oauth2_client_secret_sha256"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedOauth2ClientSecretSha256); val.IsValid() && !isEmptyValue(val) { - transformed["oauth2ClientSecretSha256"] = transformedOauth2ClientSecretSha256 - } - - return transformed, nil -} - -func expandComputeBackendServiceIapOauth2ClientId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceIapOauth2ClientSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceIapOauth2ClientSecretSha256(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceLoadBalancingScheme(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceLocalityLbPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceOutlierDetection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBaseEjectionTime, err := 
expandComputeBackendServiceOutlierDetectionBaseEjectionTime(original["base_ejection_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedBaseEjectionTime); val.IsValid() && !isEmptyValue(val) { - transformed["baseEjectionTime"] = transformedBaseEjectionTime - } - - transformedConsecutiveErrors, err := expandComputeBackendServiceOutlierDetectionConsecutiveErrors(original["consecutive_errors"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedConsecutiveErrors); val.IsValid() && !isEmptyValue(val) { - transformed["consecutiveErrors"] = transformedConsecutiveErrors - } - - transformedConsecutiveGatewayFailure, err := expandComputeBackendServiceOutlierDetectionConsecutiveGatewayFailure(original["consecutive_gateway_failure"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedConsecutiveGatewayFailure); val.IsValid() && !isEmptyValue(val) { - transformed["consecutiveGatewayFailure"] = transformedConsecutiveGatewayFailure - } - - transformedEnforcingConsecutiveErrors, err := expandComputeBackendServiceOutlierDetectionEnforcingConsecutiveErrors(original["enforcing_consecutive_errors"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedEnforcingConsecutiveErrors); val.IsValid() && !isEmptyValue(val) { - transformed["enforcingConsecutiveErrors"] = transformedEnforcingConsecutiveErrors - } - - transformedEnforcingConsecutiveGatewayFailure, err := expandComputeBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(original["enforcing_consecutive_gateway_failure"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedEnforcingConsecutiveGatewayFailure); val.IsValid() && !isEmptyValue(val) { 
- transformed["enforcingConsecutiveGatewayFailure"] = transformedEnforcingConsecutiveGatewayFailure - } - - transformedEnforcingSuccessRate, err := expandComputeBackendServiceOutlierDetectionEnforcingSuccessRate(original["enforcing_success_rate"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedEnforcingSuccessRate); val.IsValid() && !isEmptyValue(val) { - transformed["enforcingSuccessRate"] = transformedEnforcingSuccessRate - } - - transformedInterval, err := expandComputeBackendServiceOutlierDetectionInterval(original["interval"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedInterval); val.IsValid() && !isEmptyValue(val) { - transformed["interval"] = transformedInterval - } - - transformedMaxEjectionPercent, err := expandComputeBackendServiceOutlierDetectionMaxEjectionPercent(original["max_ejection_percent"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedMaxEjectionPercent); val.IsValid() && !isEmptyValue(val) { - transformed["maxEjectionPercent"] = transformedMaxEjectionPercent - } - - transformedSuccessRateMinimumHosts, err := expandComputeBackendServiceOutlierDetectionSuccessRateMinimumHosts(original["success_rate_minimum_hosts"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedSuccessRateMinimumHosts); val.IsValid() && !isEmptyValue(val) { - transformed["successRateMinimumHosts"] = transformedSuccessRateMinimumHosts - } - - transformedSuccessRateRequestVolume, err := expandComputeBackendServiceOutlierDetectionSuccessRateRequestVolume(original["success_rate_request_volume"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedSuccessRateRequestVolume); val.IsValid() && 
!isEmptyValue(val) { - transformed["successRateRequestVolume"] = transformedSuccessRateRequestVolume - } - - transformedSuccessRateStdevFactor, err := expandComputeBackendServiceOutlierDetectionSuccessRateStdevFactor(original["success_rate_stdev_factor"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedSuccessRateStdevFactor); val.IsValid() && !isEmptyValue(val) { - transformed["successRateStdevFactor"] = transformedSuccessRateStdevFactor - } - - return transformed, nil -} - -func expandComputeBackendServiceOutlierDetectionBaseEjectionTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSeconds, err := expandComputeBackendServiceOutlierDetectionBaseEjectionTimeSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandComputeBackendServiceOutlierDetectionBaseEjectionTimeNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandComputeBackendServiceOutlierDetectionBaseEjectionTimeSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceOutlierDetectionBaseEjectionTimeNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeBackendServiceOutlierDetectionConsecutiveErrors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceOutlierDetectionConsecutiveGatewayFailure(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceOutlierDetectionEnforcingConsecutiveErrors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceOutlierDetectionEnforcingSuccessRate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceOutlierDetectionInterval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSeconds, err := expandComputeBackendServiceOutlierDetectionIntervalSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandComputeBackendServiceOutlierDetectionIntervalNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandComputeBackendServiceOutlierDetectionIntervalSeconds(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceOutlierDetectionIntervalNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceOutlierDetectionMaxEjectionPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceOutlierDetectionSuccessRateMinimumHosts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceOutlierDetectionSuccessRateRequestVolume(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceOutlierDetectionSuccessRateStdevFactor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServicePortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceSecurityPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceSessionAffinity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnable, err := 
expandComputeBackendServiceLogConfigEnable(original["enable"], d, config) - if err != nil { - return nil, err - } else { - transformed["enable"] = transformedEnable - } - - transformedSampleRate, err := expandComputeBackendServiceLogConfigSampleRate(original["sample_rate"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_backend_service_reflect.ValueOf(transformedSampleRate); val.IsValid() && !isEmptyValue(val) { - transformed["sampleRate"] = transformedSampleRate - } - - return transformed, nil -} - -func expandComputeBackendServiceLogConfigEnable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeBackendServiceLogConfigSampleRate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeBackendServiceEncoder(d *resource_compute_backend_service_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - iapVal := obj["iap"] - if iapVal == nil { - data := map[string]interface{}{} - data["enabled"] = false - data["oauth2ClientId"] = "" - data["oauth2ClientSecret"] = "" - obj["iap"] = data - } else { - iap := iapVal.(map[string]interface{}) - iap["enabled"] = true - obj["iap"] = iap - } - - backendsRaw, ok := obj["backends"] - if !ok { - return obj, nil - } - backends := backendsRaw.([]interface{}) - for _, backendRaw := range backends { - backend := backendRaw.(map[string]interface{}) - - if isNegBackend(backend) { - - backend["maxUtilization"] = nil - } - } - - return obj, nil -} - -func resourceComputeBackendServiceDecoder(d *resource_compute_backend_service_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - - v, ok := res["iap"] - if !ok || v == nil { - delete(res, "iap") - return res, nil - } - m := v.(map[string]interface{}) - if ok && m["enabled"] == false { - delete(res, "iap") - } - - if v, ok := 
res["localityLbPolicy"]; ok { - lbPolicy := v.(string) - if lbPolicy != "MAGLEV" && lbPolicy != "RING_HASH" { - delete(res, "consistentHash") - } - } - - return res, nil -} - -func resourceComputeBackendServiceSignedUrlKey() *resource_compute_backend_service_signed_url_key_schema.Resource { - return &resource_compute_backend_service_signed_url_key_schema.Resource{ - Create: resourceComputeBackendServiceSignedUrlKeyCreate, - Read: resourceComputeBackendServiceSignedUrlKeyRead, - Delete: resourceComputeBackendServiceSignedUrlKeyDelete, - - Timeouts: &resource_compute_backend_service_signed_url_key_schema.ResourceTimeout{ - Create: resource_compute_backend_service_signed_url_key_schema.DefaultTimeout(4 * resource_compute_backend_service_signed_url_key_time.Minute), - Delete: resource_compute_backend_service_signed_url_key_schema.DefaultTimeout(4 * resource_compute_backend_service_signed_url_key_time.Minute), - }, - - Schema: map[string]*resource_compute_backend_service_signed_url_key_schema.Schema{ - "backend_service": { - Type: resource_compute_backend_service_signed_url_key_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The backend service this signed URL key belongs.`, - }, - "key_value": { - Type: resource_compute_backend_service_signed_url_key_schema.TypeString, - Required: true, - ForceNew: true, - Description: `128-bit key value used for signing the URL. 
The key value must be a -valid RFC 4648 Section 5 base64url encoded string.`, - Sensitive: true, - }, - "name": { - Type: resource_compute_backend_service_signed_url_key_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`), - Description: `Name of the signed URL key.`, - }, - "project": { - Type: resource_compute_backend_service_signed_url_key_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeBackendServiceSignedUrlKeyCreate(d *resource_compute_backend_service_signed_url_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - keyNameProp, err := expandNestedComputeBackendServiceSignedUrlKeyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_backend_service_signed_url_key_reflect.ValueOf(keyNameProp)) && (ok || !resource_compute_backend_service_signed_url_key_reflect.DeepEqual(v, keyNameProp)) { - obj["keyName"] = keyNameProp - } - keyValueProp, err := expandNestedComputeBackendServiceSignedUrlKeyKeyValue(d.Get("key_value"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("key_value"); !isEmptyValue(resource_compute_backend_service_signed_url_key_reflect.ValueOf(keyValueProp)) && (ok || !resource_compute_backend_service_signed_url_key_reflect.DeepEqual(v, keyValueProp)) { - obj["keyValue"] = keyValueProp - } - backendServiceProp, err := expandNestedComputeBackendServiceSignedUrlKeyBackendService(d.Get("backend_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend_service"); !isEmptyValue(resource_compute_backend_service_signed_url_key_reflect.ValueOf(backendServiceProp)) && (ok || 
!resource_compute_backend_service_signed_url_key_reflect.DeepEqual(v, backendServiceProp)) { - obj["backendService"] = backendServiceProp - } - - lockName, err := replaceVars(d, config, "signedUrlKey/{{project}}/backendServices/{{backend_service}}/") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{backend_service}}/addSignedUrlKey") - if err != nil { - return err - } - - resource_compute_backend_service_signed_url_key_log.Printf("[DEBUG] Creating new BackendServiceSignedUrlKey: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_backend_service_signed_url_key_fmt.Errorf("Error fetching project for BackendServiceSignedUrlKey: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_backend_service_signed_url_key_schema.TimeoutCreate)) - if err != nil { - return resource_compute_backend_service_signed_url_key_fmt.Errorf("Error creating BackendServiceSignedUrlKey: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/backendServices/{{backend_service}}") - if err != nil { - return resource_compute_backend_service_signed_url_key_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating BackendServiceSignedUrlKey", userAgent, - d.Timeout(resource_compute_backend_service_signed_url_key_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_backend_service_signed_url_key_fmt.Errorf("Error waiting to create BackendServiceSignedUrlKey: %s", err) - } - - resource_compute_backend_service_signed_url_key_log.Printf("[DEBUG] Finished creating 
BackendServiceSignedUrlKey %q: %#v", d.Id(), res) - - return resourceComputeBackendServiceSignedUrlKeyRead(d, meta) -} - -func resourceComputeBackendServiceSignedUrlKeyRead(d *resource_compute_backend_service_signed_url_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{backend_service}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_backend_service_signed_url_key_fmt.Errorf("Error fetching project for BackendServiceSignedUrlKey: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_backend_service_signed_url_key_fmt.Sprintf("ComputeBackendServiceSignedUrlKey %q", d.Id())) - } - - res, err = flattenNestedComputeBackendServiceSignedUrlKey(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_backend_service_signed_url_key_log.Printf("[DEBUG] Removing ComputeBackendServiceSignedUrlKey because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_compute_backend_service_signed_url_key_fmt.Errorf("Error reading BackendServiceSignedUrlKey: %s", err) - } - - if err := d.Set("name", flattenNestedComputeBackendServiceSignedUrlKeyName(res["keyName"], d, config)); err != nil { - return resource_compute_backend_service_signed_url_key_fmt.Errorf("Error reading BackendServiceSignedUrlKey: %s", err) - } - - return nil -} - -func resourceComputeBackendServiceSignedUrlKeyDelete(d 
*resource_compute_backend_service_signed_url_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_backend_service_signed_url_key_fmt.Errorf("Error fetching project for BackendServiceSignedUrlKey: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "signedUrlKey/{{project}}/backendServices/{{backend_service}}/") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/backendServices/{{backend_service}}/deleteSignedUrlKey?keyName={{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_backend_service_signed_url_key_log.Printf("[DEBUG] Deleting BackendServiceSignedUrlKey %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_backend_service_signed_url_key_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "BackendServiceSignedUrlKey") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting BackendServiceSignedUrlKey", userAgent, - d.Timeout(resource_compute_backend_service_signed_url_key_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_backend_service_signed_url_key_log.Printf("[DEBUG] Finished deleting BackendServiceSignedUrlKey %q: %#v", d.Id(), res) - return nil -} - -func flattenNestedComputeBackendServiceSignedUrlKeyName(v interface{}, d *resource_compute_backend_service_signed_url_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
expandNestedComputeBackendServiceSignedUrlKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeBackendServiceSignedUrlKeyKeyValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeBackendServiceSignedUrlKeyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_backend_service_signed_url_key_fmt.Errorf("Invalid value for backend_service: %s", err) - } - return f.RelativeLink(), nil -} - -func flattenNestedComputeBackendServiceSignedUrlKey(d *resource_compute_backend_service_signed_url_key_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["cdnPolicy"] - if !ok || v == nil { - return nil, nil - } - res = v.(map[string]interface{}) - - v, ok = res["signedUrlKeyNames"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_compute_backend_service_signed_url_key_fmt.Errorf("expected list or map for value cdnPolicy.signedUrlKeyNames. 
Actual value: %v", v) - } - - _, item, err := resourceComputeBackendServiceSignedUrlKeyFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeBackendServiceSignedUrlKeyFindNestedObjectInList(d *resource_compute_backend_service_signed_url_key_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName, err := expandNestedComputeBackendServiceSignedUrlKeyName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputeBackendServiceSignedUrlKeyName(expectedName, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - - item := map[string]interface{}{ - "keyName": itemRaw, - } - - itemName := flattenNestedComputeBackendServiceSignedUrlKeyName(item["keyName"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_backend_service_signed_url_key_reflect.ValueOf(itemName)) && isEmptyValue(resource_compute_backend_service_signed_url_key_reflect.ValueOf(expectedFlattenedName))) && !resource_compute_backend_service_signed_url_key_reflect.DeepEqual(itemName, expectedFlattenedName) { - resource_compute_backend_service_signed_url_key_log.Printf("[DEBUG] Skipping item with keyName= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - resource_compute_backend_service_signed_url_key_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func isDiskShrinkage(_ resource_compute_disk_context.Context, old, new, _ interface{}) bool { - - if old == nil || new == nil { - return false - } - return new.(int) < old.(int) -} - -func diskImageDiffSuppress(_, old, new string, _ *resource_compute_disk_schema.ResourceData) bool { - - matches := resolveImageLink.FindStringSubmatch(old) - if matches == nil { - - return false - } - oldProject := 
matches[1] - oldName := matches[2] - - if resolveImageProjectFamily.MatchString(new) { - - matches := resolveImageProjectFamily.FindStringSubmatch(new) - newProject := matches[1] - newFamilyName := matches[2] - - return diskImageProjectNameEquals(oldProject, newProject) && diskImageFamilyEquals(oldName, newFamilyName) - } - - if resolveImageProjectImage.MatchString(new) { - - matches := resolveImageProjectImage.FindStringSubmatch(new) - newProject := matches[1] - newImageName := matches[2] - - return diskImageProjectNameEquals(oldProject, newProject) && diskImageEquals(oldName, newImageName) - } - - if resolveImageGlobalFamily.MatchString(new) { - - matches := resolveImageGlobalFamily.FindStringSubmatch(new) - familyName := matches[1] - - return diskImageFamilyEquals(oldName, familyName) - } - - if resolveImageGlobalImage.MatchString(new) { - - matches := resolveImageGlobalImage.FindStringSubmatch(new) - imageName := matches[1] - - return diskImageEquals(oldName, imageName) - } - - if resolveImageFamilyFamily.MatchString(new) { - - matches := resolveImageFamilyFamily.FindStringSubmatch(new) - familyName := matches[1] - - return diskImageFamilyEquals(oldName, familyName) - } - - if resolveImageProjectImageShorthand.MatchString(new) { - - matches := resolveImageProjectImageShorthand.FindStringSubmatch(new) - newProject := matches[1] - newName := matches[2] - - return diskImageProjectNameEquals(oldProject, newProject) && - (diskImageEquals(oldName, newName) || diskImageFamilyEquals(oldName, newName)) - } - - if diskImageEquals(oldName, new) || diskImageFamilyEquals(oldName, new) { - - return true - } - - return false -} - -func diskImageProjectNameEquals(project1, project2 string) bool { - - fullProjectName, ok := imageMap[project2] - if ok { - project2 = fullProjectName - } - - return project1 == project2 -} - -func diskImageEquals(oldImageName, newImageName string) bool { - return oldImageName == newImageName -} - -func diskImageFamilyEquals(imageName, familyName 
string) bool { - - if resource_compute_disk_strings.Contains(imageName, familyName) { - return true - } - - if suppressCanonicalFamilyDiff(imageName, familyName) { - return true - } - - if suppressCosFamilyDiff(imageName, familyName) { - return true - } - - if suppressWindowsSqlFamilyDiff(imageName, familyName) { - return true - } - - if suppressWindowsFamilyDiff(imageName, familyName) { - return true - } - - return false -} - -func suppressCanonicalFamilyDiff(imageName, familyName string) bool { - parts := canonicalUbuntuLtsImage.FindStringSubmatch(imageName) - if len(parts) == 3 { - f := resource_compute_disk_fmt.Sprintf("ubuntu-%s%s-lts", parts[1], parts[2]) - if f == familyName { - return true - } - } - - return false -} - -func suppressCosFamilyDiff(imageName, familyName string) bool { - parts := cosLtsImage.FindStringSubmatch(imageName) - if len(parts) == 2 { - f := resource_compute_disk_fmt.Sprintf("cos-%s-lts", parts[1]) - if f == familyName { - return true - } - } - - return false -} - -func suppressWindowsSqlFamilyDiff(imageName, familyName string) bool { - parts := windowsSqlImage.FindStringSubmatch(imageName) - if len(parts) == 5 { - edition := parts[2] - sqlVersion := parts[1] - windowsVersion := parts[3] - - switch edition { - case "enterprise": - edition = "ent" - case "standard": - edition = "std" - case "express": - edition = "exp" - } - - var f string - if revision := parts[4]; revision != "" { - - f = resource_compute_disk_fmt.Sprintf("sql-%s-%s-win-%s-r%s", edition, sqlVersion, windowsVersion, revision) - } else { - - f = resource_compute_disk_fmt.Sprintf("sql-%s-%s-win-%s", edition, sqlVersion, windowsVersion) - } - - if f == familyName { - return true - } - } - - return false -} - -func suppressWindowsFamilyDiff(imageName, familyName string) bool { - updatedFamilyString := resource_compute_disk_strings.Replace(familyName, "windows-", "windows-server-", 1) - updatedImageName := resource_compute_disk_strings.Replace(imageName, "-dc-", "-", 1) - 
- return resource_compute_disk_strings.Contains(updatedImageName, updatedFamilyString) -} - -func resourceComputeDisk() *resource_compute_disk_schema.Resource { - return &resource_compute_disk_schema.Resource{ - Create: resourceComputeDiskCreate, - Read: resourceComputeDiskRead, - Update: resourceComputeDiskUpdate, - Delete: resourceComputeDiskDelete, - - Importer: &resource_compute_disk_schema.ResourceImporter{ - State: resourceComputeDiskImport, - }, - - Timeouts: &resource_compute_disk_schema.ResourceTimeout{ - Create: resource_compute_disk_schema.DefaultTimeout(5 * resource_compute_disk_time.Minute), - Update: resource_compute_disk_schema.DefaultTimeout(4 * resource_compute_disk_time.Minute), - Delete: resource_compute_disk_schema.DefaultTimeout(4 * resource_compute_disk_time.Minute), - }, - - CustomizeDiff: resource_compute_disk_customdiff.All( - resource_compute_disk_customdiff.ForceNewIfChange("size", isDiskShrinkage)), - - Schema: map[string]*resource_compute_disk_schema.Schema{ - "name": { - Type: resource_compute_disk_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "description": { - Type: resource_compute_disk_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "disk_encryption_key": { - Type: resource_compute_disk_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Encrypts the disk using a customer-supplied encryption key. 
- -After you encrypt a disk with a customer-supplied key, you must -provide the same key if you use the disk later (e.g. to create a disk -snapshot or an image, or to attach the disk to a virtual machine). - -Customer-supplied encryption keys do not protect access to metadata of -the disk. - -If you do not provide an encryption key when creating the disk, then -the disk will be encrypted using an automatically generated key and -you do not need to provide a key to use the disk later.`, - MaxItems: 1, - Elem: &resource_compute_disk_schema.Resource{ - Schema: map[string]*resource_compute_disk_schema.Schema{ - "kms_key_self_link": { - Type: resource_compute_disk_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The self link of the encryption key used to encrypt the disk. Also called KmsKeyName -in the cloud console. Your project's Compute Engine System service account -('service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com') must have -'roles/cloudkms.cryptoKeyEncrypterDecrypter' to use this feature. -See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys`, - }, - "kms_key_service_account": { - Type: resource_compute_disk_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The service account used for the encryption request for the given KMS key. 
-If absent, the Compute Engine Service Agent service account is used.`, - }, - "raw_key": { - Type: resource_compute_disk_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a 256-bit customer-supplied encryption key, encoded in -RFC 4648 base64 to either encrypt or decrypt this resource.`, - Sensitive: true, - }, - "sha256": { - Type: resource_compute_disk_schema.TypeString, - Computed: true, - Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied -encryption key that protects this resource.`, - }, - }, - }, - }, - "image": { - Type: resource_compute_disk_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: diskImageDiffSuppress, - Description: `The image from which to initialize this disk. This can be -one of: the image's 'self_link', 'projects/{project}/global/images/{image}', -'projects/{project}/global/images/family/{family}', 'global/images/{image}', -'global/images/family/{family}', 'family/{family}', '{project}/{family}', -'{project}/{image}', '{family}', or '{image}'. If referred by family, the -images names must include the family name. If they don't, use the -[google_compute_image data source](/docs/providers/google/d/compute_image.html). -For instance, the image 'centos-6-v20180104' includes its family name 'centos-6'. -These images can be referred by family name here.`, - }, - "labels": { - Type: resource_compute_disk_schema.TypeMap, - Optional: true, - Description: `Labels to apply to this disk. A list of key->value pairs.`, - Elem: &resource_compute_disk_schema.Schema{Type: resource_compute_disk_schema.TypeString}, - }, - "physical_block_size_bytes": { - Type: resource_compute_disk_schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Physical block size of the persistent disk, in bytes. If not present -in a request, a default value is used. Currently supported sizes -are 4096 and 16384, other sizes may be added in the future. 
-If an unsupported value is requested, the error message will list -the supported values for the caller's project.`, - }, - "provisioned_iops": { - Type: resource_compute_disk_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `Indicates how many IOPS must be provisioned for the disk.`, - }, - "size": { - Type: resource_compute_disk_schema.TypeInt, - Computed: true, - Optional: true, - Description: `Size of the persistent disk, specified in GB. You can specify this -field when creating a persistent disk using the 'image' or -'snapshot' parameter, or specify it alone to create an empty -persistent disk. - -If you specify this field along with 'image' or 'snapshot', -the value must not be less than the size of the image -or the size of the snapshot. - -~>**NOTE** If you change the size, Terraform updates the disk size -if upsizing is detected but recreates the disk if downsizing is requested. -You can add 'lifecycle.prevent_destroy' in the config to prevent destroying -and recreating.`, - }, - "snapshot": { - Type: resource_compute_disk_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The source snapshot used to create this disk. You can provide this as -a partial or full URL to the resource. If the snapshot is in another -project than this disk, you must supply a full URL. For example, the -following are valid values: - -* 'https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot' -* 'projects/project/global/snapshots/snapshot' -* 'global/snapshots/snapshot' -* 'snapshot'`, - }, - "source_image_encryption_key": { - Type: resource_compute_disk_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The customer-supplied encryption key of the source image. 
Required if -the source image is protected by a customer-supplied encryption key.`, - MaxItems: 1, - Elem: &resource_compute_disk_schema.Resource{ - Schema: map[string]*resource_compute_disk_schema.Schema{ - "kms_key_self_link": { - Type: resource_compute_disk_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The self link of the encryption key used to encrypt the disk. Also called KmsKeyName -in the cloud console. Your project's Compute Engine System service account -('service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com') must have -'roles/cloudkms.cryptoKeyEncrypterDecrypter' to use this feature. -See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys`, - }, - "kms_key_service_account": { - Type: resource_compute_disk_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The service account used for the encryption request for the given KMS key. -If absent, the Compute Engine Service Agent service account is used.`, - }, - "raw_key": { - Type: resource_compute_disk_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a 256-bit customer-supplied encryption key, encoded in -RFC 4648 base64 to either encrypt or decrypt this resource.`, - }, - "sha256": { - Type: resource_compute_disk_schema.TypeString, - Computed: true, - Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied -encryption key that protects this resource.`, - }, - }, - }, - }, - "source_snapshot_encryption_key": { - Type: resource_compute_disk_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The customer-supplied encryption key of the source snapshot. 
Required -if the source snapshot is protected by a customer-supplied encryption -key.`, - MaxItems: 1, - Elem: &resource_compute_disk_schema.Resource{ - Schema: map[string]*resource_compute_disk_schema.Schema{ - "kms_key_self_link": { - Type: resource_compute_disk_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The self link of the encryption key used to encrypt the disk. Also called KmsKeyName -in the cloud console. Your project's Compute Engine System service account -('service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com') must have -'roles/cloudkms.cryptoKeyEncrypterDecrypter' to use this feature. -See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys`, - }, - "kms_key_service_account": { - Type: resource_compute_disk_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The service account used for the encryption request for the given KMS key. -If absent, the Compute Engine Service Agent service account is used.`, - }, - "raw_key": { - Type: resource_compute_disk_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a 256-bit customer-supplied encryption key, encoded in -RFC 4648 base64 to either encrypt or decrypt this resource.`, - }, - "sha256": { - Type: resource_compute_disk_schema.TypeString, - Computed: true, - Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied -encryption key that protects this resource.`, - }, - }, - }, - }, - "type": { - Type: resource_compute_disk_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the disk type resource describing which disk type to use to -create the disk. 
Provide this when creating the disk.`, - Default: "pd-standard", - }, - "zone": { - Type: resource_compute_disk_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the zone where the disk resides.`, - }, - "creation_timestamp": { - Type: resource_compute_disk_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "label_fingerprint": { - Type: resource_compute_disk_schema.TypeString, - Computed: true, - Description: `The fingerprint used for optimistic locking of this resource. Used -internally during updates.`, - }, - "last_attach_timestamp": { - Type: resource_compute_disk_schema.TypeString, - Computed: true, - Description: `Last attach timestamp in RFC3339 text format.`, - }, - "last_detach_timestamp": { - Type: resource_compute_disk_schema.TypeString, - Computed: true, - Description: `Last detach timestamp in RFC3339 text format.`, - }, - "source_image_id": { - Type: resource_compute_disk_schema.TypeString, - Computed: true, - Description: `The ID value of the image used to create this disk. This value -identifies the exact image that was used to create this persistent -disk. For example, if you created the persistent disk from an image -that was later deleted and recreated under the same name, the source -image ID would identify the exact version of the image that was used.`, - }, - "source_snapshot_id": { - Type: resource_compute_disk_schema.TypeString, - Computed: true, - Description: `The unique ID of the snapshot used to create this disk. This value -identifies the exact snapshot that was used to create this persistent -disk. 
For example, if you created the persistent disk from a snapshot -that was later deleted and recreated under the same name, the source -snapshot ID would identify the exact version of the snapshot that was -used.`, - }, - "users": { - Type: resource_compute_disk_schema.TypeList, - Computed: true, - Description: `Links to the users of the disk (attached instances) in form: -project/zones/zone/instances/instance`, - Elem: &resource_compute_disk_schema.Schema{ - Type: resource_compute_disk_schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "project": { - Type: resource_compute_disk_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_disk_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeDiskCreate(d *resource_compute_disk_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelFingerprintProp, err := expandComputeDiskLabelFingerprint(d.Get("label_fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(resource_compute_disk_reflect.ValueOf(labelFingerprintProp)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, labelFingerprintProp)) { - obj["labelFingerprint"] = labelFingerprintProp - } - descriptionProp, err := expandComputeDiskDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_disk_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandComputeDiskLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); 
!isEmptyValue(resource_compute_disk_reflect.ValueOf(labelsProp)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - nameProp, err := expandComputeDiskName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_disk_reflect.ValueOf(nameProp)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - sizeGbProp, err := expandComputeDiskSize(d.Get("size"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("size"); !isEmptyValue(resource_compute_disk_reflect.ValueOf(sizeGbProp)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, sizeGbProp)) { - obj["sizeGb"] = sizeGbProp - } - physicalBlockSizeBytesProp, err := expandComputeDiskPhysicalBlockSizeBytes(d.Get("physical_block_size_bytes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("physical_block_size_bytes"); !isEmptyValue(resource_compute_disk_reflect.ValueOf(physicalBlockSizeBytesProp)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, physicalBlockSizeBytesProp)) { - obj["physicalBlockSizeBytes"] = physicalBlockSizeBytesProp - } - typeProp, err := expandComputeDiskType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(resource_compute_disk_reflect.ValueOf(typeProp)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - sourceImageProp, err := expandComputeDiskImage(d.Get("image"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("image"); !isEmptyValue(resource_compute_disk_reflect.ValueOf(sourceImageProp)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, sourceImageProp)) { - obj["sourceImage"] = sourceImageProp - } - provisionedIopsProp, err := expandComputeDiskProvisionedIops(d.Get("provisioned_iops"), d, config) - if err != nil { - return 
err - } else if v, ok := d.GetOkExists("provisioned_iops"); !isEmptyValue(resource_compute_disk_reflect.ValueOf(provisionedIopsProp)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, provisionedIopsProp)) { - obj["provisionedIops"] = provisionedIopsProp - } - zoneProp, err := expandComputeDiskZone(d.Get("zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(resource_compute_disk_reflect.ValueOf(zoneProp)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, zoneProp)) { - obj["zone"] = zoneProp - } - sourceImageEncryptionKeyProp, err := expandComputeDiskSourceImageEncryptionKey(d.Get("source_image_encryption_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_image_encryption_key"); !isEmptyValue(resource_compute_disk_reflect.ValueOf(sourceImageEncryptionKeyProp)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, sourceImageEncryptionKeyProp)) { - obj["sourceImageEncryptionKey"] = sourceImageEncryptionKeyProp - } - diskEncryptionKeyProp, err := expandComputeDiskDiskEncryptionKey(d.Get("disk_encryption_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disk_encryption_key"); !isEmptyValue(resource_compute_disk_reflect.ValueOf(diskEncryptionKeyProp)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, diskEncryptionKeyProp)) { - obj["diskEncryptionKey"] = diskEncryptionKeyProp - } - sourceSnapshotProp, err := expandComputeDiskSnapshot(d.Get("snapshot"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("snapshot"); !isEmptyValue(resource_compute_disk_reflect.ValueOf(sourceSnapshotProp)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, sourceSnapshotProp)) { - obj["sourceSnapshot"] = sourceSnapshotProp - } - sourceSnapshotEncryptionKeyProp, err := expandComputeDiskSourceSnapshotEncryptionKey(d.Get("source_snapshot_encryption_key"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("source_snapshot_encryption_key"); !isEmptyValue(resource_compute_disk_reflect.ValueOf(sourceSnapshotEncryptionKeyProp)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, sourceSnapshotEncryptionKeyProp)) { - obj["sourceSnapshotEncryptionKey"] = sourceSnapshotEncryptionKeyProp - } - - obj, err = resourceComputeDiskEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks") - if err != nil { - return err - } - - resource_compute_disk_log.Printf("[DEBUG] Creating new Disk: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_disk_fmt.Errorf("Error fetching project for Disk: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_disk_schema.TimeoutCreate)) - if err != nil { - return resource_compute_disk_fmt.Errorf("Error creating Disk: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/disks/{{name}}") - if err != nil { - return resource_compute_disk_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating Disk", userAgent, - d.Timeout(resource_compute_disk_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_disk_fmt.Errorf("Error waiting to create Disk: %s", err) - } - - resource_compute_disk_log.Printf("[DEBUG] Finished creating Disk %q: %#v", d.Id(), res) - - return resourceComputeDiskRead(d, meta) -} - -func resourceComputeDiskRead(d *resource_compute_disk_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := 
replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_disk_fmt.Errorf("Error fetching project for Disk: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_disk_fmt.Sprintf("ComputeDisk %q", d.Id())) - } - - res, err = resourceComputeDiskDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_disk_log.Printf("[DEBUG] Removing ComputeDisk because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - - if err := d.Set("label_fingerprint", flattenComputeDiskLabelFingerprint(res["labelFingerprint"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeDiskCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("description", flattenComputeDiskDescription(res["description"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("last_attach_timestamp", flattenComputeDiskLastAttachTimestamp(res["lastAttachTimestamp"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("last_detach_timestamp", flattenComputeDiskLastDetachTimestamp(res["lastDetachTimestamp"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: 
%s", err) - } - if err := d.Set("labels", flattenComputeDiskLabels(res["labels"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("name", flattenComputeDiskName(res["name"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("size", flattenComputeDiskSize(res["sizeGb"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("users", flattenComputeDiskUsers(res["users"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("physical_block_size_bytes", flattenComputeDiskPhysicalBlockSizeBytes(res["physicalBlockSizeBytes"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("type", flattenComputeDiskType(res["type"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("image", flattenComputeDiskImage(res["sourceImage"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("provisioned_iops", flattenComputeDiskProvisionedIops(res["provisionedIops"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("zone", flattenComputeDiskZone(res["zone"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("source_image_encryption_key", flattenComputeDiskSourceImageEncryptionKey(res["sourceImageEncryptionKey"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("source_image_id", flattenComputeDiskSourceImageId(res["sourceImageId"], d, config)); err != nil { - return 
resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("disk_encryption_key", flattenComputeDiskDiskEncryptionKey(res["diskEncryptionKey"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("snapshot", flattenComputeDiskSnapshot(res["sourceSnapshot"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("source_snapshot_encryption_key", flattenComputeDiskSourceSnapshotEncryptionKey(res["sourceSnapshotEncryptionKey"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("source_snapshot_id", flattenComputeDiskSourceSnapshotId(res["sourceSnapshotId"], d, config)); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_disk_fmt.Errorf("Error reading Disk: %s", err) - } - - return nil -} - -func resourceComputeDiskUpdate(d *resource_compute_disk_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_disk_fmt.Errorf("Error fetching project for Disk: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("label_fingerprint") || d.HasChange("labels") { - obj := make(map[string]interface{}) - - labelFingerprintProp, err := expandComputeDiskLabelFingerprint(d.Get("label_fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(resource_compute_disk_reflect.ValueOf(v)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, labelFingerprintProp)) { - obj["labelFingerprint"] 
= labelFingerprintProp - } - labelsProp, err := expandComputeDiskLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_compute_disk_reflect.ValueOf(v)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{name}}/setLabels") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_disk_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_disk_fmt.Errorf("Error updating Disk %q: %s", d.Id(), err) - } else { - resource_compute_disk_log.Printf("[DEBUG] Finished updating Disk %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating Disk", userAgent, - d.Timeout(resource_compute_disk_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("size") { - obj := make(map[string]interface{}) - - sizeGbProp, err := expandComputeDiskSize(d.Get("size"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("size"); !isEmptyValue(resource_compute_disk_reflect.ValueOf(v)) && (ok || !resource_compute_disk_reflect.DeepEqual(v, sizeGbProp)) { - obj["sizeGb"] = sizeGbProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{name}}/resize") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_disk_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_disk_fmt.Errorf("Error updating Disk %q: %s", d.Id(), err) - } else { - 
resource_compute_disk_log.Printf("[DEBUG] Finished updating Disk %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating Disk", userAgent, - d.Timeout(resource_compute_disk_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeDiskRead(d, meta) -} - -func resourceComputeDiskDelete(d *resource_compute_disk_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_disk_fmt.Errorf("Error fetching project for Disk: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - readRes, err := sendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_disk_fmt.Sprintf("ComputeDisk %q", d.Id())) - } - - if v, ok := readRes["users"].([]interface{}); ok { - type detachArgs struct{ project, zone, instance, deviceName string } - var detachCalls []detachArgs - - for _, instance := range convertStringArr(v) { - self := d.Get("self_link").(string) - instanceProject, instanceZone, instanceName, err := GetLocationalResourcePropertiesFromSelfLinkString(instance) - if err != nil { - return err - } - - i, err := config.NewComputeClient(userAgent).Instances.Get(instanceProject, instanceZone, instanceName).Do() - if err != nil { - if gerr, ok := err.(*resource_compute_disk_googleapi.Error); ok && gerr.Code == 404 { - resource_compute_disk_log.Printf("[WARN] instance %q not found, not bothering to detach disks", instance) - continue - } - return resource_compute_disk_fmt.Errorf("Error retrieving instance %s: %s", instance, err.Error()) - } - 
for _, disk := range i.Disks { - if compareSelfLinkOrResourceName("", disk.Source, self, nil) { - detachCalls = append(detachCalls, detachArgs{ - project: instanceProject, - zone: GetResourceNameFromSelfLink(i.Zone), - instance: i.Name, - deviceName: disk.DeviceName, - }) - } - } - } - - for _, call := range detachCalls { - op, err := config.NewComputeClient(userAgent).Instances.DetachDisk(call.project, call.zone, call.instance, call.deviceName).Do() - if err != nil { - return resource_compute_disk_fmt.Errorf("Error detaching disk %s from instance %s/%s/%s: %s", call.deviceName, call.project, - call.zone, call.instance, err.Error()) - } - err = computeOperationWaitTime(config, op, call.project, - resource_compute_disk_fmt.Sprintf("Detaching disk from %s/%s/%s", call.project, call.zone, call.instance), userAgent, d.Timeout(resource_compute_disk_schema.TimeoutDelete)) - if err != nil { - if opErr, ok := err.(ComputeOperationError); ok && len(opErr.Errors) == 1 && opErr.Errors[0].Code == "RESOURCE_NOT_FOUND" { - resource_compute_disk_log.Printf("[WARN] instance %q was deleted while awaiting detach", call.instance) - continue - } - return err - } - } - } - resource_compute_disk_log.Printf("[DEBUG] Deleting Disk %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_disk_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Disk") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting Disk", userAgent, - d.Timeout(resource_compute_disk_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_disk_log.Printf("[DEBUG] Finished deleting Disk %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeDiskImport(d *resource_compute_disk_schema.ResourceData, meta interface{}) ([]*resource_compute_disk_schema.ResourceData, error) { - config := 
meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/disks/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/disks/{{name}}") - if err != nil { - return nil, resource_compute_disk_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_disk_schema.ResourceData{d}, nil -} - -func flattenComputeDiskLabelFingerprint(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskCreationTimestamp(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskDescription(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskLastAttachTimestamp(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskLastDetachTimestamp(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskLabels(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskName(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSize(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_disk_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeDiskUsers(v interface{}, d *resource_compute_disk_schema.ResourceData, 
config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeDiskPhysicalBlockSizeBytes(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_disk_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeDiskType(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenComputeDiskImage(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskProvisionedIops(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_disk_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeDiskZone(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenComputeDiskSourceImageEncryptionKey(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["raw_key"] = - flattenComputeDiskSourceImageEncryptionKeyRawKey(original["rawKey"], d, config) - transformed["sha256"] = - flattenComputeDiskSourceImageEncryptionKeySha256(original["sha256"], d, config) - transformed["kms_key_self_link"] 
= - flattenComputeDiskSourceImageEncryptionKeyKmsKeySelfLink(original["kmsKeyName"], d, config) - transformed["kms_key_service_account"] = - flattenComputeDiskSourceImageEncryptionKeyKmsKeyServiceAccount(original["kmsKeyServiceAccount"], d, config) - return []interface{}{transformed} -} - -func flattenComputeDiskSourceImageEncryptionKeyRawKey(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSourceImageEncryptionKeySha256(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSourceImageEncryptionKeyKmsKeySelfLink(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSourceImageEncryptionKeyKmsKeyServiceAccount(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSourceImageId(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskDiskEncryptionKey(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["raw_key"] = - flattenComputeDiskDiskEncryptionKeyRawKey(original["rawKey"], d, config) - transformed["sha256"] = - flattenComputeDiskDiskEncryptionKeySha256(original["sha256"], d, config) - transformed["kms_key_self_link"] = - flattenComputeDiskDiskEncryptionKeyKmsKeySelfLink(original["kmsKeyName"], d, config) - transformed["kms_key_service_account"] = - flattenComputeDiskDiskEncryptionKeyKmsKeyServiceAccount(original["kmsKeyServiceAccount"], d, config) - return []interface{}{transformed} -} - -func flattenComputeDiskDiskEncryptionKeyRawKey(v interface{}, d 
*resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskDiskEncryptionKeySha256(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskDiskEncryptionKeyKmsKeySelfLink(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskDiskEncryptionKeyKmsKeyServiceAccount(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSnapshot(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeDiskSourceSnapshotEncryptionKey(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["raw_key"] = - flattenComputeDiskSourceSnapshotEncryptionKeyRawKey(original["rawKey"], d, config) - transformed["kms_key_self_link"] = - flattenComputeDiskSourceSnapshotEncryptionKeyKmsKeySelfLink(original["kmsKeyName"], d, config) - transformed["sha256"] = - flattenComputeDiskSourceSnapshotEncryptionKeySha256(original["sha256"], d, config) - transformed["kms_key_service_account"] = - flattenComputeDiskSourceSnapshotEncryptionKeyKmsKeyServiceAccount(original["kmsKeyServiceAccount"], d, config) - return []interface{}{transformed} -} - -func flattenComputeDiskSourceSnapshotEncryptionKeyRawKey(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSourceSnapshotEncryptionKeyKmsKeySelfLink(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeDiskSourceSnapshotEncryptionKeySha256(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSourceSnapshotEncryptionKeyKmsKeyServiceAccount(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeDiskSourceSnapshotId(v interface{}, d *resource_compute_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeDiskLabelFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandComputeDiskName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSize(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskPhysicalBlockSizeBytes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseZonalFieldValue("diskTypes", v.(string), "project", "zone", d, config, true) - if err != nil { - return nil, resource_compute_disk_fmt.Errorf("Invalid value for type: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeDiskImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskProvisionedIops(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_disk_fmt.Errorf("Invalid value for zone: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeDiskSourceImageEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRawKey, err := expandComputeDiskSourceImageEncryptionKeyRawKey(original["raw_key"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_disk_reflect.ValueOf(transformedRawKey); val.IsValid() && !isEmptyValue(val) { - transformed["rawKey"] = transformedRawKey - } - - transformedSha256, err := expandComputeDiskSourceImageEncryptionKeySha256(original["sha256"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_disk_reflect.ValueOf(transformedSha256); val.IsValid() && !isEmptyValue(val) { - transformed["sha256"] = transformedSha256 - } - - transformedKmsKeySelfLink, err := expandComputeDiskSourceImageEncryptionKeyKmsKeySelfLink(original["kms_key_self_link"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_disk_reflect.ValueOf(transformedKmsKeySelfLink); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeySelfLink - } - - transformedKmsKeyServiceAccount, err := expandComputeDiskSourceImageEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_disk_reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && 
!isEmptyValue(val) { - transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount - } - - return transformed, nil -} - -func expandComputeDiskSourceImageEncryptionKeyRawKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSourceImageEncryptionKeySha256(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSourceImageEncryptionKeyKmsKeySelfLink(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSourceImageEncryptionKeyKmsKeyServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskDiskEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRawKey, err := expandComputeDiskDiskEncryptionKeyRawKey(original["raw_key"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_disk_reflect.ValueOf(transformedRawKey); val.IsValid() && !isEmptyValue(val) { - transformed["rawKey"] = transformedRawKey - } - - transformedSha256, err := expandComputeDiskDiskEncryptionKeySha256(original["sha256"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_disk_reflect.ValueOf(transformedSha256); val.IsValid() && !isEmptyValue(val) { - transformed["sha256"] = transformedSha256 - } - - transformedKmsKeySelfLink, err := expandComputeDiskDiskEncryptionKeyKmsKeySelfLink(original["kms_key_self_link"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_disk_reflect.ValueOf(transformedKmsKeySelfLink); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = 
transformedKmsKeySelfLink - } - - transformedKmsKeyServiceAccount, err := expandComputeDiskDiskEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_disk_reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount - } - - return transformed, nil -} - -func expandComputeDiskDiskEncryptionKeyRawKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskDiskEncryptionKeySha256(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskDiskEncryptionKeyKmsKeySelfLink(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskDiskEncryptionKeyKmsKeyServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSnapshot(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("snapshots", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_disk_fmt.Errorf("Invalid value for snapshot: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeDiskSourceSnapshotEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRawKey, err := expandComputeDiskSourceSnapshotEncryptionKeyRawKey(original["raw_key"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_disk_reflect.ValueOf(transformedRawKey); val.IsValid() && !isEmptyValue(val) { - 
transformed["rawKey"] = transformedRawKey - } - - transformedKmsKeySelfLink, err := expandComputeDiskSourceSnapshotEncryptionKeyKmsKeySelfLink(original["kms_key_self_link"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_disk_reflect.ValueOf(transformedKmsKeySelfLink); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeySelfLink - } - - transformedSha256, err := expandComputeDiskSourceSnapshotEncryptionKeySha256(original["sha256"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_disk_reflect.ValueOf(transformedSha256); val.IsValid() && !isEmptyValue(val) { - transformed["sha256"] = transformedSha256 - } - - transformedKmsKeyServiceAccount, err := expandComputeDiskSourceSnapshotEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_disk_reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount - } - - return transformed, nil -} - -func expandComputeDiskSourceSnapshotEncryptionKeyRawKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSourceSnapshotEncryptionKeyKmsKeySelfLink(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSourceSnapshotEncryptionKeySha256(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeDiskSourceSnapshotEncryptionKeyKmsKeyServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeDiskEncoder(d *resource_compute_disk_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - - project, err 
:= getProject(d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - if v, ok := d.GetOk("type"); ok { - resource_compute_disk_log.Printf("[DEBUG] Loading disk type: %s", v.(string)) - diskType, err := readDiskType(config, d, v.(string)) - if err != nil { - return nil, resource_compute_disk_fmt.Errorf( - "Error loading disk type '%s': %s", - v.(string), err) - } - - obj["type"] = diskType.RelativeLink() - } - - if v, ok := d.GetOk("image"); ok { - resource_compute_disk_log.Printf("[DEBUG] Resolving image name: %s", v.(string)) - imageUrl, err := resolveImage(config, project, v.(string), userAgent) - if err != nil { - return nil, resource_compute_disk_fmt.Errorf( - "Error resolving image name '%s': %s", - v.(string), err) - } - - obj["sourceImage"] = imageUrl - resource_compute_disk_log.Printf("[DEBUG] Image name resolved to: %s", imageUrl) - } - - return obj, nil -} - -func resourceComputeDiskDecoder(d *resource_compute_disk_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if v, ok := res["diskEncryptionKey"]; ok { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformed["rawKey"] = d.Get("disk_encryption_key.0.raw_key") - transformed["sha256"] = original["sha256"] - - if kmsKeyName, ok := original["kmsKeyName"]; ok { - - transformed["kmsKeyName"] = resource_compute_disk_strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] - } - - if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { - transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount - } - - res["diskEncryptionKey"] = transformed - } - - if v, ok := res["sourceImageEncryptionKey"]; ok { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformed["rawKey"] = d.Get("source_image_encryption_key.0.raw_key") - transformed["sha256"] = 
original["sha256"] - - if kmsKeyName, ok := original["kmsKeyName"]; ok { - - transformed["kmsKeyName"] = resource_compute_disk_strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] - } - - if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { - transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount - } - - res["sourceImageEncryptionKey"] = transformed - } - - if v, ok := res["sourceSnapshotEncryptionKey"]; ok { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformed["rawKey"] = d.Get("source_snapshot_encryption_key.0.raw_key") - transformed["sha256"] = original["sha256"] - - if kmsKeyName, ok := original["kmsKeyName"]; ok { - - transformed["kmsKeyName"] = resource_compute_disk_strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] - } - - if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { - transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount - } - - res["sourceSnapshotEncryptionKey"] = transformed - } - - return res, nil -} - -func resourceComputeDiskResourcePolicyAttachment() *resource_compute_disk_resource_policy_attachment_schema.Resource { - return &resource_compute_disk_resource_policy_attachment_schema.Resource{ - Create: resourceComputeDiskResourcePolicyAttachmentCreate, - Read: resourceComputeDiskResourcePolicyAttachmentRead, - Delete: resourceComputeDiskResourcePolicyAttachmentDelete, - - Importer: &resource_compute_disk_resource_policy_attachment_schema.ResourceImporter{ - State: resourceComputeDiskResourcePolicyAttachmentImport, - }, - - Timeouts: &resource_compute_disk_resource_policy_attachment_schema.ResourceTimeout{ - Create: resource_compute_disk_resource_policy_attachment_schema.DefaultTimeout(4 * resource_compute_disk_resource_policy_attachment_time.Minute), - Delete: resource_compute_disk_resource_policy_attachment_schema.DefaultTimeout(4 * resource_compute_disk_resource_policy_attachment_time.Minute), - }, - - Schema: 
map[string]*resource_compute_disk_resource_policy_attachment_schema.Schema{ - "disk": { - Type: resource_compute_disk_resource_policy_attachment_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the disk in which the resource policies are attached to.`, - }, - "name": { - Type: resource_compute_disk_resource_policy_attachment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource policy to be attached to the disk for scheduling snapshot -creation. Do not specify the self link.`, - }, - "zone": { - Type: resource_compute_disk_resource_policy_attachment_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the zone where the disk resides.`, - }, - "project": { - Type: resource_compute_disk_resource_policy_attachment_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeDiskResourcePolicyAttachmentCreate(d *resource_compute_disk_resource_policy_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputeDiskResourcePolicyAttachmentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_disk_resource_policy_attachment_reflect.ValueOf(nameProp)) && (ok || !resource_compute_disk_resource_policy_attachment_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - obj, err = resourceComputeDiskResourcePolicyAttachmentEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{disk}}/addResourcePolicies") - if err != nil { - return err - } - - resource_compute_disk_resource_policy_attachment_log.Printf("[DEBUG] Creating new DiskResourcePolicyAttachment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_disk_resource_policy_attachment_fmt.Errorf("Error fetching project for DiskResourcePolicyAttachment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_disk_resource_policy_attachment_schema.TimeoutCreate)) - if err != nil { - return resource_compute_disk_resource_policy_attachment_fmt.Errorf("Error creating DiskResourcePolicyAttachment: %s", err) - } - - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{disk}}/{{name}}") - if err != nil { - return resource_compute_disk_resource_policy_attachment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating DiskResourcePolicyAttachment", userAgent, - d.Timeout(resource_compute_disk_resource_policy_attachment_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_disk_resource_policy_attachment_fmt.Errorf("Error waiting to create DiskResourcePolicyAttachment: %s", err) - } - - resource_compute_disk_resource_policy_attachment_log.Printf("[DEBUG] Finished creating DiskResourcePolicyAttachment %q: %#v", d.Id(), res) - - return resourceComputeDiskResourcePolicyAttachmentRead(d, meta) -} - -func resourceComputeDiskResourcePolicyAttachmentRead(d *resource_compute_disk_resource_policy_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } 
- - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{disk}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_disk_resource_policy_attachment_fmt.Errorf("Error fetching project for DiskResourcePolicyAttachment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_disk_resource_policy_attachment_fmt.Sprintf("ComputeDiskResourcePolicyAttachment %q", d.Id())) - } - - res, err = flattenNestedComputeDiskResourcePolicyAttachment(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_disk_resource_policy_attachment_log.Printf("[DEBUG] Removing ComputeDiskResourcePolicyAttachment because it couldn't be matched.") - d.SetId("") - return nil - } - - res, err = resourceComputeDiskResourcePolicyAttachmentDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_disk_resource_policy_attachment_log.Printf("[DEBUG] Removing ComputeDiskResourcePolicyAttachment because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_compute_disk_resource_policy_attachment_fmt.Errorf("Error reading DiskResourcePolicyAttachment: %s", err) - } - - if err := d.Set("name", flattenNestedComputeDiskResourcePolicyAttachmentName(res["name"], d, config)); err != nil { - return resource_compute_disk_resource_policy_attachment_fmt.Errorf("Error reading DiskResourcePolicyAttachment: %s", err) - } - - return nil -} - -func resourceComputeDiskResourcePolicyAttachmentDelete(d *resource_compute_disk_resource_policy_attachment_schema.ResourceData, meta interface{}) error { - config := 
meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_disk_resource_policy_attachment_fmt.Errorf("Error fetching project for DiskResourcePolicyAttachment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/disks/{{disk}}/removeResourcePolicies") - if err != nil { - return err - } - - var obj map[string]interface{} - obj = make(map[string]interface{}) - - zone, err := getZone(d, config) - if err != nil { - return err - } - if zone == "" { - return resource_compute_disk_resource_policy_attachment_fmt.Errorf("zone must be non-empty - set in resource or at provider-level") - } - - region := getRegionFromZone(zone) - if region == "" { - return resource_compute_disk_resource_policy_attachment_fmt.Errorf("invalid zone %q, unable to infer region from zone", zone) - } - - name, err := expandNestedComputeDiskResourcePolicyAttachmentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_disk_resource_policy_attachment_reflect.ValueOf(name)) && (ok || !resource_compute_disk_resource_policy_attachment_reflect.DeepEqual(v, name)) { - obj["resourcePolicies"] = []interface{}{resource_compute_disk_resource_policy_attachment_fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, name)} - } - resource_compute_disk_resource_policy_attachment_log.Printf("[DEBUG] Deleting DiskResourcePolicyAttachment %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_disk_resource_policy_attachment_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, 
"DiskResourcePolicyAttachment") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting DiskResourcePolicyAttachment", userAgent, - d.Timeout(resource_compute_disk_resource_policy_attachment_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_disk_resource_policy_attachment_log.Printf("[DEBUG] Finished deleting DiskResourcePolicyAttachment %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeDiskResourcePolicyAttachmentImport(d *resource_compute_disk_resource_policy_attachment_schema.ResourceData, meta interface{}) ([]*resource_compute_disk_resource_policy_attachment_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/disks/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{disk}}/{{name}}") - if err != nil { - return nil, resource_compute_disk_resource_policy_attachment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_disk_resource_policy_attachment_schema.ResourceData{d}, nil -} - -func flattenNestedComputeDiskResourcePolicyAttachmentName(v interface{}, d *resource_compute_disk_resource_policy_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedComputeDiskResourcePolicyAttachmentName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeDiskResourcePolicyAttachmentEncoder(d *resource_compute_disk_resource_policy_attachment_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - zone, err := getZone(d, config) - if err != nil { - return 
nil, err - } - if zone == "" { - return nil, resource_compute_disk_resource_policy_attachment_fmt.Errorf("zone must be non-empty - set in resource or at provider-level") - } - - region := getRegionFromZone(zone) - if region == "" { - return nil, resource_compute_disk_resource_policy_attachment_fmt.Errorf("invalid zone %q, unable to infer region from zone", zone) - } - - obj["resourcePolicies"] = []interface{}{resource_compute_disk_resource_policy_attachment_fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, obj["name"])} - delete(obj, "name") - return obj, nil -} - -func flattenNestedComputeDiskResourcePolicyAttachment(d *resource_compute_disk_resource_policy_attachment_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["resourcePolicies"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_compute_disk_resource_policy_attachment_fmt.Errorf("expected list or map for value resourcePolicies. 
Actual value: %v", v) - } - - _, item, err := resourceComputeDiskResourcePolicyAttachmentFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeDiskResourcePolicyAttachmentFindNestedObjectInList(d *resource_compute_disk_resource_policy_attachment_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName, err := expandNestedComputeDiskResourcePolicyAttachmentName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputeDiskResourcePolicyAttachmentName(expectedName, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - - item := map[string]interface{}{ - "name": itemRaw, - } - - item, err := resourceComputeDiskResourcePolicyAttachmentDecoder(d, meta, item) - if err != nil { - return -1, nil, err - } - - itemName := flattenNestedComputeDiskResourcePolicyAttachmentName(item["name"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_disk_resource_policy_attachment_reflect.ValueOf(itemName)) && isEmptyValue(resource_compute_disk_resource_policy_attachment_reflect.ValueOf(expectedFlattenedName))) && !resource_compute_disk_resource_policy_attachment_reflect.DeepEqual(itemName, expectedFlattenedName) { - resource_compute_disk_resource_policy_attachment_log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - resource_compute_disk_resource_policy_attachment_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func resourceComputeDiskResourcePolicyAttachmentDecoder(d *resource_compute_disk_resource_policy_attachment_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - res["name"] = GetResourceNameFromSelfLink(res["name"].(string)) 
- return res, nil -} - -func resourceComputeExternalVpnGateway() *resource_compute_external_vpn_gateway_schema.Resource { - return &resource_compute_external_vpn_gateway_schema.Resource{ - Create: resourceComputeExternalVpnGatewayCreate, - Read: resourceComputeExternalVpnGatewayRead, - Delete: resourceComputeExternalVpnGatewayDelete, - - Importer: &resource_compute_external_vpn_gateway_schema.ResourceImporter{ - State: resourceComputeExternalVpnGatewayImport, - }, - - Timeouts: &resource_compute_external_vpn_gateway_schema.ResourceTimeout{ - Create: resource_compute_external_vpn_gateway_schema.DefaultTimeout(4 * resource_compute_external_vpn_gateway_time.Minute), - Delete: resource_compute_external_vpn_gateway_schema.DefaultTimeout(4 * resource_compute_external_vpn_gateway_time.Minute), - }, - - Schema: map[string]*resource_compute_external_vpn_gateway_schema.Schema{ - "name": { - Type: resource_compute_external_vpn_gateway_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "description": { - Type: resource_compute_external_vpn_gateway_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "interface": { - Type: resource_compute_external_vpn_gateway_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A list of interfaces on this external VPN gateway.`, - Elem: &resource_compute_external_vpn_gateway_schema.Resource{ - Schema: map[string]*resource_compute_external_vpn_gateway_schema.Schema{ - "id": { - Type: resource_compute_external_vpn_gateway_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The numeric ID for this interface. Allowed values are based on the redundancy type -of this external VPN gateway -* '0 - SINGLE_IP_INTERNALLY_REDUNDANT' -* '0, 1 - TWO_IPS_REDUNDANCY' -* '0, 1, 2, 3 - FOUR_IPS_REDUNDANCY'`, - }, - "ip_address": { - Type: resource_compute_external_vpn_gateway_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `IP address of the interface in the external VPN gateway. -Only IPv4 is supported. 
This IP address can be either from -your on-premise gateway or another Cloud provider's VPN gateway, -it cannot be an IP address from Google Compute Engine.`, - }, - }, - }, - }, - "redundancy_type": { - Type: resource_compute_external_vpn_gateway_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_external_vpn_gateway_validation.StringInSlice([]string{"FOUR_IPS_REDUNDANCY", "SINGLE_IP_INTERNALLY_REDUNDANT", "TWO_IPS_REDUNDANCY", ""}, false), - Description: `Indicates the redundancy type of this external VPN gateway Possible values: ["FOUR_IPS_REDUNDANCY", "SINGLE_IP_INTERNALLY_REDUNDANT", "TWO_IPS_REDUNDANCY"]`, - }, - "project": { - Type: resource_compute_external_vpn_gateway_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_external_vpn_gateway_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeExternalVpnGatewayCreate(d *resource_compute_external_vpn_gateway_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeExternalVpnGatewayDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_external_vpn_gateway_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_external_vpn_gateway_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeExternalVpnGatewayName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_external_vpn_gateway_reflect.ValueOf(nameProp)) && (ok || !resource_compute_external_vpn_gateway_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - 
redundancyTypeProp, err := expandComputeExternalVpnGatewayRedundancyType(d.Get("redundancy_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("redundancy_type"); !isEmptyValue(resource_compute_external_vpn_gateway_reflect.ValueOf(redundancyTypeProp)) && (ok || !resource_compute_external_vpn_gateway_reflect.DeepEqual(v, redundancyTypeProp)) { - obj["redundancyType"] = redundancyTypeProp - } - interfacesProp, err := expandComputeExternalVpnGatewayInterface(d.Get("interface"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("interface"); !isEmptyValue(resource_compute_external_vpn_gateway_reflect.ValueOf(interfacesProp)) && (ok || !resource_compute_external_vpn_gateway_reflect.DeepEqual(v, interfacesProp)) { - obj["interfaces"] = interfacesProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/externalVpnGateways") - if err != nil { - return err - } - - resource_compute_external_vpn_gateway_log.Printf("[DEBUG] Creating new ExternalVpnGateway: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_external_vpn_gateway_fmt.Errorf("Error fetching project for ExternalVpnGateway: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_external_vpn_gateway_schema.TimeoutCreate)) - if err != nil { - return resource_compute_external_vpn_gateway_fmt.Errorf("Error creating ExternalVpnGateway: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/externalVpnGateways/{{name}}") - if err != nil { - return resource_compute_external_vpn_gateway_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating ExternalVpnGateway", 
userAgent, - d.Timeout(resource_compute_external_vpn_gateway_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_external_vpn_gateway_fmt.Errorf("Error waiting to create ExternalVpnGateway: %s", err) - } - - resource_compute_external_vpn_gateway_log.Printf("[DEBUG] Finished creating ExternalVpnGateway %q: %#v", d.Id(), res) - - return resourceComputeExternalVpnGatewayRead(d, meta) -} - -func resourceComputeExternalVpnGatewayRead(d *resource_compute_external_vpn_gateway_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/externalVpnGateways/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_external_vpn_gateway_fmt.Errorf("Error fetching project for ExternalVpnGateway: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_external_vpn_gateway_fmt.Sprintf("ComputeExternalVpnGateway %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_external_vpn_gateway_fmt.Errorf("Error reading ExternalVpnGateway: %s", err) - } - - if err := d.Set("description", flattenComputeExternalVpnGatewayDescription(res["description"], d, config)); err != nil { - return resource_compute_external_vpn_gateway_fmt.Errorf("Error reading ExternalVpnGateway: %s", err) - } - if err := d.Set("name", flattenComputeExternalVpnGatewayName(res["name"], d, config)); err != nil { - return resource_compute_external_vpn_gateway_fmt.Errorf("Error reading ExternalVpnGateway: %s", err) - } - if err := 
d.Set("redundancy_type", flattenComputeExternalVpnGatewayRedundancyType(res["redundancyType"], d, config)); err != nil { - return resource_compute_external_vpn_gateway_fmt.Errorf("Error reading ExternalVpnGateway: %s", err) - } - if err := d.Set("interface", flattenComputeExternalVpnGatewayInterface(res["interfaces"], d, config)); err != nil { - return resource_compute_external_vpn_gateway_fmt.Errorf("Error reading ExternalVpnGateway: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_external_vpn_gateway_fmt.Errorf("Error reading ExternalVpnGateway: %s", err) - } - - return nil -} - -func resourceComputeExternalVpnGatewayDelete(d *resource_compute_external_vpn_gateway_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_external_vpn_gateway_fmt.Errorf("Error fetching project for ExternalVpnGateway: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/externalVpnGateways/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_external_vpn_gateway_log.Printf("[DEBUG] Deleting ExternalVpnGateway %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_external_vpn_gateway_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ExternalVpnGateway") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting ExternalVpnGateway", userAgent, - d.Timeout(resource_compute_external_vpn_gateway_schema.TimeoutDelete)) - - if err != nil { - return err - } 
- - resource_compute_external_vpn_gateway_log.Printf("[DEBUG] Finished deleting ExternalVpnGateway %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeExternalVpnGatewayImport(d *resource_compute_external_vpn_gateway_schema.ResourceData, meta interface{}) ([]*resource_compute_external_vpn_gateway_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/externalVpnGateways/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/externalVpnGateways/{{name}}") - if err != nil { - return nil, resource_compute_external_vpn_gateway_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_external_vpn_gateway_schema.ResourceData{d}, nil -} - -func flattenComputeExternalVpnGatewayDescription(v interface{}, d *resource_compute_external_vpn_gateway_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeExternalVpnGatewayName(v interface{}, d *resource_compute_external_vpn_gateway_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeExternalVpnGatewayRedundancyType(v interface{}, d *resource_compute_external_vpn_gateway_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeExternalVpnGatewayInterface(v interface{}, d *resource_compute_external_vpn_gateway_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "id": flattenComputeExternalVpnGatewayInterfaceId(original["id"], d, config), - "ip_address": flattenComputeExternalVpnGatewayInterfaceIpAddress(original["ipAddress"], d, config), - 
}) - } - return transformed -} - -func flattenComputeExternalVpnGatewayInterfaceId(v interface{}, d *resource_compute_external_vpn_gateway_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_external_vpn_gateway_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeExternalVpnGatewayInterfaceIpAddress(v interface{}, d *resource_compute_external_vpn_gateway_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeExternalVpnGatewayDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeExternalVpnGatewayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeExternalVpnGatewayRedundancyType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeExternalVpnGatewayInterface(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedId, err := expandComputeExternalVpnGatewayInterfaceId(original["id"], d, config) - if err != nil { - return nil, err - } else { - transformed["id"] = transformedId - } - - transformedIpAddress, err := expandComputeExternalVpnGatewayInterfaceIpAddress(original["ip_address"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_external_vpn_gateway_reflect.ValueOf(transformedIpAddress); val.IsValid() && !isEmptyValue(val) { - transformed["ipAddress"] = transformedIpAddress - } - - req = append(req, transformed) - } - return 
req, nil -} - -func expandComputeExternalVpnGatewayInterfaceId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeExternalVpnGatewayInterfaceIpAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeFirewallRuleHash(v interface{}) int { - var buf resource_compute_firewall_bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(resource_compute_firewall_fmt.Sprintf("%s-", resource_compute_firewall_strings.ToLower(m["protocol"].(string)))) - - if v, ok := m["ports"]; ok && v != nil { - s := convertStringArr(v.([]interface{})) - resource_compute_firewall_sort.Strings(s) - - for _, v := range s { - buf.WriteString(resource_compute_firewall_fmt.Sprintf("%s-", v)) - } - } - - return hashcode(buf.String()) -} - -func compareCaseInsensitive(k, old, new string, d *resource_compute_firewall_schema.ResourceData) bool { - return resource_compute_firewall_strings.ToLower(old) == resource_compute_firewall_strings.ToLower(new) -} - -func diffSuppressEnableLogging(k, old, new string, d *resource_compute_firewall_schema.ResourceData) bool { - if k == "log_config.#" { - if new == "0" && d.Get("enable_logging").(bool) { - return true - } - } - - return false -} - -func resourceComputeFirewallEnableLoggingCustomizeDiff(_ resource_compute_firewall_context.Context, diff *resource_compute_firewall_schema.ResourceDiff, v interface{}) error { - enableLogging, enableExists := diff.GetOkExists("enable_logging") - if !enableExists { - return nil - } - - logConfigExists := diff.Get("log_config.#").(int) != 0 - if logConfigExists && enableLogging == false { - return resource_compute_firewall_fmt.Errorf("log_config cannot be defined when enable_logging is false") - } - - return nil -} - -func resourceComputeFirewallSourceFieldsCustomizeDiff(_ resource_compute_firewall_context.Context, diff *resource_compute_firewall_schema.ResourceDiff, v 
interface{}) error { - direction := diff.Get("direction").(string) - - if direction != "EGRESS" { - _, tagsOk := diff.GetOk("source_tags") - _, rangesOk := diff.GetOk("source_ranges") - _, sasOk := diff.GetOk("source_service_accounts") - - _, tagsExist := diff.GetOkExists("source_tags") - - _, sasExist := diff.GetOkExists("source_service_accounts") - - if !tagsOk && !rangesOk && !sasOk && !tagsExist && !sasExist { - return resource_compute_firewall_fmt.Errorf("one of source_tags, source_ranges, or source_service_accounts must be defined") - } - } - - return nil -} - -func diffSuppressSourceRanges(k, old, new string, d *resource_compute_firewall_schema.ResourceData) bool { - if k == "source_ranges.#" { - if old == "1" && new == "0" { - - return true - } - - return false - } - kLength := "source_ranges.#" - oldLength, newLength := d.GetChange(kLength) - oldInt, ok := oldLength.(int) - - if !ok { - return false - } - - newInt, ok := newLength.(int) - if !ok { - return false - } - - if oldInt == 1 && newInt == 1 { - if old == "0.0.0.0/0" && new == "" { - return true - } - } - - return false -} - -func resourceComputeFirewall() *resource_compute_firewall_schema.Resource { - return &resource_compute_firewall_schema.Resource{ - Create: resourceComputeFirewallCreate, - Read: resourceComputeFirewallRead, - Update: resourceComputeFirewallUpdate, - Delete: resourceComputeFirewallDelete, - - Importer: &resource_compute_firewall_schema.ResourceImporter{ - State: resourceComputeFirewallImport, - }, - - Timeouts: &resource_compute_firewall_schema.ResourceTimeout{ - Create: resource_compute_firewall_schema.DefaultTimeout(4 * resource_compute_firewall_time.Minute), - Update: resource_compute_firewall_schema.DefaultTimeout(4 * resource_compute_firewall_time.Minute), - Delete: resource_compute_firewall_schema.DefaultTimeout(4 * resource_compute_firewall_time.Minute), - }, - - SchemaVersion: 1, - MigrateState: resourceComputeFirewallMigrateState, - CustomizeDiff: 
resource_compute_firewall_customdiff.All( - resourceComputeFirewallEnableLoggingCustomizeDiff, - resourceComputeFirewallSourceFieldsCustomizeDiff, - ), - - Schema: map[string]*resource_compute_firewall_schema.Schema{ - "name": { - Type: resource_compute_firewall_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCPName, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "network": { - Type: resource_compute_firewall_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name or self_link of the network to attach this firewall to.`, - }, - "allow": { - Type: resource_compute_firewall_schema.TypeSet, - Optional: true, - Description: `The list of ALLOW rules specified by this firewall. Each rule -specifies a protocol and port-range tuple that describes a permitted -connection.`, - Elem: computeFirewallAllowSchema(), - Set: resourceComputeFirewallRuleHash, - ExactlyOneOf: []string{"allow", "deny"}, - }, - "deny": { - Type: resource_compute_firewall_schema.TypeSet, - Optional: true, - Description: `The list of DENY rules specified by this firewall. Each rule specifies -a protocol and port-range tuple that describes a denied connection.`, - Elem: computeFirewallDenySchema(), - Set: resourceComputeFirewallRuleHash, - ExactlyOneOf: []string{"allow", "deny"}, - }, - "description": { - Type: resource_compute_firewall_schema.TypeString, - Optional: true, - Description: `An optional description of this resource. 
Provide this property when -you create the resource.`, - }, - "destination_ranges": { - Type: resource_compute_firewall_schema.TypeSet, - Computed: true, - Optional: true, - Description: `If destination ranges are specified, the firewall will apply only to -traffic that has destination IP address in these ranges. These ranges -must be expressed in CIDR format. Only IPv4 is supported.`, - Elem: &resource_compute_firewall_schema.Schema{ - Type: resource_compute_firewall_schema.TypeString, - }, - Set: resource_compute_firewall_schema.HashString, - ConflictsWith: []string{"source_ranges", "source_tags"}, - }, - "direction": { - Type: resource_compute_firewall_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_firewall_validation.StringInSlice([]string{"INGRESS", "EGRESS", ""}, false), - Description: `Direction of traffic to which this firewall applies; default is -INGRESS. Note: For INGRESS traffic, it is NOT supported to specify -destinationRanges; For EGRESS traffic, it is NOT supported to specify -'source_ranges' OR 'source_tags'. For INGRESS traffic, one of 'source_ranges', -'source_tags' or 'source_service_accounts' is required. Possible values: ["INGRESS", "EGRESS"]`, - }, - "disabled": { - Type: resource_compute_firewall_schema.TypeBool, - Optional: true, - Description: `Denotes whether the firewall rule is disabled, i.e not applied to the -network it is associated with. When set to true, the firewall rule is -not enforced and the network behaves as if it did not exist. If this -is unspecified, the firewall rule will be enabled.`, - }, - "log_config": { - Type: resource_compute_firewall_schema.TypeList, - Optional: true, - DiffSuppressFunc: diffSuppressEnableLogging, - Description: `This field denotes the logging options for a particular firewall rule. 
-If defined, logging is enabled, and logs will be exported to Cloud Logging.`, - MaxItems: 1, - Elem: &resource_compute_firewall_schema.Resource{ - Schema: map[string]*resource_compute_firewall_schema.Schema{ - "metadata": { - Type: resource_compute_firewall_schema.TypeString, - Required: true, - ValidateFunc: resource_compute_firewall_validation.StringInSlice([]string{"EXCLUDE_ALL_METADATA", "INCLUDE_ALL_METADATA"}, false), - Description: `This field denotes whether to include or exclude metadata for firewall logs. Possible values: ["EXCLUDE_ALL_METADATA", "INCLUDE_ALL_METADATA"]`, - }, - }, - }, - }, - "priority": { - Type: resource_compute_firewall_schema.TypeInt, - Optional: true, - ValidateFunc: resource_compute_firewall_validation.IntBetween(0, 65535), - Description: `Priority for this rule. This is an integer between 0 and 65535, both -inclusive. When not specified, the value assumed is 1000. Relative -priorities determine precedence of conflicting rules. Lower value of -priority implies higher precedence (eg, a rule with priority 0 has -higher precedence than a rule with priority 1). DENY rules take -precedence over ALLOW rules having equal priority.`, - Default: 1000, - }, - "source_ranges": { - Type: resource_compute_firewall_schema.TypeSet, - Optional: true, - DiffSuppressFunc: diffSuppressSourceRanges, - Description: `If source ranges are specified, the firewall will apply only to -traffic that has source IP address in these ranges. These ranges must -be expressed in CIDR format. One or both of sourceRanges and -sourceTags may be set. If both properties are set, the firewall will -apply to traffic that has source IP address within sourceRanges OR the -source IP that belongs to a tag listed in the sourceTags property. The -connection does not need to match both properties for the firewall to -apply. 
Only IPv4 is supported.`, - Elem: &resource_compute_firewall_schema.Schema{ - Type: resource_compute_firewall_schema.TypeString, - }, - Set: resource_compute_firewall_schema.HashString, - ConflictsWith: []string{"destination_ranges"}, - }, - "source_service_accounts": { - Type: resource_compute_firewall_schema.TypeSet, - Optional: true, - Description: `If source service accounts are specified, the firewall will apply only -to traffic originating from an instance with a service account in this -list. Source service accounts cannot be used to control traffic to an -instance's external IP address because service accounts are associated -with an instance, not an IP address. sourceRanges can be set at the -same time as sourceServiceAccounts. If both are set, the firewall will -apply to traffic that has source IP address within sourceRanges OR the -source IP belongs to an instance with service account listed in -sourceServiceAccount. The connection does not need to match both -properties for the firewall to apply. sourceServiceAccounts cannot be -used at the same time as sourceTags or targetTags.`, - MaxItems: 10, - Elem: &resource_compute_firewall_schema.Schema{ - Type: resource_compute_firewall_schema.TypeString, - }, - Set: resource_compute_firewall_schema.HashString, - ConflictsWith: []string{"source_tags", "target_tags"}, - }, - "source_tags": { - Type: resource_compute_firewall_schema.TypeSet, - Optional: true, - Description: `If source tags are specified, the firewall will apply only to traffic -with source IP that belongs to a tag listed in source tags. Source -tags cannot be used to control traffic to an instance's external IP -address. Because tags are associated with an instance, not an IP -address. One or both of sourceRanges and sourceTags may be set. If -both properties are set, the firewall will apply to traffic that has -source IP address within sourceRanges OR the source IP that belongs to -a tag listed in the sourceTags property. 
The connection does not need -to match both properties for the firewall to apply.`, - Elem: &resource_compute_firewall_schema.Schema{ - Type: resource_compute_firewall_schema.TypeString, - }, - Set: resource_compute_firewall_schema.HashString, - ConflictsWith: []string{"destination_ranges", "source_service_accounts", "target_service_accounts"}, - }, - "target_service_accounts": { - Type: resource_compute_firewall_schema.TypeSet, - Optional: true, - Description: `A list of service accounts indicating sets of instances located in the -network that may make network connections as specified in allowed[]. -targetServiceAccounts cannot be used at the same time as targetTags or -sourceTags. If neither targetServiceAccounts nor targetTags are -specified, the firewall rule applies to all instances on the specified -network.`, - MaxItems: 10, - Elem: &resource_compute_firewall_schema.Schema{ - Type: resource_compute_firewall_schema.TypeString, - }, - Set: resource_compute_firewall_schema.HashString, - ConflictsWith: []string{"source_tags", "target_tags"}, - }, - "target_tags": { - Type: resource_compute_firewall_schema.TypeSet, - Optional: true, - Description: `A list of instance tags indicating sets of instances located in the -network that may make network connections as specified in allowed[]. 
-If no targetTags are specified, the firewall rule applies to all -instances on the specified network.`, - Elem: &resource_compute_firewall_schema.Schema{ - Type: resource_compute_firewall_schema.TypeString, - }, - Set: resource_compute_firewall_schema.HashString, - ConflictsWith: []string{"source_service_accounts", "target_service_accounts"}, - }, - "creation_timestamp": { - Type: resource_compute_firewall_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "enable_logging": { - Type: resource_compute_firewall_schema.TypeBool, - Optional: true, - Computed: true, - Deprecated: "Deprecated in favor of log_config", - Description: "This field denotes whether to enable logging for a particular firewall rule. If logging is enabled, logs will be exported to Stackdriver.", - }, - "project": { - Type: resource_compute_firewall_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_firewall_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func computeFirewallAllowSchema() *resource_compute_firewall_schema.Resource { - return &resource_compute_firewall_schema.Resource{ - Schema: map[string]*resource_compute_firewall_schema.Schema{ - "protocol": { - Type: resource_compute_firewall_schema.TypeString, - Required: true, - DiffSuppressFunc: compareCaseInsensitive, - Description: `The IP protocol to which this rule applies. The protocol type is -required when creating a firewall rule. This value can either be -one of the following well known protocol strings (tcp, udp, -icmp, esp, ah, sctp, ipip, all), or the IP protocol number.`, - }, - "ports": { - Type: resource_compute_firewall_schema.TypeList, - Optional: true, - Description: `An optional list of ports to which this rule applies. This field -is only applicable for UDP or TCP protocol. Each entry must be -either an integer or a range. 
If not specified, this rule -applies to connections through any port. - -Example inputs include: ["22"], ["80","443"], and -["12345-12349"].`, - Elem: &resource_compute_firewall_schema.Schema{ - Type: resource_compute_firewall_schema.TypeString, - }, - }, - }, - } -} - -func computeFirewallDenySchema() *resource_compute_firewall_schema.Resource { - return &resource_compute_firewall_schema.Resource{ - Schema: map[string]*resource_compute_firewall_schema.Schema{ - "protocol": { - Type: resource_compute_firewall_schema.TypeString, - Required: true, - DiffSuppressFunc: compareCaseInsensitive, - Description: `The IP protocol to which this rule applies. The protocol type is -required when creating a firewall rule. This value can either be -one of the following well known protocol strings (tcp, udp, -icmp, esp, ah, sctp, ipip, all), or the IP protocol number.`, - }, - "ports": { - Type: resource_compute_firewall_schema.TypeList, - Optional: true, - Description: `An optional list of ports to which this rule applies. This field -is only applicable for UDP or TCP protocol. Each entry must be -either an integer or a range. If not specified, this rule -applies to connections through any port. 
- -Example inputs include: ["22"], ["80","443"], and -["12345-12349"].`, - Elem: &resource_compute_firewall_schema.Schema{ - Type: resource_compute_firewall_schema.TypeString, - }, - }, - }, - } -} - -func resourceComputeFirewallCreate(d *resource_compute_firewall_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - allowedProp, err := expandComputeFirewallAllow(d.Get("allow"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("allow"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(allowedProp)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, allowedProp)) { - obj["allowed"] = allowedProp - } - deniedProp, err := expandComputeFirewallDeny(d.Get("deny"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deny"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(deniedProp)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, deniedProp)) { - obj["denied"] = deniedProp - } - descriptionProp, err := expandComputeFirewallDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - destinationRangesProp, err := expandComputeFirewallDestinationRanges(d.Get("destination_ranges"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("destination_ranges"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(destinationRangesProp)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, destinationRangesProp)) { - obj["destinationRanges"] = destinationRangesProp - } - directionProp, err := expandComputeFirewallDirection(d.Get("direction"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("direction"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(directionProp)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, directionProp)) { - obj["direction"] = directionProp - } - disabledProp, err := expandComputeFirewallDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); ok || !resource_compute_firewall_reflect.DeepEqual(v, disabledProp) { - obj["disabled"] = disabledProp - } - logConfigProp, err := expandComputeFirewallLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); ok || !resource_compute_firewall_reflect.DeepEqual(v, logConfigProp) { - obj["logConfig"] = logConfigProp - } - nameProp, err := expandComputeFirewallName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(nameProp)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - networkProp, err := expandComputeFirewallNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(networkProp)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - priorityProp, err := expandComputeFirewallPriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); ok || !resource_compute_firewall_reflect.DeepEqual(v, priorityProp) { - obj["priority"] = priorityProp - } - sourceRangesProp, err := expandComputeFirewallSourceRanges(d.Get("source_ranges"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_ranges"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(sourceRangesProp)) && 
(ok || !resource_compute_firewall_reflect.DeepEqual(v, sourceRangesProp)) { - obj["sourceRanges"] = sourceRangesProp - } - sourceServiceAccountsProp, err := expandComputeFirewallSourceServiceAccounts(d.Get("source_service_accounts"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_service_accounts"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(sourceServiceAccountsProp)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, sourceServiceAccountsProp)) { - obj["sourceServiceAccounts"] = sourceServiceAccountsProp - } - sourceTagsProp, err := expandComputeFirewallSourceTags(d.Get("source_tags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_tags"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(sourceTagsProp)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, sourceTagsProp)) { - obj["sourceTags"] = sourceTagsProp - } - targetServiceAccountsProp, err := expandComputeFirewallTargetServiceAccounts(d.Get("target_service_accounts"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_service_accounts"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(targetServiceAccountsProp)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, targetServiceAccountsProp)) { - obj["targetServiceAccounts"] = targetServiceAccountsProp - } - targetTagsProp, err := expandComputeFirewallTargetTags(d.Get("target_tags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_tags"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(targetTagsProp)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, targetTagsProp)) { - obj["targetTags"] = targetTagsProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewalls") - if err != nil { - return err - } - - resource_compute_firewall_log.Printf("[DEBUG] Creating new Firewall: %#v", obj) - billingProject := "" - - 
project, err := getProject(d, config) - if err != nil { - return resource_compute_firewall_fmt.Errorf("Error fetching project for Firewall: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_firewall_schema.TimeoutCreate)) - if err != nil { - return resource_compute_firewall_fmt.Errorf("Error creating Firewall: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/firewalls/{{name}}") - if err != nil { - return resource_compute_firewall_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating Firewall", userAgent, - d.Timeout(resource_compute_firewall_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_firewall_fmt.Errorf("Error waiting to create Firewall: %s", err) - } - - resource_compute_firewall_log.Printf("[DEBUG] Finished creating Firewall %q: %#v", d.Id(), res) - - return resourceComputeFirewallRead(d, meta) -} - -func resourceComputeFirewallRead(d *resource_compute_firewall_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewalls/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_firewall_fmt.Errorf("Error fetching project for Firewall: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, 
resource_compute_firewall_fmt.Sprintf("ComputeFirewall %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - - if err := d.Set("allow", flattenComputeFirewallAllow(res["allowed"], d, config)); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeFirewallCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("deny", flattenComputeFirewallDeny(res["denied"], d, config)); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("description", flattenComputeFirewallDescription(res["description"], d, config)); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("destination_ranges", flattenComputeFirewallDestinationRanges(res["destinationRanges"], d, config)); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("direction", flattenComputeFirewallDirection(res["direction"], d, config)); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("disabled", flattenComputeFirewallDisabled(res["disabled"], d, config)); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("log_config", flattenComputeFirewallLogConfig(res["logConfig"], d, config)); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("name", flattenComputeFirewallName(res["name"], d, config)); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("network", 
flattenComputeFirewallNetwork(res["network"], d, config)); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("priority", flattenComputeFirewallPriority(res["priority"], d, config)); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("source_ranges", flattenComputeFirewallSourceRanges(res["sourceRanges"], d, config)); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("source_service_accounts", flattenComputeFirewallSourceServiceAccounts(res["sourceServiceAccounts"], d, config)); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("source_tags", flattenComputeFirewallSourceTags(res["sourceTags"], d, config)); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("target_service_accounts", flattenComputeFirewallTargetServiceAccounts(res["targetServiceAccounts"], d, config)); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("target_tags", flattenComputeFirewallTargetTags(res["targetTags"], d, config)); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_firewall_fmt.Errorf("Error reading Firewall: %s", err) - } - - return nil -} - -func resourceComputeFirewallUpdate(d *resource_compute_firewall_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_firewall_fmt.Errorf("Error fetching project for 
Firewall: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - allowedProp, err := expandComputeFirewallAllow(d.Get("allow"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("allow"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(v)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, allowedProp)) { - obj["allowed"] = allowedProp - } - deniedProp, err := expandComputeFirewallDeny(d.Get("deny"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deny"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(v)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, deniedProp)) { - obj["denied"] = deniedProp - } - descriptionProp, err := expandComputeFirewallDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(v)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - destinationRangesProp, err := expandComputeFirewallDestinationRanges(d.Get("destination_ranges"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("destination_ranges"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(v)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, destinationRangesProp)) { - obj["destinationRanges"] = destinationRangesProp - } - disabledProp, err := expandComputeFirewallDisabled(d.Get("disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disabled"); ok || !resource_compute_firewall_reflect.DeepEqual(v, disabledProp) { - obj["disabled"] = disabledProp - } - logConfigProp, err := expandComputeFirewallLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); ok || !resource_compute_firewall_reflect.DeepEqual(v, logConfigProp) { - 
obj["logConfig"] = logConfigProp - } - networkProp, err := expandComputeFirewallNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(v)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - priorityProp, err := expandComputeFirewallPriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); ok || !resource_compute_firewall_reflect.DeepEqual(v, priorityProp) { - obj["priority"] = priorityProp - } - sourceRangesProp, err := expandComputeFirewallSourceRanges(d.Get("source_ranges"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_ranges"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(v)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, sourceRangesProp)) { - obj["sourceRanges"] = sourceRangesProp - } - sourceServiceAccountsProp, err := expandComputeFirewallSourceServiceAccounts(d.Get("source_service_accounts"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_service_accounts"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(v)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, sourceServiceAccountsProp)) { - obj["sourceServiceAccounts"] = sourceServiceAccountsProp - } - sourceTagsProp, err := expandComputeFirewallSourceTags(d.Get("source_tags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_tags"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(v)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, sourceTagsProp)) { - obj["sourceTags"] = sourceTagsProp - } - targetServiceAccountsProp, err := expandComputeFirewallTargetServiceAccounts(d.Get("target_service_accounts"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_service_accounts"); 
!isEmptyValue(resource_compute_firewall_reflect.ValueOf(v)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, targetServiceAccountsProp)) { - obj["targetServiceAccounts"] = targetServiceAccountsProp - } - targetTagsProp, err := expandComputeFirewallTargetTags(d.Get("target_tags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_tags"); !isEmptyValue(resource_compute_firewall_reflect.ValueOf(v)) && (ok || !resource_compute_firewall_reflect.DeepEqual(v, targetTagsProp)) { - obj["targetTags"] = targetTagsProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/firewalls/{{name}}") - if err != nil { - return err - } - - resource_compute_firewall_log.Printf("[DEBUG] Updating Firewall %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_firewall_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_firewall_fmt.Errorf("Error updating Firewall %q: %s", d.Id(), err) - } else { - resource_compute_firewall_log.Printf("[DEBUG] Finished updating Firewall %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating Firewall", userAgent, - d.Timeout(resource_compute_firewall_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeFirewallRead(d, meta) -} - -func resourceComputeFirewallDelete(d *resource_compute_firewall_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_firewall_fmt.Errorf("Error fetching project for Firewall: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/global/firewalls/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_firewall_log.Printf("[DEBUG] Deleting Firewall %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_firewall_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Firewall") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting Firewall", userAgent, - d.Timeout(resource_compute_firewall_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_firewall_log.Printf("[DEBUG] Finished deleting Firewall %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeFirewallImport(d *resource_compute_firewall_schema.ResourceData, meta interface{}) ([]*resource_compute_firewall_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/firewalls/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/firewalls/{{name}}") - if err != nil { - return nil, resource_compute_firewall_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_firewall_schema.ResourceData{d}, nil -} - -func flattenComputeFirewallAllow(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_compute_firewall_schema.NewSet(resourceComputeFirewallRuleHash, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "protocol": 
flattenComputeFirewallAllowProtocol(original["IPProtocol"], d, config), - "ports": flattenComputeFirewallAllowPorts(original["ports"], d, config), - }) - } - return transformed -} - -func flattenComputeFirewallAllowProtocol(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallAllowPorts(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallCreationTimestamp(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallDeny(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_compute_firewall_schema.NewSet(resourceComputeFirewallRuleHash, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "protocol": flattenComputeFirewallDenyProtocol(original["IPProtocol"], d, config), - "ports": flattenComputeFirewallDenyPorts(original["ports"], d, config), - }) - } - return transformed -} - -func flattenComputeFirewallDenyProtocol(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallDenyPorts(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallDescription(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallDestinationRanges(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_firewall_schema.NewSet(resource_compute_firewall_schema.HashString, 
v.([]interface{})) -} - -func flattenComputeFirewallDirection(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallDisabled(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallLogConfig(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - - v, ok := original["enable"] - if ok && !v.(bool) { - return nil - } - - transformed := make(map[string]interface{}) - transformed["metadata"] = original["metadata"] - return []interface{}{transformed} -} - -func flattenComputeFirewallName(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeFirewallNetwork(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeFirewallPriority(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_firewall_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeFirewallSourceRanges(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_firewall_schema.NewSet(resource_compute_firewall_schema.HashString, v.([]interface{})) -} - -func flattenComputeFirewallSourceServiceAccounts(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return 
resource_compute_firewall_schema.NewSet(resource_compute_firewall_schema.HashString, v.([]interface{})) -} - -func flattenComputeFirewallSourceTags(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_firewall_schema.NewSet(resource_compute_firewall_schema.HashString, v.([]interface{})) -} - -func flattenComputeFirewallTargetServiceAccounts(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_firewall_schema.NewSet(resource_compute_firewall_schema.HashString, v.([]interface{})) -} - -func flattenComputeFirewallTargetTags(v interface{}, d *resource_compute_firewall_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_firewall_schema.NewSet(resource_compute_firewall_schema.HashString, v.([]interface{})) -} - -func expandComputeFirewallAllow(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_firewall_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProtocol, err := expandComputeFirewallAllowProtocol(original["protocol"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_firewall_reflect.ValueOf(transformedProtocol); val.IsValid() && !isEmptyValue(val) { - transformed["IPProtocol"] = transformedProtocol - } - - transformedPorts, err := expandComputeFirewallAllowPorts(original["ports"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_firewall_reflect.ValueOf(transformedPorts); val.IsValid() && !isEmptyValue(val) { - transformed["ports"] = transformedPorts - } - - req = append(req, transformed) - } - return 
req, nil -} - -func expandComputeFirewallAllowProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallAllowPorts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallDeny(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_firewall_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProtocol, err := expandComputeFirewallDenyProtocol(original["protocol"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_firewall_reflect.ValueOf(transformedProtocol); val.IsValid() && !isEmptyValue(val) { - transformed["IPProtocol"] = transformedProtocol - } - - transformedPorts, err := expandComputeFirewallDenyPorts(original["ports"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_firewall_reflect.ValueOf(transformedPorts); val.IsValid() && !isEmptyValue(val) { - transformed["ports"] = transformedPorts - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeFirewallDenyProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallDenyPorts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallDestinationRanges(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_firewall_schema.Set).List() - return v, nil -} - -func expandComputeFirewallDirection(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - transformed := make(map[string]interface{}) - - if len(l) == 0 || l[0] == nil { - - transformed["enable"] = d.Get("enable_logging").(bool) - return transformed, nil - } - - raw := l[0] - original := raw.(map[string]interface{}) - - transformed["enable"] = true - transformed["metadata"] = original["metadata"] - - return transformed, nil -} - -func expandComputeFirewallName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_firewall_fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeFirewallPriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeFirewallSourceRanges(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_firewall_schema.Set).List() - return v, nil -} - -func expandComputeFirewallSourceServiceAccounts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_firewall_schema.Set).List() - return v, nil -} - -func expandComputeFirewallSourceTags(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_firewall_schema.Set).List() - return v, nil -} - -func expandComputeFirewallTargetServiceAccounts(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_firewall_schema.Set).List() - return v, nil -} - -func expandComputeFirewallTargetTags(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_firewall_schema.Set).List() - return v, nil -} - -func resourceComputeFirewallMigrateState( - v int, is *resource_compute_firewall_migrate_terraform.InstanceState, meta interface{}) (*resource_compute_firewall_migrate_terraform.InstanceState, error) { - if is.Empty() { - resource_compute_firewall_migrate_log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - switch v { - case 0: - resource_compute_firewall_migrate_log.Println("[INFO] Found Compute Firewall State v0; migrating to v1") - is, err := migrateFirewallStateV0toV1(is) - if err != nil { - return is, err - } - return is, nil - default: - return is, resource_compute_firewall_migrate_fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateFirewallStateV0toV1(is *resource_compute_firewall_migrate_terraform.InstanceState) (*resource_compute_firewall_migrate_terraform.InstanceState, error) { - resource_compute_firewall_migrate_log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - idx := 0 - portCount := 0 - newPorts := make(map[string]string) - keys := make([]string, len(is.Attributes)) - for k := range is.Attributes { - keys[idx] = k - idx++ - - } - resource_compute_firewall_migrate_sort.Strings(keys) - for _, k := range keys { - if !resource_compute_firewall_migrate_strings.HasPrefix(k, "allow.") { - continue - } - - if k == "allow.#" { - continue - } - - if resource_compute_firewall_migrate_strings.HasSuffix(k, ".ports.#") { - continue - } - - if resource_compute_firewall_migrate_strings.HasSuffix(k, ".protocol") { - continue - } - - kParts := resource_compute_firewall_migrate_strings.Split(k, ".") - - badFormat := false - if len(kParts) != 4 { - badFormat = true - } 
else if _, err := resource_compute_firewall_migrate_strconv.Atoi(kParts[1]); err != nil { - badFormat = true - } - - if badFormat { - return is, resource_compute_firewall_migrate_fmt.Errorf( - "migration error: found port key in unexpected format: %s", k) - } - allowHash, _ := resource_compute_firewall_migrate_strconv.Atoi(kParts[1]) - newK := resource_compute_firewall_migrate_fmt.Sprintf("allow.%d.ports.%d", allowHash, portCount) - portCount++ - newPorts[newK] = is.Attributes[k] - delete(is.Attributes, k) - } - - for k, v := range newPorts { - is.Attributes[k] = v - } - - resource_compute_firewall_migrate_log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} - -func resourceComputeFirewallPolicy() *resource_compute_firewall_policy_schema.Resource { - return &resource_compute_firewall_policy_schema.Resource{ - Create: resourceComputeFirewallPolicyCreate, - Read: resourceComputeFirewallPolicyRead, - Update: resourceComputeFirewallPolicyUpdate, - Delete: resourceComputeFirewallPolicyDelete, - - Importer: &resource_compute_firewall_policy_schema.ResourceImporter{ - State: resourceComputeFirewallPolicyImport, - }, - - Timeouts: &resource_compute_firewall_policy_schema.ResourceTimeout{ - Create: resource_compute_firewall_policy_schema.DefaultTimeout(10 * resource_compute_firewall_policy_time.Minute), - Update: resource_compute_firewall_policy_schema.DefaultTimeout(10 * resource_compute_firewall_policy_time.Minute), - Delete: resource_compute_firewall_policy_schema.DefaultTimeout(10 * resource_compute_firewall_policy_time.Minute), - }, - - Schema: map[string]*resource_compute_firewall_policy_schema.Schema{ - "parent": { - Type: resource_compute_firewall_policy_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The parent of the firewall policy.", - }, - - "short_name": { - Type: resource_compute_firewall_policy_schema.TypeString, - Required: true, - ForceNew: true, - 
Description: "User-provided name of the Organization firewall policy. The name should be unique in the organization in which the firewall policy is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - }, - - "description": { - Type: resource_compute_firewall_policy_schema.TypeString, - Optional: true, - Description: "An optional description of this resource. Provide this property when you create the resource.", - }, - - "creation_timestamp": { - Type: resource_compute_firewall_policy_schema.TypeString, - Computed: true, - Description: "Creation timestamp in RFC3339 text format.", - }, - - "fingerprint": { - Type: resource_compute_firewall_policy_schema.TypeString, - Computed: true, - Description: "Fingerprint of the resource. This field is used internally during updates of this resource.", - }, - - "firewall_policy_id": { - Type: resource_compute_firewall_policy_schema.TypeString, - Computed: true, - Description: "The unique identifier for the resource. This identifier is defined by the server.", - }, - - "name": { - Type: resource_compute_firewall_policy_schema.TypeString, - Computed: true, - Description: "Name of the resource. It is a numeric ID allocated by GCP which uniquely identifies the Firewall Policy.", - }, - - "rule_tuple_count": { - Type: resource_compute_firewall_policy_schema.TypeInt, - Computed: true, - Description: "Total count of all firewall policy rule tuples. 
A firewall policy can not exceed a set number of tuples.", - }, - - "self_link": { - Type: resource_compute_firewall_policy_schema.TypeString, - Computed: true, - Description: "Server-defined URL for the resource.", - }, - - "self_link_with_id": { - Type: resource_compute_firewall_policy_schema.TypeString, - Computed: true, - Description: "Server-defined URL for this resource with the resource id.", - }, - }, - } -} - -func resourceComputeFirewallPolicyCreate(d *resource_compute_firewall_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_compute_firewall_policy_computecompute.FirewallPolicy{ - Parent: resource_compute_firewall_policy_dcldcl.String(d.Get("parent").(string)), - ShortName: resource_compute_firewall_policy_dcldcl.String(d.Get("short_name").(string)), - Description: resource_compute_firewall_policy_dcldcl.String(d.Get("description").(string)), - } - - id, err := replaceVars(d, config, "locations/global/firewallPolicies/{{name}}") - if err != nil { - return resource_compute_firewall_policy_fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - createDirective := CreateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_firewall_policy_schema.TimeoutCreate)) - res, err := client.ApplyFirewallPolicy(resource_compute_firewall_policy_context.Background(), obj, createDirective...) 
- - if _, ok := err.(resource_compute_firewall_policy_dcldcl.DiffAfterApplyError); ok { - resource_compute_firewall_policy_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_compute_firewall_policy_fmt.Errorf("Error creating FirewallPolicy: %s", err) - } - - resource_compute_firewall_policy_log.Printf("[DEBUG] Finished creating FirewallPolicy %q: %#v", d.Id(), res) - - if err = d.Set("name", res.Name); err != nil { - return resource_compute_firewall_policy_fmt.Errorf("error setting name in state: %s", err) - } - - id, err = replaceVars(d, config, "locations/global/firewallPolicies/{{name}}") - if err != nil { - return resource_compute_firewall_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return resourceComputeFirewallPolicyRead(d, meta) -} - -func resourceComputeFirewallPolicyRead(d *resource_compute_firewall_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_compute_firewall_policy_computecompute.FirewallPolicy{ - Parent: resource_compute_firewall_policy_dcldcl.String(d.Get("parent").(string)), - ShortName: resource_compute_firewall_policy_dcldcl.String(d.Get("short_name").(string)), - Description: resource_compute_firewall_policy_dcldcl.String(d.Get("description").(string)), - Name: resource_compute_firewall_policy_dcldcl.StringOrNil(d.Get("name").(string)), - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_firewall_policy_schema.TimeoutRead)) - res, err := client.GetFirewallPolicy(resource_compute_firewall_policy_context.Background(), obj) - if err != nil { - resourceName := resource_compute_firewall_policy_fmt.Sprintf("ComputeFirewallPolicy %q", 
d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("parent", res.Parent); err != nil { - return resource_compute_firewall_policy_fmt.Errorf("error setting parent in state: %s", err) - } - if err = d.Set("short_name", res.ShortName); err != nil { - return resource_compute_firewall_policy_fmt.Errorf("error setting short_name in state: %s", err) - } - if err = d.Set("description", res.Description); err != nil { - return resource_compute_firewall_policy_fmt.Errorf("error setting description in state: %s", err) - } - if err = d.Set("creation_timestamp", res.CreationTimestamp); err != nil { - return resource_compute_firewall_policy_fmt.Errorf("error setting creation_timestamp in state: %s", err) - } - if err = d.Set("fingerprint", res.Fingerprint); err != nil { - return resource_compute_firewall_policy_fmt.Errorf("error setting fingerprint in state: %s", err) - } - if err = d.Set("firewall_policy_id", res.Id); err != nil { - return resource_compute_firewall_policy_fmt.Errorf("error setting firewall_policy_id in state: %s", err) - } - if err = d.Set("name", res.Name); err != nil { - return resource_compute_firewall_policy_fmt.Errorf("error setting name in state: %s", err) - } - if err = d.Set("rule_tuple_count", res.RuleTupleCount); err != nil { - return resource_compute_firewall_policy_fmt.Errorf("error setting rule_tuple_count in state: %s", err) - } - if err = d.Set("self_link", res.SelfLink); err != nil { - return resource_compute_firewall_policy_fmt.Errorf("error setting self_link in state: %s", err) - } - if err = d.Set("self_link_with_id", res.SelfLinkWithId); err != nil { - return resource_compute_firewall_policy_fmt.Errorf("error setting self_link_with_id in state: %s", err) - } - - return nil -} - -func resourceComputeFirewallPolicyUpdate(d *resource_compute_firewall_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_compute_firewall_policy_computecompute.FirewallPolicy{ - 
Parent: resource_compute_firewall_policy_dcldcl.String(d.Get("parent").(string)), - ShortName: resource_compute_firewall_policy_dcldcl.String(d.Get("short_name").(string)), - Description: resource_compute_firewall_policy_dcldcl.String(d.Get("description").(string)), - Name: resource_compute_firewall_policy_dcldcl.StringOrNil(d.Get("name").(string)), - } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_firewall_policy_schema.TimeoutUpdate)) - res, err := client.ApplyFirewallPolicy(resource_compute_firewall_policy_context.Background(), obj, directive...) - - if _, ok := err.(resource_compute_firewall_policy_dcldcl.DiffAfterApplyError); ok { - resource_compute_firewall_policy_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_compute_firewall_policy_fmt.Errorf("Error updating FirewallPolicy: %s", err) - } - - resource_compute_firewall_policy_log.Printf("[DEBUG] Finished creating FirewallPolicy %q: %#v", d.Id(), res) - - return resourceComputeFirewallPolicyRead(d, meta) -} - -func resourceComputeFirewallPolicyDelete(d *resource_compute_firewall_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_compute_firewall_policy_computecompute.FirewallPolicy{ - Parent: resource_compute_firewall_policy_dcldcl.String(d.Get("parent").(string)), - ShortName: resource_compute_firewall_policy_dcldcl.String(d.Get("short_name").(string)), - Description: resource_compute_firewall_policy_dcldcl.String(d.Get("description").(string)), - Name: resource_compute_firewall_policy_dcldcl.StringOrNil(d.Get("name").(string)), - } - - resource_compute_firewall_policy_log.Printf("[DEBUG] 
Deleting FirewallPolicy %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_firewall_policy_schema.TimeoutDelete)) - if err := client.DeleteFirewallPolicy(resource_compute_firewall_policy_context.Background(), obj); err != nil { - return resource_compute_firewall_policy_fmt.Errorf("Error deleting FirewallPolicy: %s", err) - } - - resource_compute_firewall_policy_log.Printf("[DEBUG] Finished deleting FirewallPolicy %q", d.Id()) - return nil -} - -func resourceComputeFirewallPolicyImport(d *resource_compute_firewall_policy_schema.ResourceData, meta interface{}) ([]*resource_compute_firewall_policy_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "locations/global/firewallPolicies/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "locations/global/firewallPolicies/{{name}}") - if err != nil { - return nil, resource_compute_firewall_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_firewall_policy_schema.ResourceData{d}, nil -} - -func resourceComputeFirewallPolicyAssociation() *resource_compute_firewall_policy_association_schema.Resource { - return &resource_compute_firewall_policy_association_schema.Resource{ - Create: resourceComputeFirewallPolicyAssociationCreate, - Read: resourceComputeFirewallPolicyAssociationRead, - Delete: resourceComputeFirewallPolicyAssociationDelete, - - Importer: &resource_compute_firewall_policy_association_schema.ResourceImporter{ - State: resourceComputeFirewallPolicyAssociationImport, - }, - - Timeouts: &resource_compute_firewall_policy_association_schema.ResourceTimeout{ - Create: 
resource_compute_firewall_policy_association_schema.DefaultTimeout(10 * resource_compute_firewall_policy_association_time.Minute), - Delete: resource_compute_firewall_policy_association_schema.DefaultTimeout(10 * resource_compute_firewall_policy_association_time.Minute), - }, - - Schema: map[string]*resource_compute_firewall_policy_association_schema.Schema{ - "attachment_target": { - Type: resource_compute_firewall_policy_association_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The target that the firewall policy is attached to.", - }, - - "firewall_policy": { - Type: resource_compute_firewall_policy_association_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The firewall policy ID of the association.", - }, - - "name": { - Type: resource_compute_firewall_policy_association_schema.TypeString, - Required: true, - ForceNew: true, - Description: "The name for an association.", - }, - - "short_name": { - Type: resource_compute_firewall_policy_association_schema.TypeString, - Computed: true, - Description: "The short name of the firewall policy of the association.", - }, - }, - } -} - -func resourceComputeFirewallPolicyAssociationCreate(d *resource_compute_firewall_policy_association_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_compute_firewall_policy_association_computecompute.FirewallPolicyAssociation{ - AttachmentTarget: resource_compute_firewall_policy_association_dcldcl.String(d.Get("attachment_target").(string)), - FirewallPolicy: resource_compute_firewall_policy_association_dcldcl.String(d.Get("firewall_policy").(string)), - Name: resource_compute_firewall_policy_association_dcldcl.String(d.Get("name").(string)), - } - - id, err := replaceVarsForId(d, config, "locations/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}") - if err != nil { - return 
resource_compute_firewall_policy_association_fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - createDirective := CreateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_firewall_policy_association_schema.TimeoutCreate)) - res, err := client.ApplyFirewallPolicyAssociation(resource_compute_firewall_policy_association_context.Background(), obj, createDirective...) - - if _, ok := err.(resource_compute_firewall_policy_association_dcldcl.DiffAfterApplyError); ok { - resource_compute_firewall_policy_association_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_compute_firewall_policy_association_fmt.Errorf("Error creating FirewallPolicyAssociation: %s", err) - } - - resource_compute_firewall_policy_association_log.Printf("[DEBUG] Finished creating FirewallPolicyAssociation %q: %#v", d.Id(), res) - - return resourceComputeFirewallPolicyAssociationRead(d, meta) -} - -func resourceComputeFirewallPolicyAssociationRead(d *resource_compute_firewall_policy_association_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_compute_firewall_policy_association_computecompute.FirewallPolicyAssociation{ - AttachmentTarget: resource_compute_firewall_policy_association_dcldcl.String(d.Get("attachment_target").(string)), - FirewallPolicy: resource_compute_firewall_policy_association_dcldcl.String(d.Get("firewall_policy").(string)), - Name: resource_compute_firewall_policy_association_dcldcl.String(d.Get("name").(string)), - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, 
config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_firewall_policy_association_schema.TimeoutRead)) - res, err := client.GetFirewallPolicyAssociation(resource_compute_firewall_policy_association_context.Background(), obj) - if err != nil { - resourceName := resource_compute_firewall_policy_association_fmt.Sprintf("ComputeFirewallPolicyAssociation %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("attachment_target", res.AttachmentTarget); err != nil { - return resource_compute_firewall_policy_association_fmt.Errorf("error setting attachment_target in state: %s", err) - } - if err = d.Set("firewall_policy", res.FirewallPolicy); err != nil { - return resource_compute_firewall_policy_association_fmt.Errorf("error setting firewall_policy in state: %s", err) - } - if err = d.Set("name", res.Name); err != nil { - return resource_compute_firewall_policy_association_fmt.Errorf("error setting name in state: %s", err) - } - if err = d.Set("short_name", res.ShortName); err != nil { - return resource_compute_firewall_policy_association_fmt.Errorf("error setting short_name in state: %s", err) - } - - return nil -} - -func resourceComputeFirewallPolicyAssociationDelete(d *resource_compute_firewall_policy_association_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_compute_firewall_policy_association_computecompute.FirewallPolicyAssociation{ - AttachmentTarget: resource_compute_firewall_policy_association_dcldcl.String(d.Get("attachment_target").(string)), - FirewallPolicy: resource_compute_firewall_policy_association_dcldcl.String(d.Get("firewall_policy").(string)), - Name: resource_compute_firewall_policy_association_dcldcl.String(d.Get("name").(string)), - } - - resource_compute_firewall_policy_association_log.Printf("[DEBUG] Deleting FirewallPolicyAssociation %q", d.Id()) - userAgent, err := 
generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_firewall_policy_association_schema.TimeoutDelete)) - if err := client.DeleteFirewallPolicyAssociation(resource_compute_firewall_policy_association_context.Background(), obj); err != nil { - return resource_compute_firewall_policy_association_fmt.Errorf("Error deleting FirewallPolicyAssociation: %s", err) - } - - resource_compute_firewall_policy_association_log.Printf("[DEBUG] Finished deleting FirewallPolicyAssociation %q", d.Id()) - return nil -} - -func resourceComputeFirewallPolicyAssociationImport(d *resource_compute_firewall_policy_association_schema.ResourceData, meta interface{}) ([]*resource_compute_firewall_policy_association_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "locations/global/firewallPolicies/(?P[^/]+)/associations/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVarsForId(d, config, "locations/global/firewallPolicies/{{firewall_policy}}/associations/{{name}}") - if err != nil { - return nil, resource_compute_firewall_policy_association_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_firewall_policy_association_schema.ResourceData{d}, nil -} - -func resourceComputeFirewallPolicyRule() *resource_compute_firewall_policy_rule_schema.Resource { - return &resource_compute_firewall_policy_rule_schema.Resource{ - Create: resourceComputeFirewallPolicyRuleCreate, - Read: resourceComputeFirewallPolicyRuleRead, - Update: resourceComputeFirewallPolicyRuleUpdate, - Delete: resourceComputeFirewallPolicyRuleDelete, - - Importer: &resource_compute_firewall_policy_rule_schema.ResourceImporter{ - State: 
resourceComputeFirewallPolicyRuleImport, - }, - - Timeouts: &resource_compute_firewall_policy_rule_schema.ResourceTimeout{ - Create: resource_compute_firewall_policy_rule_schema.DefaultTimeout(10 * resource_compute_firewall_policy_rule_time.Minute), - Update: resource_compute_firewall_policy_rule_schema.DefaultTimeout(10 * resource_compute_firewall_policy_rule_time.Minute), - Delete: resource_compute_firewall_policy_rule_schema.DefaultTimeout(10 * resource_compute_firewall_policy_rule_time.Minute), - }, - - Schema: map[string]*resource_compute_firewall_policy_rule_schema.Schema{ - "action": { - Type: resource_compute_firewall_policy_rule_schema.TypeString, - Required: true, - Description: "The Action to perform when the client connection triggers the rule. Can currently be either \"allow\" or \"deny()\" where valid values for status are 403, 404, and 502.", - }, - - "direction": { - Type: resource_compute_firewall_policy_rule_schema.TypeString, - Required: true, - Description: "The direction in which this rule applies. Possible values: INGRESS, EGRESS", - }, - - "firewall_policy": { - Type: resource_compute_firewall_policy_rule_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The firewall policy of the resource.", - }, - - "match": { - Type: resource_compute_firewall_policy_rule_schema.TypeList, - Required: true, - Description: "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced.", - MaxItems: 1, - Elem: ComputeFirewallPolicyRuleMatchSchema(), - }, - - "priority": { - Type: resource_compute_firewall_policy_rule_schema.TypeInt, - Required: true, - ForceNew: true, - Description: "An integer indicating the priority of a rule in the list. The priority must be a positive value between 0 and 2147483647. 
Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest prority.", - }, - - "description": { - Type: resource_compute_firewall_policy_rule_schema.TypeString, - Optional: true, - Description: "An optional description for this resource.", - }, - - "disabled": { - Type: resource_compute_firewall_policy_rule_schema.TypeBool, - Optional: true, - Description: "Denotes whether the firewall policy rule is disabled. When set to true, the firewall policy rule is not enforced and traffic behaves as if it did not exist. If this is unspecified, the firewall policy rule will be enabled.", - }, - - "enable_logging": { - Type: resource_compute_firewall_policy_rule_schema.TypeBool, - Optional: true, - Description: "Denotes whether to enable logging for a particular rule. If logging is enabled, logs will be exported to the configured export destination in Stackdriver. Logs may be exported to BigQuery or Pub/Sub. Note: you cannot enable logging on \"goto_next\" rules.", - }, - - "target_resources": { - Type: resource_compute_firewall_policy_rule_schema.TypeList, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "A list of network resource URLs to which this rule applies. This field allows you to control which network's VMs get this rule. 
If this field is left blank, all VMs within the organization will receive the rule.", - Elem: &resource_compute_firewall_policy_rule_schema.Schema{Type: resource_compute_firewall_policy_rule_schema.TypeString}, - }, - - "target_service_accounts": { - Type: resource_compute_firewall_policy_rule_schema.TypeList, - Optional: true, - Description: "A list of service accounts indicating the sets of instances that are applied with this rule.", - Elem: &resource_compute_firewall_policy_rule_schema.Schema{Type: resource_compute_firewall_policy_rule_schema.TypeString}, - }, - - "kind": { - Type: resource_compute_firewall_policy_rule_schema.TypeString, - Computed: true, - Description: "Type of the resource. Always `compute#firewallPolicyRule` for firewall policy rules", - }, - - "rule_tuple_count": { - Type: resource_compute_firewall_policy_rule_schema.TypeInt, - Computed: true, - Description: "Calculation of the complexity of a single firewall policy rule.", - }, - }, - } -} - -func ComputeFirewallPolicyRuleMatchSchema() *resource_compute_firewall_policy_rule_schema.Resource { - return &resource_compute_firewall_policy_rule_schema.Resource{ - Schema: map[string]*resource_compute_firewall_policy_rule_schema.Schema{ - "layer4_configs": { - Type: resource_compute_firewall_policy_rule_schema.TypeList, - Required: true, - Description: "Pairs of IP protocols and ports that the rule should match.", - Elem: ComputeFirewallPolicyRuleMatchLayer4ConfigsSchema(), - }, - - "dest_ip_ranges": { - Type: resource_compute_firewall_policy_rule_schema.TypeList, - Optional: true, - Description: "CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 256.", - Elem: &resource_compute_firewall_policy_rule_schema.Schema{Type: resource_compute_firewall_policy_rule_schema.TypeString}, - }, - - "src_ip_ranges": { - Type: resource_compute_firewall_policy_rule_schema.TypeList, - Optional: true, - Description: "CIDR IP address range. 
Maximum number of source CIDR IP ranges allowed is 256.", - Elem: &resource_compute_firewall_policy_rule_schema.Schema{Type: resource_compute_firewall_policy_rule_schema.TypeString}, - }, - }, - } -} - -func ComputeFirewallPolicyRuleMatchLayer4ConfigsSchema() *resource_compute_firewall_policy_rule_schema.Resource { - return &resource_compute_firewall_policy_rule_schema.Resource{ - Schema: map[string]*resource_compute_firewall_policy_rule_schema.Schema{ - "ip_protocol": { - Type: resource_compute_firewall_policy_rule_schema.TypeString, - Required: true, - Description: "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (`tcp`, `udp`, `icmp`, `esp`, `ah`, `ipip`, `sctp`), or the IP protocol number.", - }, - - "ports": { - Type: resource_compute_firewall_policy_rule_schema.TypeList, - Optional: true, - Description: "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port. 
Example inputs include: ``.", - Elem: &resource_compute_firewall_policy_rule_schema.Schema{Type: resource_compute_firewall_policy_rule_schema.TypeString}, - }, - }, - } -} - -func resourceComputeFirewallPolicyRuleCreate(d *resource_compute_firewall_policy_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRule{ - Action: resource_compute_firewall_policy_rule_dcldcl.String(d.Get("action").(string)), - Direction: resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), - FirewallPolicy: resource_compute_firewall_policy_rule_dcldcl.String(d.Get("firewall_policy").(string)), - Match: expandComputeFirewallPolicyRuleMatch(d.Get("match")), - Priority: resource_compute_firewall_policy_rule_dcldcl.Int64(int64(d.Get("priority").(int))), - Description: resource_compute_firewall_policy_rule_dcldcl.String(d.Get("description").(string)), - Disabled: resource_compute_firewall_policy_rule_dcldcl.Bool(d.Get("disabled").(bool)), - EnableLogging: resource_compute_firewall_policy_rule_dcldcl.Bool(d.Get("enable_logging").(bool)), - TargetResources: expandStringArray(d.Get("target_resources")), - TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), - } - - id, err := replaceVarsForId(d, config, "locations/global/firewallPolicies/{{firewall_policy}}/rules/{{priority}}") - if err != nil { - return resource_compute_firewall_policy_rule_fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - createDirective := CreateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_firewall_policy_rule_schema.TimeoutCreate)) - res, err := 
client.ApplyFirewallPolicyRule(resource_compute_firewall_policy_rule_context.Background(), obj, createDirective...) - - if _, ok := err.(resource_compute_firewall_policy_rule_dcldcl.DiffAfterApplyError); ok { - resource_compute_firewall_policy_rule_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_compute_firewall_policy_rule_fmt.Errorf("Error creating FirewallPolicyRule: %s", err) - } - - resource_compute_firewall_policy_rule_log.Printf("[DEBUG] Finished creating FirewallPolicyRule %q: %#v", d.Id(), res) - - return resourceComputeFirewallPolicyRuleRead(d, meta) -} - -func resourceComputeFirewallPolicyRuleRead(d *resource_compute_firewall_policy_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRule{ - Action: resource_compute_firewall_policy_rule_dcldcl.String(d.Get("action").(string)), - Direction: resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), - FirewallPolicy: resource_compute_firewall_policy_rule_dcldcl.String(d.Get("firewall_policy").(string)), - Match: expandComputeFirewallPolicyRuleMatch(d.Get("match")), - Priority: resource_compute_firewall_policy_rule_dcldcl.Int64(int64(d.Get("priority").(int))), - Description: resource_compute_firewall_policy_rule_dcldcl.String(d.Get("description").(string)), - Disabled: resource_compute_firewall_policy_rule_dcldcl.Bool(d.Get("disabled").(bool)), - EnableLogging: resource_compute_firewall_policy_rule_dcldcl.Bool(d.Get("enable_logging").(bool)), - TargetResources: expandStringArray(d.Get("target_resources")), - TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, config); err == 
nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_firewall_policy_rule_schema.TimeoutRead)) - res, err := client.GetFirewallPolicyRule(resource_compute_firewall_policy_rule_context.Background(), obj) - if err != nil { - resourceName := resource_compute_firewall_policy_rule_fmt.Sprintf("ComputeFirewallPolicyRule %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("action", res.Action); err != nil { - return resource_compute_firewall_policy_rule_fmt.Errorf("error setting action in state: %s", err) - } - if err = d.Set("direction", res.Direction); err != nil { - return resource_compute_firewall_policy_rule_fmt.Errorf("error setting direction in state: %s", err) - } - if err = d.Set("firewall_policy", res.FirewallPolicy); err != nil { - return resource_compute_firewall_policy_rule_fmt.Errorf("error setting firewall_policy in state: %s", err) - } - if err = d.Set("match", flattenComputeFirewallPolicyRuleMatch(res.Match)); err != nil { - return resource_compute_firewall_policy_rule_fmt.Errorf("error setting match in state: %s", err) - } - if err = d.Set("priority", res.Priority); err != nil { - return resource_compute_firewall_policy_rule_fmt.Errorf("error setting priority in state: %s", err) - } - if err = d.Set("description", res.Description); err != nil { - return resource_compute_firewall_policy_rule_fmt.Errorf("error setting description in state: %s", err) - } - if err = d.Set("disabled", res.Disabled); err != nil { - return resource_compute_firewall_policy_rule_fmt.Errorf("error setting disabled in state: %s", err) - } - if err = d.Set("enable_logging", res.EnableLogging); err != nil { - return resource_compute_firewall_policy_rule_fmt.Errorf("error setting enable_logging in state: %s", err) - } - if err = d.Set("target_resources", res.TargetResources); err != nil { - return resource_compute_firewall_policy_rule_fmt.Errorf("error setting 
target_resources in state: %s", err) - } - if err = d.Set("target_service_accounts", res.TargetServiceAccounts); err != nil { - return resource_compute_firewall_policy_rule_fmt.Errorf("error setting target_service_accounts in state: %s", err) - } - if err = d.Set("kind", res.Kind); err != nil { - return resource_compute_firewall_policy_rule_fmt.Errorf("error setting kind in state: %s", err) - } - if err = d.Set("rule_tuple_count", res.RuleTupleCount); err != nil { - return resource_compute_firewall_policy_rule_fmt.Errorf("error setting rule_tuple_count in state: %s", err) - } - - return nil -} - -func resourceComputeFirewallPolicyRuleUpdate(d *resource_compute_firewall_policy_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRule{ - Action: resource_compute_firewall_policy_rule_dcldcl.String(d.Get("action").(string)), - Direction: resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), - FirewallPolicy: resource_compute_firewall_policy_rule_dcldcl.String(d.Get("firewall_policy").(string)), - Match: expandComputeFirewallPolicyRuleMatch(d.Get("match")), - Priority: resource_compute_firewall_policy_rule_dcldcl.Int64(int64(d.Get("priority").(int))), - Description: resource_compute_firewall_policy_rule_dcldcl.String(d.Get("description").(string)), - Disabled: resource_compute_firewall_policy_rule_dcldcl.Bool(d.Get("disabled").(bool)), - EnableLogging: resource_compute_firewall_policy_rule_dcldcl.Bool(d.Get("enable_logging").(bool)), - TargetResources: expandStringArray(d.Get("target_resources")), - TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), - } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - 
billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_firewall_policy_rule_schema.TimeoutUpdate)) - res, err := client.ApplyFirewallPolicyRule(resource_compute_firewall_policy_rule_context.Background(), obj, directive...) - - if _, ok := err.(resource_compute_firewall_policy_rule_dcldcl.DiffAfterApplyError); ok { - resource_compute_firewall_policy_rule_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_compute_firewall_policy_rule_fmt.Errorf("Error updating FirewallPolicyRule: %s", err) - } - - resource_compute_firewall_policy_rule_log.Printf("[DEBUG] Finished creating FirewallPolicyRule %q: %#v", d.Id(), res) - - return resourceComputeFirewallPolicyRuleRead(d, meta) -} - -func resourceComputeFirewallPolicyRuleDelete(d *resource_compute_firewall_policy_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRule{ - Action: resource_compute_firewall_policy_rule_dcldcl.String(d.Get("action").(string)), - Direction: resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRuleDirectionEnumRef(d.Get("direction").(string)), - FirewallPolicy: resource_compute_firewall_policy_rule_dcldcl.String(d.Get("firewall_policy").(string)), - Match: expandComputeFirewallPolicyRuleMatch(d.Get("match")), - Priority: resource_compute_firewall_policy_rule_dcldcl.Int64(int64(d.Get("priority").(int))), - Description: resource_compute_firewall_policy_rule_dcldcl.String(d.Get("description").(string)), - Disabled: resource_compute_firewall_policy_rule_dcldcl.Bool(d.Get("disabled").(bool)), - EnableLogging: resource_compute_firewall_policy_rule_dcldcl.Bool(d.Get("enable_logging").(bool)), - TargetResources: expandStringArray(d.Get("target_resources")), - TargetServiceAccounts: expandStringArray(d.Get("target_service_accounts")), - } - - 
resource_compute_firewall_policy_rule_log.Printf("[DEBUG] Deleting FirewallPolicyRule %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_firewall_policy_rule_schema.TimeoutDelete)) - if err := client.DeleteFirewallPolicyRule(resource_compute_firewall_policy_rule_context.Background(), obj); err != nil { - return resource_compute_firewall_policy_rule_fmt.Errorf("Error deleting FirewallPolicyRule: %s", err) - } - - resource_compute_firewall_policy_rule_log.Printf("[DEBUG] Finished deleting FirewallPolicyRule %q", d.Id()) - return nil -} - -func resourceComputeFirewallPolicyRuleImport(d *resource_compute_firewall_policy_rule_schema.ResourceData, meta interface{}) ([]*resource_compute_firewall_policy_rule_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "locations/global/firewallPolicies/(?P[^/]+)/rules/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVarsForId(d, config, "locations/global/firewallPolicies/{{firewall_policy}}/rules/{{priority}}") - if err != nil { - return nil, resource_compute_firewall_policy_rule_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_firewall_policy_rule_schema.ResourceData{d}, nil -} - -func expandComputeFirewallPolicyRuleMatch(o interface{}) *resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRuleMatch { - if o == nil { - return resource_compute_firewall_policy_rule_computecompute.EmptyFirewallPolicyRuleMatch - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_compute_firewall_policy_rule_computecompute.EmptyFirewallPolicyRuleMatch - } - obj := objArr[0].(map[string]interface{}) - return 
&resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRuleMatch{ - Layer4Configs: expandComputeFirewallPolicyRuleMatchLayer4ConfigsArray(obj["layer4_configs"]), - DestIPRanges: expandStringArray(obj["dest_ip_ranges"]), - SrcIPRanges: expandStringArray(obj["src_ip_ranges"]), - } -} - -func flattenComputeFirewallPolicyRuleMatch(obj *resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRuleMatch) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "layer4_configs": flattenComputeFirewallPolicyRuleMatchLayer4ConfigsArray(obj.Layer4Configs), - "dest_ip_ranges": obj.DestIPRanges, - "src_ip_ranges": obj.SrcIPRanges, - } - - return []interface{}{transformed} - -} - -func expandComputeFirewallPolicyRuleMatchLayer4ConfigsArray(o interface{}) []resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRuleMatchLayer4Configs { - if o == nil { - return make([]resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRuleMatchLayer4Configs, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 { - return make([]resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRuleMatchLayer4Configs, 0) - } - - items := make([]resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRuleMatchLayer4Configs, 0, len(objs)) - for _, item := range objs { - i := expandComputeFirewallPolicyRuleMatchLayer4Configs(item) - items = append(items, *i) - } - - return items -} - -func expandComputeFirewallPolicyRuleMatchLayer4Configs(o interface{}) *resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRuleMatchLayer4Configs { - if o == nil { - return resource_compute_firewall_policy_rule_computecompute.EmptyFirewallPolicyRuleMatchLayer4Configs - } - - obj := o.(map[string]interface{}) - return &resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRuleMatchLayer4Configs{ - IPProtocol: 
resource_compute_firewall_policy_rule_dcldcl.String(obj["ip_protocol"].(string)), - Ports: expandStringArray(obj["ports"]), - } -} - -func flattenComputeFirewallPolicyRuleMatchLayer4ConfigsArray(objs []resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRuleMatchLayer4Configs) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenComputeFirewallPolicyRuleMatchLayer4Configs(&item) - items = append(items, i) - } - - return items -} - -func flattenComputeFirewallPolicyRuleMatchLayer4Configs(obj *resource_compute_firewall_policy_rule_computecompute.FirewallPolicyRuleMatchLayer4Configs) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "ip_protocol": obj.IPProtocol, - "ports": obj.Ports, - } - - return transformed - -} - -func resourceComputeForwardingRule() *resource_compute_forwarding_rule_schema.Resource { - return &resource_compute_forwarding_rule_schema.Resource{ - Create: resourceComputeForwardingRuleCreate, - Read: resourceComputeForwardingRuleRead, - Update: resourceComputeForwardingRuleUpdate, - Delete: resourceComputeForwardingRuleDelete, - - Importer: &resource_compute_forwarding_rule_schema.ResourceImporter{ - State: resourceComputeForwardingRuleImport, - }, - - Timeouts: &resource_compute_forwarding_rule_schema.ResourceTimeout{ - Create: resource_compute_forwarding_rule_schema.DefaultTimeout(10 * resource_compute_forwarding_rule_time.Minute), - Update: resource_compute_forwarding_rule_schema.DefaultTimeout(10 * resource_compute_forwarding_rule_time.Minute), - Delete: resource_compute_forwarding_rule_schema.DefaultTimeout(10 * resource_compute_forwarding_rule_time.Minute), - }, - - Schema: map[string]*resource_compute_forwarding_rule_schema.Schema{ - "name": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the resource; provided by the client 
when the resource is created. The name must be 1-63 characters long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - }, - - "all_ports": { - Type: resource_compute_forwarding_rule_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: "This field is used along with the `backend_service` field for internal load balancing or with the `target` field for internal TargetInstance. This field cannot be used with `port` or `portRange` fields. When the load balancing scheme is `INTERNAL` and protocol is TCP/UDP, specify this field to allow packets addressed to any ports will be forwarded to the backends configured with this forwarding rule.", - }, - - "allow_global_access": { - Type: resource_compute_forwarding_rule_schema.TypeBool, - Optional: true, - Description: "This field is used along with the `backend_service` field for internal load balancing or with the `target` field for internal TargetInstance. If the field is set to `TRUE`, clients can access ILB from all regions. Otherwise only allows access from clients in the same region as the internal load balancer.", - }, - - "backend_service": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "This field is only used for `INTERNAL` load balancing. For internal load balancing, this field identifies the BackendService resource to receive the matched traffic.", - }, - - "description": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "An optional description of this resource. 
Provide this property when you create the resource.", - }, - - "ip_address": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: internalIpDiffSuppress, - Description: "IP address that this forwarding rule serves. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the target that you specify in the forwarding rule. If you don't specify a reserved IP address, an ephemeral IP address is assigned. Methods for specifying an IP address: * IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in `https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name` * Partial URL or by name, as in: * `projects/project_id/regions/region/addresses/address-name` * `regions/region/addresses/address-name` * `global/addresses/address-name` * `address-name` The loadBalancingScheme and the forwarding rule's target determine the type of IP address that you can use. For detailed information, refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).", - }, - - "ip_protocol": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: caseDiffSuppress, - Description: "The IP protocol to which this rule applies. For protocol forwarding, valid options are `TCP`, `UDP`, `ESP`, `AH`, `SCTP` or `ICMP`. For Internal TCP/UDP Load Balancing, the load balancing scheme is `INTERNAL`, and one of `TCP` or `UDP` are valid. For Traffic Director, the load balancing scheme is `INTERNAL_SELF_MANAGED`, and only `TCP`is valid. For Internal HTTP(S) Load Balancing, the load balancing scheme is `INTERNAL_MANAGED`, and only `TCP` is valid. For HTTP(S), SSL Proxy, and TCP Proxy Load Balancing, the load balancing scheme is `EXTERNAL` and only `TCP` is valid. 
For Network TCP/UDP Load Balancing, the load balancing scheme is `EXTERNAL`, and one of `TCP` or `UDP` is valid.", - }, - - "is_mirroring_collector": { - Type: resource_compute_forwarding_rule_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: "Indicates whether or not this load balancer can be used as a collector for packet mirroring. To prevent mirroring loops, instances behind this load balancer will not have their traffic mirrored even if a `PacketMirroring` rule applies to them. This can only be set to true for load balancers that have their `loadBalancingScheme` set to `INTERNAL`.", - }, - - "labels": { - Type: resource_compute_forwarding_rule_schema.TypeMap, - Optional: true, - Description: "Labels to apply to this rule.", - Elem: &resource_compute_forwarding_rule_schema.Schema{Type: resource_compute_forwarding_rule_schema.TypeString}, - }, - - "load_balancing_scheme": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Specifies the forwarding rule type.\n\n* `EXTERNAL` is used for:\n * Classic Cloud VPN gateways\n * Protocol forwarding to VMs from an external IP address\n * The following load balancers: HTTP(S), SSL Proxy, TCP Proxy, and Network TCP/UDP\n* `INTERNAL` is used for:\n * Protocol forwarding to VMs from an internal IP address\n * Internal TCP/UDP load balancers\n* `INTERNAL_MANAGED` is used for:\n * Internal HTTP(S) load balancers\n* `INTERNAL_SELF_MANAGED` is used for:\n * Traffic Director\n\nFor more information about forwarding rules, refer to [Forwarding rule concepts](/load-balancing/docs/forwarding-rule-concepts). 
Possible values: INVALID, INTERNAL, INTERNAL_MANAGED, INTERNAL_SELF_MANAGED, EXTERNAL", - Default: "EXTERNAL", - }, - - "network": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "This field is not used for external load balancing. For `INTERNAL` and `INTERNAL_SELF_MANAGED` load balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used.", - }, - - "network_tier": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: "This signifies the networking tier used for configuring this load balancer and can only take the following values: `PREMIUM`, `STANDARD`. For regional ForwardingRule, the valid values are `PREMIUM` and `STANDARD`. For GlobalForwardingRule, the valid value is `PREMIUM`. If this field is not specified, it is assumed to be `PREMIUM`. If `IPAddress` is specified, this value must be equal to the networkTier of the Address.", - }, - - "port_range": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: portRangeDiffSuppress, - Description: "When the load balancing scheme is `EXTERNAL`, `INTERNAL_SELF_MANAGED` and `INTERNAL_MANAGED`, you can specify a `port_range`. Use with a forwarding rule that points to a target proxy or a target pool. Do not use with a forwarding rule that points to a backend service. This field is used along with the `target` field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance. Applicable only when `IPProtocol` is `TCP`, `UDP`, or `SCTP`, only packets addressed to ports in the specified range will be forwarded to `target`. 
Forwarding rules with the same `[IPAddress, IPProtocol]` pair must have disjoint port ranges. Some types of forwarding target have constraints on the acceptable ports:\n\n* TargetHttpProxy: 80, 8080\n* TargetHttpsProxy: 443\n* TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222\n* TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222\n* TargetVpnGateway: 500, 4500\n\n@pattern: d+(?:-d+)?", - }, - - "ports": { - Type: resource_compute_forwarding_rule_schema.TypeSet, - Optional: true, - ForceNew: true, - Description: "This field is used along with the `backend_service` field for internal load balancing. When the load balancing scheme is `INTERNAL`, a list of ports can be configured, for example, ['80'], ['8000','9000']. Only packets addressed to these ports are forwarded to the backends configured with the forwarding rule. If the forwarding rule's loadBalancingScheme is INTERNAL, you can specify ports in one of the following ways: * A list of up to five ports, which can be non-contiguous * Keyword `ALL`, which causes the forwarding rule to forward traffic on any port of the forwarding rule's protocol. @pattern: d+(?:-d+)? 
For more information, refer to [Port specifications](/load-balancing/docs/forwarding-rule-concepts#port_specifications).", - MaxItems: 5, - Elem: &resource_compute_forwarding_rule_schema.Schema{Type: resource_compute_forwarding_rule_schema.TypeString}, - Set: resource_compute_forwarding_rule_schema.HashString, - }, - - "project": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The project this resource belongs in.", - }, - - "region": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The location of this resource.", - }, - - "service_label": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", - ValidateFunc: validateGCPName, - }, - - "subnetwork": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "This field is only used for `INTERNAL` load balancing. For internal load balancing, this field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule. 
If the network specified is in auto subnet mode, this field is optional. However, if the network is in custom subnet mode, a subnetwork must be specified.", - }, - - "target": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For `INTERNAL_SELF_MANAGED` load balancing, only `targetHttpProxy` is valid, not `targetHttpsProxy`.", - }, - - "creation_timestamp": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Computed: true, - Description: "[Output Only] Creation timestamp in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.", - }, - - "label_fingerprint": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Computed: true, - Description: "Used internally during label updates.", - }, - - "self_link": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Computed: true, - Description: "[Output Only] Server-defined URL for the resource.", - }, - - "service_name": { - Type: resource_compute_forwarding_rule_schema.TypeString, - Computed: true, - Description: "[Output Only] The internal fully qualified service name for this Forwarding Rule. 
This field is only used for internal load balancing.", - }, - }, - } -} - -func resourceComputeForwardingRuleCreate(d *resource_compute_forwarding_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - region, err := getRegion(d, config) - if err != nil { - return err - } - - obj := &resource_compute_forwarding_rule_computecompute.ForwardingRule{ - Name: resource_compute_forwarding_rule_dcldcl.String(d.Get("name").(string)), - AllPorts: resource_compute_forwarding_rule_dcldcl.Bool(d.Get("all_ports").(bool)), - AllowGlobalAccess: resource_compute_forwarding_rule_dcldcl.Bool(d.Get("allow_global_access").(bool)), - BackendService: resource_compute_forwarding_rule_dcldcl.String(d.Get("backend_service").(string)), - Description: resource_compute_forwarding_rule_dcldcl.String(d.Get("description").(string)), - IPAddress: resource_compute_forwarding_rule_dcldcl.StringOrNil(d.Get("ip_address").(string)), - IPProtocol: resource_compute_forwarding_rule_computecompute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), - IsMirroringCollector: resource_compute_forwarding_rule_dcldcl.Bool(d.Get("is_mirroring_collector").(bool)), - Labels: checkStringMap(d.Get("labels")), - LoadBalancingScheme: resource_compute_forwarding_rule_computecompute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), - Network: resource_compute_forwarding_rule_dcldcl.StringOrNil(d.Get("network").(string)), - NetworkTier: resource_compute_forwarding_rule_computecompute.ForwardingRuleNetworkTierEnumRef(d.Get("network_tier").(string)), - PortRange: resource_compute_forwarding_rule_dcldcl.String(d.Get("port_range").(string)), - Ports: expandStringArray(d.Get("ports")), - Project: resource_compute_forwarding_rule_dcldcl.String(project), - Location: resource_compute_forwarding_rule_dcldcl.String(region), - ServiceLabel: 
resource_compute_forwarding_rule_dcldcl.String(d.Get("service_label").(string)), - Subnetwork: resource_compute_forwarding_rule_dcldcl.StringOrNil(d.Get("subnetwork").(string)), - Target: resource_compute_forwarding_rule_dcldcl.String(d.Get("target").(string)), - } - - id, err := replaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") - if err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - createDirective := CreateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_forwarding_rule_schema.TimeoutCreate)) - res, err := client.ApplyForwardingRule(resource_compute_forwarding_rule_context.Background(), obj, createDirective...) 
- - if _, ok := err.(resource_compute_forwarding_rule_dcldcl.DiffAfterApplyError); ok { - resource_compute_forwarding_rule_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_compute_forwarding_rule_fmt.Errorf("Error creating ForwardingRule: %s", err) - } - - resource_compute_forwarding_rule_log.Printf("[DEBUG] Finished creating ForwardingRule %q: %#v", d.Id(), res) - - return resourceComputeForwardingRuleRead(d, meta) -} - -func resourceComputeForwardingRuleRead(d *resource_compute_forwarding_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - region, err := getRegion(d, config) - if err != nil { - return err - } - - obj := &resource_compute_forwarding_rule_computecompute.ForwardingRule{ - Name: resource_compute_forwarding_rule_dcldcl.String(d.Get("name").(string)), - AllPorts: resource_compute_forwarding_rule_dcldcl.Bool(d.Get("all_ports").(bool)), - AllowGlobalAccess: resource_compute_forwarding_rule_dcldcl.Bool(d.Get("allow_global_access").(bool)), - BackendService: resource_compute_forwarding_rule_dcldcl.String(d.Get("backend_service").(string)), - Description: resource_compute_forwarding_rule_dcldcl.String(d.Get("description").(string)), - IPAddress: resource_compute_forwarding_rule_dcldcl.StringOrNil(d.Get("ip_address").(string)), - IPProtocol: resource_compute_forwarding_rule_computecompute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), - IsMirroringCollector: resource_compute_forwarding_rule_dcldcl.Bool(d.Get("is_mirroring_collector").(bool)), - Labels: checkStringMap(d.Get("labels")), - LoadBalancingScheme: resource_compute_forwarding_rule_computecompute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), - Network: resource_compute_forwarding_rule_dcldcl.StringOrNil(d.Get("network").(string)), - NetworkTier: 
resource_compute_forwarding_rule_computecompute.ForwardingRuleNetworkTierEnumRef(d.Get("network_tier").(string)), - PortRange: resource_compute_forwarding_rule_dcldcl.String(d.Get("port_range").(string)), - Ports: expandStringArray(d.Get("ports")), - Project: resource_compute_forwarding_rule_dcldcl.String(project), - Location: resource_compute_forwarding_rule_dcldcl.String(region), - ServiceLabel: resource_compute_forwarding_rule_dcldcl.String(d.Get("service_label").(string)), - Subnetwork: resource_compute_forwarding_rule_dcldcl.StringOrNil(d.Get("subnetwork").(string)), - Target: resource_compute_forwarding_rule_dcldcl.String(d.Get("target").(string)), - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_forwarding_rule_schema.TimeoutRead)) - res, err := client.GetForwardingRule(resource_compute_forwarding_rule_context.Background(), obj) - if err != nil { - resourceName := resource_compute_forwarding_rule_fmt.Sprintf("ComputeForwardingRule %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("name", res.Name); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting name in state: %s", err) - } - if err = d.Set("all_ports", res.AllPorts); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting all_ports in state: %s", err) - } - if err = d.Set("allow_global_access", res.AllowGlobalAccess); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting allow_global_access in state: %s", err) - } - if err = d.Set("backend_service", res.BackendService); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting backend_service in state: %s", err) - } - if err = d.Set("description", 
res.Description); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting description in state: %s", err) - } - if err = d.Set("ip_address", res.IPAddress); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting ip_address in state: %s", err) - } - if err = d.Set("ip_protocol", res.IPProtocol); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting ip_protocol in state: %s", err) - } - if err = d.Set("is_mirroring_collector", res.IsMirroringCollector); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting is_mirroring_collector in state: %s", err) - } - if err = d.Set("labels", res.Labels); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting labels in state: %s", err) - } - if err = d.Set("load_balancing_scheme", res.LoadBalancingScheme); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting load_balancing_scheme in state: %s", err) - } - if err = d.Set("network", res.Network); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting network in state: %s", err) - } - if err = d.Set("network_tier", res.NetworkTier); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting network_tier in state: %s", err) - } - if err = d.Set("port_range", res.PortRange); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting port_range in state: %s", err) - } - if err = d.Set("ports", res.Ports); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting ports in state: %s", err) - } - if err = d.Set("project", res.Project); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting project in state: %s", err) - } - if err = d.Set("region", res.Location); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting region in state: %s", err) - } - if err = d.Set("service_label", 
res.ServiceLabel); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting service_label in state: %s", err) - } - if err = d.Set("subnetwork", res.Subnetwork); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting subnetwork in state: %s", err) - } - if err = d.Set("target", res.Target); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting target in state: %s", err) - } - if err = d.Set("creation_timestamp", res.CreationTimestamp); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting creation_timestamp in state: %s", err) - } - if err = d.Set("label_fingerprint", res.LabelFingerprint); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting label_fingerprint in state: %s", err) - } - if err = d.Set("self_link", res.SelfLink); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting self_link in state: %s", err) - } - if err = d.Set("service_name", res.ServiceName); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("error setting service_name in state: %s", err) - } - - return nil -} - -func resourceComputeForwardingRuleUpdate(d *resource_compute_forwarding_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - region, err := getRegion(d, config) - if err != nil { - return err - } - - obj := &resource_compute_forwarding_rule_computecompute.ForwardingRule{ - Name: resource_compute_forwarding_rule_dcldcl.String(d.Get("name").(string)), - AllPorts: resource_compute_forwarding_rule_dcldcl.Bool(d.Get("all_ports").(bool)), - AllowGlobalAccess: resource_compute_forwarding_rule_dcldcl.Bool(d.Get("allow_global_access").(bool)), - BackendService: resource_compute_forwarding_rule_dcldcl.String(d.Get("backend_service").(string)), - Description: 
resource_compute_forwarding_rule_dcldcl.String(d.Get("description").(string)), - IPAddress: resource_compute_forwarding_rule_dcldcl.StringOrNil(d.Get("ip_address").(string)), - IPProtocol: resource_compute_forwarding_rule_computecompute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), - IsMirroringCollector: resource_compute_forwarding_rule_dcldcl.Bool(d.Get("is_mirroring_collector").(bool)), - Labels: checkStringMap(d.Get("labels")), - LoadBalancingScheme: resource_compute_forwarding_rule_computecompute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), - Network: resource_compute_forwarding_rule_dcldcl.StringOrNil(d.Get("network").(string)), - NetworkTier: resource_compute_forwarding_rule_computecompute.ForwardingRuleNetworkTierEnumRef(d.Get("network_tier").(string)), - PortRange: resource_compute_forwarding_rule_dcldcl.String(d.Get("port_range").(string)), - Ports: expandStringArray(d.Get("ports")), - Project: resource_compute_forwarding_rule_dcldcl.String(project), - Location: resource_compute_forwarding_rule_dcldcl.String(region), - ServiceLabel: resource_compute_forwarding_rule_dcldcl.String(d.Get("service_label").(string)), - Subnetwork: resource_compute_forwarding_rule_dcldcl.StringOrNil(d.Get("subnetwork").(string)), - Target: resource_compute_forwarding_rule_dcldcl.String(d.Get("target").(string)), - } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_forwarding_rule_schema.TimeoutUpdate)) - res, err := client.ApplyForwardingRule(resource_compute_forwarding_rule_context.Background(), obj, directive...) 
- - if _, ok := err.(resource_compute_forwarding_rule_dcldcl.DiffAfterApplyError); ok { - resource_compute_forwarding_rule_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_compute_forwarding_rule_fmt.Errorf("Error updating ForwardingRule: %s", err) - } - - resource_compute_forwarding_rule_log.Printf("[DEBUG] Finished creating ForwardingRule %q: %#v", d.Id(), res) - - return resourceComputeForwardingRuleRead(d, meta) -} - -func resourceComputeForwardingRuleDelete(d *resource_compute_forwarding_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - region, err := getRegion(d, config) - if err != nil { - return err - } - - obj := &resource_compute_forwarding_rule_computecompute.ForwardingRule{ - Name: resource_compute_forwarding_rule_dcldcl.String(d.Get("name").(string)), - AllPorts: resource_compute_forwarding_rule_dcldcl.Bool(d.Get("all_ports").(bool)), - AllowGlobalAccess: resource_compute_forwarding_rule_dcldcl.Bool(d.Get("allow_global_access").(bool)), - BackendService: resource_compute_forwarding_rule_dcldcl.String(d.Get("backend_service").(string)), - Description: resource_compute_forwarding_rule_dcldcl.String(d.Get("description").(string)), - IPAddress: resource_compute_forwarding_rule_dcldcl.StringOrNil(d.Get("ip_address").(string)), - IPProtocol: resource_compute_forwarding_rule_computecompute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), - IsMirroringCollector: resource_compute_forwarding_rule_dcldcl.Bool(d.Get("is_mirroring_collector").(bool)), - Labels: checkStringMap(d.Get("labels")), - LoadBalancingScheme: resource_compute_forwarding_rule_computecompute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), - Network: resource_compute_forwarding_rule_dcldcl.StringOrNil(d.Get("network").(string)), - NetworkTier: 
resource_compute_forwarding_rule_computecompute.ForwardingRuleNetworkTierEnumRef(d.Get("network_tier").(string)), - PortRange: resource_compute_forwarding_rule_dcldcl.String(d.Get("port_range").(string)), - Ports: expandStringArray(d.Get("ports")), - Project: resource_compute_forwarding_rule_dcldcl.String(project), - Location: resource_compute_forwarding_rule_dcldcl.String(region), - ServiceLabel: resource_compute_forwarding_rule_dcldcl.String(d.Get("service_label").(string)), - Subnetwork: resource_compute_forwarding_rule_dcldcl.StringOrNil(d.Get("subnetwork").(string)), - Target: resource_compute_forwarding_rule_dcldcl.String(d.Get("target").(string)), - } - - resource_compute_forwarding_rule_log.Printf("[DEBUG] Deleting ForwardingRule %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_forwarding_rule_schema.TimeoutDelete)) - if err := client.DeleteForwardingRule(resource_compute_forwarding_rule_context.Background(), obj); err != nil { - return resource_compute_forwarding_rule_fmt.Errorf("Error deleting ForwardingRule: %s", err) - } - - resource_compute_forwarding_rule_log.Printf("[DEBUG] Finished deleting ForwardingRule %q", d.Id()) - return nil -} - -func resourceComputeForwardingRuleImport(d *resource_compute_forwarding_rule_schema.ResourceData, meta interface{}) ([]*resource_compute_forwarding_rule_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/forwardingRules/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVarsForId(d, config, "projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}") - 
if err != nil { - return nil, resource_compute_forwarding_rule_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_forwarding_rule_schema.ResourceData{d}, nil -} - -func resourceComputeGlobalAddress() *resource_compute_global_address_schema.Resource { - return &resource_compute_global_address_schema.Resource{ - Create: resourceComputeGlobalAddressCreate, - Read: resourceComputeGlobalAddressRead, - Delete: resourceComputeGlobalAddressDelete, - - Importer: &resource_compute_global_address_schema.ResourceImporter{ - State: resourceComputeGlobalAddressImport, - }, - - Timeouts: &resource_compute_global_address_schema.ResourceTimeout{ - Create: resource_compute_global_address_schema.DefaultTimeout(4 * resource_compute_global_address_time.Minute), - Delete: resource_compute_global_address_schema.DefaultTimeout(4 * resource_compute_global_address_time.Minute), - }, - - Schema: map[string]*resource_compute_global_address_schema.Schema{ - "name": { - Type: resource_compute_global_address_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "address": { - Type: resource_compute_global_address_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The IP address or beginning of the address range represented by this -resource. 
This can be supplied as an input to reserve a specific -address or omitted to allow GCP to choose a valid one for you.`, - }, - "address_type": { - Type: resource_compute_global_address_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_global_address_validation.StringInSlice([]string{"EXTERNAL", "INTERNAL", ""}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("EXTERNAL"), - Description: `The type of the address to reserve. - -* EXTERNAL indicates public/external single IP address. -* INTERNAL indicates internal IP ranges belonging to some network. Default value: "EXTERNAL" Possible values: ["EXTERNAL", "INTERNAL"]`, - Default: "EXTERNAL", - }, - "description": { - Type: resource_compute_global_address_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "ip_version": { - Type: resource_compute_global_address_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_global_address_validation.StringInSlice([]string{"IPV4", "IPV6", ""}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("IPV4"), - Description: `The IP Version that will be used by this address. The default value is 'IPV4'. Possible values: ["IPV4", "IPV6"]`, - }, - "network": { - Type: resource_compute_global_address_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of the network in which to reserve the IP range. The IP range -must be in RFC1918 space. The network cannot be deleted if there are -any reserved IP ranges referring to it. - -This should only be set when using an Internal address.`, - }, - "prefix_length": { - Type: resource_compute_global_address_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The prefix length of the IP range. If not present, it means the -address field is a single IP address. 
- -This field is not applicable to addresses with addressType=EXTERNAL, -or addressType=INTERNAL when purpose=PRIVATE_SERVICE_CONNECT`, - }, - "purpose": { - Type: resource_compute_global_address_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The purpose of the resource. Possible values include: - -* VPC_PEERING - for peer networks - -* PRIVATE_SERVICE_CONNECT - for ([Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) only) Private Service Connect networks`, - }, - "creation_timestamp": { - Type: resource_compute_global_address_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: resource_compute_global_address_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_global_address_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeGlobalAddressCreate(d *resource_compute_global_address_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - addressProp, err := expandComputeGlobalAddressAddress(d.Get("address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("address"); !isEmptyValue(resource_compute_global_address_reflect.ValueOf(addressProp)) && (ok || !resource_compute_global_address_reflect.DeepEqual(v, addressProp)) { - obj["address"] = addressProp - } - descriptionProp, err := expandComputeGlobalAddressDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_global_address_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_global_address_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - 
nameProp, err := expandComputeGlobalAddressName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_global_address_reflect.ValueOf(nameProp)) && (ok || !resource_compute_global_address_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - ipVersionProp, err := expandComputeGlobalAddressIpVersion(d.Get("ip_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_version"); !isEmptyValue(resource_compute_global_address_reflect.ValueOf(ipVersionProp)) && (ok || !resource_compute_global_address_reflect.DeepEqual(v, ipVersionProp)) { - obj["ipVersion"] = ipVersionProp - } - prefixLengthProp, err := expandComputeGlobalAddressPrefixLength(d.Get("prefix_length"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("prefix_length"); !isEmptyValue(resource_compute_global_address_reflect.ValueOf(prefixLengthProp)) && (ok || !resource_compute_global_address_reflect.DeepEqual(v, prefixLengthProp)) { - obj["prefixLength"] = prefixLengthProp - } - addressTypeProp, err := expandComputeGlobalAddressAddressType(d.Get("address_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("address_type"); !isEmptyValue(resource_compute_global_address_reflect.ValueOf(addressTypeProp)) && (ok || !resource_compute_global_address_reflect.DeepEqual(v, addressTypeProp)) { - obj["addressType"] = addressTypeProp - } - purposeProp, err := expandComputeGlobalAddressPurpose(d.Get("purpose"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("purpose"); !isEmptyValue(resource_compute_global_address_reflect.ValueOf(purposeProp)) && (ok || !resource_compute_global_address_reflect.DeepEqual(v, purposeProp)) { - obj["purpose"] = purposeProp - } - networkProp, err := expandComputeGlobalAddressNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("network"); !isEmptyValue(resource_compute_global_address_reflect.ValueOf(networkProp)) && (ok || !resource_compute_global_address_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/addresses") - if err != nil { - return err - } - - resource_compute_global_address_log.Printf("[DEBUG] Creating new GlobalAddress: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_global_address_fmt.Errorf("Error fetching project for GlobalAddress: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_global_address_schema.TimeoutCreate)) - if err != nil { - return resource_compute_global_address_fmt.Errorf("Error creating GlobalAddress: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/addresses/{{name}}") - if err != nil { - return resource_compute_global_address_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating GlobalAddress", userAgent, - d.Timeout(resource_compute_global_address_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_global_address_fmt.Errorf("Error waiting to create GlobalAddress: %s", err) - } - - resource_compute_global_address_log.Printf("[DEBUG] Finished creating GlobalAddress %q: %#v", d.Id(), res) - - return resourceComputeGlobalAddressRead(d, meta) -} - -func resourceComputeGlobalAddressRead(d *resource_compute_global_address_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/global/addresses/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_global_address_fmt.Errorf("Error fetching project for GlobalAddress: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_global_address_fmt.Sprintf("ComputeGlobalAddress %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_global_address_fmt.Errorf("Error reading GlobalAddress: %s", err) - } - - if err := d.Set("address", flattenComputeGlobalAddressAddress(res["address"], d, config)); err != nil { - return resource_compute_global_address_fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeGlobalAddressCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_global_address_fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("description", flattenComputeGlobalAddressDescription(res["description"], d, config)); err != nil { - return resource_compute_global_address_fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("name", flattenComputeGlobalAddressName(res["name"], d, config)); err != nil { - return resource_compute_global_address_fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("ip_version", flattenComputeGlobalAddressIpVersion(res["ipVersion"], d, config)); err != nil { - return resource_compute_global_address_fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("prefix_length", flattenComputeGlobalAddressPrefixLength(res["prefixLength"], d, config)); err != nil { - return 
resource_compute_global_address_fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("address_type", flattenComputeGlobalAddressAddressType(res["addressType"], d, config)); err != nil { - return resource_compute_global_address_fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("purpose", flattenComputeGlobalAddressPurpose(res["purpose"], d, config)); err != nil { - return resource_compute_global_address_fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("network", flattenComputeGlobalAddressNetwork(res["network"], d, config)); err != nil { - return resource_compute_global_address_fmt.Errorf("Error reading GlobalAddress: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_global_address_fmt.Errorf("Error reading GlobalAddress: %s", err) - } - - return nil -} - -func resourceComputeGlobalAddressDelete(d *resource_compute_global_address_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_global_address_fmt.Errorf("Error fetching project for GlobalAddress: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/addresses/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_global_address_log.Printf("[DEBUG] Deleting GlobalAddress %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_global_address_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GlobalAddress") - } - - err = 
computeOperationWaitTime( - config, res, project, "Deleting GlobalAddress", userAgent, - d.Timeout(resource_compute_global_address_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_global_address_log.Printf("[DEBUG] Finished deleting GlobalAddress %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeGlobalAddressImport(d *resource_compute_global_address_schema.ResourceData, meta interface{}) ([]*resource_compute_global_address_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/addresses/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/addresses/{{name}}") - if err != nil { - return nil, resource_compute_global_address_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_global_address_schema.ResourceData{d}, nil -} - -func flattenComputeGlobalAddressAddress(v interface{}, d *resource_compute_global_address_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalAddressCreationTimestamp(v interface{}, d *resource_compute_global_address_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalAddressDescription(v interface{}, d *resource_compute_global_address_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalAddressName(v interface{}, d *resource_compute_global_address_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalAddressIpVersion(v interface{}, d *resource_compute_global_address_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalAddressPrefixLength(v interface{}, d *resource_compute_global_address_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if 
intVal, err := resource_compute_global_address_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeGlobalAddressAddressType(v interface{}, d *resource_compute_global_address_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalAddressPurpose(v interface{}, d *resource_compute_global_address_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalAddressNetwork(v interface{}, d *resource_compute_global_address_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeGlobalAddressAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalAddressDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalAddressName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalAddressIpVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalAddressPrefixLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalAddressAddressType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalAddressPurpose(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalAddressNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, 
resource_compute_global_address_fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeGlobalForwardingRule() *resource_compute_global_forwarding_rule_schema.Resource { - return &resource_compute_global_forwarding_rule_schema.Resource{ - Create: resourceComputeGlobalForwardingRuleCreate, - Read: resourceComputeGlobalForwardingRuleRead, - Update: resourceComputeGlobalForwardingRuleUpdate, - Delete: resourceComputeGlobalForwardingRuleDelete, - - Importer: &resource_compute_global_forwarding_rule_schema.ResourceImporter{ - State: resourceComputeGlobalForwardingRuleImport, - }, - - Timeouts: &resource_compute_global_forwarding_rule_schema.ResourceTimeout{ - Create: resource_compute_global_forwarding_rule_schema.DefaultTimeout(10 * resource_compute_global_forwarding_rule_time.Minute), - Update: resource_compute_global_forwarding_rule_schema.DefaultTimeout(10 * resource_compute_global_forwarding_rule_time.Minute), - Delete: resource_compute_global_forwarding_rule_schema.DefaultTimeout(10 * resource_compute_global_forwarding_rule_time.Minute), - }, - - Schema: map[string]*resource_compute_global_forwarding_rule_schema.Schema{ - "name": { - Type: resource_compute_global_forwarding_rule_schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). 
Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - }, - - "target": { - Type: resource_compute_global_forwarding_rule_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For `INTERNAL_SELF_MANAGED` load balancing, only `targetHttpProxy` is valid, not `targetHttpsProxy`.", - }, - - "description": { - Type: resource_compute_global_forwarding_rule_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "An optional description of this resource. Provide this property when you create the resource.", - }, - - "ip_address": { - Type: resource_compute_global_forwarding_rule_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: internalIpDiffSuppress, - Description: "IP address that this forwarding rule serves. When a client sends traffic to this IP address, the forwarding rule directs the traffic to the target that you specify in the forwarding rule. If you don't specify a reserved IP address, an ephemeral IP address is assigned. 
Methods for specifying an IP address: * IPv4 dotted decimal, as in `100.1.2.3` * Full URL, as in `https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name` * Partial URL or by name, as in: * `projects/project_id/regions/region/addresses/address-name` * `regions/region/addresses/address-name` * `global/addresses/address-name` * `address-name` The loadBalancingScheme and the forwarding rule's target determine the type of IP address that you can use. For detailed information, refer to [IP address specifications](/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications).", - }, - - "ip_protocol": { - Type: resource_compute_global_forwarding_rule_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: caseDiffSuppress, - Description: "The IP protocol to which this rule applies. For protocol forwarding, valid options are `TCP`, `UDP`, `ESP`, `AH`, `SCTP` or `ICMP`. For Internal TCP/UDP Load Balancing, the load balancing scheme is `INTERNAL`, and one of `TCP` or `UDP` are valid. For Traffic Director, the load balancing scheme is `INTERNAL_SELF_MANAGED`, and only `TCP`is valid. For Internal HTTP(S) Load Balancing, the load balancing scheme is `INTERNAL_MANAGED`, and only `TCP` is valid. For HTTP(S), SSL Proxy, and TCP Proxy Load Balancing, the load balancing scheme is `EXTERNAL` and only `TCP` is valid. For Network TCP/UDP Load Balancing, the load balancing scheme is `EXTERNAL`, and one of `TCP` or `UDP` is valid.", - }, - - "ip_version": { - Type: resource_compute_global_forwarding_rule_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The IP Version that will be used by this forwarding rule. Valid options are `IPV4` or `IPV6`. This can only be specified for an external global forwarding rule. 
Possible values: UNSPECIFIED_VERSION, IPV4, IPV6", - }, - - "labels": { - Type: resource_compute_global_forwarding_rule_schema.TypeMap, - Optional: true, - Description: "Labels to apply to this rule.", - Elem: &resource_compute_global_forwarding_rule_schema.Schema{Type: resource_compute_global_forwarding_rule_schema.TypeString}, - }, - - "load_balancing_scheme": { - Type: resource_compute_global_forwarding_rule_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Specifies the forwarding rule type.\n\n* `EXTERNAL` is used for:\n * Classic Cloud VPN gateways\n * Protocol forwarding to VMs from an external IP address\n * The following load balancers: HTTP(S), SSL Proxy, TCP Proxy, and Network TCP/UDP\n* `INTERNAL` is used for:\n * Protocol forwarding to VMs from an internal IP address\n * Internal TCP/UDP load balancers\n* `INTERNAL_MANAGED` is used for:\n * Internal HTTP(S) load balancers\n* `INTERNAL_SELF_MANAGED` is used for:\n * Traffic Director\n\nFor more information about forwarding rules, refer to [Forwarding rule concepts](/load-balancing/docs/forwarding-rule-concepts). Possible values: INVALID, INTERNAL, INTERNAL_MANAGED, INTERNAL_SELF_MANAGED, EXTERNAL", - Default: "EXTERNAL", - }, - - "metadata_filters": { - Type: resource_compute_global_forwarding_rule_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Opaque filter criteria used by Loadbalancer to restrict routing configuration to a limited set of [xDS](https://github.com/envoyproxy/data-plane-api/blob/master/XDS_PROTOCOL.md) compliant clients. In their xDS requests to Loadbalancer, xDS clients present [node metadata](https://github.com/envoyproxy/data-plane-api/search?q=%22message+Node%22+in%3A%2Fenvoy%2Fapi%2Fv2%2Fcore%2Fbase.proto&). If a match takes place, the relevant configuration is made available to those proxies. Otherwise, all the resources (e.g. 
`TargetHttpProxy`, `UrlMap`) referenced by the `ForwardingRule` will not be visible to those proxies.\n\nFor each `metadataFilter` in this list, if its `filterMatchCriteria` is set to MATCH_ANY, at least one of the `filterLabel`s must match the corresponding label provided in the metadata. If its `filterMatchCriteria` is set to MATCH_ALL, then all of its `filterLabel`s must match with corresponding labels provided in the metadata.\n\n`metadataFilters` specified here will be applifed before those specified in the `UrlMap` that this `ForwardingRule` references.\n\n`metadataFilters` only applies to Loadbalancers that have their loadBalancingScheme set to `INTERNAL_SELF_MANAGED`.", - Elem: ComputeGlobalForwardingRuleMetadataFilterSchema(), - }, - - "network": { - Type: resource_compute_global_forwarding_rule_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "This field is not used for external load balancing. For `INTERNAL` and `INTERNAL_SELF_MANAGED` load balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used.", - }, - - "port_range": { - Type: resource_compute_global_forwarding_rule_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: portRangeDiffSuppress, - Description: "When the load balancing scheme is `EXTERNAL`, `INTERNAL_SELF_MANAGED` and `INTERNAL_MANAGED`, you can specify a `port_range`. Use with a forwarding rule that points to a target proxy or a target pool. Do not use with a forwarding rule that points to a backend service. This field is used along with the `target` field for TargetHttpProxy, TargetHttpsProxy, TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, TargetInstance. 
Applicable only when `IPProtocol` is `TCP`, `UDP`, or `SCTP`, only packets addressed to ports in the specified range will be forwarded to `target`. Forwarding rules with the same `[IPAddress, IPProtocol]` pair must have disjoint port ranges. Some types of forwarding target have constraints on the acceptable ports:\n\n* TargetHttpProxy: 80, 8080\n* TargetHttpsProxy: 443\n* TargetTcpProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222\n* TargetSslProxy: 25, 43, 110, 143, 195, 443, 465, 587, 700, 993, 995, 1688, 1883, 5222\n* TargetVpnGateway: 500, 4500\n\n@pattern: d+(?:-d+)?", - }, - - "project": { - Type: resource_compute_global_forwarding_rule_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The project this resource belongs in.", - }, - - "label_fingerprint": { - Type: resource_compute_global_forwarding_rule_schema.TypeString, - Computed: true, - Description: "Used internally during label updates.", - }, - - "self_link": { - Type: resource_compute_global_forwarding_rule_schema.TypeString, - Computed: true, - Description: "[Output Only] Server-defined URL for the resource.", - }, - }, - } -} - -func ComputeGlobalForwardingRuleMetadataFilterSchema() *resource_compute_global_forwarding_rule_schema.Resource { - return &resource_compute_global_forwarding_rule_schema.Resource{ - Schema: map[string]*resource_compute_global_forwarding_rule_schema.Schema{ - "filter_labels": { - Type: resource_compute_global_forwarding_rule_schema.TypeList, - Required: true, - ForceNew: true, - Description: "The list of label value pairs that must match labels in the provided metadata based on `filterMatchCriteria`\n\nThis list must not be empty and can have at the most 64 entries.", - MaxItems: 64, - MinItems: 1, - Elem: ComputeGlobalForwardingRuleMetadataFilterFilterLabelSchema(), - }, - - "filter_match_criteria": { - Type: 
resource_compute_global_forwarding_rule_schema.TypeString, - Required: true, - ForceNew: true, - Description: "Specifies how individual `filterLabel` matches within the list of `filterLabel`s contribute towards the overall `metadataFilter` match.\n\nSupported values are:\n\n* MATCH_ANY: At least one of the `filterLabels` must have a matching label in the provided metadata.\n* MATCH_ALL: All `filterLabels` must have matching labels in the provided metadata. Possible values: NOT_SET, MATCH_ALL, MATCH_ANY", - }, - }, - } -} - -func ComputeGlobalForwardingRuleMetadataFilterFilterLabelSchema() *resource_compute_global_forwarding_rule_schema.Resource { - return &resource_compute_global_forwarding_rule_schema.Resource{ - Schema: map[string]*resource_compute_global_forwarding_rule_schema.Schema{ - "name": { - Type: resource_compute_global_forwarding_rule_schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of metadata label.\n\nThe name can have a maximum length of 1024 characters and must be at least 1 character long.", - }, - - "value": { - Type: resource_compute_global_forwarding_rule_schema.TypeString, - Required: true, - ForceNew: true, - Description: "The value of the label must match the specified value.\n\nvalue can have a maximum length of 1024 characters.", - }, - }, - } -} - -func resourceComputeGlobalForwardingRuleCreate(d *resource_compute_global_forwarding_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &resource_compute_global_forwarding_rule_computecompute.ForwardingRule{ - Name: resource_compute_global_forwarding_rule_dcldcl.String(d.Get("name").(string)), - Target: resource_compute_global_forwarding_rule_dcldcl.String(d.Get("target").(string)), - Description: resource_compute_global_forwarding_rule_dcldcl.String(d.Get("description").(string)), - IPAddress: 
resource_compute_global_forwarding_rule_dcldcl.StringOrNil(d.Get("ip_address").(string)), - IPProtocol: resource_compute_global_forwarding_rule_computecompute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), - IPVersion: resource_compute_global_forwarding_rule_computecompute.ForwardingRuleIPVersionEnumRef(d.Get("ip_version").(string)), - Labels: checkStringMap(d.Get("labels")), - LoadBalancingScheme: resource_compute_global_forwarding_rule_computecompute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), - MetadataFilter: expandComputeGlobalForwardingRuleMetadataFilterArray(d.Get("metadata_filters")), - Network: resource_compute_global_forwarding_rule_dcldcl.StringOrNil(d.Get("network").(string)), - PortRange: resource_compute_global_forwarding_rule_dcldcl.String(d.Get("port_range").(string)), - Project: resource_compute_global_forwarding_rule_dcldcl.String(project), - } - - id, err := replaceVarsForId(d, config, "projects/{{project}}/global/forwardingRules/{{name}}") - if err != nil { - return resource_compute_global_forwarding_rule_fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - createDirective := CreateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_global_forwarding_rule_schema.TimeoutCreate)) - res, err := client.ApplyForwardingRule(resource_compute_global_forwarding_rule_context.Background(), obj, createDirective...) 
- - if _, ok := err.(resource_compute_global_forwarding_rule_dcldcl.DiffAfterApplyError); ok { - resource_compute_global_forwarding_rule_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_compute_global_forwarding_rule_fmt.Errorf("Error creating ForwardingRule: %s", err) - } - - resource_compute_global_forwarding_rule_log.Printf("[DEBUG] Finished creating ForwardingRule %q: %#v", d.Id(), res) - - return resourceComputeGlobalForwardingRuleRead(d, meta) -} - -func resourceComputeGlobalForwardingRuleRead(d *resource_compute_global_forwarding_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &resource_compute_global_forwarding_rule_computecompute.ForwardingRule{ - Name: resource_compute_global_forwarding_rule_dcldcl.String(d.Get("name").(string)), - Target: resource_compute_global_forwarding_rule_dcldcl.String(d.Get("target").(string)), - Description: resource_compute_global_forwarding_rule_dcldcl.String(d.Get("description").(string)), - IPAddress: resource_compute_global_forwarding_rule_dcldcl.StringOrNil(d.Get("ip_address").(string)), - IPProtocol: resource_compute_global_forwarding_rule_computecompute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), - IPVersion: resource_compute_global_forwarding_rule_computecompute.ForwardingRuleIPVersionEnumRef(d.Get("ip_version").(string)), - Labels: checkStringMap(d.Get("labels")), - LoadBalancingScheme: resource_compute_global_forwarding_rule_computecompute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), - MetadataFilter: expandComputeGlobalForwardingRuleMetadataFilterArray(d.Get("metadata_filters")), - Network: resource_compute_global_forwarding_rule_dcldcl.StringOrNil(d.Get("network").(string)), - PortRange: 
resource_compute_global_forwarding_rule_dcldcl.String(d.Get("port_range").(string)), - Project: resource_compute_global_forwarding_rule_dcldcl.String(project), - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_global_forwarding_rule_schema.TimeoutRead)) - res, err := client.GetForwardingRule(resource_compute_global_forwarding_rule_context.Background(), obj) - if err != nil { - resourceName := resource_compute_global_forwarding_rule_fmt.Sprintf("ComputeGlobalForwardingRule %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("name", res.Name); err != nil { - return resource_compute_global_forwarding_rule_fmt.Errorf("error setting name in state: %s", err) - } - if err = d.Set("target", res.Target); err != nil { - return resource_compute_global_forwarding_rule_fmt.Errorf("error setting target in state: %s", err) - } - if err = d.Set("description", res.Description); err != nil { - return resource_compute_global_forwarding_rule_fmt.Errorf("error setting description in state: %s", err) - } - if err = d.Set("ip_address", res.IPAddress); err != nil { - return resource_compute_global_forwarding_rule_fmt.Errorf("error setting ip_address in state: %s", err) - } - if err = d.Set("ip_protocol", res.IPProtocol); err != nil { - return resource_compute_global_forwarding_rule_fmt.Errorf("error setting ip_protocol in state: %s", err) - } - if err = d.Set("ip_version", res.IPVersion); err != nil { - return resource_compute_global_forwarding_rule_fmt.Errorf("error setting ip_version in state: %s", err) - } - if err = d.Set("labels", res.Labels); err != nil { - return resource_compute_global_forwarding_rule_fmt.Errorf("error setting labels in state: %s", err) - } - if err = 
d.Set("load_balancing_scheme", res.LoadBalancingScheme); err != nil { - return resource_compute_global_forwarding_rule_fmt.Errorf("error setting load_balancing_scheme in state: %s", err) - } - if err = d.Set("metadata_filters", flattenComputeGlobalForwardingRuleMetadataFilterArray(res.MetadataFilter)); err != nil { - return resource_compute_global_forwarding_rule_fmt.Errorf("error setting metadata_filters in state: %s", err) - } - if err = d.Set("network", res.Network); err != nil { - return resource_compute_global_forwarding_rule_fmt.Errorf("error setting network in state: %s", err) - } - if err = d.Set("port_range", res.PortRange); err != nil { - return resource_compute_global_forwarding_rule_fmt.Errorf("error setting port_range in state: %s", err) - } - if err = d.Set("project", res.Project); err != nil { - return resource_compute_global_forwarding_rule_fmt.Errorf("error setting project in state: %s", err) - } - if err = d.Set("label_fingerprint", res.LabelFingerprint); err != nil { - return resource_compute_global_forwarding_rule_fmt.Errorf("error setting label_fingerprint in state: %s", err) - } - if err = d.Set("self_link", res.SelfLink); err != nil { - return resource_compute_global_forwarding_rule_fmt.Errorf("error setting self_link in state: %s", err) - } - - return nil -} - -func resourceComputeGlobalForwardingRuleUpdate(d *resource_compute_global_forwarding_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &resource_compute_global_forwarding_rule_computecompute.ForwardingRule{ - Name: resource_compute_global_forwarding_rule_dcldcl.String(d.Get("name").(string)), - Target: resource_compute_global_forwarding_rule_dcldcl.String(d.Get("target").(string)), - Description: resource_compute_global_forwarding_rule_dcldcl.String(d.Get("description").(string)), - IPAddress: 
resource_compute_global_forwarding_rule_dcldcl.StringOrNil(d.Get("ip_address").(string)), - IPProtocol: resource_compute_global_forwarding_rule_computecompute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), - IPVersion: resource_compute_global_forwarding_rule_computecompute.ForwardingRuleIPVersionEnumRef(d.Get("ip_version").(string)), - Labels: checkStringMap(d.Get("labels")), - LoadBalancingScheme: resource_compute_global_forwarding_rule_computecompute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), - MetadataFilter: expandComputeGlobalForwardingRuleMetadataFilterArray(d.Get("metadata_filters")), - Network: resource_compute_global_forwarding_rule_dcldcl.StringOrNil(d.Get("network").(string)), - PortRange: resource_compute_global_forwarding_rule_dcldcl.String(d.Get("port_range").(string)), - Project: resource_compute_global_forwarding_rule_dcldcl.String(project), - } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_global_forwarding_rule_schema.TimeoutUpdate)) - res, err := client.ApplyForwardingRule(resource_compute_global_forwarding_rule_context.Background(), obj, directive...) 
- - if _, ok := err.(resource_compute_global_forwarding_rule_dcldcl.DiffAfterApplyError); ok { - resource_compute_global_forwarding_rule_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_compute_global_forwarding_rule_fmt.Errorf("Error updating ForwardingRule: %s", err) - } - - resource_compute_global_forwarding_rule_log.Printf("[DEBUG] Finished creating ForwardingRule %q: %#v", d.Id(), res) - - return resourceComputeGlobalForwardingRuleRead(d, meta) -} - -func resourceComputeGlobalForwardingRuleDelete(d *resource_compute_global_forwarding_rule_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &resource_compute_global_forwarding_rule_computecompute.ForwardingRule{ - Name: resource_compute_global_forwarding_rule_dcldcl.String(d.Get("name").(string)), - Target: resource_compute_global_forwarding_rule_dcldcl.String(d.Get("target").(string)), - Description: resource_compute_global_forwarding_rule_dcldcl.String(d.Get("description").(string)), - IPAddress: resource_compute_global_forwarding_rule_dcldcl.StringOrNil(d.Get("ip_address").(string)), - IPProtocol: resource_compute_global_forwarding_rule_computecompute.ForwardingRuleIPProtocolEnumRef(d.Get("ip_protocol").(string)), - IPVersion: resource_compute_global_forwarding_rule_computecompute.ForwardingRuleIPVersionEnumRef(d.Get("ip_version").(string)), - Labels: checkStringMap(d.Get("labels")), - LoadBalancingScheme: resource_compute_global_forwarding_rule_computecompute.ForwardingRuleLoadBalancingSchemeEnumRef(d.Get("load_balancing_scheme").(string)), - MetadataFilter: expandComputeGlobalForwardingRuleMetadataFilterArray(d.Get("metadata_filters")), - Network: resource_compute_global_forwarding_rule_dcldcl.StringOrNil(d.Get("network").(string)), - PortRange: 
resource_compute_global_forwarding_rule_dcldcl.String(d.Get("port_range").(string)), - Project: resource_compute_global_forwarding_rule_dcldcl.String(project), - } - - resource_compute_global_forwarding_rule_log.Printf("[DEBUG] Deleting ForwardingRule %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLComputeClient(config, userAgent, billingProject, d.Timeout(resource_compute_global_forwarding_rule_schema.TimeoutDelete)) - if err := client.DeleteForwardingRule(resource_compute_global_forwarding_rule_context.Background(), obj); err != nil { - return resource_compute_global_forwarding_rule_fmt.Errorf("Error deleting ForwardingRule: %s", err) - } - - resource_compute_global_forwarding_rule_log.Printf("[DEBUG] Finished deleting ForwardingRule %q", d.Id()) - return nil -} - -func resourceComputeGlobalForwardingRuleImport(d *resource_compute_global_forwarding_rule_schema.ResourceData, meta interface{}) ([]*resource_compute_global_forwarding_rule_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/forwardingRules/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVarsForId(d, config, "projects/{{project}}/global/forwardingRules/{{name}}") - if err != nil { - return nil, resource_compute_global_forwarding_rule_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_global_forwarding_rule_schema.ResourceData{d}, nil -} - -func expandComputeGlobalForwardingRuleMetadataFilterArray(o interface{}) []resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilter { - if o == nil { - return make([]resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilter, 0) - } - 
- objs := o.([]interface{}) - if len(objs) == 0 { - return make([]resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilter, 0) - } - - items := make([]resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilter, 0, len(objs)) - for _, item := range objs { - i := expandComputeGlobalForwardingRuleMetadataFilter(item) - items = append(items, *i) - } - - return items -} - -func expandComputeGlobalForwardingRuleMetadataFilter(o interface{}) *resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilter { - if o == nil { - return resource_compute_global_forwarding_rule_computecompute.EmptyForwardingRuleMetadataFilter - } - - obj := o.(map[string]interface{}) - return &resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilter{ - FilterLabel: expandComputeGlobalForwardingRuleMetadataFilterFilterLabelArray(obj["filter_labels"]), - FilterMatchCriteria: resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilterFilterMatchCriteriaEnumRef(obj["filter_match_criteria"].(string)), - } -} - -func flattenComputeGlobalForwardingRuleMetadataFilterArray(objs []resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilter) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenComputeGlobalForwardingRuleMetadataFilter(&item) - items = append(items, i) - } - - return items -} - -func flattenComputeGlobalForwardingRuleMetadataFilter(obj *resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilter) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "filter_labels": flattenComputeGlobalForwardingRuleMetadataFilterFilterLabelArray(obj.FilterLabel), - "filter_match_criteria": obj.FilterMatchCriteria, - } - - return transformed - -} - -func 
expandComputeGlobalForwardingRuleMetadataFilterFilterLabelArray(o interface{}) []resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilterFilterLabel { - if o == nil { - return make([]resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilterFilterLabel, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 { - return make([]resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilterFilterLabel, 0) - } - - items := make([]resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilterFilterLabel, 0, len(objs)) - for _, item := range objs { - i := expandComputeGlobalForwardingRuleMetadataFilterFilterLabel(item) - items = append(items, *i) - } - - return items -} - -func expandComputeGlobalForwardingRuleMetadataFilterFilterLabel(o interface{}) *resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilterFilterLabel { - if o == nil { - return resource_compute_global_forwarding_rule_computecompute.EmptyForwardingRuleMetadataFilterFilterLabel - } - - obj := o.(map[string]interface{}) - return &resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilterFilterLabel{ - Name: resource_compute_global_forwarding_rule_dcldcl.String(obj["name"].(string)), - Value: resource_compute_global_forwarding_rule_dcldcl.String(obj["value"].(string)), - } -} - -func flattenComputeGlobalForwardingRuleMetadataFilterFilterLabelArray(objs []resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilterFilterLabel) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenComputeGlobalForwardingRuleMetadataFilterFilterLabel(&item) - items = append(items, i) - } - - return items -} - -func flattenComputeGlobalForwardingRuleMetadataFilterFilterLabel(obj *resource_compute_global_forwarding_rule_computecompute.ForwardingRuleMetadataFilterFilterLabel) interface{} { - if 
obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "name": obj.Name, - "value": obj.Value, - } - - return transformed - -} - -func resourceComputeGlobalNetworkEndpoint() *resource_compute_global_network_endpoint_schema.Resource { - return &resource_compute_global_network_endpoint_schema.Resource{ - Create: resourceComputeGlobalNetworkEndpointCreate, - Read: resourceComputeGlobalNetworkEndpointRead, - Delete: resourceComputeGlobalNetworkEndpointDelete, - - Importer: &resource_compute_global_network_endpoint_schema.ResourceImporter{ - State: resourceComputeGlobalNetworkEndpointImport, - }, - - Timeouts: &resource_compute_global_network_endpoint_schema.ResourceTimeout{ - Create: resource_compute_global_network_endpoint_schema.DefaultTimeout(6 * resource_compute_global_network_endpoint_time.Minute), - Delete: resource_compute_global_network_endpoint_schema.DefaultTimeout(6 * resource_compute_global_network_endpoint_time.Minute), - }, - - Schema: map[string]*resource_compute_global_network_endpoint_schema.Schema{ - "global_network_endpoint_group": { - Type: resource_compute_global_network_endpoint_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareResourceNames, - Description: `The global network endpoint group this endpoint is part of.`, - }, - "port": { - Type: resource_compute_global_network_endpoint_schema.TypeInt, - Required: true, - ForceNew: true, - Description: `Port number of the external endpoint.`, - }, - "fqdn": { - Type: resource_compute_global_network_endpoint_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Fully qualified domain name of network endpoint. 
-This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.`, - AtLeastOneOf: []string{"fqdn", "ip_address"}, - }, - "ip_address": { - Type: resource_compute_global_network_endpoint_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `IPv4 address external endpoint.`, - }, - "project": { - Type: resource_compute_global_network_endpoint_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeGlobalNetworkEndpointCreate(d *resource_compute_global_network_endpoint_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - portProp, err := expandNestedComputeGlobalNetworkEndpointPort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(resource_compute_global_network_endpoint_reflect.ValueOf(portProp)) && (ok || !resource_compute_global_network_endpoint_reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - ipAddressProp, err := expandNestedComputeGlobalNetworkEndpointIpAddress(d.Get("ip_address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_address"); !isEmptyValue(resource_compute_global_network_endpoint_reflect.ValueOf(ipAddressProp)) && (ok || !resource_compute_global_network_endpoint_reflect.DeepEqual(v, ipAddressProp)) { - obj["ipAddress"] = ipAddressProp - } - fqdnProp, err := expandNestedComputeGlobalNetworkEndpointFqdn(d.Get("fqdn"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fqdn"); !isEmptyValue(resource_compute_global_network_endpoint_reflect.ValueOf(fqdnProp)) && (ok || !resource_compute_global_network_endpoint_reflect.DeepEqual(v, fqdnProp)) { - obj["fqdn"] = fqdnProp - } - - obj, err = resourceComputeGlobalNetworkEndpointEncoder(d, 
meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "networkEndpoint/{{project}}/{{global_network_endpoint_group}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/attachNetworkEndpoints") - if err != nil { - return err - } - - resource_compute_global_network_endpoint_log.Printf("[DEBUG] Creating new GlobalNetworkEndpoint: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_global_network_endpoint_fmt.Errorf("Error fetching project for GlobalNetworkEndpoint: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_global_network_endpoint_schema.TimeoutCreate)) - if err != nil { - return resource_compute_global_network_endpoint_fmt.Errorf("Error creating GlobalNetworkEndpoint: %s", err) - } - - id, err := replaceVars(d, config, "{{project}}/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}") - if err != nil { - return resource_compute_global_network_endpoint_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating GlobalNetworkEndpoint", userAgent, - d.Timeout(resource_compute_global_network_endpoint_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_global_network_endpoint_fmt.Errorf("Error waiting to create GlobalNetworkEndpoint: %s", err) - } - - resource_compute_global_network_endpoint_log.Printf("[DEBUG] Finished creating GlobalNetworkEndpoint %q: %#v", d.Id(), res) - - return resourceComputeGlobalNetworkEndpointRead(d, meta) -} - -func 
resourceComputeGlobalNetworkEndpointRead(d *resource_compute_global_network_endpoint_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/listNetworkEndpoints") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_global_network_endpoint_fmt.Errorf("Error fetching project for GlobalNetworkEndpoint: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "POST", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_global_network_endpoint_fmt.Sprintf("ComputeGlobalNetworkEndpoint %q", d.Id())) - } - - res, err = flattenNestedComputeGlobalNetworkEndpoint(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_global_network_endpoint_log.Printf("[DEBUG] Removing ComputeGlobalNetworkEndpoint because it couldn't be matched.") - d.SetId("") - return nil - } - - res, err = resourceComputeGlobalNetworkEndpointDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_global_network_endpoint_log.Printf("[DEBUG] Removing ComputeGlobalNetworkEndpoint because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_compute_global_network_endpoint_fmt.Errorf("Error reading GlobalNetworkEndpoint: %s", err) - } - - if err := d.Set("port", flattenNestedComputeGlobalNetworkEndpointPort(res["port"], d, config)); err != nil { - return resource_compute_global_network_endpoint_fmt.Errorf("Error reading GlobalNetworkEndpoint: %s", err) - 
} - if err := d.Set("ip_address", flattenNestedComputeGlobalNetworkEndpointIpAddress(res["ipAddress"], d, config)); err != nil { - return resource_compute_global_network_endpoint_fmt.Errorf("Error reading GlobalNetworkEndpoint: %s", err) - } - if err := d.Set("fqdn", flattenNestedComputeGlobalNetworkEndpointFqdn(res["fqdn"], d, config)); err != nil { - return resource_compute_global_network_endpoint_fmt.Errorf("Error reading GlobalNetworkEndpoint: %s", err) - } - - return nil -} - -func resourceComputeGlobalNetworkEndpointDelete(d *resource_compute_global_network_endpoint_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_global_network_endpoint_fmt.Errorf("Error fetching project for GlobalNetworkEndpoint: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "networkEndpoint/{{project}}/{{global_network_endpoint_group}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/detachNetworkEndpoints") - if err != nil { - return err - } - - var obj map[string]interface{} - toDelete := make(map[string]interface{}) - portProp, err := expandNestedComputeGlobalNetworkEndpointPort(d.Get("port"), d, config) - if err != nil { - return err - } - if portProp != "" { - toDelete["port"] = portProp - } - - ipAddressProp, err := expandNestedComputeGlobalNetworkEndpointIpAddress(d.Get("ip_address"), d, config) - if err != nil { - return err - } - if ipAddressProp != "" { - toDelete["ipAddress"] = ipAddressProp - } - - fqdnProp, err := expandNestedComputeGlobalNetworkEndpointFqdn(d.Get("fqdn"), d, config) - if err != nil { - return err - } - 
if fqdnProp != "" { - toDelete["fqdn"] = fqdnProp - } - - obj = map[string]interface{}{ - "networkEndpoints": []map[string]interface{}{toDelete}, - } - resource_compute_global_network_endpoint_log.Printf("[DEBUG] Deleting GlobalNetworkEndpoint %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_global_network_endpoint_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GlobalNetworkEndpoint") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting GlobalNetworkEndpoint", userAgent, - d.Timeout(resource_compute_global_network_endpoint_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_global_network_endpoint_log.Printf("[DEBUG] Finished deleting GlobalNetworkEndpoint %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeGlobalNetworkEndpointImport(d *resource_compute_global_network_endpoint_schema.ResourceData, meta interface{}) ([]*resource_compute_global_network_endpoint_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/networkEndpointGroups/(?P[^/]+)/(?P[^/]*)/(?P[^/]*)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]*)/(?P[^/]*)/(?P[^/]*)", - "(?P[^/]+)/(?P[^/]*)/(?P[^/]*)/(?P[^/]*)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{project}}/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}") - if err != nil { - return nil, resource_compute_global_network_endpoint_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_global_network_endpoint_schema.ResourceData{d}, nil -} - -func flattenNestedComputeGlobalNetworkEndpointPort(v interface{}, d *resource_compute_global_network_endpoint_schema.ResourceData, config *Config) interface{} { - - if floatVal, 
ok := v.(float64); ok { - return int(floatVal) - } - return v -} - -func flattenNestedComputeGlobalNetworkEndpointIpAddress(v interface{}, d *resource_compute_global_network_endpoint_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeGlobalNetworkEndpointFqdn(v interface{}, d *resource_compute_global_network_endpoint_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedComputeGlobalNetworkEndpointPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeGlobalNetworkEndpointIpAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeGlobalNetworkEndpointFqdn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeGlobalNetworkEndpointEncoder(d *resource_compute_global_network_endpoint_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - if err := d.Set("global_network_endpoint_group", GetResourceNameFromSelfLink(d.Get("global_network_endpoint_group").(string))); err != nil { - return nil, resource_compute_global_network_endpoint_fmt.Errorf("Error setting global_network_endpoint_group: %s", err) - } - - wrappedReq := map[string]interface{}{ - "networkEndpoints": []interface{}{obj}, - } - return wrappedReq, nil -} - -func flattenNestedComputeGlobalNetworkEndpoint(d *resource_compute_global_network_endpoint_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["items"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_compute_global_network_endpoint_fmt.Errorf("expected list or map for value items. 
Actual value: %v", v) - } - - _, item, err := resourceComputeGlobalNetworkEndpointFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeGlobalNetworkEndpointFindNestedObjectInList(d *resource_compute_global_network_endpoint_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedIpAddress, err := expandNestedComputeGlobalNetworkEndpointIpAddress(d.Get("ip_address"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedIpAddress := flattenNestedComputeGlobalNetworkEndpointIpAddress(expectedIpAddress, d, meta.(*Config)) - expectedFqdn, err := expandNestedComputeGlobalNetworkEndpointFqdn(d.Get("fqdn"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedFqdn := flattenNestedComputeGlobalNetworkEndpointFqdn(expectedFqdn, d, meta.(*Config)) - expectedPort, err := expandNestedComputeGlobalNetworkEndpointPort(d.Get("port"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedPort := flattenNestedComputeGlobalNetworkEndpointPort(expectedPort, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - item, err := resourceComputeGlobalNetworkEndpointDecoder(d, meta, item) - if err != nil { - return -1, nil, err - } - - itemIpAddress := flattenNestedComputeGlobalNetworkEndpointIpAddress(item["ipAddress"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_global_network_endpoint_reflect.ValueOf(itemIpAddress)) && isEmptyValue(resource_compute_global_network_endpoint_reflect.ValueOf(expectedFlattenedIpAddress))) && !resource_compute_global_network_endpoint_reflect.DeepEqual(itemIpAddress, expectedFlattenedIpAddress) { - resource_compute_global_network_endpoint_log.Printf("[DEBUG] Skipping item with ipAddress= %#v, looking for %#v)", itemIpAddress, 
expectedFlattenedIpAddress) - continue - } - itemFqdn := flattenNestedComputeGlobalNetworkEndpointFqdn(item["fqdn"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_global_network_endpoint_reflect.ValueOf(itemFqdn)) && isEmptyValue(resource_compute_global_network_endpoint_reflect.ValueOf(expectedFlattenedFqdn))) && !resource_compute_global_network_endpoint_reflect.DeepEqual(itemFqdn, expectedFlattenedFqdn) { - resource_compute_global_network_endpoint_log.Printf("[DEBUG] Skipping item with fqdn= %#v, looking for %#v)", itemFqdn, expectedFlattenedFqdn) - continue - } - itemPort := flattenNestedComputeGlobalNetworkEndpointPort(item["port"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_global_network_endpoint_reflect.ValueOf(itemPort)) && isEmptyValue(resource_compute_global_network_endpoint_reflect.ValueOf(expectedFlattenedPort))) && !resource_compute_global_network_endpoint_reflect.DeepEqual(itemPort, expectedFlattenedPort) { - resource_compute_global_network_endpoint_log.Printf("[DEBUG] Skipping item with port= %#v, looking for %#v)", itemPort, expectedFlattenedPort) - continue - } - resource_compute_global_network_endpoint_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func resourceComputeGlobalNetworkEndpointDecoder(d *resource_compute_global_network_endpoint_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - v, ok := res["networkEndpoint"] - if !ok || v == nil { - return res, nil - } - - return v.(map[string]interface{}), nil -} - -func resourceComputeGlobalNetworkEndpointGroup() *resource_compute_global_network_endpoint_group_schema.Resource { - return &resource_compute_global_network_endpoint_group_schema.Resource{ - Create: resourceComputeGlobalNetworkEndpointGroupCreate, - Read: resourceComputeGlobalNetworkEndpointGroupRead, - Delete: resourceComputeGlobalNetworkEndpointGroupDelete, - - Importer: 
&resource_compute_global_network_endpoint_group_schema.ResourceImporter{ - State: resourceComputeGlobalNetworkEndpointGroupImport, - }, - - Timeouts: &resource_compute_global_network_endpoint_group_schema.ResourceTimeout{ - Create: resource_compute_global_network_endpoint_group_schema.DefaultTimeout(4 * resource_compute_global_network_endpoint_group_time.Minute), - Delete: resource_compute_global_network_endpoint_group_schema.DefaultTimeout(4 * resource_compute_global_network_endpoint_group_time.Minute), - }, - - Schema: map[string]*resource_compute_global_network_endpoint_group_schema.Schema{ - "name": { - Type: resource_compute_global_network_endpoint_group_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCPName, - Description: `Name of the resource; provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "network_endpoint_type": { - Type: resource_compute_global_network_endpoint_group_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_compute_global_network_endpoint_group_validation.StringInSlice([]string{"INTERNET_IP_PORT", "INTERNET_FQDN_PORT"}, false), - Description: `Type of network endpoints in this network endpoint group. 
Possible values: ["INTERNET_IP_PORT", "INTERNET_FQDN_PORT"]`, - }, - "default_port": { - Type: resource_compute_global_network_endpoint_group_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The default port used if the port number is not specified in the -network endpoint.`, - }, - "description": { - Type: resource_compute_global_network_endpoint_group_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "project": { - Type: resource_compute_global_network_endpoint_group_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_global_network_endpoint_group_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeGlobalNetworkEndpointGroupCreate(d *resource_compute_global_network_endpoint_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeGlobalNetworkEndpointGroupName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_global_network_endpoint_group_reflect.ValueOf(nameProp)) && (ok || !resource_compute_global_network_endpoint_group_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeGlobalNetworkEndpointGroupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_global_network_endpoint_group_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_global_network_endpoint_group_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - networkEndpointTypeProp, err 
:= expandComputeGlobalNetworkEndpointGroupNetworkEndpointType(d.Get("network_endpoint_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network_endpoint_type"); !isEmptyValue(resource_compute_global_network_endpoint_group_reflect.ValueOf(networkEndpointTypeProp)) && (ok || !resource_compute_global_network_endpoint_group_reflect.DeepEqual(v, networkEndpointTypeProp)) { - obj["networkEndpointType"] = networkEndpointTypeProp - } - defaultPortProp, err := expandComputeGlobalNetworkEndpointGroupDefaultPort(d.Get("default_port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_port"); !isEmptyValue(resource_compute_global_network_endpoint_group_reflect.ValueOf(defaultPortProp)) && (ok || !resource_compute_global_network_endpoint_group_reflect.DeepEqual(v, defaultPortProp)) { - obj["defaultPort"] = defaultPortProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups") - if err != nil { - return err - } - - resource_compute_global_network_endpoint_group_log.Printf("[DEBUG] Creating new GlobalNetworkEndpointGroup: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_global_network_endpoint_group_fmt.Errorf("Error fetching project for GlobalNetworkEndpointGroup: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_global_network_endpoint_group_schema.TimeoutCreate)) - if err != nil { - return resource_compute_global_network_endpoint_group_fmt.Errorf("Error creating GlobalNetworkEndpointGroup: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/networkEndpointGroups/{{name}}") - if err != nil { - return 
resource_compute_global_network_endpoint_group_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating GlobalNetworkEndpointGroup", userAgent, - d.Timeout(resource_compute_global_network_endpoint_group_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_global_network_endpoint_group_fmt.Errorf("Error waiting to create GlobalNetworkEndpointGroup: %s", err) - } - - resource_compute_global_network_endpoint_group_log.Printf("[DEBUG] Finished creating GlobalNetworkEndpointGroup %q: %#v", d.Id(), res) - - return resourceComputeGlobalNetworkEndpointGroupRead(d, meta) -} - -func resourceComputeGlobalNetworkEndpointGroupRead(d *resource_compute_global_network_endpoint_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_global_network_endpoint_group_fmt.Errorf("Error fetching project for GlobalNetworkEndpointGroup: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_global_network_endpoint_group_fmt.Sprintf("ComputeGlobalNetworkEndpointGroup %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_global_network_endpoint_group_fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) - } - - if err := d.Set("name", flattenComputeGlobalNetworkEndpointGroupName(res["name"], d, config)); err != nil { - return 
resource_compute_global_network_endpoint_group_fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) - } - if err := d.Set("description", flattenComputeGlobalNetworkEndpointGroupDescription(res["description"], d, config)); err != nil { - return resource_compute_global_network_endpoint_group_fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) - } - if err := d.Set("network_endpoint_type", flattenComputeGlobalNetworkEndpointGroupNetworkEndpointType(res["networkEndpointType"], d, config)); err != nil { - return resource_compute_global_network_endpoint_group_fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) - } - if err := d.Set("default_port", flattenComputeGlobalNetworkEndpointGroupDefaultPort(res["defaultPort"], d, config)); err != nil { - return resource_compute_global_network_endpoint_group_fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_global_network_endpoint_group_fmt.Errorf("Error reading GlobalNetworkEndpointGroup: %s", err) - } - - return nil -} - -func resourceComputeGlobalNetworkEndpointGroupDelete(d *resource_compute_global_network_endpoint_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_global_network_endpoint_group_fmt.Errorf("Error fetching project for GlobalNetworkEndpointGroup: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networkEndpointGroups/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_global_network_endpoint_group_log.Printf("[DEBUG] Deleting GlobalNetworkEndpointGroup %q", d.Id()) - - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_global_network_endpoint_group_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GlobalNetworkEndpointGroup") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting GlobalNetworkEndpointGroup", userAgent, - d.Timeout(resource_compute_global_network_endpoint_group_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_global_network_endpoint_group_log.Printf("[DEBUG] Finished deleting GlobalNetworkEndpointGroup %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeGlobalNetworkEndpointGroupImport(d *resource_compute_global_network_endpoint_group_schema.ResourceData, meta interface{}) ([]*resource_compute_global_network_endpoint_group_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/networkEndpointGroups/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/networkEndpointGroups/{{name}}") - if err != nil { - return nil, resource_compute_global_network_endpoint_group_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_global_network_endpoint_group_schema.ResourceData{d}, nil -} - -func flattenComputeGlobalNetworkEndpointGroupName(v interface{}, d *resource_compute_global_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalNetworkEndpointGroupDescription(v interface{}, d *resource_compute_global_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalNetworkEndpointGroupNetworkEndpointType(v interface{}, d 
*resource_compute_global_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeGlobalNetworkEndpointGroupDefaultPort(v interface{}, d *resource_compute_global_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_global_network_endpoint_group_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func expandComputeGlobalNetworkEndpointGroupName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalNetworkEndpointGroupDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalNetworkEndpointGroupNetworkEndpointType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeGlobalNetworkEndpointGroupDefaultPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeHaVpnGateway() *resource_compute_ha_vpn_gateway_schema.Resource { - return &resource_compute_ha_vpn_gateway_schema.Resource{ - Create: resourceComputeHaVpnGatewayCreate, - Read: resourceComputeHaVpnGatewayRead, - Delete: resourceComputeHaVpnGatewayDelete, - - Importer: &resource_compute_ha_vpn_gateway_schema.ResourceImporter{ - State: resourceComputeHaVpnGatewayImport, - }, - - Timeouts: &resource_compute_ha_vpn_gateway_schema.ResourceTimeout{ - Create: resource_compute_ha_vpn_gateway_schema.DefaultTimeout(4 * resource_compute_ha_vpn_gateway_time.Minute), - Delete: resource_compute_ha_vpn_gateway_schema.DefaultTimeout(4 * resource_compute_ha_vpn_gateway_time.Minute), - }, - - Schema: map[string]*resource_compute_ha_vpn_gateway_schema.Schema{ - "name": { - Type: 
resource_compute_ha_vpn_gateway_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCPName, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "network": { - Type: resource_compute_ha_vpn_gateway_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The network this VPN gateway is accepting traffic for.`, - }, - "description": { - Type: resource_compute_ha_vpn_gateway_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "region": { - Type: resource_compute_ha_vpn_gateway_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The region this gateway should sit in.`, - }, - "vpn_interfaces": { - Type: resource_compute_ha_vpn_gateway_schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: `A list of interfaces on this VPN gateway.`, - Elem: &resource_compute_ha_vpn_gateway_schema.Resource{ - Schema: map[string]*resource_compute_ha_vpn_gateway_schema.Schema{ - "id": { - Type: resource_compute_ha_vpn_gateway_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The numeric ID of this VPN gateway interface.`, - }, - "interconnect_attachment": { - Type: resource_compute_ha_vpn_gateway_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the interconnect attachment resource. 
When the value -of this field is present, the VPN Gateway will be used for -IPsec-encrypted Cloud Interconnect; all Egress or Ingress -traffic for this VPN Gateway interface will go through the -specified interconnect attachment resource. - -Not currently available publicly.`, - }, - "ip_address": { - Type: resource_compute_ha_vpn_gateway_schema.TypeString, - Computed: true, - Description: `The external IP address for this VPN gateway interface.`, - }, - }, - }, - }, - "project": { - Type: resource_compute_ha_vpn_gateway_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_ha_vpn_gateway_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeHaVpnGatewayCreate(d *resource_compute_ha_vpn_gateway_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeHaVpnGatewayDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_ha_vpn_gateway_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_ha_vpn_gateway_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeHaVpnGatewayName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_ha_vpn_gateway_reflect.ValueOf(nameProp)) && (ok || !resource_compute_ha_vpn_gateway_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - networkProp, err := expandComputeHaVpnGatewayNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_compute_ha_vpn_gateway_reflect.ValueOf(networkProp)) && (ok || 
!resource_compute_ha_vpn_gateway_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - vpnInterfacesProp, err := expandComputeHaVpnGatewayVpnInterfaces(d.Get("vpn_interfaces"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vpn_interfaces"); !isEmptyValue(resource_compute_ha_vpn_gateway_reflect.ValueOf(vpnInterfacesProp)) && (ok || !resource_compute_ha_vpn_gateway_reflect.DeepEqual(v, vpnInterfacesProp)) { - obj["vpnInterfaces"] = vpnInterfacesProp - } - regionProp, err := expandComputeHaVpnGatewayRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_ha_vpn_gateway_reflect.ValueOf(regionProp)) && (ok || !resource_compute_ha_vpn_gateway_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnGateways") - if err != nil { - return err - } - - resource_compute_ha_vpn_gateway_log.Printf("[DEBUG] Creating new HaVpnGateway: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_ha_vpn_gateway_fmt.Errorf("Error fetching project for HaVpnGateway: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_ha_vpn_gateway_schema.TimeoutCreate)) - if err != nil { - return resource_compute_ha_vpn_gateway_fmt.Errorf("Error creating HaVpnGateway: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/vpnGateways/{{name}}") - if err != nil { - return resource_compute_ha_vpn_gateway_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating HaVpnGateway", userAgent, - 
d.Timeout(resource_compute_ha_vpn_gateway_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_ha_vpn_gateway_fmt.Errorf("Error waiting to create HaVpnGateway: %s", err) - } - - resource_compute_ha_vpn_gateway_log.Printf("[DEBUG] Finished creating HaVpnGateway %q: %#v", d.Id(), res) - - return resourceComputeHaVpnGatewayRead(d, meta) -} - -func resourceComputeHaVpnGatewayRead(d *resource_compute_ha_vpn_gateway_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnGateways/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_ha_vpn_gateway_fmt.Errorf("Error fetching project for HaVpnGateway: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_ha_vpn_gateway_fmt.Sprintf("ComputeHaVpnGateway %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_ha_vpn_gateway_fmt.Errorf("Error reading HaVpnGateway: %s", err) - } - - if err := d.Set("description", flattenComputeHaVpnGatewayDescription(res["description"], d, config)); err != nil { - return resource_compute_ha_vpn_gateway_fmt.Errorf("Error reading HaVpnGateway: %s", err) - } - if err := d.Set("name", flattenComputeHaVpnGatewayName(res["name"], d, config)); err != nil { - return resource_compute_ha_vpn_gateway_fmt.Errorf("Error reading HaVpnGateway: %s", err) - } - if err := d.Set("network", flattenComputeHaVpnGatewayNetwork(res["network"], d, config)); err != nil { - return 
resource_compute_ha_vpn_gateway_fmt.Errorf("Error reading HaVpnGateway: %s", err) - } - if err := d.Set("vpn_interfaces", flattenComputeHaVpnGatewayVpnInterfaces(res["vpnInterfaces"], d, config)); err != nil { - return resource_compute_ha_vpn_gateway_fmt.Errorf("Error reading HaVpnGateway: %s", err) - } - if err := d.Set("region", flattenComputeHaVpnGatewayRegion(res["region"], d, config)); err != nil { - return resource_compute_ha_vpn_gateway_fmt.Errorf("Error reading HaVpnGateway: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_ha_vpn_gateway_fmt.Errorf("Error reading HaVpnGateway: %s", err) - } - - return nil -} - -func resourceComputeHaVpnGatewayDelete(d *resource_compute_ha_vpn_gateway_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_ha_vpn_gateway_fmt.Errorf("Error fetching project for HaVpnGateway: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnGateways/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_ha_vpn_gateway_log.Printf("[DEBUG] Deleting HaVpnGateway %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_ha_vpn_gateway_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "HaVpnGateway") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting HaVpnGateway", userAgent, - d.Timeout(resource_compute_ha_vpn_gateway_schema.TimeoutDelete)) - - if err != nil { - return err - } - - 
resource_compute_ha_vpn_gateway_log.Printf("[DEBUG] Finished deleting HaVpnGateway %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeHaVpnGatewayImport(d *resource_compute_ha_vpn_gateway_schema.ResourceData, meta interface{}) ([]*resource_compute_ha_vpn_gateway_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/vpnGateways/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/vpnGateways/{{name}}") - if err != nil { - return nil, resource_compute_ha_vpn_gateway_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_ha_vpn_gateway_schema.ResourceData{d}, nil -} - -func flattenComputeHaVpnGatewayDescription(v interface{}, d *resource_compute_ha_vpn_gateway_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHaVpnGatewayName(v interface{}, d *resource_compute_ha_vpn_gateway_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHaVpnGatewayNetwork(v interface{}, d *resource_compute_ha_vpn_gateway_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeHaVpnGatewayVpnInterfaces(v interface{}, d *resource_compute_ha_vpn_gateway_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "id": flattenComputeHaVpnGatewayVpnInterfacesId(original["id"], d, config), - "ip_address": flattenComputeHaVpnGatewayVpnInterfacesIpAddress(original["ipAddress"], d, config), 
- "interconnect_attachment": flattenComputeHaVpnGatewayVpnInterfacesInterconnectAttachment(original["interconnectAttachment"], d, config), - }) - } - return transformed -} - -func flattenComputeHaVpnGatewayVpnInterfacesId(v interface{}, d *resource_compute_ha_vpn_gateway_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_ha_vpn_gateway_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHaVpnGatewayVpnInterfacesIpAddress(v interface{}, d *resource_compute_ha_vpn_gateway_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHaVpnGatewayVpnInterfacesInterconnectAttachment(v interface{}, d *resource_compute_ha_vpn_gateway_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeHaVpnGatewayRegion(v interface{}, d *resource_compute_ha_vpn_gateway_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeHaVpnGatewayDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHaVpnGatewayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHaVpnGatewayNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_ha_vpn_gateway_fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeHaVpnGatewayVpnInterfaces(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := 
v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedId, err := expandComputeHaVpnGatewayVpnInterfacesId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_ha_vpn_gateway_reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedIpAddress, err := expandComputeHaVpnGatewayVpnInterfacesIpAddress(original["ip_address"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_ha_vpn_gateway_reflect.ValueOf(transformedIpAddress); val.IsValid() && !isEmptyValue(val) { - transformed["ipAddress"] = transformedIpAddress - } - - transformedInterconnectAttachment, err := expandComputeHaVpnGatewayVpnInterfacesInterconnectAttachment(original["interconnect_attachment"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_ha_vpn_gateway_reflect.ValueOf(transformedInterconnectAttachment); val.IsValid() && !isEmptyValue(val) { - transformed["interconnectAttachment"] = transformedInterconnectAttachment - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeHaVpnGatewayVpnInterfacesId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHaVpnGatewayVpnInterfacesIpAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHaVpnGatewayVpnInterfacesInterconnectAttachment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("interconnectAttachments", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_ha_vpn_gateway_fmt.Errorf("Invalid value for interconnect_attachment: %s", 
err) - } - return f.RelativeLink(), nil -} - -func expandComputeHaVpnGatewayRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_ha_vpn_gateway_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func validatePortSpec(diff *resource_compute_health_check_schema.ResourceDiff, blockName string) error { - block := diff.Get(blockName + ".0").(map[string]interface{}) - portSpec := block["port_specification"] - portName := block["port_name"] - port := block["port"] - - hasPort := (port != nil && port != 0) - noName := (portName == nil || portName == "") - - if portSpec == "USE_NAMED_PORT" && hasPort { - return resource_compute_health_check_fmt.Errorf("Error in %s: port cannot be specified when using port_specification USE_NAMED_PORT.", blockName) - } - if portSpec == "USE_NAMED_PORT" && noName { - return resource_compute_health_check_fmt.Errorf("Error in %s: Must specify port_name when using USE_NAMED_PORT as port_specification.", blockName) - } - - if portSpec == "USE_SERVING_PORT" && hasPort { - return resource_compute_health_check_fmt.Errorf("Error in %s: port cannot be specified when using port_specification USE_SERVING_PORT.", blockName) - } - if portSpec == "USE_SERVING_PORT" && !noName { - return resource_compute_health_check_fmt.Errorf("Error in %s: port_name cannot be specified when using port_specification USE_SERVING_PORT.", blockName) - } - - return nil -} - -func healthCheckCustomizeDiff(_ resource_compute_health_check_context.Context, diff *resource_compute_health_check_schema.ResourceDiff, v interface{}) error { - if diff.Get("http_health_check") != nil { - return validatePortSpec(diff, "http_health_check") - } - if diff.Get("https_health_check") != nil { - return validatePortSpec(diff, "https_health_check") - } - if diff.Get("http2_health_check") != nil { 
- return validatePortSpec(diff, "http2_health_check") - } - if diff.Get("tcp_health_check") != nil { - return validatePortSpec(diff, "tcp_health_check") - } - if diff.Get("ssl_health_check") != nil { - return validatePortSpec(diff, "ssl_health_check") - } - - return nil -} - -func portDiffSuppress(k, old, new string, d *resource_compute_health_check_schema.ResourceData) bool { - b := resource_compute_health_check_strings.Split(k, ".") - if len(b) > 2 { - attr := b[2] - - if attr == "port" { - var defaultPort int64 - - blockType := b[0] - - switch blockType { - case "http_health_check": - defaultPort = 80 - case "https_health_check": - defaultPort = 443 - case "http2_health_check": - defaultPort = 443 - case "tcp_health_check": - defaultPort = 80 - case "ssl_health_check": - defaultPort = 443 - } - - oldPort, _ := resource_compute_health_check_strconv.Atoi(old) - newPort, _ := resource_compute_health_check_strconv.Atoi(new) - - portSpec := d.Get(b[0] + ".0.port_specification") - if int64(oldPort) == defaultPort && newPort == 0 && (portSpec == "USE_FIXED_PORT" || portSpec == "") { - return true - } - } - } - - return false -} - -func resourceComputeHealthCheck() *resource_compute_health_check_schema.Resource { - return &resource_compute_health_check_schema.Resource{ - Create: resourceComputeHealthCheckCreate, - Read: resourceComputeHealthCheckRead, - Update: resourceComputeHealthCheckUpdate, - Delete: resourceComputeHealthCheckDelete, - - Importer: &resource_compute_health_check_schema.ResourceImporter{ - State: resourceComputeHealthCheckImport, - }, - - Timeouts: &resource_compute_health_check_schema.ResourceTimeout{ - Create: resource_compute_health_check_schema.DefaultTimeout(4 * resource_compute_health_check_time.Minute), - Update: resource_compute_health_check_schema.DefaultTimeout(4 * resource_compute_health_check_time.Minute), - Delete: resource_compute_health_check_schema.DefaultTimeout(4 * resource_compute_health_check_time.Minute), - }, - - CustomizeDiff: 
healthCheckCustomizeDiff, - - Schema: map[string]*resource_compute_health_check_schema.Schema{ - "name": { - Type: resource_compute_health_check_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the -last character, which cannot be a dash.`, - }, - "check_interval_sec": { - Type: resource_compute_health_check_schema.TypeInt, - Optional: true, - Description: `How often (in seconds) to send a health check. The default value is 5 -seconds.`, - Default: 5, - }, - "description": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "grpc_health_check": { - Type: resource_compute_health_check_schema.TypeList, - Optional: true, - DiffSuppressFunc: portDiffSuppress, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_compute_health_check_schema.Resource{ - Schema: map[string]*resource_compute_health_check_schema.Schema{ - "grpc_service_name": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `The gRPC service name for the health check. -The value of grpcServiceName has the following meanings by convention: - - Empty serviceName means the overall status of all services at the backend. - - Non-empty serviceName means the health of that gRPC service, as defined by the owner of the service. 
-The grpcServiceName can only be ASCII.`, - AtLeastOneOf: []string{"grpc_health_check.0.port", "grpc_health_check.0.port_name", "grpc_health_check.0.port_specification", "grpc_health_check.0.grpc_service_name"}, - }, - "port": { - Type: resource_compute_health_check_schema.TypeInt, - Optional: true, - Description: `The port number for the health check request. -Must be specified if portName and portSpecification are not set -or if port_specification is USE_FIXED_PORT. Valid values are 1 through 65535.`, - AtLeastOneOf: []string{"grpc_health_check.0.port", "grpc_health_check.0.port_name", "grpc_health_check.0.port_specification", "grpc_health_check.0.grpc_service_name"}, - }, - "port_name": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `Port name as defined in InstanceGroup#NamedPort#name. If both port and -port_name are defined, port takes precedence.`, - AtLeastOneOf: []string{"grpc_health_check.0.port", "grpc_health_check.0.port_name", "grpc_health_check.0.port_specification", "grpc_health_check.0.grpc_service_name"}, - }, - "port_specification": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_health_check_validation.StringInSlice([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}, false), - Description: `Specifies how port is selected for health checking, can be one of the -following values: - - * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. - - * 'USE_NAMED_PORT': The 'portName' is used for health checking. - - * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each - network endpoint is used for health checking. For other backends, the - port or named port specified in the Backend Service is used for health - checking. - -If not specified, gRPC health check follows behavior specified in 'port' and -'portName' fields. 
Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"]`, - AtLeastOneOf: []string{"grpc_health_check.0.port", "grpc_health_check.0.port_name", "grpc_health_check.0.port_specification", "grpc_health_check.0.grpc_service_name"}, - }, - }, - }, - ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check", "grpc_health_check"}, - }, - "healthy_threshold": { - Type: resource_compute_health_check_schema.TypeInt, - Optional: true, - Description: `A so-far unhealthy instance will be marked healthy after this many -consecutive successes. The default value is 2.`, - Default: 2, - }, - "http2_health_check": { - Type: resource_compute_health_check_schema.TypeList, - Optional: true, - DiffSuppressFunc: portDiffSuppress, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_compute_health_check_schema.Resource{ - Schema: map[string]*resource_compute_health_check_schema.Schema{ - "host": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `The value of the host header in the HTTP2 health check request. -If left empty (default value), the public IP on behalf of which this health -check is performed will be used.`, - AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, - }, - "port": { - Type: resource_compute_health_check_schema.TypeInt, - Optional: true, - Description: `The TCP port number for the HTTP2 health check request. 
-The default value is 443.`, - AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, - }, - "port_name": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `Port name as defined in InstanceGroup#NamedPort#name. If both port and -port_name are defined, port takes precedence.`, - AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, - }, - "port_specification": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_health_check_validation.StringInSlice([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}, false), - Description: `Specifies how port is selected for health checking, can be one of the -following values: - - * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. - - * 'USE_NAMED_PORT': The 'portName' is used for health checking. - - * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each - network endpoint is used for health checking. For other backends, the - port or named port specified in the Backend Service is used for health - checking. - -If not specified, HTTP2 health check follows behavior specified in 'port' and -'portName' fields. 
Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"]`, - AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, - }, - "proxy_header": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_health_check_validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), - Description: `Specifies the type of proxy header to append before sending data to the -backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, - Default: "NONE", - AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, - }, - "request_path": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `The request path of the HTTP2 health check request. -The default value is /.`, - Default: "/", - AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, - }, - "response": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `The bytes to match against the beginning of the response data. If left empty -(the default value), any response will indicate health. 
The response data -can only be ASCII.`, - AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, - }, - }, - }, - ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check", "grpc_health_check"}, - }, - "http_health_check": { - Type: resource_compute_health_check_schema.TypeList, - Optional: true, - DiffSuppressFunc: portDiffSuppress, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_compute_health_check_schema.Resource{ - Schema: map[string]*resource_compute_health_check_schema.Schema{ - "host": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `The value of the host header in the HTTP health check request. -If left empty (default value), the public IP on behalf of which this health -check is performed will be used.`, - AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, - }, - "port": { - Type: resource_compute_health_check_schema.TypeInt, - Optional: true, - Description: `The TCP port number for the HTTP health check request. -The default value is 80.`, - AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, - }, - "port_name": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `Port name as defined in InstanceGroup#NamedPort#name. 
If both port and -port_name are defined, port takes precedence.`, - AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, - }, - "port_specification": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_health_check_validation.StringInSlice([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}, false), - Description: `Specifies how port is selected for health checking, can be one of the -following values: - - * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. - - * 'USE_NAMED_PORT': The 'portName' is used for health checking. - - * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each - network endpoint is used for health checking. For other backends, the - port or named port specified in the Backend Service is used for health - checking. - -If not specified, HTTP health check follows behavior specified in 'port' and -'portName' fields. Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"]`, - AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, - }, - "proxy_header": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_health_check_validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), - Description: `Specifies the type of proxy header to append before sending data to the -backend. 
Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, - Default: "NONE", - AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, - }, - "request_path": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `The request path of the HTTP health check request. -The default value is /.`, - Default: "/", - AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, - }, - "response": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `The bytes to match against the beginning of the response data. If left empty -(the default value), any response will indicate health. 
The response data -can only be ASCII.`, - AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, - }, - }, - }, - ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check", "grpc_health_check"}, - }, - "https_health_check": { - Type: resource_compute_health_check_schema.TypeList, - Optional: true, - DiffSuppressFunc: portDiffSuppress, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_compute_health_check_schema.Resource{ - Schema: map[string]*resource_compute_health_check_schema.Schema{ - "host": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `The value of the host header in the HTTPS health check request. -If left empty (default value), the public IP on behalf of which this health -check is performed will be used.`, - AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, - }, - "port": { - Type: resource_compute_health_check_schema.TypeInt, - Optional: true, - Description: `The TCP port number for the HTTPS health check request. -The default value is 443.`, - AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, - }, - "port_name": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `Port name as defined in InstanceGroup#NamedPort#name. 
If both port and -port_name are defined, port takes precedence.`, - AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, - }, - "port_specification": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_health_check_validation.StringInSlice([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}, false), - Description: `Specifies how port is selected for health checking, can be one of the -following values: - - * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. - - * 'USE_NAMED_PORT': The 'portName' is used for health checking. - - * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each - network endpoint is used for health checking. For other backends, the - port or named port specified in the Backend Service is used for health - checking. - -If not specified, HTTPS health check follows behavior specified in 'port' and -'portName' fields. Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"]`, - AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, - }, - "proxy_header": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_health_check_validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), - Description: `Specifies the type of proxy header to append before sending data to the -backend. 
Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, - Default: "NONE", - AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, - }, - "request_path": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `The request path of the HTTPS health check request. -The default value is /.`, - Default: "/", - AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, - }, - "response": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `The bytes to match against the beginning of the response data. If left empty -(the default value), any response will indicate health. 
The response data -can only be ASCII.`, - AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, - }, - }, - }, - ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check", "grpc_health_check"}, - }, - "log_config": { - Type: resource_compute_health_check_schema.TypeList, - Computed: true, - Optional: true, - Description: `Configure logging on this health check.`, - MaxItems: 1, - Elem: &resource_compute_health_check_schema.Resource{ - Schema: map[string]*resource_compute_health_check_schema.Schema{ - "enable": { - Type: resource_compute_health_check_schema.TypeBool, - Optional: true, - Description: `Indicates whether or not to export logs. This is false by default, -which means no health check logging will be done.`, - Default: false, - }, - }, - }, - }, - "ssl_health_check": { - Type: resource_compute_health_check_schema.TypeList, - Optional: true, - DiffSuppressFunc: portDiffSuppress, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_compute_health_check_schema.Resource{ - Schema: map[string]*resource_compute_health_check_schema.Schema{ - "port": { - Type: resource_compute_health_check_schema.TypeInt, - Optional: true, - Description: `The TCP port number for the SSL health check request. -The default value is 443.`, - AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, - }, - "port_name": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `Port name as defined in InstanceGroup#NamedPort#name. 
If both port and -port_name are defined, port takes precedence.`, - AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, - }, - "port_specification": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_health_check_validation.StringInSlice([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}, false), - Description: `Specifies how port is selected for health checking, can be one of the -following values: - - * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. - - * 'USE_NAMED_PORT': The 'portName' is used for health checking. - - * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each - network endpoint is used for health checking. For other backends, the - port or named port specified in the Backend Service is used for health - checking. - -If not specified, SSL health check follows behavior specified in 'port' and -'portName' fields. Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"]`, - AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, - }, - "proxy_header": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_health_check_validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), - Description: `Specifies the type of proxy header to append before sending data to the -backend. 
Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, - Default: "NONE", - AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, - }, - "request": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `The application data to send once the SSL connection has been -established (default value is empty). If both request and response are -empty, the connection establishment alone will indicate health. The request -data can only be ASCII.`, - AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, - }, - "response": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `The bytes to match against the beginning of the response data. If left empty -(the default value), any response will indicate health. 
The response data -can only be ASCII.`, - AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, - }, - }, - }, - ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check", "grpc_health_check"}, - }, - "tcp_health_check": { - Type: resource_compute_health_check_schema.TypeList, - Optional: true, - DiffSuppressFunc: portDiffSuppress, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_compute_health_check_schema.Resource{ - Schema: map[string]*resource_compute_health_check_schema.Schema{ - "port": { - Type: resource_compute_health_check_schema.TypeInt, - Optional: true, - Description: `The TCP port number for the TCP health check request. -The default value is 443.`, - AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, - }, - "port_name": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `Port name as defined in InstanceGroup#NamedPort#name. 
If both port and -port_name are defined, port takes precedence.`, - AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, - }, - "port_specification": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_health_check_validation.StringInSlice([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}, false), - Description: `Specifies how port is selected for health checking, can be one of the -following values: - - * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. - - * 'USE_NAMED_PORT': The 'portName' is used for health checking. - - * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each - network endpoint is used for health checking. For other backends, the - port or named port specified in the Backend Service is used for health - checking. - -If not specified, TCP health check follows behavior specified in 'port' and -'portName' fields. Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"]`, - AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, - }, - "proxy_header": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_health_check_validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), - Description: `Specifies the type of proxy header to append before sending data to the -backend. 
Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, - Default: "NONE", - AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, - }, - "request": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `The application data to send once the TCP connection has been -established (default value is empty). If both request and response are -empty, the connection establishment alone will indicate health. The request -data can only be ASCII.`, - AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, - }, - "response": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Description: `The bytes to match against the beginning of the response data. If left empty -(the default value), any response will indicate health. The response data -can only be ASCII.`, - AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, - }, - }, - }, - ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check", "grpc_health_check"}, - }, - "timeout_sec": { - Type: resource_compute_health_check_schema.TypeInt, - Optional: true, - Description: `How long (in seconds) to wait before claiming failure. -The default value is 5 seconds. 
It is invalid for timeoutSec to have -greater value than checkIntervalSec.`, - Default: 5, - }, - "unhealthy_threshold": { - Type: resource_compute_health_check_schema.TypeInt, - Optional: true, - Description: `A so-far healthy instance will be marked unhealthy after this many -consecutive failures. The default value is 2.`, - Default: 2, - }, - "creation_timestamp": { - Type: resource_compute_health_check_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "type": { - Type: resource_compute_health_check_schema.TypeString, - Computed: true, - Description: `The type of the health check. One of HTTP, HTTPS, TCP, or SSL.`, - }, - "project": { - Type: resource_compute_health_check_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_health_check_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeHealthCheckCreate(d *resource_compute_health_check_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - checkIntervalSecProp, err := expandComputeHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("check_interval_sec"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(checkIntervalSecProp)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, checkIntervalSecProp)) { - obj["checkIntervalSec"] = checkIntervalSecProp - } - descriptionProp, err := expandComputeHealthCheckDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); ok || !resource_compute_health_check_reflect.DeepEqual(v, descriptionProp) { - obj["description"] = descriptionProp - } - healthyThresholdProp, err := 
expandComputeHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("healthy_threshold"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(healthyThresholdProp)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, healthyThresholdProp)) { - obj["healthyThreshold"] = healthyThresholdProp - } - nameProp, err := expandComputeHealthCheckName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(nameProp)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - timeoutSecProp, err := expandComputeHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(timeoutSecProp)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, timeoutSecProp)) { - obj["timeoutSec"] = timeoutSecProp - } - unhealthyThresholdProp, err := expandComputeHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("unhealthy_threshold"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(unhealthyThresholdProp)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, unhealthyThresholdProp)) { - obj["unhealthyThreshold"] = unhealthyThresholdProp - } - httpHealthCheckProp, err := expandComputeHealthCheckHttpHealthCheck(d.Get("http_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("http_health_check"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(httpHealthCheckProp)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, httpHealthCheckProp)) { - obj["httpHealthCheck"] = httpHealthCheckProp - } - httpsHealthCheckProp, err := 
expandComputeHealthCheckHttpsHealthCheck(d.Get("https_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("https_health_check"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(httpsHealthCheckProp)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, httpsHealthCheckProp)) { - obj["httpsHealthCheck"] = httpsHealthCheckProp - } - tcpHealthCheckProp, err := expandComputeHealthCheckTcpHealthCheck(d.Get("tcp_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tcp_health_check"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(tcpHealthCheckProp)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, tcpHealthCheckProp)) { - obj["tcpHealthCheck"] = tcpHealthCheckProp - } - sslHealthCheckProp, err := expandComputeHealthCheckSslHealthCheck(d.Get("ssl_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_health_check"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(sslHealthCheckProp)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, sslHealthCheckProp)) { - obj["sslHealthCheck"] = sslHealthCheckProp - } - http2HealthCheckProp, err := expandComputeHealthCheckHttp2HealthCheck(d.Get("http2_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("http2_health_check"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(http2HealthCheckProp)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, http2HealthCheckProp)) { - obj["http2HealthCheck"] = http2HealthCheckProp - } - grpcHealthCheckProp, err := expandComputeHealthCheckGrpcHealthCheck(d.Get("grpc_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("grpc_health_check"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(grpcHealthCheckProp)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, grpcHealthCheckProp)) { - 
obj["grpcHealthCheck"] = grpcHealthCheckProp - } - logConfigProp, err := expandComputeHealthCheckLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(logConfigProp)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, logConfigProp)) { - obj["logConfig"] = logConfigProp - } - - obj, err = resourceComputeHealthCheckEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/healthChecks") - if err != nil { - return err - } - - resource_compute_health_check_log.Printf("[DEBUG] Creating new HealthCheck: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_health_check_fmt.Errorf("Error fetching project for HealthCheck: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_health_check_schema.TimeoutCreate)) - if err != nil { - return resource_compute_health_check_fmt.Errorf("Error creating HealthCheck: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/healthChecks/{{name}}") - if err != nil { - return resource_compute_health_check_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating HealthCheck", userAgent, - d.Timeout(resource_compute_health_check_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_health_check_fmt.Errorf("Error waiting to create HealthCheck: %s", err) - } - - resource_compute_health_check_log.Printf("[DEBUG] Finished creating HealthCheck %q: %#v", d.Id(), res) - - return resourceComputeHealthCheckRead(d, meta) -} - -func 
resourceComputeHealthCheckRead(d *resource_compute_health_check_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/healthChecks/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_health_check_fmt.Errorf("Error fetching project for HealthCheck: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_health_check_fmt.Sprintf("ComputeHealthCheck %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - - if err := d.Set("check_interval_sec", flattenComputeHealthCheckCheckIntervalSec(res["checkIntervalSec"], d, config)); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeHealthCheckCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - if err := d.Set("description", flattenComputeHealthCheckDescription(res["description"], d, config)); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - if err := d.Set("healthy_threshold", flattenComputeHealthCheckHealthyThreshold(res["healthyThreshold"], d, config)); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - if err := d.Set("name", 
flattenComputeHealthCheckName(res["name"], d, config)); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - if err := d.Set("timeout_sec", flattenComputeHealthCheckTimeoutSec(res["timeoutSec"], d, config)); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - if err := d.Set("unhealthy_threshold", flattenComputeHealthCheckUnhealthyThreshold(res["unhealthyThreshold"], d, config)); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - if err := d.Set("type", flattenComputeHealthCheckType(res["type"], d, config)); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - if err := d.Set("http_health_check", flattenComputeHealthCheckHttpHealthCheck(res["httpHealthCheck"], d, config)); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - if err := d.Set("https_health_check", flattenComputeHealthCheckHttpsHealthCheck(res["httpsHealthCheck"], d, config)); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - if err := d.Set("tcp_health_check", flattenComputeHealthCheckTcpHealthCheck(res["tcpHealthCheck"], d, config)); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - if err := d.Set("ssl_health_check", flattenComputeHealthCheckSslHealthCheck(res["sslHealthCheck"], d, config)); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - if err := d.Set("http2_health_check", flattenComputeHealthCheckHttp2HealthCheck(res["http2HealthCheck"], d, config)); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - if err := d.Set("grpc_health_check", flattenComputeHealthCheckGrpcHealthCheck(res["grpcHealthCheck"], 
d, config)); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - if err := d.Set("log_config", flattenComputeHealthCheckLogConfig(res["logConfig"], d, config)); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_health_check_fmt.Errorf("Error reading HealthCheck: %s", err) - } - - return nil -} - -func resourceComputeHealthCheckUpdate(d *resource_compute_health_check_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_health_check_fmt.Errorf("Error fetching project for HealthCheck: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - checkIntervalSecProp, err := expandComputeHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("check_interval_sec"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, checkIntervalSecProp)) { - obj["checkIntervalSec"] = checkIntervalSecProp - } - descriptionProp, err := expandComputeHealthCheckDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); ok || !resource_compute_health_check_reflect.DeepEqual(v, descriptionProp) { - obj["description"] = descriptionProp - } - healthyThresholdProp, err := expandComputeHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("healthy_threshold"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(v)) && (ok 
|| !resource_compute_health_check_reflect.DeepEqual(v, healthyThresholdProp)) { - obj["healthyThreshold"] = healthyThresholdProp - } - nameProp, err := expandComputeHealthCheckName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - timeoutSecProp, err := expandComputeHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, timeoutSecProp)) { - obj["timeoutSec"] = timeoutSecProp - } - unhealthyThresholdProp, err := expandComputeHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("unhealthy_threshold"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, unhealthyThresholdProp)) { - obj["unhealthyThreshold"] = unhealthyThresholdProp - } - httpHealthCheckProp, err := expandComputeHealthCheckHttpHealthCheck(d.Get("http_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("http_health_check"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, httpHealthCheckProp)) { - obj["httpHealthCheck"] = httpHealthCheckProp - } - httpsHealthCheckProp, err := expandComputeHealthCheckHttpsHealthCheck(d.Get("https_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("https_health_check"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, httpsHealthCheckProp)) { - 
obj["httpsHealthCheck"] = httpsHealthCheckProp - } - tcpHealthCheckProp, err := expandComputeHealthCheckTcpHealthCheck(d.Get("tcp_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tcp_health_check"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, tcpHealthCheckProp)) { - obj["tcpHealthCheck"] = tcpHealthCheckProp - } - sslHealthCheckProp, err := expandComputeHealthCheckSslHealthCheck(d.Get("ssl_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_health_check"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, sslHealthCheckProp)) { - obj["sslHealthCheck"] = sslHealthCheckProp - } - http2HealthCheckProp, err := expandComputeHealthCheckHttp2HealthCheck(d.Get("http2_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("http2_health_check"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, http2HealthCheckProp)) { - obj["http2HealthCheck"] = http2HealthCheckProp - } - grpcHealthCheckProp, err := expandComputeHealthCheckGrpcHealthCheck(d.Get("grpc_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("grpc_health_check"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, grpcHealthCheckProp)) { - obj["grpcHealthCheck"] = grpcHealthCheckProp - } - logConfigProp, err := expandComputeHealthCheckLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(resource_compute_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_health_check_reflect.DeepEqual(v, logConfigProp)) { - obj["logConfig"] = logConfigProp - } - - obj, 
err = resourceComputeHealthCheckEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/healthChecks/{{name}}") - if err != nil { - return err - } - - resource_compute_health_check_log.Printf("[DEBUG] Updating HealthCheck %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_compute_health_check_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_health_check_fmt.Errorf("Error updating HealthCheck %q: %s", d.Id(), err) - } else { - resource_compute_health_check_log.Printf("[DEBUG] Finished updating HealthCheck %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating HealthCheck", userAgent, - d.Timeout(resource_compute_health_check_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeHealthCheckRead(d, meta) -} - -func resourceComputeHealthCheckDelete(d *resource_compute_health_check_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_health_check_fmt.Errorf("Error fetching project for HealthCheck: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/healthChecks/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_health_check_log.Printf("[DEBUG] Deleting HealthCheck %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, 
d.Timeout(resource_compute_health_check_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "HealthCheck") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting HealthCheck", userAgent, - d.Timeout(resource_compute_health_check_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_health_check_log.Printf("[DEBUG] Finished deleting HealthCheck %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeHealthCheckImport(d *resource_compute_health_check_schema.ResourceData, meta interface{}) ([]*resource_compute_health_check_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/healthChecks/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/healthChecks/{{name}}") - if err != nil { - return nil, resource_compute_health_check_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_health_check_schema.ResourceData{d}, nil -} - -func flattenComputeHealthCheckCheckIntervalSec(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHealthCheckCreationTimestamp(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckDescription(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHealthyThreshold(v interface{}, d *resource_compute_health_check_schema.ResourceData, config 
*Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHealthCheckName(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckTimeoutSec(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHealthCheckUnhealthyThreshold(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHealthCheckType(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttpHealthCheck(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host"] = - flattenComputeHealthCheckHttpHealthCheckHost(original["host"], d, config) - transformed["request_path"] = - flattenComputeHealthCheckHttpHealthCheckRequestPath(original["requestPath"], d, config) - transformed["response"] = - 
flattenComputeHealthCheckHttpHealthCheckResponse(original["response"], d, config) - transformed["port"] = - flattenComputeHealthCheckHttpHealthCheckPort(original["port"], d, config) - transformed["port_name"] = - flattenComputeHealthCheckHttpHealthCheckPortName(original["portName"], d, config) - transformed["proxy_header"] = - flattenComputeHealthCheckHttpHealthCheckProxyHeader(original["proxyHeader"], d, config) - transformed["port_specification"] = - flattenComputeHealthCheckHttpHealthCheckPortSpecification(original["portSpecification"], d, config) - return []interface{}{transformed} -} - -func flattenComputeHealthCheckHttpHealthCheckHost(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttpHealthCheckRequestPath(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttpHealthCheckResponse(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttpHealthCheckPort(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHealthCheckHttpHealthCheckPortName(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttpHealthCheckProxyHeader(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttpHealthCheckPortSpecification(v interface{}, d *resource_compute_health_check_schema.ResourceData, 
config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttpsHealthCheck(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host"] = - flattenComputeHealthCheckHttpsHealthCheckHost(original["host"], d, config) - transformed["request_path"] = - flattenComputeHealthCheckHttpsHealthCheckRequestPath(original["requestPath"], d, config) - transformed["response"] = - flattenComputeHealthCheckHttpsHealthCheckResponse(original["response"], d, config) - transformed["port"] = - flattenComputeHealthCheckHttpsHealthCheckPort(original["port"], d, config) - transformed["port_name"] = - flattenComputeHealthCheckHttpsHealthCheckPortName(original["portName"], d, config) - transformed["proxy_header"] = - flattenComputeHealthCheckHttpsHealthCheckProxyHeader(original["proxyHeader"], d, config) - transformed["port_specification"] = - flattenComputeHealthCheckHttpsHealthCheckPortSpecification(original["portSpecification"], d, config) - return []interface{}{transformed} -} - -func flattenComputeHealthCheckHttpsHealthCheckHost(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttpsHealthCheckRequestPath(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttpsHealthCheckResponse(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttpsHealthCheckPort(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_health_check_strconv.ParseInt(strVal, 10, 64); 
err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHealthCheckHttpsHealthCheckPortName(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttpsHealthCheckProxyHeader(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttpsHealthCheckPortSpecification(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckTcpHealthCheck(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["request"] = - flattenComputeHealthCheckTcpHealthCheckRequest(original["request"], d, config) - transformed["response"] = - flattenComputeHealthCheckTcpHealthCheckResponse(original["response"], d, config) - transformed["port"] = - flattenComputeHealthCheckTcpHealthCheckPort(original["port"], d, config) - transformed["port_name"] = - flattenComputeHealthCheckTcpHealthCheckPortName(original["portName"], d, config) - transformed["proxy_header"] = - flattenComputeHealthCheckTcpHealthCheckProxyHeader(original["proxyHeader"], d, config) - transformed["port_specification"] = - flattenComputeHealthCheckTcpHealthCheckPortSpecification(original["portSpecification"], d, config) - return []interface{}{transformed} -} - -func flattenComputeHealthCheckTcpHealthCheckRequest(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckTcpHealthCheckResponse(v interface{}, d *resource_compute_health_check_schema.ResourceData, 
config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckTcpHealthCheckPort(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHealthCheckTcpHealthCheckPortName(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckTcpHealthCheckProxyHeader(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckTcpHealthCheckPortSpecification(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckSslHealthCheck(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["request"] = - flattenComputeHealthCheckSslHealthCheckRequest(original["request"], d, config) - transformed["response"] = - flattenComputeHealthCheckSslHealthCheckResponse(original["response"], d, config) - transformed["port"] = - flattenComputeHealthCheckSslHealthCheckPort(original["port"], d, config) - transformed["port_name"] = - flattenComputeHealthCheckSslHealthCheckPortName(original["portName"], d, config) - transformed["proxy_header"] = - flattenComputeHealthCheckSslHealthCheckProxyHeader(original["proxyHeader"], d, config) - transformed["port_specification"] = - flattenComputeHealthCheckSslHealthCheckPortSpecification(original["portSpecification"], d, config) - return 
[]interface{}{transformed} -} - -func flattenComputeHealthCheckSslHealthCheckRequest(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckSslHealthCheckResponse(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckSslHealthCheckPort(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHealthCheckSslHealthCheckPortName(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckSslHealthCheckProxyHeader(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckSslHealthCheckPortSpecification(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttp2HealthCheck(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host"] = - flattenComputeHealthCheckHttp2HealthCheckHost(original["host"], d, config) - transformed["request_path"] = - flattenComputeHealthCheckHttp2HealthCheckRequestPath(original["requestPath"], d, config) - transformed["response"] = - flattenComputeHealthCheckHttp2HealthCheckResponse(original["response"], d, config) - transformed["port"] = - 
flattenComputeHealthCheckHttp2HealthCheckPort(original["port"], d, config) - transformed["port_name"] = - flattenComputeHealthCheckHttp2HealthCheckPortName(original["portName"], d, config) - transformed["proxy_header"] = - flattenComputeHealthCheckHttp2HealthCheckProxyHeader(original["proxyHeader"], d, config) - transformed["port_specification"] = - flattenComputeHealthCheckHttp2HealthCheckPortSpecification(original["portSpecification"], d, config) - return []interface{}{transformed} -} - -func flattenComputeHealthCheckHttp2HealthCheckHost(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttp2HealthCheckRequestPath(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttp2HealthCheckResponse(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttp2HealthCheckPort(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHealthCheckHttp2HealthCheckPortName(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttp2HealthCheckProxyHeader(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckHttp2HealthCheckPortSpecification(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckGrpcHealthCheck(v 
interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["port"] = - flattenComputeHealthCheckGrpcHealthCheckPort(original["port"], d, config) - transformed["port_name"] = - flattenComputeHealthCheckGrpcHealthCheckPortName(original["portName"], d, config) - transformed["port_specification"] = - flattenComputeHealthCheckGrpcHealthCheckPortSpecification(original["portSpecification"], d, config) - transformed["grpc_service_name"] = - flattenComputeHealthCheckGrpcHealthCheckGrpcServiceName(original["grpcServiceName"], d, config) - return []interface{}{transformed} -} - -func flattenComputeHealthCheckGrpcHealthCheckPort(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHealthCheckGrpcHealthCheckPortName(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckGrpcHealthCheckPortSpecification(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckGrpcHealthCheckGrpcServiceName(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHealthCheckLogConfig(v interface{}, d *resource_compute_health_check_schema.ResourceData, config *Config) interface{} { - transformed := make(map[string]interface{}) - if v == nil { - - transformed["enable"] = false - return 
[]interface{}{transformed} - } - - original := v.(map[string]interface{}) - transformed["enable"] = original["enable"] - return []interface{}{transformed} -} - -func expandComputeHealthCheckCheckIntervalSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHealthyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckUnhealthyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttpHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHost, err := expandComputeHealthCheckHttpHealthCheckHost(original["host"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { - transformed["host"] = transformedHost - } - - transformedRequestPath, err := expandComputeHealthCheckHttpHealthCheckRequestPath(original["request_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedRequestPath); val.IsValid() && !isEmptyValue(val) { - transformed["requestPath"] = transformedRequestPath - } - - 
transformedResponse, err := expandComputeHealthCheckHttpHealthCheckResponse(original["response"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { - transformed["response"] = transformedResponse - } - - transformedPort, err := expandComputeHealthCheckHttpHealthCheckPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedPortName, err := expandComputeHealthCheckHttpHealthCheckPortName(original["port_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { - transformed["portName"] = transformedPortName - } - - transformedProxyHeader, err := expandComputeHealthCheckHttpHealthCheckProxyHeader(original["proxy_header"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { - transformed["proxyHeader"] = transformedProxyHeader - } - - transformedPortSpecification, err := expandComputeHealthCheckHttpHealthCheckPortSpecification(original["port_specification"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { - transformed["portSpecification"] = transformedPortSpecification - } - - return transformed, nil -} - -func expandComputeHealthCheckHttpHealthCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttpHealthCheckRequestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
return v, nil -} - -func expandComputeHealthCheckHttpHealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttpHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttpHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttpHealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttpHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttpsHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHost, err := expandComputeHealthCheckHttpsHealthCheckHost(original["host"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { - transformed["host"] = transformedHost - } - - transformedRequestPath, err := expandComputeHealthCheckHttpsHealthCheckRequestPath(original["request_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedRequestPath); val.IsValid() && !isEmptyValue(val) { - transformed["requestPath"] = transformedRequestPath - } - - transformedResponse, err := expandComputeHealthCheckHttpsHealthCheckResponse(original["response"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedResponse); 
val.IsValid() && !isEmptyValue(val) { - transformed["response"] = transformedResponse - } - - transformedPort, err := expandComputeHealthCheckHttpsHealthCheckPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedPortName, err := expandComputeHealthCheckHttpsHealthCheckPortName(original["port_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { - transformed["portName"] = transformedPortName - } - - transformedProxyHeader, err := expandComputeHealthCheckHttpsHealthCheckProxyHeader(original["proxy_header"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { - transformed["proxyHeader"] = transformedProxyHeader - } - - transformedPortSpecification, err := expandComputeHealthCheckHttpsHealthCheckPortSpecification(original["port_specification"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { - transformed["portSpecification"] = transformedPortSpecification - } - - return transformed, nil -} - -func expandComputeHealthCheckHttpsHealthCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttpsHealthCheckRequestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttpsHealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttpsHealthCheckPort(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttpsHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttpsHealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttpsHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckTcpHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequest, err := expandComputeHealthCheckTcpHealthCheckRequest(original["request"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedRequest); val.IsValid() && !isEmptyValue(val) { - transformed["request"] = transformedRequest - } - - transformedResponse, err := expandComputeHealthCheckTcpHealthCheckResponse(original["response"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { - transformed["response"] = transformedResponse - } - - transformedPort, err := expandComputeHealthCheckTcpHealthCheckPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedPortName, err := expandComputeHealthCheckTcpHealthCheckPortName(original["port_name"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_health_check_reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { - transformed["portName"] = transformedPortName - } - - transformedProxyHeader, err := expandComputeHealthCheckTcpHealthCheckProxyHeader(original["proxy_header"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { - transformed["proxyHeader"] = transformedProxyHeader - } - - transformedPortSpecification, err := expandComputeHealthCheckTcpHealthCheckPortSpecification(original["port_specification"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { - transformed["portSpecification"] = transformedPortSpecification - } - - return transformed, nil -} - -func expandComputeHealthCheckTcpHealthCheckRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckTcpHealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckTcpHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckTcpHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckTcpHealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckTcpHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckSslHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if 
len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequest, err := expandComputeHealthCheckSslHealthCheckRequest(original["request"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedRequest); val.IsValid() && !isEmptyValue(val) { - transformed["request"] = transformedRequest - } - - transformedResponse, err := expandComputeHealthCheckSslHealthCheckResponse(original["response"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { - transformed["response"] = transformedResponse - } - - transformedPort, err := expandComputeHealthCheckSslHealthCheckPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedPortName, err := expandComputeHealthCheckSslHealthCheckPortName(original["port_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { - transformed["portName"] = transformedPortName - } - - transformedProxyHeader, err := expandComputeHealthCheckSslHealthCheckProxyHeader(original["proxy_header"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { - transformed["proxyHeader"] = transformedProxyHeader - } - - transformedPortSpecification, err := expandComputeHealthCheckSslHealthCheckPortSpecification(original["port_specification"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_health_check_reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { - transformed["portSpecification"] = transformedPortSpecification - } - - return transformed, nil -} - -func expandComputeHealthCheckSslHealthCheckRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckSslHealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckSslHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckSslHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckSslHealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckSslHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttp2HealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHost, err := expandComputeHealthCheckHttp2HealthCheckHost(original["host"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { - transformed["host"] = transformedHost - } - - transformedRequestPath, err := expandComputeHealthCheckHttp2HealthCheckRequestPath(original["request_path"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_health_check_reflect.ValueOf(transformedRequestPath); val.IsValid() && !isEmptyValue(val) { - transformed["requestPath"] = transformedRequestPath - } - - transformedResponse, err := expandComputeHealthCheckHttp2HealthCheckResponse(original["response"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { - transformed["response"] = transformedResponse - } - - transformedPort, err := expandComputeHealthCheckHttp2HealthCheckPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedPortName, err := expandComputeHealthCheckHttp2HealthCheckPortName(original["port_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { - transformed["portName"] = transformedPortName - } - - transformedProxyHeader, err := expandComputeHealthCheckHttp2HealthCheckProxyHeader(original["proxy_header"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { - transformed["proxyHeader"] = transformedProxyHeader - } - - transformedPortSpecification, err := expandComputeHealthCheckHttp2HealthCheckPortSpecification(original["port_specification"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { - transformed["portSpecification"] = transformedPortSpecification - } - - return transformed, nil -} - -func expandComputeHealthCheckHttp2HealthCheckHost(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttp2HealthCheckRequestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttp2HealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttp2HealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttp2HealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttp2HealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckHttp2HealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckGrpcHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPort, err := expandComputeHealthCheckGrpcHealthCheckPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedPortName, err := expandComputeHealthCheckGrpcHealthCheckPortName(original["port_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { - transformed["portName"] = transformedPortName - } - - transformedPortSpecification, err := 
expandComputeHealthCheckGrpcHealthCheckPortSpecification(original["port_specification"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { - transformed["portSpecification"] = transformedPortSpecification - } - - transformedGrpcServiceName, err := expandComputeHealthCheckGrpcHealthCheckGrpcServiceName(original["grpc_service_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedGrpcServiceName); val.IsValid() && !isEmptyValue(val) { - transformed["grpcServiceName"] = transformedGrpcServiceName - } - - return transformed, nil -} - -func expandComputeHealthCheckGrpcHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckGrpcHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckGrpcHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckGrpcHealthCheckGrpcServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHealthCheckLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnable, err := expandComputeHealthCheckLogConfigEnable(original["enable"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_health_check_reflect.ValueOf(transformedEnable); val.IsValid() && !isEmptyValue(val) { - transformed["enable"] = transformedEnable - } - - return 
transformed, nil -} - -func expandComputeHealthCheckLogConfigEnable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeHealthCheckEncoder(d *resource_compute_health_check_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - if _, ok := d.GetOk("http_health_check"); ok { - hc := d.Get("http_health_check").([]interface{})[0] - ps := hc.(map[string]interface{})["port_specification"] - pn := hc.(map[string]interface{})["port_name"] - - if ps == "USE_FIXED_PORT" || (ps == "" && pn == "") { - m := obj["httpHealthCheck"].(map[string]interface{}) - if m["port"] == nil { - m["port"] = 80 - } - } - obj["type"] = "HTTP" - return obj, nil - } - if _, ok := d.GetOk("https_health_check"); ok { - hc := d.Get("https_health_check").([]interface{})[0] - ps := hc.(map[string]interface{})["port_specification"] - pn := hc.(map[string]interface{})["port_name"] - - if ps == "USE_FIXED_PORT" || (ps == "" && pn == "") { - m := obj["httpsHealthCheck"].(map[string]interface{}) - if m["port"] == nil { - m["port"] = 443 - } - } - obj["type"] = "HTTPS" - return obj, nil - } - if _, ok := d.GetOk("http2_health_check"); ok { - hc := d.Get("http2_health_check").([]interface{})[0] - ps := hc.(map[string]interface{})["port_specification"] - pn := hc.(map[string]interface{})["port_name"] - - if ps == "USE_FIXED_PORT" || (ps == "" && pn == "") { - m := obj["http2HealthCheck"].(map[string]interface{}) - if m["port"] == nil { - m["port"] = 443 - } - } - obj["type"] = "HTTP2" - return obj, nil - } - if _, ok := d.GetOk("tcp_health_check"); ok { - hc := d.Get("tcp_health_check").([]interface{})[0] - ps := hc.(map[string]interface{})["port_specification"] - pn := hc.(map[string]interface{})["port_name"] - - if ps == "USE_FIXED_PORT" || (ps == "" && pn == "") { - m := obj["tcpHealthCheck"].(map[string]interface{}) - if m["port"] == nil { - m["port"] = 80 - } - } - obj["type"] = "TCP" - 
return obj, nil - } - if _, ok := d.GetOk("ssl_health_check"); ok { - hc := d.Get("ssl_health_check").([]interface{})[0] - ps := hc.(map[string]interface{})["port_specification"] - pn := hc.(map[string]interface{})["port_name"] - - if ps == "USE_FIXED_PORT" || (ps == "" && pn == "") { - m := obj["sslHealthCheck"].(map[string]interface{}) - if m["port"] == nil { - m["port"] = 443 - } - } - obj["type"] = "SSL" - return obj, nil - } - - if _, ok := d.GetOk("grpc_health_check"); ok { - hc := d.Get("grpc_health_check").([]interface{})[0] - ps := hc.(map[string]interface{})["port_specification"] - pn := hc.(map[string]interface{})["port_name"] - - if ps == "USE_FIXED_PORT" || (ps == "" && pn == "") { - m := obj["grpcHealthCheck"].(map[string]interface{}) - if m["port"] == nil { - return nil, resource_compute_health_check_fmt.Errorf("error in HealthCheck %s: `port` must be set for GRPC health checks`.", d.Get("name").(string)) - } - } - obj["type"] = "GRPC" - return obj, nil - } - - return nil, resource_compute_health_check_fmt.Errorf("error in HealthCheck %s: No health check block specified.", d.Get("name").(string)) -} - -func resourceComputeHttpHealthCheck() *resource_compute_http_health_check_schema.Resource { - return &resource_compute_http_health_check_schema.Resource{ - Create: resourceComputeHttpHealthCheckCreate, - Read: resourceComputeHttpHealthCheckRead, - Update: resourceComputeHttpHealthCheckUpdate, - Delete: resourceComputeHttpHealthCheckDelete, - - Importer: &resource_compute_http_health_check_schema.ResourceImporter{ - State: resourceComputeHttpHealthCheckImport, - }, - - Timeouts: &resource_compute_http_health_check_schema.ResourceTimeout{ - Create: resource_compute_http_health_check_schema.DefaultTimeout(4 * resource_compute_http_health_check_time.Minute), - Update: resource_compute_http_health_check_schema.DefaultTimeout(4 * resource_compute_http_health_check_time.Minute), - Delete: resource_compute_http_health_check_schema.DefaultTimeout(4 * 
resource_compute_http_health_check_time.Minute), - }, - - Schema: map[string]*resource_compute_http_health_check_schema.Schema{ - "name": { - Type: resource_compute_http_health_check_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the -last character, which cannot be a dash.`, - }, - "check_interval_sec": { - Type: resource_compute_http_health_check_schema.TypeInt, - Optional: true, - Description: `How often (in seconds) to send a health check. The default value is 5 -seconds.`, - Default: 5, - }, - "description": { - Type: resource_compute_http_health_check_schema.TypeString, - Optional: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "healthy_threshold": { - Type: resource_compute_http_health_check_schema.TypeInt, - Optional: true, - Description: `A so-far unhealthy instance will be marked healthy after this many -consecutive successes. The default value is 2.`, - Default: 2, - }, - "host": { - Type: resource_compute_http_health_check_schema.TypeString, - Optional: true, - Description: `The value of the host header in the HTTP health check request. If -left empty (default value), the public IP on behalf of which this -health check is performed will be used.`, - }, - "port": { - Type: resource_compute_http_health_check_schema.TypeInt, - Optional: true, - Description: `The TCP port number for the HTTP health check request. 
-The default value is 80.`, - Default: 80, - }, - "request_path": { - Type: resource_compute_http_health_check_schema.TypeString, - Optional: true, - Description: `The request path of the HTTP health check request. -The default value is /.`, - Default: "/", - }, - "timeout_sec": { - Type: resource_compute_http_health_check_schema.TypeInt, - Optional: true, - Description: `How long (in seconds) to wait before claiming failure. -The default value is 5 seconds. It is invalid for timeoutSec to have -greater value than checkIntervalSec.`, - Default: 5, - }, - "unhealthy_threshold": { - Type: resource_compute_http_health_check_schema.TypeInt, - Optional: true, - Description: `A so-far healthy instance will be marked unhealthy after this many -consecutive failures. The default value is 2.`, - Default: 2, - }, - "creation_timestamp": { - Type: resource_compute_http_health_check_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: resource_compute_http_health_check_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_http_health_check_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeHttpHealthCheckCreate(d *resource_compute_http_health_check_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - checkIntervalSecProp, err := expandComputeHttpHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("check_interval_sec"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(checkIntervalSecProp)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, checkIntervalSecProp)) { - obj["checkIntervalSec"] = checkIntervalSecProp - } - 
descriptionProp, err := expandComputeHttpHealthCheckDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - healthyThresholdProp, err := expandComputeHttpHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("healthy_threshold"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(healthyThresholdProp)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, healthyThresholdProp)) { - obj["healthyThreshold"] = healthyThresholdProp - } - hostProp, err := expandComputeHttpHealthCheckHost(d.Get("host"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("host"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(hostProp)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, hostProp)) { - obj["host"] = hostProp - } - nameProp, err := expandComputeHttpHealthCheckName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(nameProp)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - portProp, err := expandComputeHttpHealthCheckPort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(portProp)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - requestPathProp, err := expandComputeHttpHealthCheckRequestPath(d.Get("request_path"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("request_path"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(requestPathProp)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, requestPathProp)) { - obj["requestPath"] = requestPathProp - } - timeoutSecProp, err := expandComputeHttpHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(timeoutSecProp)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, timeoutSecProp)) { - obj["timeoutSec"] = timeoutSecProp - } - unhealthyThresholdProp, err := expandComputeHttpHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("unhealthy_threshold"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(unhealthyThresholdProp)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, unhealthyThresholdProp)) { - obj["unhealthyThreshold"] = unhealthyThresholdProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpHealthChecks") - if err != nil { - return err - } - - resource_compute_http_health_check_log.Printf("[DEBUG] Creating new HttpHealthCheck: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error fetching project for HttpHealthCheck: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_http_health_check_schema.TimeoutCreate)) - if err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error creating HttpHealthCheck: %s", err) - } - - id, err := replaceVars(d, config, 
"projects/{{project}}/global/httpHealthChecks/{{name}}") - if err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating HttpHealthCheck", userAgent, - d.Timeout(resource_compute_http_health_check_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_http_health_check_fmt.Errorf("Error waiting to create HttpHealthCheck: %s", err) - } - - resource_compute_http_health_check_log.Printf("[DEBUG] Finished creating HttpHealthCheck %q: %#v", d.Id(), res) - - return resourceComputeHttpHealthCheckRead(d, meta) -} - -func resourceComputeHttpHealthCheckRead(d *resource_compute_http_health_check_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpHealthChecks/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error fetching project for HttpHealthCheck: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_http_health_check_fmt.Sprintf("ComputeHttpHealthCheck %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - - if err := d.Set("check_interval_sec", flattenComputeHttpHealthCheckCheckIntervalSec(res["checkIntervalSec"], d, config)); err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error reading HttpHealthCheck: %s", 
err) - } - if err := d.Set("creation_timestamp", flattenComputeHttpHealthCheckCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("description", flattenComputeHttpHealthCheckDescription(res["description"], d, config)); err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("healthy_threshold", flattenComputeHttpHealthCheckHealthyThreshold(res["healthyThreshold"], d, config)); err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("host", flattenComputeHttpHealthCheckHost(res["host"], d, config)); err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("name", flattenComputeHttpHealthCheckName(res["name"], d, config)); err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("port", flattenComputeHttpHealthCheckPort(res["port"], d, config)); err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("request_path", flattenComputeHttpHealthCheckRequestPath(res["requestPath"], d, config)); err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("timeout_sec", flattenComputeHttpHealthCheckTimeoutSec(res["timeoutSec"], d, config)); err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err := d.Set("unhealthy_threshold", flattenComputeHttpHealthCheckUnhealthyThreshold(res["unhealthyThreshold"], d, config)); err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - if err 
:= d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error reading HttpHealthCheck: %s", err) - } - - return nil -} - -func resourceComputeHttpHealthCheckUpdate(d *resource_compute_http_health_check_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error fetching project for HttpHealthCheck: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - checkIntervalSecProp, err := expandComputeHttpHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("check_interval_sec"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, checkIntervalSecProp)) { - obj["checkIntervalSec"] = checkIntervalSecProp - } - descriptionProp, err := expandComputeHttpHealthCheckDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - healthyThresholdProp, err := expandComputeHttpHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("healthy_threshold"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, healthyThresholdProp)) { - obj["healthyThreshold"] = healthyThresholdProp - } - hostProp, err := 
expandComputeHttpHealthCheckHost(d.Get("host"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("host"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, hostProp)) { - obj["host"] = hostProp - } - nameProp, err := expandComputeHttpHealthCheckName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - portProp, err := expandComputeHttpHealthCheckPort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - requestPathProp, err := expandComputeHttpHealthCheckRequestPath(d.Get("request_path"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("request_path"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, requestPathProp)) { - obj["requestPath"] = requestPathProp - } - timeoutSecProp, err := expandComputeHttpHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, timeoutSecProp)) { - obj["timeoutSec"] = timeoutSecProp - } - unhealthyThresholdProp, err := expandComputeHttpHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("unhealthy_threshold"); 
!isEmptyValue(resource_compute_http_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_http_health_check_reflect.DeepEqual(v, unhealthyThresholdProp)) { - obj["unhealthyThreshold"] = unhealthyThresholdProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpHealthChecks/{{name}}") - if err != nil { - return err - } - - resource_compute_http_health_check_log.Printf("[DEBUG] Updating HttpHealthCheck %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_compute_http_health_check_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error updating HttpHealthCheck %q: %s", d.Id(), err) - } else { - resource_compute_http_health_check_log.Printf("[DEBUG] Finished updating HttpHealthCheck %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating HttpHealthCheck", userAgent, - d.Timeout(resource_compute_http_health_check_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeHttpHealthCheckRead(d, meta) -} - -func resourceComputeHttpHealthCheckDelete(d *resource_compute_http_health_check_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_http_health_check_fmt.Errorf("Error fetching project for HttpHealthCheck: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpHealthChecks/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_http_health_check_log.Printf("[DEBUG] Deleting HttpHealthCheck %q", 
d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_http_health_check_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "HttpHealthCheck") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting HttpHealthCheck", userAgent, - d.Timeout(resource_compute_http_health_check_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_http_health_check_log.Printf("[DEBUG] Finished deleting HttpHealthCheck %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeHttpHealthCheckImport(d *resource_compute_http_health_check_schema.ResourceData, meta interface{}) ([]*resource_compute_http_health_check_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/httpHealthChecks/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/httpHealthChecks/{{name}}") - if err != nil { - return nil, resource_compute_http_health_check_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_http_health_check_schema.ResourceData{d}, nil -} - -func flattenComputeHttpHealthCheckCheckIntervalSec(v interface{}, d *resource_compute_http_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_http_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHttpHealthCheckCreationTimestamp(v interface{}, d *resource_compute_http_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeHttpHealthCheckDescription(v interface{}, d *resource_compute_http_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpHealthCheckHealthyThreshold(v interface{}, d *resource_compute_http_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_http_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHttpHealthCheckHost(v interface{}, d *resource_compute_http_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpHealthCheckName(v interface{}, d *resource_compute_http_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpHealthCheckPort(v interface{}, d *resource_compute_http_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_http_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHttpHealthCheckRequestPath(v interface{}, d *resource_compute_http_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpHealthCheckTimeoutSec(v interface{}, d *resource_compute_http_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_http_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHttpHealthCheckUnhealthyThreshold(v interface{}, d 
*resource_compute_http_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_http_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func expandComputeHttpHealthCheckCheckIntervalSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpHealthCheckDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpHealthCheckHealthyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpHealthCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpHealthCheckName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpHealthCheckRequestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpHealthCheckTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpHealthCheckUnhealthyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeHttpsHealthCheck() *resource_compute_https_health_check_schema.Resource { - return &resource_compute_https_health_check_schema.Resource{ - Create: resourceComputeHttpsHealthCheckCreate, - Read: resourceComputeHttpsHealthCheckRead, - Update: resourceComputeHttpsHealthCheckUpdate, - Delete: 
resourceComputeHttpsHealthCheckDelete, - - Importer: &resource_compute_https_health_check_schema.ResourceImporter{ - State: resourceComputeHttpsHealthCheckImport, - }, - - Timeouts: &resource_compute_https_health_check_schema.ResourceTimeout{ - Create: resource_compute_https_health_check_schema.DefaultTimeout(4 * resource_compute_https_health_check_time.Minute), - Update: resource_compute_https_health_check_schema.DefaultTimeout(4 * resource_compute_https_health_check_time.Minute), - Delete: resource_compute_https_health_check_schema.DefaultTimeout(4 * resource_compute_https_health_check_time.Minute), - }, - - Schema: map[string]*resource_compute_https_health_check_schema.Schema{ - "name": { - Type: resource_compute_https_health_check_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the -last character, which cannot be a dash.`, - }, - "check_interval_sec": { - Type: resource_compute_https_health_check_schema.TypeInt, - Optional: true, - Description: `How often (in seconds) to send a health check. The default value is 5 -seconds.`, - Default: 5, - }, - "description": { - Type: resource_compute_https_health_check_schema.TypeString, - Optional: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "healthy_threshold": { - Type: resource_compute_https_health_check_schema.TypeInt, - Optional: true, - Description: `A so-far unhealthy instance will be marked healthy after this many -consecutive successes. 
The default value is 2.`, - Default: 2, - }, - "host": { - Type: resource_compute_https_health_check_schema.TypeString, - Optional: true, - Description: `The value of the host header in the HTTPS health check request. If -left empty (default value), the public IP on behalf of which this -health check is performed will be used.`, - }, - "port": { - Type: resource_compute_https_health_check_schema.TypeInt, - Optional: true, - Description: `The TCP port number for the HTTPS health check request. -The default value is 443.`, - Default: 443, - }, - "request_path": { - Type: resource_compute_https_health_check_schema.TypeString, - Optional: true, - Description: `The request path of the HTTPS health check request. -The default value is /.`, - Default: "/", - }, - "timeout_sec": { - Type: resource_compute_https_health_check_schema.TypeInt, - Optional: true, - Description: `How long (in seconds) to wait before claiming failure. -The default value is 5 seconds. It is invalid for timeoutSec to have -greater value than checkIntervalSec.`, - Default: 5, - }, - "unhealthy_threshold": { - Type: resource_compute_https_health_check_schema.TypeInt, - Optional: true, - Description: `A so-far healthy instance will be marked unhealthy after this many -consecutive failures. 
The default value is 2.`, - Default: 2, - }, - "creation_timestamp": { - Type: resource_compute_https_health_check_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: resource_compute_https_health_check_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_https_health_check_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeHttpsHealthCheckCreate(d *resource_compute_https_health_check_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - checkIntervalSecProp, err := expandComputeHttpsHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("check_interval_sec"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(checkIntervalSecProp)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, checkIntervalSecProp)) { - obj["checkIntervalSec"] = checkIntervalSecProp - } - descriptionProp, err := expandComputeHttpsHealthCheckDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - healthyThresholdProp, err := expandComputeHttpsHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("healthy_threshold"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(healthyThresholdProp)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, 
healthyThresholdProp)) { - obj["healthyThreshold"] = healthyThresholdProp - } - hostProp, err := expandComputeHttpsHealthCheckHost(d.Get("host"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("host"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(hostProp)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, hostProp)) { - obj["host"] = hostProp - } - nameProp, err := expandComputeHttpsHealthCheckName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(nameProp)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - portProp, err := expandComputeHttpsHealthCheckPort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(portProp)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - requestPathProp, err := expandComputeHttpsHealthCheckRequestPath(d.Get("request_path"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("request_path"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(requestPathProp)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, requestPathProp)) { - obj["requestPath"] = requestPathProp - } - timeoutSecProp, err := expandComputeHttpsHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(timeoutSecProp)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, timeoutSecProp)) { - obj["timeoutSec"] = timeoutSecProp - } - unhealthyThresholdProp, err := 
expandComputeHttpsHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("unhealthy_threshold"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(unhealthyThresholdProp)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, unhealthyThresholdProp)) { - obj["unhealthyThreshold"] = unhealthyThresholdProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpsHealthChecks") - if err != nil { - return err - } - - resource_compute_https_health_check_log.Printf("[DEBUG] Creating new HttpsHealthCheck: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error fetching project for HttpsHealthCheck: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_https_health_check_schema.TimeoutCreate)) - if err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error creating HttpsHealthCheck: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/httpsHealthChecks/{{name}}") - if err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating HttpsHealthCheck", userAgent, - d.Timeout(resource_compute_https_health_check_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_https_health_check_fmt.Errorf("Error waiting to create HttpsHealthCheck: %s", err) - } - - resource_compute_https_health_check_log.Printf("[DEBUG] Finished creating HttpsHealthCheck %q: %#v", d.Id(), res) - - return resourceComputeHttpsHealthCheckRead(d, meta) -} - -func 
resourceComputeHttpsHealthCheckRead(d *resource_compute_https_health_check_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpsHealthChecks/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error fetching project for HttpsHealthCheck: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_https_health_check_fmt.Sprintf("ComputeHttpsHealthCheck %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - - if err := d.Set("check_interval_sec", flattenComputeHttpsHealthCheckCheckIntervalSec(res["checkIntervalSec"], d, config)); err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeHttpsHealthCheckCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("description", flattenComputeHttpsHealthCheckDescription(res["description"], d, config)); err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("healthy_threshold", flattenComputeHttpsHealthCheckHealthyThreshold(res["healthyThreshold"], d, config)); err != nil { - return 
resource_compute_https_health_check_fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("host", flattenComputeHttpsHealthCheckHost(res["host"], d, config)); err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("name", flattenComputeHttpsHealthCheckName(res["name"], d, config)); err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("port", flattenComputeHttpsHealthCheckPort(res["port"], d, config)); err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("request_path", flattenComputeHttpsHealthCheckRequestPath(res["requestPath"], d, config)); err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("timeout_sec", flattenComputeHttpsHealthCheckTimeoutSec(res["timeoutSec"], d, config)); err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("unhealthy_threshold", flattenComputeHttpsHealthCheckUnhealthyThreshold(res["unhealthyThreshold"], d, config)); err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error reading HttpsHealthCheck: %s", err) - } - - return nil -} - -func resourceComputeHttpsHealthCheckUpdate(d *resource_compute_https_health_check_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return 
resource_compute_https_health_check_fmt.Errorf("Error fetching project for HttpsHealthCheck: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - checkIntervalSecProp, err := expandComputeHttpsHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("check_interval_sec"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, checkIntervalSecProp)) { - obj["checkIntervalSec"] = checkIntervalSecProp - } - descriptionProp, err := expandComputeHttpsHealthCheckDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - healthyThresholdProp, err := expandComputeHttpsHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("healthy_threshold"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, healthyThresholdProp)) { - obj["healthyThreshold"] = healthyThresholdProp - } - hostProp, err := expandComputeHttpsHealthCheckHost(d.Get("host"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("host"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, hostProp)) { - obj["host"] = hostProp - } - nameProp, err := expandComputeHttpsHealthCheckName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(v)) && (ok || 
!resource_compute_https_health_check_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - portProp, err := expandComputeHttpsHealthCheckPort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - requestPathProp, err := expandComputeHttpsHealthCheckRequestPath(d.Get("request_path"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("request_path"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, requestPathProp)) { - obj["requestPath"] = requestPathProp - } - timeoutSecProp, err := expandComputeHttpsHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, timeoutSecProp)) { - obj["timeoutSec"] = timeoutSecProp - } - unhealthyThresholdProp, err := expandComputeHttpsHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("unhealthy_threshold"); !isEmptyValue(resource_compute_https_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_https_health_check_reflect.DeepEqual(v, unhealthyThresholdProp)) { - obj["unhealthyThreshold"] = unhealthyThresholdProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpsHealthChecks/{{name}}") - if err != nil { - return err - } - - resource_compute_https_health_check_log.Printf("[DEBUG] Updating HttpsHealthCheck %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_compute_https_health_check_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error updating HttpsHealthCheck %q: %s", d.Id(), err) - } else { - resource_compute_https_health_check_log.Printf("[DEBUG] Finished updating HttpsHealthCheck %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating HttpsHealthCheck", userAgent, - d.Timeout(resource_compute_https_health_check_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeHttpsHealthCheckRead(d, meta) -} - -func resourceComputeHttpsHealthCheckDelete(d *resource_compute_https_health_check_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_https_health_check_fmt.Errorf("Error fetching project for HttpsHealthCheck: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/httpsHealthChecks/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_https_health_check_log.Printf("[DEBUG] Deleting HttpsHealthCheck %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_https_health_check_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "HttpsHealthCheck") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting HttpsHealthCheck", userAgent, - d.Timeout(resource_compute_https_health_check_schema.TimeoutDelete)) - - if err != nil { - return err - } - - 
resource_compute_https_health_check_log.Printf("[DEBUG] Finished deleting HttpsHealthCheck %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeHttpsHealthCheckImport(d *resource_compute_https_health_check_schema.ResourceData, meta interface{}) ([]*resource_compute_https_health_check_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/httpsHealthChecks/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/httpsHealthChecks/{{name}}") - if err != nil { - return nil, resource_compute_https_health_check_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_https_health_check_schema.ResourceData{d}, nil -} - -func flattenComputeHttpsHealthCheckCheckIntervalSec(v interface{}, d *resource_compute_https_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_https_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHttpsHealthCheckCreationTimestamp(v interface{}, d *resource_compute_https_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpsHealthCheckDescription(v interface{}, d *resource_compute_https_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpsHealthCheckHealthyThreshold(v interface{}, d *resource_compute_https_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_https_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := 
int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHttpsHealthCheckHost(v interface{}, d *resource_compute_https_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpsHealthCheckName(v interface{}, d *resource_compute_https_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpsHealthCheckPort(v interface{}, d *resource_compute_https_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_https_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHttpsHealthCheckRequestPath(v interface{}, d *resource_compute_https_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeHttpsHealthCheckTimeoutSec(v interface{}, d *resource_compute_https_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_https_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeHttpsHealthCheckUnhealthyThreshold(v interface{}, d *resource_compute_https_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_https_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func expandComputeHttpsHealthCheckCheckIntervalSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeHttpsHealthCheckDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpsHealthCheckHealthyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpsHealthCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpsHealthCheckName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpsHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpsHealthCheckRequestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpsHealthCheckTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeHttpsHealthCheckUnhealthyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeImage() *resource_compute_image_schema.Resource { - return &resource_compute_image_schema.Resource{ - Create: resourceComputeImageCreate, - Read: resourceComputeImageRead, - Update: resourceComputeImageUpdate, - Delete: resourceComputeImageDelete, - - Importer: &resource_compute_image_schema.ResourceImporter{ - State: resourceComputeImageImport, - }, - - Timeouts: &resource_compute_image_schema.ResourceTimeout{ - Create: resource_compute_image_schema.DefaultTimeout(6 * resource_compute_image_time.Minute), - Update: resource_compute_image_schema.DefaultTimeout(6 * resource_compute_image_time.Minute), - Delete: resource_compute_image_schema.DefaultTimeout(6 * resource_compute_image_time.Minute), - }, - - Schema: map[string]*resource_compute_image_schema.Schema{ - "name": { - Type: 
resource_compute_image_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource; provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the -last character, which cannot be a dash.`, - }, - "description": { - Type: resource_compute_image_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "disk_size_gb": { - Type: resource_compute_image_schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Size of the image when restored onto a persistent disk (in GB).`, - }, - "family": { - Type: resource_compute_image_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The name of the image family to which this image belongs. You can -create disks by specifying an image family instead of a specific -image name. The image family always returns its latest image that is -not deprecated. The name of the image family must comply with -RFC1035.`, - }, - "guest_os_features": { - Type: resource_compute_image_schema.TypeSet, - Computed: true, - Optional: true, - ForceNew: true, - Description: `A list of features to enable on the guest operating system. 
-Applicable only for bootable images.`, - Elem: computeImageGuestOsFeaturesSchema(), - }, - "labels": { - Type: resource_compute_image_schema.TypeMap, - Optional: true, - Description: `Labels to apply to this Image.`, - Elem: &resource_compute_image_schema.Schema{Type: resource_compute_image_schema.TypeString}, - }, - "licenses": { - Type: resource_compute_image_schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Any applicable license URI.`, - Elem: &resource_compute_image_schema.Schema{ - Type: resource_compute_image_schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "raw_disk": { - Type: resource_compute_image_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The parameters of the raw disk image.`, - MaxItems: 1, - Elem: &resource_compute_image_schema.Resource{ - Schema: map[string]*resource_compute_image_schema.Schema{ - "source": { - Type: resource_compute_image_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The full Google Cloud Storage URL where disk storage is stored -You must provide either this property or the sourceDisk property -but not both.`, - }, - "container_type": { - Type: resource_compute_image_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_image_validation.StringInSlice([]string{"TAR", ""}, false), - Description: `The format used to encode and transmit the block device, which -should be TAR. This is just a container and transmission format -and not a runtime format. Provided by the client when the disk -image is created. Default value: "TAR" Possible values: ["TAR"]`, - Default: "TAR", - }, - "sha1": { - Type: resource_compute_image_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional SHA1 checksum of the disk image before unpackaging. 
-This is provided by the client when the disk image is created.`, - }, - }, - }, - }, - "source_disk": { - Type: resource_compute_image_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The source disk to create this image based on. -You must provide either this property or the -rawDisk.source property but not both to create an image.`, - }, - "source_image": { - Type: resource_compute_image_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the source image used to create this image. In order to create an image, you must provide the full or partial -URL of one of the following: - -* The selfLink URL -* This property -* The rawDisk.source URL -* The sourceDisk URL`, - }, - "source_snapshot": { - Type: resource_compute_image_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the source snapshot used to create this image. - -In order to create an image, you must provide the full or partial URL of one of the following: - -* The selfLink URL -* This property -* The sourceImage URL -* The rawDisk.source URL -* The sourceDisk URL`, - }, - "archive_size_bytes": { - Type: resource_compute_image_schema.TypeInt, - Computed: true, - Description: `Size of the image tar.gz archive stored in Google Cloud Storage (in -bytes).`, - }, - "creation_timestamp": { - Type: resource_compute_image_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "label_fingerprint": { - Type: resource_compute_image_schema.TypeString, - Computed: true, - Description: `The fingerprint used for optimistic locking of this resource. 
Used -internally during updates.`, - }, - "project": { - Type: resource_compute_image_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_image_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func computeImageGuestOsFeaturesSchema() *resource_compute_image_schema.Resource { - return &resource_compute_image_schema.Resource{ - Schema: map[string]*resource_compute_image_schema.Schema{ - "type": { - Type: resource_compute_image_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_compute_image_validation.StringInSlice([]string{"MULTI_IP_SUBNET", "SECURE_BOOT", "SEV_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS", "GVNIC"}, false), - Description: `The type of supported feature. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options. 
Possible values: ["MULTI_IP_SUBNET", "SECURE_BOOT", "SEV_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS", "GVNIC"]`, - }, - }, - } -} - -func resourceComputeImageCreate(d *resource_compute_image_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeImageDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_image_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_image_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - diskSizeGbProp, err := expandComputeImageDiskSizeGb(d.Get("disk_size_gb"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disk_size_gb"); !isEmptyValue(resource_compute_image_reflect.ValueOf(diskSizeGbProp)) && (ok || !resource_compute_image_reflect.DeepEqual(v, diskSizeGbProp)) { - obj["diskSizeGb"] = diskSizeGbProp - } - familyProp, err := expandComputeImageFamily(d.Get("family"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("family"); !isEmptyValue(resource_compute_image_reflect.ValueOf(familyProp)) && (ok || !resource_compute_image_reflect.DeepEqual(v, familyProp)) { - obj["family"] = familyProp - } - guestOsFeaturesProp, err := expandComputeImageGuestOsFeatures(d.Get("guest_os_features"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("guest_os_features"); !isEmptyValue(resource_compute_image_reflect.ValueOf(guestOsFeaturesProp)) && (ok || !resource_compute_image_reflect.DeepEqual(v, guestOsFeaturesProp)) { - obj["guestOsFeatures"] = guestOsFeaturesProp - } - labelsProp, err := expandComputeImageLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("labels"); !isEmptyValue(resource_compute_image_reflect.ValueOf(labelsProp)) && (ok || !resource_compute_image_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - labelFingerprintProp, err := expandComputeImageLabelFingerprint(d.Get("label_fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(resource_compute_image_reflect.ValueOf(labelFingerprintProp)) && (ok || !resource_compute_image_reflect.DeepEqual(v, labelFingerprintProp)) { - obj["labelFingerprint"] = labelFingerprintProp - } - licensesProp, err := expandComputeImageLicenses(d.Get("licenses"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("licenses"); !isEmptyValue(resource_compute_image_reflect.ValueOf(licensesProp)) && (ok || !resource_compute_image_reflect.DeepEqual(v, licensesProp)) { - obj["licenses"] = licensesProp - } - nameProp, err := expandComputeImageName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_image_reflect.ValueOf(nameProp)) && (ok || !resource_compute_image_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - rawDiskProp, err := expandComputeImageRawDisk(d.Get("raw_disk"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("raw_disk"); !isEmptyValue(resource_compute_image_reflect.ValueOf(rawDiskProp)) && (ok || !resource_compute_image_reflect.DeepEqual(v, rawDiskProp)) { - obj["rawDisk"] = rawDiskProp - } - sourceDiskProp, err := expandComputeImageSourceDisk(d.Get("source_disk"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_disk"); !isEmptyValue(resource_compute_image_reflect.ValueOf(sourceDiskProp)) && (ok || !resource_compute_image_reflect.DeepEqual(v, sourceDiskProp)) { - obj["sourceDisk"] = sourceDiskProp - } - sourceImageProp, err := expandComputeImageSourceImage(d.Get("source_image"), 
d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_image"); !isEmptyValue(resource_compute_image_reflect.ValueOf(sourceImageProp)) && (ok || !resource_compute_image_reflect.DeepEqual(v, sourceImageProp)) { - obj["sourceImage"] = sourceImageProp - } - sourceSnapshotProp, err := expandComputeImageSourceSnapshot(d.Get("source_snapshot"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_snapshot"); !isEmptyValue(resource_compute_image_reflect.ValueOf(sourceSnapshotProp)) && (ok || !resource_compute_image_reflect.DeepEqual(v, sourceSnapshotProp)) { - obj["sourceSnapshot"] = sourceSnapshotProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/images") - if err != nil { - return err - } - - resource_compute_image_log.Printf("[DEBUG] Creating new Image: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_image_fmt.Errorf("Error fetching project for Image: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_image_schema.TimeoutCreate)) - if err != nil { - return resource_compute_image_fmt.Errorf("Error creating Image: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/images/{{name}}") - if err != nil { - return resource_compute_image_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating Image", userAgent, - d.Timeout(resource_compute_image_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_image_fmt.Errorf("Error waiting to create Image: %s", err) - } - - resource_compute_image_log.Printf("[DEBUG] Finished creating Image %q: %#v", d.Id(), res) - - return 
resourceComputeImageRead(d, meta) -} - -func resourceComputeImageRead(d *resource_compute_image_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/images/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_image_fmt.Errorf("Error fetching project for Image: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_image_fmt.Sprintf("ComputeImage %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_image_fmt.Errorf("Error reading Image: %s", err) - } - - if err := d.Set("archive_size_bytes", flattenComputeImageArchiveSizeBytes(res["archiveSizeBytes"], d, config)); err != nil { - return resource_compute_image_fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeImageCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_image_fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("description", flattenComputeImageDescription(res["description"], d, config)); err != nil { - return resource_compute_image_fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("disk_size_gb", flattenComputeImageDiskSizeGb(res["diskSizeGb"], d, config)); err != nil { - return resource_compute_image_fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("family", flattenComputeImageFamily(res["family"], d, config)); err != nil { - return resource_compute_image_fmt.Errorf("Error reading Image: %s", err) 
- } - if err := d.Set("guest_os_features", flattenComputeImageGuestOsFeatures(res["guestOsFeatures"], d, config)); err != nil { - return resource_compute_image_fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("labels", flattenComputeImageLabels(res["labels"], d, config)); err != nil { - return resource_compute_image_fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("label_fingerprint", flattenComputeImageLabelFingerprint(res["labelFingerprint"], d, config)); err != nil { - return resource_compute_image_fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("licenses", flattenComputeImageLicenses(res["licenses"], d, config)); err != nil { - return resource_compute_image_fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("name", flattenComputeImageName(res["name"], d, config)); err != nil { - return resource_compute_image_fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("source_disk", flattenComputeImageSourceDisk(res["sourceDisk"], d, config)); err != nil { - return resource_compute_image_fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("source_image", flattenComputeImageSourceImage(res["sourceImage"], d, config)); err != nil { - return resource_compute_image_fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("source_snapshot", flattenComputeImageSourceSnapshot(res["sourceSnapshot"], d, config)); err != nil { - return resource_compute_image_fmt.Errorf("Error reading Image: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_image_fmt.Errorf("Error reading Image: %s", err) - } - - return nil -} - -func resourceComputeImageUpdate(d *resource_compute_image_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, 
config) - if err != nil { - return resource_compute_image_fmt.Errorf("Error fetching project for Image: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("labels") || d.HasChange("label_fingerprint") { - obj := make(map[string]interface{}) - - labelsProp, err := expandComputeImageLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_compute_image_reflect.ValueOf(v)) && (ok || !resource_compute_image_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - labelFingerprintProp, err := expandComputeImageLabelFingerprint(d.Get("label_fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(resource_compute_image_reflect.ValueOf(v)) && (ok || !resource_compute_image_reflect.DeepEqual(v, labelFingerprintProp)) { - obj["labelFingerprint"] = labelFingerprintProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/images/{{name}}/setLabels") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_image_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_image_fmt.Errorf("Error updating Image %q: %s", d.Id(), err) - } else { - resource_compute_image_log.Printf("[DEBUG] Finished updating Image %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating Image", userAgent, - d.Timeout(resource_compute_image_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeImageRead(d, meta) -} - -func resourceComputeImageDelete(d *resource_compute_image_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_image_fmt.Errorf("Error fetching project for Image: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/images/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_image_log.Printf("[DEBUG] Deleting Image %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_image_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Image") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting Image", userAgent, - d.Timeout(resource_compute_image_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_image_log.Printf("[DEBUG] Finished deleting Image %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeImageImport(d *resource_compute_image_schema.ResourceData, meta interface{}) ([]*resource_compute_image_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/images/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/images/{{name}}") - if err != nil { - return nil, resource_compute_image_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_image_schema.ResourceData{d}, nil -} - -func flattenComputeImageArchiveSizeBytes(v interface{}, d *resource_compute_image_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_image_strconv.ParseInt(strVal, 10, 64); err == nil { 
- return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeImageCreationTimestamp(v interface{}, d *resource_compute_image_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeImageDescription(v interface{}, d *resource_compute_image_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeImageDiskSizeGb(v interface{}, d *resource_compute_image_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_image_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeImageFamily(v interface{}, d *resource_compute_image_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeImageGuestOsFeatures(v interface{}, d *resource_compute_image_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_compute_image_schema.NewSet(resource_compute_image_schema.HashResource(computeImageGuestOsFeaturesSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "type": flattenComputeImageGuestOsFeaturesType(original["type"], d, config), - }) - } - return transformed -} - -func flattenComputeImageGuestOsFeaturesType(v interface{}, d *resource_compute_image_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeImageLabels(v interface{}, d *resource_compute_image_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeImageLabelFingerprint(v interface{}, d *resource_compute_image_schema.ResourceData, config *Config) interface{} { - return v -} 
- -func flattenComputeImageLicenses(v interface{}, d *resource_compute_image_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeImageName(v interface{}, d *resource_compute_image_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeImageSourceDisk(v interface{}, d *resource_compute_image_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeImageSourceImage(v interface{}, d *resource_compute_image_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeImageSourceSnapshot(v interface{}, d *resource_compute_image_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeImageDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageDiskSizeGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageFamily(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageGuestOsFeatures(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_image_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedType, err := expandComputeImageGuestOsFeaturesType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_image_reflect.ValueOf(transformedType); 
val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeImageGuestOsFeaturesType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandComputeImageLabelFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageLicenses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - return nil, resource_compute_image_fmt.Errorf("Invalid value for licenses: nil") - } - f, err := parseGlobalFieldValue("licenses", raw.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_image_fmt.Errorf("Invalid value for licenses: %s", err) - } - req = append(req, f.RelativeLink()) - } - return req, nil -} - -func expandComputeImageName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageRawDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedContainerType, err := expandComputeImageRawDiskContainerType(original["container_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_image_reflect.ValueOf(transformedContainerType); val.IsValid() && !isEmptyValue(val) { - 
transformed["containerType"] = transformedContainerType - } - - transformedSha1, err := expandComputeImageRawDiskSha1(original["sha1"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_image_reflect.ValueOf(transformedSha1); val.IsValid() && !isEmptyValue(val) { - transformed["sha1Checksum"] = transformedSha1 - } - - transformedSource, err := expandComputeImageRawDiskSource(original["source"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_image_reflect.ValueOf(transformedSource); val.IsValid() && !isEmptyValue(val) { - transformed["source"] = transformedSource - } - - return transformed, nil -} - -func expandComputeImageRawDiskContainerType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageRawDiskSha1(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageRawDiskSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeImageSourceDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseZonalFieldValue("disks", v.(string), "project", "zone", d, config, true) - if err != nil { - return nil, resource_compute_image_fmt.Errorf("Invalid value for source_disk: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeImageSourceImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("images", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_image_fmt.Errorf("Invalid value for source_image: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeImageSourceSnapshot(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("snapshots", v.(string), "project", d, config, true) 
- if err != nil { - return nil, resource_compute_image_fmt.Errorf("Invalid value for source_snapshot: %s", err) - } - return f.RelativeLink(), nil -} - -var ( - bootDiskKeys = []string{ - "boot_disk.0.auto_delete", - "boot_disk.0.device_name", - "boot_disk.0.disk_encryption_key_raw", - "boot_disk.0.kms_key_self_link", - "boot_disk.0.initialize_params", - "boot_disk.0.mode", - "boot_disk.0.source", - } - - initializeParamsKeys = []string{ - "boot_disk.0.initialize_params.0.size", - "boot_disk.0.initialize_params.0.type", - "boot_disk.0.initialize_params.0.image", - "boot_disk.0.initialize_params.0.labels", - } - - schedulingKeys = []string{ - "scheduling.0.on_host_maintenance", - "scheduling.0.automatic_restart", - "scheduling.0.preemptible", - "scheduling.0.node_affinities", - "scheduling.0.min_node_cpus", - } - - shieldedInstanceConfigKeys = []string{ - "shielded_instance_config.0.enable_secure_boot", - "shielded_instance_config.0.enable_vtpm", - "shielded_instance_config.0.enable_integrity_monitoring", - } -) - -func forceNewIfNetworkIPNotUpdatable(ctx resource_compute_instance_context.Context, d *resource_compute_instance_schema.ResourceDiff, meta interface{}) error { - - return forceNewIfNetworkIPNotUpdatableFunc(d) -} - -func forceNewIfNetworkIPNotUpdatableFunc(d TerraformResourceDiff) error { - oldCount, newCount := d.GetChange("network_interface.#") - if oldCount.(int) != newCount.(int) { - return nil - } - - for i := 0; i < newCount.(int); i++ { - prefix := resource_compute_instance_fmt.Sprintf("network_interface.%d", i) - networkKey := prefix + ".network" - subnetworkKey := prefix + ".subnetwork" - subnetworkProjectKey := prefix + ".subnetwork_project" - networkIPKey := prefix + ".network_ip" - if d.HasChange(networkIPKey) { - if !d.HasChange(networkKey) && !d.HasChange(subnetworkKey) && !d.HasChange(subnetworkProjectKey) { - if err := d.ForceNew(networkIPKey); err != nil { - return err - } - } - } - } - - return nil -} - -func resourceComputeInstance() 
*resource_compute_instance_schema.Resource { - return &resource_compute_instance_schema.Resource{ - Create: resourceComputeInstanceCreate, - Read: resourceComputeInstanceRead, - Update: resourceComputeInstanceUpdate, - Delete: resourceComputeInstanceDelete, - Importer: &resource_compute_instance_schema.ResourceImporter{ - State: resourceComputeInstanceImportState, - }, - - SchemaVersion: 6, - MigrateState: resourceComputeInstanceMigrateState, - - Timeouts: &resource_compute_instance_schema.ResourceTimeout{ - Create: resource_compute_instance_schema.DefaultTimeout(20 * resource_compute_instance_time.Minute), - Update: resource_compute_instance_schema.DefaultTimeout(20 * resource_compute_instance_time.Minute), - Delete: resource_compute_instance_schema.DefaultTimeout(20 * resource_compute_instance_time.Minute), - }, - - Schema: map[string]*resource_compute_instance_schema.Schema{ - "boot_disk": { - Type: resource_compute_instance_schema.TypeList, - Required: true, - ForceNew: true, - MaxItems: 1, - Description: `The boot disk for the instance.`, - Elem: &resource_compute_instance_schema.Resource{ - Schema: map[string]*resource_compute_instance_schema.Schema{ - "auto_delete": { - Type: resource_compute_instance_schema.TypeBool, - Optional: true, - AtLeastOneOf: bootDiskKeys, - Default: true, - ForceNew: true, - Description: `Whether the disk will be auto-deleted when the instance is deleted.`, - }, - - "device_name": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - AtLeastOneOf: bootDiskKeys, - Computed: true, - ForceNew: true, - Description: `Name with which attached disk will be accessible under /dev/disk/by-id/`, - }, - - "disk_encryption_key_raw": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - AtLeastOneOf: bootDiskKeys, - ForceNew: true, - ConflictsWith: []string{"boot_disk.0.kms_key_self_link"}, - Sensitive: true, - Description: `A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to 
encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set.`, - }, - - "disk_encryption_key_sha256": { - Type: resource_compute_instance_schema.TypeString, - Computed: true, - Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource.`, - }, - - "kms_key_self_link": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - AtLeastOneOf: bootDiskKeys, - ForceNew: true, - ConflictsWith: []string{"boot_disk.0.disk_encryption_key_raw"}, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Computed: true, - Description: `The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set.`, - }, - - "initialize_params": { - Type: resource_compute_instance_schema.TypeList, - Optional: true, - AtLeastOneOf: bootDiskKeys, - Computed: true, - ForceNew: true, - MaxItems: 1, - Description: `Parameters with which a disk was created alongside the instance.`, - Elem: &resource_compute_instance_schema.Resource{ - Schema: map[string]*resource_compute_instance_schema.Schema{ - "size": { - Type: resource_compute_instance_schema.TypeInt, - Optional: true, - AtLeastOneOf: initializeParamsKeys, - Computed: true, - ForceNew: true, - ValidateFunc: resource_compute_instance_validation.IntAtLeast(1), - Description: `The size of the image in gigabytes.`, - }, - - "type": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - AtLeastOneOf: initializeParamsKeys, - Computed: true, - ForceNew: true, - ValidateFunc: resource_compute_instance_validation.StringInSlice([]string{"pd-standard", "pd-ssd", "pd-balanced"}, false), - Description: `The Google Compute Engine disk type. 
One of pd-standard, pd-ssd or pd-balanced.`, - }, - - "image": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - AtLeastOneOf: initializeParamsKeys, - Computed: true, - ForceNew: true, - DiffSuppressFunc: diskImageDiffSuppress, - Description: `The image from which this disk was initialised.`, - }, - - "labels": { - Type: resource_compute_instance_schema.TypeMap, - Optional: true, - AtLeastOneOf: initializeParamsKeys, - Computed: true, - ForceNew: true, - Description: `A set of key/value label pairs assigned to the disk.`, - }, - }, - }, - }, - - "mode": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - AtLeastOneOf: bootDiskKeys, - ForceNew: true, - Default: "READ_WRITE", - ValidateFunc: resource_compute_instance_validation.StringInSlice([]string{"READ_WRITE", "READ_ONLY"}, false), - Description: `Read/write mode for the disk. One of "READ_ONLY" or "READ_WRITE".`, - }, - - "source": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - AtLeastOneOf: bootDiskKeys, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"boot_disk.initialize_params"}, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name or self_link of the disk attached to this instance.`, - }, - }, - }, - }, - - "machine_type": { - Type: resource_compute_instance_schema.TypeString, - Required: true, - Description: `The machine type to create.`, - }, - - "name": { - Type: resource_compute_instance_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the instance. 
One of name or self_link must be provided.`, - }, - - "network_interface": { - Type: resource_compute_instance_schema.TypeList, - Required: true, - ForceNew: true, - Description: `The networks attached to the instance.`, - Elem: &resource_compute_instance_schema.Resource{ - Schema: map[string]*resource_compute_instance_schema.Schema{ - "network": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Computed: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name or self_link of the network attached to this interface.`, - }, - - "subnetwork": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Computed: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name or self_link of the subnetwork attached to this interface.`, - }, - - "subnetwork_project": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Computed: true, - Description: `The project in which the subnetwork belongs.`, - }, - - "network_ip": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Computed: true, - Description: `The private IP address assigned to the instance.`, - }, - - "name": { - Type: resource_compute_instance_schema.TypeString, - Computed: true, - Description: `The name of the interface`, - }, - "nic_type": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_instance_validation.StringInSlice([]string{"GVNIC", "VIRTIO_NET"}, false), - Description: `The type of vNIC to be used on this interface. Possible values:GVNIC, VIRTIO_NET`, - }, - "access_config": { - Type: resource_compute_instance_schema.TypeList, - Optional: true, - Description: `Access configurations, i.e. 
IPs via which this instance can be accessed via the Internet.`, - Elem: &resource_compute_instance_schema.Resource{ - Schema: map[string]*resource_compute_instance_schema.Schema{ - "nat_ip": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Computed: true, - Description: `The IP address that is be 1:1 mapped to the instance's network ip.`, - }, - - "network_tier": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: resource_compute_instance_validation.StringInSlice([]string{"PREMIUM", "STANDARD"}, false), - Description: `The networking tier used for configuring this instance. One of PREMIUM or STANDARD.`, - }, - - "public_ptr_domain_name": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Description: `The DNS domain name for the public PTR record.`, - }, - }, - }, - }, - - "alias_ip_range": { - Type: resource_compute_instance_schema.TypeList, - Optional: true, - Description: `An array of alias IP ranges for this network interface.`, - Elem: &resource_compute_instance_schema.Resource{ - Schema: map[string]*resource_compute_instance_schema.Schema{ - "ip_cidr_range": { - Type: resource_compute_instance_schema.TypeString, - Required: true, - DiffSuppressFunc: ipCidrRangeDiffSuppress, - Description: `The IP CIDR range represented by this alias IP range.`, - }, - "subnetwork_range_name": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Description: `The subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range.`, - }, - }, - }, - }, - - "stack_type": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: resource_compute_instance_validation.StringInSlice([]string{"IPV4_ONLY", "IPV4_IPV6", ""}, false), - Description: `The stack type for this network interface to identify whether the IPv6 feature is enabled or not. 
If not specified, IPV4_ONLY will be used.`, - }, - - "ipv6_access_type": { - Type: resource_compute_instance_schema.TypeString, - Computed: true, - Description: `One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork.`, - }, - - "ipv6_access_config": { - Type: resource_compute_instance_schema.TypeList, - Optional: true, - Description: `An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access.`, - Elem: &resource_compute_instance_schema.Resource{ - Schema: map[string]*resource_compute_instance_schema.Schema{ - "network_tier": { - Type: resource_compute_instance_schema.TypeString, - Required: true, - ValidateFunc: resource_compute_instance_validation.StringInSlice([]string{"PREMIUM"}, false), - Description: `The service-level to be provided for IPv6 traffic when the subnet has an external subnet. Only PREMIUM tier is valid for IPv6`, - }, - "public_ptr_domain_name": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Description: `The domain name to be used when creating DNSv6 records for the external IPv6 ranges.`, - }, - "external_ipv6": { - Type: resource_compute_instance_schema.TypeString, - Computed: true, - Description: `The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. 
The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically.`, - }, - "external_ipv6_prefix_length": { - Type: resource_compute_instance_schema.TypeString, - Computed: true, - Description: `The prefix length of the external IPv6 range.`, - }, - }, - }, - }, - - "queue_count": { - Type: resource_compute_instance_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified.`, - }, - }, - }, - }, - "allow_stopping_for_update": { - Type: resource_compute_instance_schema.TypeBool, - Optional: true, - Description: `If true, allows Terraform to stop the instance to update its properties. If you try to update a property that requires stopping the instance without setting this field, the update will fail.`, - }, - - "attached_disk": { - Type: resource_compute_instance_schema.TypeList, - Optional: true, - Description: `List of disks attached to the instance`, - Elem: &resource_compute_instance_schema.Resource{ - Schema: map[string]*resource_compute_instance_schema.Schema{ - "source": { - Type: resource_compute_instance_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name or self_link of the disk attached to this instance.`, - }, - - "device_name": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Computed: true, - Description: `Name with which the attached disk is accessible under /dev/disk/by-id/`, - }, - - "mode": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Default: "READ_WRITE", - ValidateFunc: resource_compute_instance_validation.StringInSlice([]string{"READ_WRITE", "READ_ONLY"}, false), - Description: `Read/write mode for the disk. 
One of "READ_ONLY" or "READ_WRITE".`, - }, - - "disk_encryption_key_raw": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Sensitive: true, - Description: `A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set.`, - }, - - "kms_key_self_link": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Computed: true, - Description: `The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set.`, - }, - - "disk_encryption_key_sha256": { - Type: resource_compute_instance_schema.TypeString, - Computed: true, - Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource.`, - }, - }, - }, - }, - - "can_ip_forward": { - Type: resource_compute_instance_schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - Description: `Whether sending and receiving of packets with non-matching source or destination IPs is allowed.`, - }, - - "description": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A brief description of the resource.`, - }, - - "deletion_protection": { - Type: resource_compute_instance_schema.TypeBool, - Optional: true, - Default: false, - Description: `Whether deletion protection is enabled on this instance.`, - }, - - "enable_display": { - Type: resource_compute_instance_schema.TypeBool, - Optional: true, - Description: `Whether the instance has virtual displays enabled.`, - }, - - "guest_accelerator": { - Type: resource_compute_instance_schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - ConfigMode: resource_compute_instance_schema.SchemaConfigModeAttr, - Description: `List of the type and count of accelerator 
cards attached to the instance.`, - Elem: &resource_compute_instance_schema.Resource{ - Schema: map[string]*resource_compute_instance_schema.Schema{ - "count": { - Type: resource_compute_instance_schema.TypeInt, - Required: true, - ForceNew: true, - Description: `The number of the guest accelerator cards exposed to this instance.`, - }, - "type": { - Type: resource_compute_instance_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The accelerator type resource exposed to this instance. E.g. nvidia-tesla-k80.`, - }, - }, - }, - }, - - "labels": { - Type: resource_compute_instance_schema.TypeMap, - Optional: true, - Elem: &resource_compute_instance_schema.Schema{Type: resource_compute_instance_schema.TypeString}, - Description: `A set of key/value label pairs assigned to the instance.`, - }, - - "metadata": { - Type: resource_compute_instance_schema.TypeMap, - Optional: true, - Elem: &resource_compute_instance_schema.Schema{Type: resource_compute_instance_schema.TypeString}, - Description: `Metadata key/value pairs made available within the instance.`, - }, - - "metadata_startup_script": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Metadata startup scripts made available within the instance.`, - }, - - "min_cpu_platform": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Computed: true, - Description: `The minimum CPU platform specified for the VM instance.`, - }, - - "project": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. If self_link is provided, this value is ignored. 
If neither self_link nor project are provided, the provider project is used.`, - }, - - "scheduling": { - Type: resource_compute_instance_schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, - Description: `The scheduling strategy being used by the instance.`, - Elem: &resource_compute_instance_schema.Resource{ - - Schema: map[string]*resource_compute_instance_schema.Schema{ - "on_host_maintenance": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Computed: true, - AtLeastOneOf: schedulingKeys, - Description: `Describes maintenance behavior for the instance. One of MIGRATE or TERMINATE,`, - }, - - "automatic_restart": { - Type: resource_compute_instance_schema.TypeBool, - Optional: true, - AtLeastOneOf: schedulingKeys, - Default: true, - Description: `Specifies if the instance should be restarted if it was terminated by Compute Engine (not a user).`, - }, - - "preemptible": { - Type: resource_compute_instance_schema.TypeBool, - Optional: true, - Default: false, - AtLeastOneOf: schedulingKeys, - ForceNew: true, - Description: `Whether the instance is preemptible.`, - }, - - "node_affinities": { - Type: resource_compute_instance_schema.TypeSet, - Optional: true, - AtLeastOneOf: schedulingKeys, - Elem: instanceSchedulingNodeAffinitiesElemSchema(), - DiffSuppressFunc: emptyOrDefaultStringSuppress(""), - Description: `Specifies node affinities or anti-affinities to determine which sole-tenant nodes your instances and managed instance groups will use as host systems.`, - }, - "min_node_cpus": { - Type: resource_compute_instance_schema.TypeInt, - Optional: true, - AtLeastOneOf: schedulingKeys, - }, - }, - }, - }, - - "scratch_disk": { - Type: resource_compute_instance_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The scratch disks attached to the instance.`, - Elem: &resource_compute_instance_schema.Resource{ - Schema: map[string]*resource_compute_instance_schema.Schema{ - "interface": { - Type: 
resource_compute_instance_schema.TypeString, - Required: true, - ValidateFunc: resource_compute_instance_validation.StringInSlice([]string{"SCSI", "NVME"}, false), - Description: `The disk interface used for attaching this disk. One of SCSI or NVME.`, - }, - }, - }, - }, - - "service_account": { - Type: resource_compute_instance_schema.TypeList, - MaxItems: 1, - Optional: true, - DiffSuppressFunc: serviceAccountDiffSuppress, - Description: `The service account to attach to the instance.`, - Elem: &resource_compute_instance_schema.Resource{ - Schema: map[string]*resource_compute_instance_schema.Schema{ - "email": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Computed: true, - Description: `The service account e-mail address.`, - }, - - "scopes": { - Type: resource_compute_instance_schema.TypeSet, - Required: true, - Description: `A list of service scopes.`, - Elem: &resource_compute_instance_schema.Schema{ - Type: resource_compute_instance_schema.TypeString, - StateFunc: func(v interface{}) string { - return canonicalizeServiceScope(v.(string)) - }, - }, - Set: stringScopeHashcode, - }, - }, - }, - }, - - "shielded_instance_config": { - Type: resource_compute_instance_schema.TypeList, - MaxItems: 1, - Optional: true, - - Computed: true, - DiffSuppressFunc: emptyOrDefaultStringSuppress(""), - Description: `The shielded vm config being used by the instance.`, - Elem: &resource_compute_instance_schema.Resource{ - Schema: map[string]*resource_compute_instance_schema.Schema{ - "enable_secure_boot": { - Type: resource_compute_instance_schema.TypeBool, - Optional: true, - AtLeastOneOf: shieldedInstanceConfigKeys, - Default: false, - Description: `Whether secure boot is enabled for the instance.`, - }, - - "enable_vtpm": { - Type: resource_compute_instance_schema.TypeBool, - Optional: true, - AtLeastOneOf: shieldedInstanceConfigKeys, - Default: true, - Description: `Whether the instance uses vTPM.`, - }, - - "enable_integrity_monitoring": { - 
Type: resource_compute_instance_schema.TypeBool, - Optional: true, - AtLeastOneOf: shieldedInstanceConfigKeys, - Default: true, - Description: `Whether integrity monitoring is enabled for the instance.`, - }, - }, - }, - }, - "advanced_machine_features": { - Type: resource_compute_instance_schema.TypeList, - MaxItems: 1, - Optional: true, - Description: `Controls for advanced machine-related behavior features.`, - Elem: &resource_compute_instance_schema.Resource{ - Schema: map[string]*resource_compute_instance_schema.Schema{ - "enable_nested_virtualization": { - Type: resource_compute_instance_schema.TypeBool, - Optional: true, - AtLeastOneOf: []string{"advanced_machine_features.0.enable_nested_virtualization", "advanced_machine_features.0.threads_per_core"}, - Description: `Whether to enable nested virtualization or not.`, - }, - "threads_per_core": { - Type: resource_compute_instance_schema.TypeInt, - Optional: true, - AtLeastOneOf: []string{"advanced_machine_features.0.enable_nested_virtualization", "advanced_machine_features.0.threads_per_core"}, - Description: `The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.`, - }, - }, - }, - }, - "confidential_instance_config": { - Type: resource_compute_instance_schema.TypeList, - MaxItems: 1, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The Confidential VM config being used by the instance. 
on_host_maintenance has to be set to TERMINATE or this will fail to create.`, - Elem: &resource_compute_instance_schema.Resource{ - Schema: map[string]*resource_compute_instance_schema.Schema{ - "enable_confidential_compute": { - Type: resource_compute_instance_schema.TypeBool, - Required: true, - Description: `Defines whether the instance should have confidential compute enabled.`, - }, - }, - }, - }, - "desired_status": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_instance_validation.StringInSlice([]string{"RUNNING", "TERMINATED"}, false), - Description: `Desired status of the instance. Either "RUNNING" or "TERMINATED".`, - }, - "current_status": { - Type: resource_compute_instance_schema.TypeString, - Computed: true, - Description: `Current status of the instance.`, - }, - "tags": { - Type: resource_compute_instance_schema.TypeSet, - Optional: true, - Elem: &resource_compute_instance_schema.Schema{Type: resource_compute_instance_schema.TypeString}, - Set: resource_compute_instance_schema.HashString, - Description: `The list of tags attached to the instance.`, - }, - - "zone": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The zone of the instance. If self_link is provided, this value is ignored. 
If neither self_link nor zone are provided, the provider zone is used.`, - }, - - "cpu_platform": { - Type: resource_compute_instance_schema.TypeString, - Computed: true, - Description: `The CPU platform used by this instance.`, - }, - - "instance_id": { - Type: resource_compute_instance_schema.TypeString, - Computed: true, - Description: `The server-assigned unique identifier of this instance.`, - }, - - "label_fingerprint": { - Type: resource_compute_instance_schema.TypeString, - Computed: true, - Description: `The unique fingerprint of the labels.`, - }, - - "metadata_fingerprint": { - Type: resource_compute_instance_schema.TypeString, - Computed: true, - Description: `The unique fingerprint of the metadata.`, - }, - - "self_link": { - Type: resource_compute_instance_schema.TypeString, - Computed: true, - Description: `The URI of the created resource.`, - }, - - "tags_fingerprint": { - Type: resource_compute_instance_schema.TypeString, - Computed: true, - Description: `The unique fingerprint of the tags.`, - }, - - "hostname": { - Type: resource_compute_instance_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A custom hostname for the instance. Must be a fully qualified DNS name and RFC-1035-valid. Valid format is a series of labels 1-63 characters long matching the regular expression [a-z]([-a-z0-9]*[a-z0-9]), concatenated with periods. The entire hostname must not exceed 253 characters. Changing this forces a new resource to be created.`, - }, - - "resource_policies": { - Type: resource_compute_instance_schema.TypeList, - Elem: &resource_compute_instance_schema.Schema{Type: resource_compute_instance_schema.TypeString}, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Optional: true, - MaxItems: 1, - Description: `A list of short names or self_links of resource policies to attach to the instance. 
Currently a max of 1 resource policy is supported.`, - }, - - "reservation_affinity": { - Type: resource_compute_instance_schema.TypeList, - MaxItems: 1, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Specifies the reservations that this instance can consume from.`, - Elem: &resource_compute_instance_schema.Resource{ - Schema: map[string]*resource_compute_instance_schema.Schema{ - "type": { - Type: resource_compute_instance_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_compute_instance_validation.StringInSlice([]string{"ANY_RESERVATION", "SPECIFIC_RESERVATION", "NO_RESERVATION"}, false), - Description: `The type of reservation from which this instance can consume resources.`, - }, - - "specific_reservation": { - Type: resource_compute_instance_schema.TypeList, - MaxItems: 1, - Optional: true, - ForceNew: true, - Description: `Specifies the label selector for the reservation to use.`, - - Elem: &resource_compute_instance_schema.Resource{ - Schema: map[string]*resource_compute_instance_schema.Schema{ - "key": { - Type: resource_compute_instance_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Corresponds to the label key of a reservation resource. 
To target a SPECIFIC_RESERVATION by name, specify compute.googleapis.com/reservation-name as the key and specify the name of your reservation as the only value.`, - }, - "values": { - Type: resource_compute_instance_schema.TypeList, - Elem: &resource_compute_instance_schema.Schema{Type: resource_compute_instance_schema.TypeString}, - Required: true, - ForceNew: true, - Description: `Corresponds to the label values of a reservation resource.`, - }, - }, - }, - }, - }, - }, - }, - }, - CustomizeDiff: resource_compute_instance_customdiff.All( - resource_compute_instance_customdiff.If( - func(_ resource_compute_instance_context.Context, d *resource_compute_instance_schema.ResourceDiff, meta interface{}) bool { - return d.HasChange("guest_accelerator") - }, - suppressEmptyGuestAcceleratorDiff, - ), - desiredStatusDiff, - forceNewIfNetworkIPNotUpdatable, - ), - UseJSONNumber: true, - } -} - -func getInstance(config *Config, d *resource_compute_instance_schema.ResourceData) (*resource_compute_instance_compute.Instance, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - zone, err := getZone(d, config) - if err != nil { - return nil, err - } - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, d.Get("name").(string)).Do() - if err != nil { - return nil, handleNotFoundError(err, d, resource_compute_instance_fmt.Sprintf("Instance %s", d.Get("name").(string))) - } - return instance, nil -} - -func getDisk(diskUri string, d *resource_compute_instance_schema.ResourceData, config *Config) (*resource_compute_instance_compute.Disk, error) { - source, err := ParseDiskFieldValue(diskUri, d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - disk, err := 
config.NewComputeClient(userAgent).Disks.Get(source.Project, source.Zone, source.Name).Do() - if err != nil { - return nil, err - } - - return disk, err -} - -func expandComputeInstance(project string, d *resource_compute_instance_schema.ResourceData, config *Config) (*resource_compute_instance_compute.Instance, error) { - - var machineTypeUrl string - if mt, ok := d.GetOk("machine_type"); ok { - machineType, err := ParseMachineTypesFieldValue(mt.(string), d, config) - if err != nil { - return nil, resource_compute_instance_fmt.Errorf( - "Error loading machine type: %s", - err) - } - machineTypeUrl = machineType.RelativeLink() - } - - disks := []*resource_compute_instance_compute.AttachedDisk{} - if _, hasBootDisk := d.GetOk("boot_disk"); hasBootDisk { - bootDisk, err := expandBootDisk(d, config, project) - if err != nil { - return nil, err - } - disks = append(disks, bootDisk) - } - - if _, hasScratchDisk := d.GetOk("scratch_disk"); hasScratchDisk { - scratchDisks, err := expandScratchDisks(d, config, project) - if err != nil { - return nil, err - } - disks = append(disks, scratchDisks...) 
- } - - attachedDisksCount := d.Get("attached_disk.#").(int) - - for i := 0; i < attachedDisksCount; i++ { - diskConfig := d.Get(resource_compute_instance_fmt.Sprintf("attached_disk.%d", i)).(map[string]interface{}) - disk, err := expandAttachedDisk(diskConfig, d, config) - if err != nil { - return nil, err - } - - disks = append(disks, disk) - } - - scheduling, err := expandScheduling(d.Get("scheduling")) - if err != nil { - return nil, resource_compute_instance_fmt.Errorf("Error creating scheduling: %s", err) - } - - metadata, err := resourceInstanceMetadata(d) - if err != nil { - return nil, resource_compute_instance_fmt.Errorf("Error creating metadata: %s", err) - } - - networkInterfaces, err := expandNetworkInterfaces(d, config) - if err != nil { - return nil, resource_compute_instance_fmt.Errorf("Error creating network interfaces: %s", err) - } - accels, err := expandInstanceGuestAccelerators(d, config) - if err != nil { - return nil, resource_compute_instance_fmt.Errorf("Error creating guest accelerators: %s", err) - } - - reservationAffinity, err := expandReservationAffinity(d) - if err != nil { - return nil, resource_compute_instance_fmt.Errorf("Error creating reservation affinity: %s", err) - } - - return &resource_compute_instance_compute.Instance{ - CanIpForward: d.Get("can_ip_forward").(bool), - Description: d.Get("description").(string), - Disks: disks, - MachineType: machineTypeUrl, - Metadata: metadata, - Name: d.Get("name").(string), - NetworkInterfaces: networkInterfaces, - Tags: resourceInstanceTags(d), - Labels: expandLabels(d), - ServiceAccounts: expandServiceAccounts(d.Get("service_account").([]interface{})), - GuestAccelerators: accels, - MinCpuPlatform: d.Get("min_cpu_platform").(string), - Scheduling: scheduling, - DeletionProtection: d.Get("deletion_protection").(bool), - Hostname: d.Get("hostname").(string), - ForceSendFields: []string{"CanIpForward", "DeletionProtection"}, - ConfidentialInstanceConfig: 
expandConfidentialInstanceConfig(d), - AdvancedMachineFeatures: expandAdvancedMachineFeatures(d), - ShieldedInstanceConfig: expandShieldedVmConfigs(d), - DisplayDevice: expandDisplayDevice(d), - ResourcePolicies: convertStringArr(d.Get("resource_policies").([]interface{})), - ReservationAffinity: reservationAffinity, - }, nil -} - -var computeInstanceStatus = []string{ - "PROVISIONING", - "REPAIRING", - "RUNNING", - "STAGING", - "STOPPED", - "STOPPING", - "SUSPENDED", - "SUSPENDING", - "TERMINATED", -} - -func getAllStatusBut(status string) []string { - for i, s := range computeInstanceStatus { - if status == s { - return append(computeInstanceStatus[:i], computeInstanceStatus[i+1:]...) - } - } - return computeInstanceStatus -} - -func waitUntilInstanceHasDesiredStatus(config *Config, d *resource_compute_instance_schema.ResourceData) error { - desiredStatus := d.Get("desired_status").(string) - - if desiredStatus != "" { - stateRefreshFunc := func() (interface{}, string, error) { - instance, err := getInstance(config, d) - if err != nil || instance == nil { - resource_compute_instance_log.Printf("Error on InstanceStateRefresh: %s", err) - return nil, "", err - } - return instance.Id, instance.Status, nil - } - stateChangeConf := resource_compute_instance_resource.StateChangeConf{ - Delay: 5 * resource_compute_instance_time.Second, - Pending: getAllStatusBut(desiredStatus), - Refresh: stateRefreshFunc, - Target: []string{desiredStatus}, - Timeout: d.Timeout(resource_compute_instance_schema.TimeoutUpdate), - MinTimeout: 2 * resource_compute_instance_time.Second, - } - _, err := stateChangeConf.WaitForState() - - if err != nil { - return resource_compute_instance_fmt.Errorf( - "Error waiting for instance to reach desired status %s: %s", desiredStatus, err) - } - } - - return nil -} - -func resourceComputeInstanceCreate(d *resource_compute_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - z, err := getZone(d, config) - if err != nil { - return err - } - resource_compute_instance_log.Printf("[DEBUG] Loading zone: %s", z) - zone, err := config.NewComputeClient(userAgent).Zones.Get( - project, z).Do() - if err != nil { - return resource_compute_instance_fmt.Errorf("Error loading zone '%s': %s", z, err) - } - - instance, err := expandComputeInstance(project, d, config) - if err != nil { - return err - } - - resource_compute_instance_log.Printf("[INFO] Requesting instance creation") - op, err := config.NewComputeClient(userAgent).Instances.Insert(project, zone.Name, instance).Do() - if err != nil { - return resource_compute_instance_fmt.Errorf("Error creating instance: %s", err) - } - - d.SetId(resource_compute_instance_fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, z, instance.Name)) - - waitErr := computeOperationWaitTime(config, op, project, "instance to create", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutCreate)) - if waitErr != nil { - - d.SetId("") - return waitErr - } - - err = waitUntilInstanceHasDesiredStatus(config, d) - if err != nil { - return resource_compute_instance_fmt.Errorf("Error waiting for status: %s", err) - } - - return resourceComputeInstanceRead(d, meta) -} - -func resourceComputeInstanceRead(d *resource_compute_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - instance, err := getInstance(config, d) - if err != nil || instance == nil { - return err - } - - md := flattenMetadataBeta(instance.Metadata) - - if _, ok := d.GetOk("metadata_startup_script"); ok { - if err := d.Set("metadata_startup_script", md["startup-script"]); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting metadata_startup_script: %s", err) - } - - delete(md, 
"startup-script") - } - - if err = d.Set("metadata", md); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting metadata: %s", err) - } - - if err := d.Set("metadata_fingerprint", instance.Metadata.Fingerprint); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting metadata_fingerprint: %s", err) - } - if err := d.Set("can_ip_forward", instance.CanIpForward); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting can_ip_forward: %s", err) - } - if err := d.Set("machine_type", GetResourceNameFromSelfLink(instance.MachineType)); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting machine_type: %s", err) - } - - networkInterfaces, _, internalIP, externalIP, err := flattenNetworkInterfaces(d, config, instance.NetworkInterfaces) - if err != nil { - return err - } - if err := d.Set("network_interface", networkInterfaces); err != nil { - return err - } - - sshIP := externalIP - if sshIP == "" { - sshIP = internalIP - } - - d.SetConnInfo(map[string]string{ - "type": "ssh", - "host": sshIP, - }) - - if instance.Tags != nil { - if err := d.Set("tags_fingerprint", instance.Tags.Fingerprint); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting tags_fingerprint: %s", err) - } - if err := d.Set("tags", convertStringArrToInterface(instance.Tags.Items)); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting tags: %s", err) - } - } - - if err := d.Set("labels", instance.Labels); err != nil { - return err - } - - if instance.LabelFingerprint != "" { - if err := d.Set("label_fingerprint", instance.LabelFingerprint); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting label_fingerprint: %s", err) - } - } - - attachedDiskSources := make(map[string]int) - for i, v := range d.Get("attached_disk").([]interface{}) { - if v == nil { - - continue - } - disk := v.(map[string]interface{}) - s := disk["source"].(string) - var sourceLink string - if 
resource_compute_instance_strings.Contains(s, "regions/") { - source, err := ParseRegionDiskFieldValue(disk["source"].(string), d, config) - if err != nil { - return err - } - sourceLink = source.RelativeLink() - } else { - source, err := ParseDiskFieldValue(disk["source"].(string), d, config) - if err != nil { - return err - } - sourceLink = source.RelativeLink() - } - attachedDiskSources[sourceLink] = i - } - - attachedDisks := make([]map[string]interface{}, d.Get("attached_disk.#").(int)) - scratchDisks := []map[string]interface{}{} - for _, disk := range instance.Disks { - if disk.Boot { - if err := d.Set("boot_disk", flattenBootDisk(d, disk, config)); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting boot_disk: %s", err) - } - } else if disk.Type == "SCRATCH" { - scratchDisks = append(scratchDisks, flattenScratchDisk(disk)) - } else { - var sourceLink string - if resource_compute_instance_strings.Contains(disk.Source, "regions/") { - source, err := ParseRegionDiskFieldValue(disk.Source, d, config) - if err != nil { - return err - } - sourceLink = source.RelativeLink() - } else { - source, err := ParseDiskFieldValue(disk.Source, d, config) - if err != nil { - return err - } - sourceLink = source.RelativeLink() - } - adIndex, inConfig := attachedDiskSources[sourceLink] - di := map[string]interface{}{ - "source": ConvertSelfLinkToV1(disk.Source), - "device_name": disk.DeviceName, - "mode": disk.Mode, - } - if key := disk.DiskEncryptionKey; key != nil { - if inConfig { - rawKey := d.Get(resource_compute_instance_fmt.Sprintf("attached_disk.%d.disk_encryption_key_raw", adIndex)) - if rawKey != "" { - di["disk_encryption_key_raw"] = rawKey - } - } - if key.KmsKeyName != "" { - - di["kms_key_self_link"] = resource_compute_instance_strings.Split(disk.DiskEncryptionKey.KmsKeyName, "/cryptoKeyVersions")[0] - } - if key.Sha256 != "" { - di["disk_encryption_key_sha256"] = key.Sha256 - } - } - - if inConfig { - attachedDisks[adIndex] = di - } else { 
- attachedDisks = append(attachedDisks, di) - } - } - } - - if err := d.Set("resource_policies", instance.ResourcePolicies); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting resource_policies: %s", err) - } - - ads := []map[string]interface{}{} - for _, d := range attachedDisks { - if d != nil { - ads = append(ads, d) - } - } - - zone := GetResourceNameFromSelfLink(instance.Zone) - - if err := d.Set("service_account", flattenServiceAccounts(instance.ServiceAccounts)); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting service_account: %s", err) - } - if err := d.Set("attached_disk", ads); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting attached_disk: %s", err) - } - if err := d.Set("scratch_disk", scratchDisks); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting scratch_disk: %s", err) - } - if err := d.Set("scheduling", flattenScheduling(instance.Scheduling)); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting scheduling: %s", err) - } - if err := d.Set("guest_accelerator", flattenGuestAccelerators(instance.GuestAccelerators)); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting guest_accelerator: %s", err) - } - if err := d.Set("shielded_instance_config", flattenShieldedVmConfig(instance.ShieldedInstanceConfig)); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting shielded_instance_config: %s", err) - } - if err := d.Set("enable_display", flattenEnableDisplay(instance.DisplayDevice)); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting enable_display: %s", err) - } - if err := d.Set("cpu_platform", instance.CpuPlatform); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting cpu_platform: %s", err) - } - if err := d.Set("min_cpu_platform", instance.MinCpuPlatform); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting min_cpu_platform: %s", 
err) - } - if err := d.Set("deletion_protection", instance.DeletionProtection); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting deletion_protection: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(instance.SelfLink)); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("instance_id", resource_compute_instance_fmt.Sprintf("%d", instance.Id)); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting instance_id: %s", err) - } - if err := d.Set("project", project); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("zone", zone); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("name", instance.Name); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("description", instance.Description); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("hostname", instance.Hostname); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting hostname: %s", err) - } - if err := d.Set("current_status", instance.Status); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting current_status: %s", err) - } - if err := d.Set("confidential_instance_config", flattenConfidentialInstanceConfig(instance.ConfidentialInstanceConfig)); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting confidential_instance_config: %s", err) - } - if err := d.Set("advanced_machine_features", flattenAdvancedMachineFeatures(instance.AdvancedMachineFeatures)); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting advanced_machine_features: %s", err) - } - if d.Get("desired_status") != "" { - if err := d.Set("desired_status", instance.Status); err != nil { - return 
resource_compute_instance_fmt.Errorf("Error setting desired_status: %s", err) - } - } - if err := d.Set("reservation_affinity", flattenReservationAffinity(instance.ReservationAffinity)); err != nil { - return resource_compute_instance_fmt.Errorf("Error setting reservation_affinity: %s", err) - } - - d.SetId(resource_compute_instance_fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, zone, instance.Name)) - - return nil -} - -func resourceComputeInstanceUpdate(d *resource_compute_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone, err := getZone(d, config) - if err != nil { - return err - } - - instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, d.Get("name").(string)).Do() - if err != nil { - return handleNotFoundError(err, d, resource_compute_instance_fmt.Sprintf("Instance %s", instance.Name)) - } - - d.Partial(true) - - if d.HasChange("metadata") { - metadata, err := resourceInstanceMetadata(d) - if err != nil { - return resource_compute_instance_fmt.Errorf("Error parsing metadata: %s", err) - } - - metadataV1 := &resource_compute_instance_compute.Metadata{} - if err := Convert(metadata, metadataV1); err != nil { - return err - } - - err = retry( - func() error { - - instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).Do() - if err != nil { - return resource_compute_instance_fmt.Errorf("Error retrieving metadata: %s", err) - } - - metadataV1.Fingerprint = instance.Metadata.Fingerprint - - op, err := config.NewComputeClient(userAgent).Instances.SetMetadata(project, zone, instance.Name, metadataV1).Do() - if err != nil { - return resource_compute_instance_fmt.Errorf("Error updating metadata: %s", err) - } - - opErr := computeOperationWaitTime(config, op, project, "metadata 
to update", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - - return nil - }, - ) - - if err != nil { - return err - } - } - - if d.HasChange("tags") { - tags := resourceInstanceTags(d) - tagsV1 := &resource_compute_instance_compute.Tags{} - if err := Convert(tags, tagsV1); err != nil { - return err - } - op, err := config.NewComputeClient(userAgent).Instances.SetTags( - project, zone, d.Get("name").(string), tagsV1).Do() - if err != nil { - return resource_compute_instance_fmt.Errorf("Error updating tags: %s", err) - } - - opErr := computeOperationWaitTime(config, op, project, "tags to update", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - - if d.HasChange("labels") { - labels := expandLabels(d) - labelFingerprint := d.Get("label_fingerprint").(string) - req := resource_compute_instance_compute.InstancesSetLabelsRequest{Labels: labels, LabelFingerprint: labelFingerprint} - - op, err := config.NewComputeClient(userAgent).Instances.SetLabels(project, zone, instance.Name, &req).Do() - if err != nil { - return resource_compute_instance_fmt.Errorf("Error updating labels: %s", err) - } - - opErr := computeOperationWaitTime(config, op, project, "labels to update", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - - if d.HasChange("resource_policies") { - if len(instance.ResourcePolicies) > 0 { - req := resource_compute_instance_compute.InstancesRemoveResourcePoliciesRequest{ResourcePolicies: instance.ResourcePolicies} - - op, err := config.NewComputeClient(userAgent).Instances.RemoveResourcePolicies(project, zone, instance.Name, &req).Do() - if err != nil { - return resource_compute_instance_fmt.Errorf("Error removing existing resource policies: %s", err) - } - - opErr := computeOperationWaitTime(config, op, project, "resource policies to remove", userAgent, 
d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - - resourcePolicies := convertStringArr(d.Get("resource_policies").([]interface{})) - if len(resourcePolicies) > 0 { - req := resource_compute_instance_compute.InstancesAddResourcePoliciesRequest{ResourcePolicies: resourcePolicies} - - op, err := config.NewComputeClient(userAgent).Instances.AddResourcePolicies(project, zone, instance.Name, &req).Do() - if err != nil { - return resource_compute_instance_fmt.Errorf("Error adding resource policies: %s", err) - } - - opErr := computeOperationWaitTime(config, op, project, "resource policies to add", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - } - - bootRequiredSchedulingChange := schedulingHasChangeRequiringReboot(d) - bootNotRequiredSchedulingChange := schedulingHasChangeWithoutReboot(d) - if bootNotRequiredSchedulingChange { - scheduling, err := expandScheduling(d.Get("scheduling")) - if err != nil { - return resource_compute_instance_fmt.Errorf("Error creating request data to update scheduling: %s", err) - } - - op, err := config.NewComputeClient(userAgent).Instances.SetScheduling( - project, zone, instance.Name, scheduling).Do() - if err != nil { - return resource_compute_instance_fmt.Errorf("Error updating scheduling policy: %s", err) - } - - opErr := computeOperationWaitTime( - config, op, project, "scheduling policy update", userAgent, - d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - - networkInterfaces, err := expandNetworkInterfaces(d, config) - if err != nil { - return resource_compute_instance_fmt.Errorf("Error getting network interface from config: %s", err) - } - - if len(networkInterfaces) != len(instance.NetworkInterfaces) { - return resource_compute_instance_fmt.Errorf("Instance had unexpected number of network interfaces: %d", len(instance.NetworkInterfaces)) - } - - var 
updatesToNIWhileStopped []func(inst *resource_compute_instance_compute.Instance) error - for i := 0; i < len(networkInterfaces); i++ { - prefix := resource_compute_instance_fmt.Sprintf("network_interface.%d", i) - networkInterface := networkInterfaces[i] - instNetworkInterface := instance.NetworkInterfaces[i] - - networkName := d.Get(prefix + ".name").(string) - subnetwork := networkInterface.Subnetwork - updateDuringStop := d.HasChange(prefix+".subnetwork") || d.HasChange(prefix+".network") || d.HasChange(prefix+".subnetwork_project") - - if networkName != instNetworkInterface.Name { - return resource_compute_instance_fmt.Errorf("Instance networkInterface had unexpected name: %s", instNetworkInterface.Name) - } - - if d.HasChange(prefix + ".subnetwork") { - if !d.HasChange(prefix + ".network") { - subnetProjectField := prefix + ".subnetwork_project" - sf, err := ParseSubnetworkFieldValueWithProjectField(subnetwork, subnetProjectField, d, config) - if err != nil { - return resource_compute_instance_fmt.Errorf("Cannot determine self_link for subnetwork %q: %s", subnetwork, err) - } - resp, err := config.NewComputeClient(userAgent).Subnetworks.Get(sf.Project, sf.Region, sf.Name).Do() - if err != nil { - return resource_compute_instance_errwrap.Wrapf("Error getting subnetwork value: {{err}}", err) - } - nf, err := ParseNetworkFieldValue(resp.Network, d, config) - if err != nil { - return resource_compute_instance_fmt.Errorf("Cannot determine self_link for network %q: %s", resp.Network, err) - } - networkInterface.Network = nf.RelativeLink() - } - } - - if !updateDuringStop && d.HasChange(prefix+".access_config") { - - err := computeInstanceDeleteAccessConfigs(d, config, instNetworkInterface, project, zone, userAgent, instance.Name) - if err != nil { - return err - } - - err = computeInstanceAddAccessConfigs(d, config, instNetworkInterface, networkInterface.AccessConfigs, project, zone, userAgent, instance.Name) - if err != nil { - return err - } - - instance, err = 
config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).Do() - if err != nil { - return err - } - instNetworkInterface = instance.NetworkInterfaces[i] - } - - if !updateDuringStop && d.HasChange(prefix+".alias_ip_range") { - - if len(instNetworkInterface.AliasIpRanges) > 0 { - ni := &resource_compute_instance_compute.NetworkInterface{ - Fingerprint: instNetworkInterface.Fingerprint, - ForceSendFields: []string{"AliasIpRanges"}, - } - op, err := config.NewComputeClient(userAgent).Instances.UpdateNetworkInterface(project, zone, instance.Name, networkName, ni).Do() - if err != nil { - return resource_compute_instance_errwrap.Wrapf("Error removing alias_ip_range: {{err}}", err) - } - opErr := computeOperationWaitTime(config, op, project, "updating alias ip ranges", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - - instance, err = config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).Do() - if err != nil { - return err - } - instNetworkInterface = instance.NetworkInterfaces[i] - } - - networkInterfacePatchObj := &resource_compute_instance_compute.NetworkInterface{ - AliasIpRanges: networkInterface.AliasIpRanges, - Fingerprint: instNetworkInterface.Fingerprint, - } - updateCall := config.NewComputeClient(userAgent).Instances.UpdateNetworkInterface(project, zone, instance.Name, networkName, networkInterfacePatchObj).Do - op, err := updateCall() - if err != nil { - return resource_compute_instance_errwrap.Wrapf("Error updating network interface: {{err}}", err) - } - opErr := computeOperationWaitTime(config, op, project, "network interface to update", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - - if updateDuringStop { - - networkInterfacePatchObj := &resource_compute_instance_compute.NetworkInterface{ - Network: networkInterface.Network, - Subnetwork: networkInterface.Subnetwork, - 
AliasIpRanges: networkInterface.AliasIpRanges, - } - - if d.HasChange(prefix + ".network_ip") { - networkInterfacePatchObj.NetworkIP = networkInterface.NetworkIP - } - - accessConfigsHaveChanged := d.HasChange(prefix + ".access_config") - - updateCall := computeInstanceCreateUpdateWhileStoppedCall(d, config, networkInterfacePatchObj, networkInterface.AccessConfigs, accessConfigsHaveChanged, i, project, zone, userAgent, instance.Name) - updatesToNIWhileStopped = append(updatesToNIWhileStopped, updateCall) - } - } - - if d.HasChange("attached_disk") { - o, n := d.GetChange("attached_disk") - - currDisks := map[string]struct{}{} - for _, disk := range instance.Disks { - if !disk.Boot && disk.Type != "SCRATCH" { - currDisks[disk.DeviceName] = struct{}{} - } - } - - oDisks := map[uint64]string{} - for _, disk := range o.([]interface{}) { - diskConfig := disk.(map[string]interface{}) - computeDisk, err := expandAttachedDisk(diskConfig, d, config) - if err != nil { - return err - } - hash, err := resource_compute_instance_hashstructure.Hash(*computeDisk, nil) - if err != nil { - return err - } - if _, ok := currDisks[computeDisk.DeviceName]; ok { - oDisks[hash] = computeDisk.DeviceName - } - } - - nDisks := map[uint64]struct{}{} - var attach []*resource_compute_instance_compute.AttachedDisk - for _, disk := range n.([]interface{}) { - diskConfig := disk.(map[string]interface{}) - computeDisk, err := expandAttachedDisk(diskConfig, d, config) - if err != nil { - return err - } - hash, err := resource_compute_instance_hashstructure.Hash(*computeDisk, nil) - if err != nil { - return err - } - nDisks[hash] = struct{}{} - - if _, ok := oDisks[hash]; !ok { - computeDiskV1 := &resource_compute_instance_compute.AttachedDisk{} - err = Convert(computeDisk, computeDiskV1) - if err != nil { - return err - } - attach = append(attach, computeDiskV1) - } - } - - for hash, deviceName := range oDisks { - if _, ok := nDisks[hash]; !ok { - op, err := 
config.NewComputeClient(userAgent).Instances.DetachDisk(project, zone, instance.Name, deviceName).Do() - if err != nil { - return resource_compute_instance_errwrap.Wrapf("Error detaching disk: %s", err) - } - - opErr := computeOperationWaitTime(config, op, project, "detaching disk", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - resource_compute_instance_log.Printf("[DEBUG] Successfully detached disk %s", deviceName) - } - } - - for _, disk := range attach { - op, err := config.NewComputeClient(userAgent).Instances.AttachDisk(project, zone, instance.Name, disk).Do() - if err != nil { - return resource_compute_instance_errwrap.Wrapf("Error attaching disk : {{err}}", err) - } - - opErr := computeOperationWaitTime(config, op, project, "attaching disk", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - resource_compute_instance_log.Printf("[DEBUG] Successfully attached disk %s", disk.Source) - } - } - - o, n := d.GetChange("service_account") - oList := o.([]interface{}) - nList := n.([]interface{}) - scopesChange := false - if len(oList) != len(nList) { - scopesChange = true - } else if len(oList) == 1 { - - oScopes := oList[0].(map[string]interface{})["scopes"].(*resource_compute_instance_schema.Set) - nScopes := nList[0].(map[string]interface{})["scopes"].(*resource_compute_instance_schema.Set) - scopesChange = !oScopes.Equal(nScopes) - } - - if d.HasChange("deletion_protection") { - nDeletionProtection := d.Get("deletion_protection").(bool) - - op, err := config.NewComputeClient(userAgent).Instances.SetDeletionProtection(project, zone, d.Get("name").(string)).DeletionProtection(nDeletionProtection).Do() - if err != nil { - return resource_compute_instance_fmt.Errorf("Error updating deletion protection flag: %s", err) - } - - opErr := computeOperationWaitTime(config, op, project, "deletion protection to update", userAgent, 
d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - - needToStopInstanceBeforeUpdating := scopesChange || d.HasChange("service_account.0.email") || d.HasChange("machine_type") || d.HasChange("min_cpu_platform") || d.HasChange("enable_display") || d.HasChange("shielded_instance_config") || len(updatesToNIWhileStopped) > 0 || bootRequiredSchedulingChange || d.HasChange("advanced_machine_features") - - if d.HasChange("desired_status") && !needToStopInstanceBeforeUpdating { - desiredStatus := d.Get("desired_status").(string) - - if desiredStatus != "" { - var op *resource_compute_instance_compute.Operation - - if desiredStatus == "RUNNING" { - op, err = startInstanceOperation(d, config) - if err != nil { - return resource_compute_instance_errwrap.Wrapf("Error starting instance: {{err}}", err) - } - } else if desiredStatus == "TERMINATED" { - op, err = config.NewComputeClient(userAgent).Instances.Stop(project, zone, instance.Name).Do() - if err != nil { - return err - } - } - opErr := computeOperationWaitTime( - config, op, project, "updating status", userAgent, - d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - } - - if needToStopInstanceBeforeUpdating { - statusBeforeUpdate := instance.Status - desiredStatus := d.Get("desired_status").(string) - - if statusBeforeUpdate == "RUNNING" && desiredStatus != "TERMINATED" && !d.Get("allow_stopping_for_update").(bool) { - return resource_compute_instance_fmt.Errorf("Changing the machine_type, min_cpu_platform, service_account, enable_display, shielded_instance_config, scheduling.node_affinities " + - "or network_interface.[#d].(network/subnetwork/subnetwork_project) or advanced_machine_features on a started instance requires stopping it. " + - "To acknowledge this, please set allow_stopping_for_update = true in your config. 
" + - "You can also stop it by setting desired_status = \"TERMINATED\", but the instance will not be restarted after the update.") - } - - if statusBeforeUpdate != "TERMINATED" { - op, err := config.NewComputeClient(userAgent).Instances.Stop(project, zone, instance.Name).Do() - if err != nil { - return resource_compute_instance_errwrap.Wrapf("Error stopping instance: {{err}}", err) - } - - opErr := computeOperationWaitTime(config, op, project, "stopping instance", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - - if d.HasChange("machine_type") { - mt, err := ParseMachineTypesFieldValue(d.Get("machine_type").(string), d, config) - if err != nil { - return err - } - req := &resource_compute_instance_compute.InstancesSetMachineTypeRequest{ - MachineType: mt.RelativeLink(), - } - op, err := config.NewComputeClient(userAgent).Instances.SetMachineType(project, zone, instance.Name, req).Do() - if err != nil { - return err - } - opErr := computeOperationWaitTime(config, op, project, "updating machinetype", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - - if d.HasChange("min_cpu_platform") { - minCpuPlatform, ok := d.GetOk("min_cpu_platform") - - if !ok { - minCpuPlatform = "Automatic" - } - req := &resource_compute_instance_compute.InstancesSetMinCpuPlatformRequest{ - MinCpuPlatform: minCpuPlatform.(string), - } - op, err := config.NewComputeClient(userAgent).Instances.SetMinCpuPlatform(project, zone, instance.Name, req).Do() - if err != nil { - return err - } - opErr := computeOperationWaitTime(config, op, project, "updating min cpu platform", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - - if d.HasChange("service_account.0.email") || scopesChange { - sa := d.Get("service_account").([]interface{}) - req := 
&resource_compute_instance_compute.InstancesSetServiceAccountRequest{ForceSendFields: []string{"email"}} - if len(sa) > 0 && sa[0] != nil { - saMap := sa[0].(map[string]interface{}) - req.Email = saMap["email"].(string) - req.Scopes = canonicalizeServiceScopes(convertStringSet(saMap["scopes"].(*resource_compute_instance_schema.Set))) - } - op, err := config.NewComputeClient(userAgent).Instances.SetServiceAccount(project, zone, instance.Name, req).Do() - if err != nil { - return err - } - opErr := computeOperationWaitTime(config, op, project, "updating service account", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - - if d.HasChange("enable_display") { - req := &resource_compute_instance_compute.DisplayDevice{ - EnableDisplay: d.Get("enable_display").(bool), - ForceSendFields: []string{"EnableDisplay"}, - } - op, err := config.NewComputeClient(userAgent).Instances.UpdateDisplayDevice(project, zone, instance.Name, req).Do() - if err != nil { - return resource_compute_instance_fmt.Errorf("Error updating display device: %s", err) - } - opErr := computeOperationWaitTime(config, op, project, "updating display device", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - - if d.HasChange("shielded_instance_config") { - shieldedVmConfig := expandShieldedVmConfigs(d) - - op, err := config.NewComputeClient(userAgent).Instances.UpdateShieldedInstanceConfig(project, zone, instance.Name, shieldedVmConfig).Do() - if err != nil { - return resource_compute_instance_fmt.Errorf("Error updating shielded vm config: %s", err) - } - - opErr := computeOperationWaitTime(config, op, project, - "shielded vm config update", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - - if bootRequiredSchedulingChange { - scheduling, err := expandScheduling(d.Get("scheduling")) - if err != nil { - return 
resource_compute_instance_fmt.Errorf("Error creating request data to update scheduling: %s", err) - } - - op, err := config.NewComputeClient(userAgent).Instances.SetScheduling( - project, zone, instance.Name, scheduling).Do() - if err != nil { - return resource_compute_instance_fmt.Errorf("Error updating scheduling policy: %s", err) - } - - opErr := computeOperationWaitTime( - config, op, project, "scheduling policy update", userAgent, - d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - - if d.HasChange("advanced_machine_features") { - err = retry( - func() error { - - instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).Do() - if err != nil { - return resource_compute_instance_fmt.Errorf("Error retrieving instance: %s", err) - } - - instance.AdvancedMachineFeatures = expandAdvancedMachineFeatures(d) - - op, err := config.NewComputeClient(userAgent).Instances.Update(project, zone, instance.Name, instance).Do() - if err != nil { - return resource_compute_instance_fmt.Errorf("Error updating instance: %s", err) - } - - opErr := computeOperationWaitTime(config, op, project, "advanced_machine_features to update", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - - return nil - }, - ) - - if err != nil { - return err - } - } - - if len(updatesToNIWhileStopped) > 0 { - instance, err = config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).Do() - if err != nil { - return err - } - } - for _, patch := range updatesToNIWhileStopped { - err := patch(instance) - if err != nil { - return err - } - } - - if (statusBeforeUpdate == "RUNNING" && desiredStatus != "TERMINATED") || - (statusBeforeUpdate == "TERMINATED" && desiredStatus == "RUNNING") { - op, err := startInstanceOperation(d, config) - if err != nil { - return resource_compute_instance_errwrap.Wrapf("Error starting instance: {{err}}", err) - 
} - - opErr := computeOperationWaitTime(config, op, project, - "starting instance", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutUpdate)) - if opErr != nil { - return opErr - } - } - } - - d.Partial(false) - - return resourceComputeInstanceRead(d, meta) -} - -func startInstanceOperation(d *resource_compute_instance_schema.ResourceData, config *Config) (*resource_compute_instance_compute.Operation, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - zone, err := getZone(d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, d.Get("name").(string)).Do() - if err != nil { - return nil, handleNotFoundError(err, d, resource_compute_instance_fmt.Sprintf("Instance %s", instance.Name)) - } - - instanceFromConfig, err := expandComputeInstance(project, d, config) - if err != nil { - return nil, err - } - - var encrypted []*resource_compute_instance_compute.CustomerEncryptionKeyProtectedDisk - for _, disk := range instanceFromConfig.Disks { - if disk.DiskEncryptionKey != nil { - key := resource_compute_instance_compute.CustomerEncryptionKey{RawKey: disk.DiskEncryptionKey.RawKey, KmsKeyName: disk.DiskEncryptionKey.KmsKeyName} - eDisk := resource_compute_instance_compute.CustomerEncryptionKeyProtectedDisk{Source: disk.Source, DiskEncryptionKey: &key} - encrypted = append(encrypted, &eDisk) - } - } - - var op *resource_compute_instance_compute.Operation - - if len(encrypted) > 0 { - request := resource_compute_instance_compute.InstancesStartWithEncryptionKeyRequest{Disks: encrypted} - op, err = config.NewComputeClient(userAgent).Instances.StartWithEncryptionKey(project, zone, instance.Name, &request).Do() - } else { - op, err = config.NewComputeClient(userAgent).Instances.Start(project, zone, instance.Name).Do() - } - - return op, err -} 
- -func expandAttachedDisk(diskConfig map[string]interface{}, d *resource_compute_instance_schema.ResourceData, meta interface{}) (*resource_compute_instance_compute.AttachedDisk, error) { - config := meta.(*Config) - - s := diskConfig["source"].(string) - var sourceLink string - if resource_compute_instance_strings.Contains(s, "regions/") { - source, err := ParseRegionDiskFieldValue(s, d, config) - if err != nil { - return nil, err - } - sourceLink = source.RelativeLink() - } else { - source, err := ParseDiskFieldValue(s, d, config) - if err != nil { - return nil, err - } - sourceLink = source.RelativeLink() - } - - disk := &resource_compute_instance_compute.AttachedDisk{ - Source: sourceLink, - } - - if v, ok := diskConfig["mode"]; ok { - disk.Mode = v.(string) - } - - if v, ok := diskConfig["device_name"]; ok { - disk.DeviceName = v.(string) - } - - keyValue, keyOk := diskConfig["disk_encryption_key_raw"] - if keyOk { - if keyValue != "" { - disk.DiskEncryptionKey = &resource_compute_instance_compute.CustomerEncryptionKey{ - RawKey: keyValue.(string), - } - } - } - - kmsValue, kmsOk := diskConfig["kms_key_self_link"] - if kmsOk { - if keyOk && keyValue != "" && kmsValue != "" { - return nil, resource_compute_instance_errors.New("Only one of kms_key_self_link and disk_encryption_key_raw can be set") - } - if kmsValue != "" { - disk.DiskEncryptionKey = &resource_compute_instance_compute.CustomerEncryptionKey{ - KmsKeyName: kmsValue.(string), - } - } - } - return disk, nil -} - -func expandInstanceGuestAccelerators(d TerraformResourceData, config *Config) ([]*resource_compute_instance_compute.AcceleratorConfig, error) { - configs, ok := d.GetOk("guest_accelerator") - if !ok { - return nil, nil - } - accels := configs.([]interface{}) - guestAccelerators := make([]*resource_compute_instance_compute.AcceleratorConfig, 0, len(accels)) - for _, raw := range accels { - data := raw.(map[string]interface{}) - if data["count"].(int) == 0 { - continue - } - at, err := 
ParseAcceleratorFieldValue(data["type"].(string), d, config) - if err != nil { - return nil, resource_compute_instance_fmt.Errorf("cannot parse accelerator type: %v", err) - } - guestAccelerators = append(guestAccelerators, &resource_compute_instance_compute.AcceleratorConfig{ - AcceleratorCount: int64(data["count"].(int)), - AcceleratorType: at.RelativeLink(), - }) - } - - return guestAccelerators, nil -} - -func suppressEmptyGuestAcceleratorDiff(_ resource_compute_instance_context.Context, d *resource_compute_instance_schema.ResourceDiff, meta interface{}) error { - oldi, newi := d.GetChange("guest_accelerator") - - old, ok := oldi.([]interface{}) - if !ok { - return resource_compute_instance_fmt.Errorf("Expected old guest accelerator diff to be a slice") - } - - new, ok := newi.([]interface{}) - if !ok { - return resource_compute_instance_fmt.Errorf("Expected new guest accelerator diff to be a slice") - } - - if len(old) != 0 && len(new) != 1 { - return nil - } - - firstAccel, ok := new[0].(map[string]interface{}) - if !ok { - return resource_compute_instance_fmt.Errorf("Unable to type assert guest accelerator") - } - - if firstAccel["count"].(int) == 0 { - if err := d.Clear("guest_accelerator"); err != nil { - return err - } - } - - return nil -} - -func desiredStatusDiff(_ resource_compute_instance_context.Context, diff *resource_compute_instance_schema.ResourceDiff, meta interface{}) error { - - oldName, _ := diff.GetChange("name") - - if oldName == nil || oldName == "" { - _, newDesiredStatus := diff.GetChange("desired_status") - - if newDesiredStatus == nil || newDesiredStatus == "" { - return nil - } else if newDesiredStatus != "RUNNING" { - return resource_compute_instance_fmt.Errorf("When creating an instance, desired_status can only accept RUNNING value") - } - return nil - } - - return nil -} - -func resourceComputeInstanceDelete(d *resource_compute_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone, err := getZone(d, config) - if err != nil { - return err - } - resource_compute_instance_log.Printf("[INFO] Requesting instance deletion: %s", d.Get("name").(string)) - - if d.Get("deletion_protection").(bool) { - return resource_compute_instance_fmt.Errorf("Cannot delete instance %s: instance Deletion Protection is enabled. Set deletion_protection to false for this resource and run \"terraform apply\" before attempting to delete it.", d.Get("name").(string)) - } else { - op, err := config.NewComputeClient(userAgent).Instances.Delete(project, zone, d.Get("name").(string)).Do() - if err != nil { - return resource_compute_instance_fmt.Errorf("Error deleting instance: %s", err) - } - - opErr := computeOperationWaitTime(config, op, project, "instance to delete", userAgent, d.Timeout(resource_compute_instance_schema.TimeoutDelete)) - if opErr != nil { - - op, _ = config.NewComputeClient(userAgent).ZoneOperations.Get(project, zone, resource_compute_instance_strconv.FormatUint(op.Id, 10)).Do() - - if op == nil || op.Status != "DONE" { - return opErr - } - } - - d.SetId("") - return nil - } -} - -func resourceComputeInstanceImportState(d *resource_compute_instance_schema.ResourceData, meta interface{}) ([]*resource_compute_instance_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instances/{{name}}") - if err != nil { - return nil, resource_compute_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_instance_schema.ResourceData{d}, nil -} - -func expandBootDisk(d 
*resource_compute_instance_schema.ResourceData, config *Config, project string) (*resource_compute_instance_compute.AttachedDisk, error) { - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - disk := &resource_compute_instance_compute.AttachedDisk{ - AutoDelete: d.Get("boot_disk.0.auto_delete").(bool), - Boot: true, - } - - if v, ok := d.GetOk("boot_disk.0.device_name"); ok { - disk.DeviceName = v.(string) - } - - if v, ok := d.GetOk("boot_disk.0.disk_encryption_key_raw"); ok { - if v != "" { - disk.DiskEncryptionKey = &resource_compute_instance_compute.CustomerEncryptionKey{ - RawKey: v.(string), - } - } - } - - if v, ok := d.GetOk("boot_disk.0.kms_key_self_link"); ok { - if v != "" { - disk.DiskEncryptionKey = &resource_compute_instance_compute.CustomerEncryptionKey{ - KmsKeyName: v.(string), - } - } - } - - if v, ok := d.GetOk("boot_disk.0.source"); ok { - source, err := ParseDiskFieldValue(v.(string), d, config) - if err != nil { - return nil, err - } - disk.Source = source.RelativeLink() - } - - if _, ok := d.GetOk("boot_disk.0.initialize_params"); ok { - disk.InitializeParams = &resource_compute_instance_compute.AttachedDiskInitializeParams{} - - if v, ok := d.GetOk("boot_disk.0.initialize_params.0.size"); ok { - disk.InitializeParams.DiskSizeGb = int64(v.(int)) - } - - if v, ok := d.GetOk("boot_disk.0.initialize_params.0.type"); ok { - diskTypeName := v.(string) - diskType, err := readDiskType(config, d, diskTypeName) - if err != nil { - return nil, resource_compute_instance_fmt.Errorf("Error loading disk type '%s': %s", diskTypeName, err) - } - disk.InitializeParams.DiskType = diskType.RelativeLink() - } - - if v, ok := d.GetOk("boot_disk.0.initialize_params.0.image"); ok { - imageName := v.(string) - imageUrl, err := resolveImage(config, project, imageName, userAgent) - if err != nil { - return nil, resource_compute_instance_fmt.Errorf("Error resolving image name '%s': %s", imageName, err) - } - - 
disk.InitializeParams.SourceImage = imageUrl - } - - if _, ok := d.GetOk("boot_disk.0.initialize_params.0.labels"); ok { - disk.InitializeParams.Labels = expandStringMap(d, "boot_disk.0.initialize_params.0.labels") - } - } - - if v, ok := d.GetOk("boot_disk.0.mode"); ok { - disk.Mode = v.(string) - } - - return disk, nil -} - -func flattenBootDisk(d *resource_compute_instance_schema.ResourceData, disk *resource_compute_instance_compute.AttachedDisk, config *Config) []map[string]interface{} { - result := map[string]interface{}{ - "auto_delete": disk.AutoDelete, - "device_name": disk.DeviceName, - "mode": disk.Mode, - "source": ConvertSelfLinkToV1(disk.Source), - - "disk_encryption_key_raw": d.Get("boot_disk.0.disk_encryption_key_raw"), - } - - diskDetails, err := getDisk(disk.Source, d, config) - if err != nil { - resource_compute_instance_log.Printf("[WARN] Cannot retrieve boot disk details: %s", err) - - if _, ok := d.GetOk("boot_disk.0.initialize_params.#"); ok { - - m := d.Get("boot_disk.0.initialize_params") - result["initialize_params"] = m - } - } else { - result["initialize_params"] = []map[string]interface{}{{ - "type": GetResourceNameFromSelfLink(diskDetails.Type), - - "image": diskDetails.SourceImage, - "size": diskDetails.SizeGb, - "labels": diskDetails.Labels, - }} - } - - if disk.DiskEncryptionKey != nil { - if disk.DiskEncryptionKey.Sha256 != "" { - result["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256 - } - if disk.DiskEncryptionKey.KmsKeyName != "" { - - result["kms_key_self_link"] = resource_compute_instance_strings.Split(disk.DiskEncryptionKey.KmsKeyName, "/cryptoKeyVersions")[0] - } - } - - return []map[string]interface{}{result} -} - -func expandScratchDisks(d *resource_compute_instance_schema.ResourceData, config *Config, project string) ([]*resource_compute_instance_compute.AttachedDisk, error) { - diskType, err := readDiskType(config, d, "local-ssd") - if err != nil { - return nil, resource_compute_instance_fmt.Errorf("Error 
loading disk type 'local-ssd': %s", err) - } - - n := d.Get("scratch_disk.#").(int) - scratchDisks := make([]*resource_compute_instance_compute.AttachedDisk, 0, n) - for i := 0; i < n; i++ { - scratchDisks = append(scratchDisks, &resource_compute_instance_compute.AttachedDisk{ - AutoDelete: true, - Type: "SCRATCH", - Interface: d.Get(resource_compute_instance_fmt.Sprintf("scratch_disk.%d.interface", i)).(string), - InitializeParams: &resource_compute_instance_compute.AttachedDiskInitializeParams{ - DiskType: diskType.RelativeLink(), - }, - }) - } - - return scratchDisks, nil -} - -func flattenScratchDisk(disk *resource_compute_instance_compute.AttachedDisk) map[string]interface{} { - result := map[string]interface{}{ - "interface": disk.Interface, - } - return result -} - -func hash256(raw string) (string, error) { - decoded, err := resource_compute_instance_base64.StdEncoding.DecodeString(raw) - if err != nil { - return "", err - } - h := resource_compute_instance_sha256.Sum256(decoded) - return resource_compute_instance_base64.StdEncoding.EncodeToString(h[:]), nil -} - -func serviceAccountDiffSuppress(k, old, new string, d *resource_compute_instance_schema.ResourceData) bool { - if k != "service_account.#" { - return false - } - - o, n := d.GetChange("service_account") - var l []interface{} - if old == "0" && new == "1" { - l = n.([]interface{}) - } else if new == "0" && old == "1" { - l = o.([]interface{}) - } else { - - return false - } - - if l[0] != nil { - contents := l[0].(map[string]interface{}) - if scopes, ok := contents["scopes"]; ok { - a := scopes.(*resource_compute_instance_schema.Set).List() - if a != nil && len(a) > 0 { - return false - } - } - } - return true -} - -func resourceComputeInstanceFromTemplate() *resource_compute_instance_from_template_schema.Resource { - return &resource_compute_instance_from_template_schema.Resource{ - Create: resourceComputeInstanceFromTemplateCreate, - Read: resourceComputeInstanceRead, - Update: 
resourceComputeInstanceUpdate, - Delete: resourceComputeInstanceDelete, - - Timeouts: resourceComputeInstance().Timeouts, - - Schema: computeInstanceFromTemplateSchema(), - CustomizeDiff: resourceComputeInstance().CustomizeDiff, - UseJSONNumber: true, - } -} - -func computeInstanceFromTemplateSchema() map[string]*resource_compute_instance_from_template_schema.Schema { - s := resourceComputeInstance().Schema - - for _, field := range []string{"boot_disk", "machine_type", "network_interface"} { - - s[field].Required = false - s[field].Optional = true - } - - nic := s["network_interface"].Elem.(*resource_compute_instance_from_template_schema.Resource) - nic.Schema["alias_ip_range"].ConfigMode = resource_compute_instance_from_template_schema.SchemaConfigModeAttr - nic.Schema["access_config"].ConfigMode = resource_compute_instance_from_template_schema.SchemaConfigModeAttr - - for _, field := range []string{"attached_disk", "guest_accelerator", "service_account", "scratch_disk"} { - s[field].ConfigMode = resource_compute_instance_from_template_schema.SchemaConfigModeAttr - } - - for _, field := range []string{"disk", "network"} { - delete(s, field) - } - - recurseOnSchema(s, func(field *resource_compute_instance_from_template_schema.Schema) { - - field.Default = nil - - if !field.Required && !(field.Deprecated != "") { - field.Computed = true - } - }) - - s["source_instance_template"] = &resource_compute_instance_from_template_schema.Schema{ - Type: resource_compute_instance_from_template_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name or self link of an instance template to create the instance based on.`, - } - - return s -} - -func recurseOnSchema(s map[string]*resource_compute_instance_from_template_schema.Schema, f func(*resource_compute_instance_from_template_schema.Schema)) { - for _, field := range s { - f(field) - if e := field.Elem; e != nil { - if r, ok := e.(*resource_compute_instance_from_template_schema.Resource); ok { - 
recurseOnSchema(r.Schema, f) - } - } - } -} - -func resourceComputeInstanceFromTemplateCreate(d *resource_compute_instance_from_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - z, err := getZone(d, config) - if err != nil { - return err - } - resource_compute_instance_from_template_log.Printf("[DEBUG] Loading zone: %s", z) - zone, err := config.NewComputeClient(userAgent).Zones.Get(project, z).Do() - if err != nil { - return resource_compute_instance_from_template_fmt.Errorf("Error loading zone '%s': %s", z, err) - } - - instance, err := expandComputeInstance(project, d, config) - if err != nil { - return err - } - - tpl, err := ParseInstanceTemplateFieldValue(d.Get("source_instance_template").(string), d, config) - if err != nil { - return err - } - - it, err := config.NewComputeClient(userAgent).InstanceTemplates.Get(project, tpl.Name).Do() - if err != nil { - return err - } - - instance.Disks, err = adjustInstanceFromTemplateDisks(d, config, it, zone, project) - if err != nil { - return err - } - - if _, hasSchedule := d.GetOk("scheduling"); !hasSchedule { - instance.Scheduling = it.Properties.Scheduling - } - - instance.ForceSendFields = []string{} - for f, s := range computeInstanceFromTemplateSchema() { - - if s.Type == resource_compute_instance_from_template_schema.TypeSet { - continue - } - - if _, exists := d.GetOkExists(f); exists { - - instance.ForceSendFields = append(instance.ForceSendFields, SnakeToPascalCase(f)) - } - } - - resource_compute_instance_from_template_log.Printf("[INFO] Requesting instance creation") - op, err := config.NewComputeClient(userAgent).Instances.Insert(project, zone.Name, instance).SourceInstanceTemplate(tpl.RelativeLink()).Do() - if err != nil { - return resource_compute_instance_from_template_fmt.Errorf("Error 
creating instance: %s", err) - } - - d.SetId(resource_compute_instance_from_template_fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, z, instance.Name)) - - waitErr := computeOperationWaitTime(config, op, project, - "instance to create", userAgent, d.Timeout(resource_compute_instance_from_template_schema.TimeoutCreate)) - if waitErr != nil { - - d.SetId("") - return waitErr - } - - return resourceComputeInstanceRead(d, meta) -} - -func adjustInstanceFromTemplateDisks(d *resource_compute_instance_from_template_schema.ResourceData, config *Config, it *resource_compute_instance_from_template_compute.InstanceTemplate, zone *resource_compute_instance_from_template_compute.Zone, project string) ([]*resource_compute_instance_from_template_compute.AttachedDisk, error) { - disks := []*resource_compute_instance_from_template_compute.AttachedDisk{} - if _, hasBootDisk := d.GetOk("boot_disk"); hasBootDisk { - bootDisk, err := expandBootDisk(d, config, project) - if err != nil { - return nil, err - } - disks = append(disks, bootDisk) - } else { - - for _, disk := range it.Properties.Disks { - if disk.Boot { - if disk.Source != "" { - - disk.Source = resource_compute_instance_from_template_fmt.Sprintf("projects/%s/zones/%s/disks/%s", project, zone.Name, disk.Source) - } - if disk.InitializeParams != nil { - if dt := disk.InitializeParams.DiskType; dt != "" { - - disk.InitializeParams.DiskType = resource_compute_instance_from_template_fmt.Sprintf("zones/%s/diskTypes/%s", zone.Name, dt) - } - } - disks = append(disks, disk) - break - } - } - } - - if _, hasScratchDisk := d.GetOk("scratch_disk"); hasScratchDisk { - scratchDisks, err := expandScratchDisks(d, config, project) - if err != nil { - return nil, err - } - disks = append(disks, scratchDisks...) 
- } else { - - for _, disk := range it.Properties.Disks { - if disk.Type == "SCRATCH" { - if disk.InitializeParams != nil { - if dt := disk.InitializeParams.DiskType; dt != "" { - - disk.InitializeParams.DiskType = resource_compute_instance_from_template_fmt.Sprintf("zones/%s/diskTypes/%s", zone.Name, dt) - } - } - disks = append(disks, disk) - } - } - } - - attachedDisksCount := d.Get("attached_disk.#").(int) - if attachedDisksCount > 0 { - for i := 0; i < attachedDisksCount; i++ { - diskConfig := d.Get(resource_compute_instance_from_template_fmt.Sprintf("attached_disk.%d", i)).(map[string]interface{}) - disk, err := expandAttachedDisk(diskConfig, d, config) - if err != nil { - return nil, err - } - - disks = append(disks, disk) - } - } else { - - for _, disk := range it.Properties.Disks { - if !disk.Boot && disk.Type != "SCRATCH" { - if s := disk.Source; s != "" { - - disk.Source = resource_compute_instance_from_template_fmt.Sprintf("zones/%s/disks/%s", zone.Name, s) - } - if disk.InitializeParams != nil { - if dt := disk.InitializeParams.DiskType; dt != "" { - - disk.InitializeParams.DiskType = resource_compute_instance_from_template_fmt.Sprintf("zones/%s/diskTypes/%s", zone.Name, dt) - } - } - disks = append(disks, disk) - } - } - } - - return disks, nil -} - -func resourceComputeInstanceGroup() *resource_compute_instance_group_schema.Resource { - return &resource_compute_instance_group_schema.Resource{ - Create: resourceComputeInstanceGroupCreate, - Read: resourceComputeInstanceGroupRead, - Update: resourceComputeInstanceGroupUpdate, - Delete: resourceComputeInstanceGroupDelete, - Importer: &resource_compute_instance_group_schema.ResourceImporter{ - State: resourceComputeInstanceGroupImportState, - }, - - Timeouts: &resource_compute_instance_group_schema.ResourceTimeout{ - Create: resource_compute_instance_group_schema.DefaultTimeout(6 * resource_compute_instance_group_time.Minute), - Update: resource_compute_instance_group_schema.DefaultTimeout(6 * 
resource_compute_instance_group_time.Minute), - Delete: resource_compute_instance_group_schema.DefaultTimeout(6 * resource_compute_instance_group_time.Minute), - }, - - SchemaVersion: 2, - MigrateState: resourceComputeInstanceGroupMigrateState, - - Schema: map[string]*resource_compute_instance_group_schema.Schema{ - "name": { - Type: resource_compute_instance_group_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the instance group. Must be 1-63 characters long and comply with RFC1035. Supported characters include lowercase letters, numbers, and hyphens.`, - }, - - "zone": { - Type: resource_compute_instance_group_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The zone that this instance group should be created in.`, - }, - - "description": { - Type: resource_compute_instance_group_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional textual description of the instance group.`, - }, - - "instances": { - Type: resource_compute_instance_group_schema.TypeSet, - Optional: true, - Computed: true, - Elem: &resource_compute_instance_group_schema.Schema{Type: resource_compute_instance_group_schema.TypeString}, - Set: selfLinkRelativePathHash, - Description: `List of instances in the group. They should be given as self_link URLs. 
When adding instances they must all be in the same network and zone as the instance group.`, - }, - - "named_port": { - Type: resource_compute_instance_group_schema.TypeList, - Optional: true, - Description: `The named port configuration.`, - Elem: &resource_compute_instance_group_schema.Resource{ - Schema: map[string]*resource_compute_instance_group_schema.Schema{ - "name": { - Type: resource_compute_instance_group_schema.TypeString, - Required: true, - Description: `The name which the port will be mapped to.`, - }, - - "port": { - Type: resource_compute_instance_group_schema.TypeInt, - Required: true, - Description: `The port number to map the name to.`, - }, - }, - }, - }, - - "network": { - Type: resource_compute_instance_group_schema.TypeString, - Optional: true, - Computed: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - ForceNew: true, - Description: `The URL of the network the instance group is in. If this is different from the network where the instances are in, the creation fails. Defaults to the network where the instances are in (if neither network nor instances is specified, this field will be blank).`, - }, - - "project": { - Type: resource_compute_instance_group_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, - }, - - "self_link": { - Type: resource_compute_instance_group_schema.TypeString, - Computed: true, - Description: `The URI of the created resource.`, - }, - - "size": { - Type: resource_compute_instance_group_schema.TypeInt, - Computed: true, - Description: `The number of instances in the group.`, - }, - }, - UseJSONNumber: true, - } -} - -func getInstanceReferences(instanceUrls []string) (refs []*resource_compute_instance_group_compute.InstanceReference) { - for _, v := range instanceUrls { - refs = append(refs, &resource_compute_instance_group_compute.InstanceReference{ - Instance: v, - }) - } - return refs -} - -func validInstanceURLs(instanceUrls []string) bool { - for _, v := range instanceUrls { - if !resource_compute_instance_group_strings.HasPrefix(v, "https://") { - return false - } - } - return true -} - -func resourceComputeInstanceGroupCreate(d *resource_compute_instance_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone, err := getZone(d, config) - if err != nil { - return err - } - name := d.Get("name").(string) - - instanceGroup := &resource_compute_instance_group_compute.InstanceGroup{ - Name: name, - } - - if v, ok := d.GetOk("description"); ok { - instanceGroup.Description = v.(string) - } - - if v, ok := d.GetOk("named_port"); ok { - instanceGroup.NamedPorts = getNamedPorts(v.([]interface{})) - } - - if v, ok := d.GetOk("network"); ok { - instanceGroup.Network = v.(string) - } - - resource_compute_instance_group_log.Printf("[DEBUG] InstanceGroup insert request: %#v", instanceGroup) - op, err := config.NewComputeClient(userAgent).InstanceGroups.Insert( - project, zone, instanceGroup).Do() - if err != nil { - return resource_compute_instance_group_fmt.Errorf("Error creating 
InstanceGroup: %s", err) - } - - d.SetId(resource_compute_instance_group_fmt.Sprintf("projects/%s/zones/%s/instanceGroups/%s", project, zone, name)) - - err = computeOperationWaitTime(config, op, project, "Creating InstanceGroup", userAgent, d.Timeout(resource_compute_instance_group_schema.TimeoutCreate)) - if err != nil { - d.SetId("") - return err - } - - if v, ok := d.GetOk("instances"); ok { - tmpUrls := convertStringArr(v.(*resource_compute_instance_group_schema.Set).List()) - - var instanceUrls []string - for _, v := range tmpUrls { - if resource_compute_instance_group_strings.HasPrefix(v, "https://") { - instanceUrls = append(instanceUrls, v) - } else { - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v) - if err != nil { - return err - } - instanceUrls = append(instanceUrls, url) - } - } - - addInstanceReq := &resource_compute_instance_group_compute.InstanceGroupsAddInstancesRequest{ - Instances: getInstanceReferences(instanceUrls), - } - - resource_compute_instance_group_log.Printf("[DEBUG] InstanceGroup add instances request: %#v", addInstanceReq) - op, err := config.NewComputeClient(userAgent).InstanceGroups.AddInstances( - project, zone, name, addInstanceReq).Do() - if err != nil { - return resource_compute_instance_group_fmt.Errorf("Error adding instances to InstanceGroup: %s", err) - } - - err = computeOperationWaitTime(config, op, project, "Adding instances to InstanceGroup", userAgent, d.Timeout(resource_compute_instance_group_schema.TimeoutCreate)) - if err != nil { - return err - } - } - - return resourceComputeInstanceGroupRead(d, meta) -} - -func resourceComputeInstanceGroupRead(d *resource_compute_instance_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone, err := getZone(d, config) - if err != nil { - return err - } - name 
:= d.Get("name").(string) - - instanceGroup, err := config.NewComputeClient(userAgent).InstanceGroups.Get( - project, zone, name).Do() - if err != nil { - return handleNotFoundError(err, d, resource_compute_instance_group_fmt.Sprintf("Instance Group %q", name)) - } - - var memberUrls []string - members, err := config.NewComputeClient(userAgent).InstanceGroups.ListInstances( - project, zone, name, &resource_compute_instance_group_compute.InstanceGroupsListInstancesRequest{ - InstanceState: "ALL", - }).Do() - if err != nil { - if gerr, ok := err.(*resource_compute_instance_group_googleapi.Error); ok && gerr.Code == 404 { - - if err := d.Set("instances", nil); err != nil { - return resource_compute_instance_group_fmt.Errorf("Error setting instances: %s", err) - } - } else { - - return resource_compute_instance_group_fmt.Errorf("Error reading InstanceGroup Members: %s", err) - } - } else { - for _, member := range members.Items { - memberUrls = append(memberUrls, member.Instance) - } - resource_compute_instance_group_log.Printf("[DEBUG] InstanceGroup members: %v", memberUrls) - if err := d.Set("instances", memberUrls); err != nil { - return resource_compute_instance_group_fmt.Errorf("Error setting instances: %s", err) - } - } - - if err := d.Set("named_port", flattenNamedPorts(instanceGroup.NamedPorts)); err != nil { - return resource_compute_instance_group_fmt.Errorf("Error setting named_port: %s", err) - } - if err := d.Set("description", instanceGroup.Description); err != nil { - return resource_compute_instance_group_fmt.Errorf("Error setting description: %s", err) - } - - if err := d.Set("network", instanceGroup.Network); err != nil { - return resource_compute_instance_group_fmt.Errorf("Error setting network: %s", err) - } - if err := d.Set("size", instanceGroup.Size); err != nil { - return resource_compute_instance_group_fmt.Errorf("Error setting size: %s", err) - } - if err := d.Set("project", project); err != nil { - return 
resource_compute_instance_group_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("zone", zone); err != nil { - return resource_compute_instance_group_fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("self_link", instanceGroup.SelfLink); err != nil { - return resource_compute_instance_group_fmt.Errorf("Error setting self_link: %s", err) - } - - return nil -} - -func resourceComputeInstanceGroupUpdate(d *resource_compute_instance_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone, err := getZone(d, config) - if err != nil { - return err - } - name := d.Get("name").(string) - - d.Partial(true) - - if d.HasChange("instances") { - - from_, to_ := d.GetChange("instances") - - from := convertStringArr(from_.(*resource_compute_instance_group_schema.Set).List()) - to := convertStringArr(to_.(*resource_compute_instance_group_schema.Set).List()) - - if !validInstanceURLs(from) { - return resource_compute_instance_group_fmt.Errorf("Error invalid instance URLs: %v", from) - } - if !validInstanceURLs(to) { - return resource_compute_instance_group_fmt.Errorf("Error invalid instance URLs: %v", to) - } - - add, remove := calcAddRemove(from, to) - - if len(remove) > 0 { - removeReq := &resource_compute_instance_group_compute.InstanceGroupsRemoveInstancesRequest{ - Instances: getInstanceReferences(remove), - } - - resource_compute_instance_group_log.Printf("[DEBUG] InstanceGroup remove instances request: %#v", removeReq) - removeOp, err := config.NewComputeClient(userAgent).InstanceGroups.RemoveInstances( - project, zone, name, removeReq).Do() - if err != nil { - if gerr, ok := err.(*resource_compute_instance_group_googleapi.Error); ok && gerr.Code == 404 { - resource_compute_instance_group_log.Printf("[WARN] Instances already removed from 
InstanceGroup: %s", remove) - } else { - return resource_compute_instance_group_fmt.Errorf("Error removing instances from InstanceGroup: %s", err) - } - } else { - - err = computeOperationWaitTime(config, removeOp, project, "Updating InstanceGroup", userAgent, d.Timeout(resource_compute_instance_group_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - } - - if len(add) > 0 { - - addReq := &resource_compute_instance_group_compute.InstanceGroupsAddInstancesRequest{ - Instances: getInstanceReferences(add), - } - - resource_compute_instance_group_log.Printf("[DEBUG] InstanceGroup adding instances request: %#v", addReq) - addOp, err := config.NewComputeClient(userAgent).InstanceGroups.AddInstances( - project, zone, name, addReq).Do() - if err != nil { - return resource_compute_instance_group_fmt.Errorf("Error adding instances from InstanceGroup: %s", err) - } - - err = computeOperationWaitTime(config, addOp, project, "Updating InstanceGroup", userAgent, d.Timeout(resource_compute_instance_group_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - } - - if d.HasChange("named_port") { - namedPorts := getNamedPorts(d.Get("named_port").([]interface{})) - - namedPortsReq := &resource_compute_instance_group_compute.InstanceGroupsSetNamedPortsRequest{ - NamedPorts: namedPorts, - } - - resource_compute_instance_group_log.Printf("[DEBUG] InstanceGroup updating named ports request: %#v", namedPortsReq) - op, err := config.NewComputeClient(userAgent).InstanceGroups.SetNamedPorts( - project, zone, name, namedPortsReq).Do() - if err != nil { - return resource_compute_instance_group_fmt.Errorf("Error updating named ports for InstanceGroup: %s", err) - } - - err = computeOperationWaitTime(config, op, project, "Updating InstanceGroup", userAgent, d.Timeout(resource_compute_instance_group_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeInstanceGroupRead(d, meta) -} - -func 
resourceComputeInstanceGroupDelete(d *resource_compute_instance_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone, err := getZone(d, config) - if err != nil { - return err - } - name := d.Get("name").(string) - op, err := config.NewComputeClient(userAgent).InstanceGroups.Delete(project, zone, name).Do() - if err != nil { - return resource_compute_instance_group_fmt.Errorf("Error deleting InstanceGroup: %s", err) - } - - err = computeOperationWaitTime(config, op, project, "Deleting InstanceGroup", userAgent, d.Timeout(resource_compute_instance_group_schema.TimeoutDelete)) - if err != nil { - return err - } - - d.SetId("") - return nil -} - -func resourceComputeInstanceGroupImportState(d *resource_compute_instance_group_schema.ResourceData, meta interface{}) ([]*resource_compute_instance_group_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroups/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{name}}") - if err != nil { - return nil, err - } - d.SetId(id) - - return []*resource_compute_instance_group_schema.ResourceData{d}, nil -} - -func resourceComputeInstanceGroupManager() *resource_compute_instance_group_manager_schema.Resource { - return &resource_compute_instance_group_manager_schema.Resource{ - Create: resourceComputeInstanceGroupManagerCreate, - Read: resourceComputeInstanceGroupManagerRead, - Update: resourceComputeInstanceGroupManagerUpdate, - Delete: resourceComputeInstanceGroupManagerDelete, - Importer: &resource_compute_instance_group_manager_schema.ResourceImporter{ - State: 
resourceInstanceGroupManagerStateImporter, - }, - Timeouts: &resource_compute_instance_group_manager_schema.ResourceTimeout{ - Create: resource_compute_instance_group_manager_schema.DefaultTimeout(15 * resource_compute_instance_group_manager_time.Minute), - Update: resource_compute_instance_group_manager_schema.DefaultTimeout(15 * resource_compute_instance_group_manager_time.Minute), - Delete: resource_compute_instance_group_manager_schema.DefaultTimeout(15 * resource_compute_instance_group_manager_time.Minute), - }, - - Schema: map[string]*resource_compute_instance_group_manager_schema.Schema{ - "base_instance_name": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The base instance name to use for instances in this group. The value must be a valid RFC1035 name. Supported characters are lowercase letters, numbers, and hyphens (-). Instances are named by appending a hyphen and a random four-character string to the base instance name.`, - }, - - "version": { - Type: resource_compute_instance_group_manager_schema.TypeList, - Required: true, - Description: `Application versions managed by this instance group. 
Each version deals with a specific instance template, allowing canary release scenarios.`, - Elem: &resource_compute_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_instance_group_manager_schema.Schema{ - "name": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Optional: true, - Description: `Version name.`, - }, - - "instance_template": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The full URL to an instance template from which all new instances of this version will be created.`, - }, - - "target_size": { - Type: resource_compute_instance_group_manager_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `The number of instances calculated as a fixed number or a percentage depending on the settings.`, - Elem: &resource_compute_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_instance_group_manager_schema.Schema{ - "fixed": { - Type: resource_compute_instance_group_manager_schema.TypeInt, - Optional: true, - Description: `The number of instances which are managed for this version. Conflicts with percent.`, - }, - - "percent": { - Type: resource_compute_instance_group_manager_schema.TypeInt, - Optional: true, - ValidateFunc: resource_compute_instance_group_manager_validation.IntBetween(0, 100), - Description: `The number of instances (calculated as percentage) which are managed for this version. Conflicts with fixed. Note that when using percent, rounding will be in favor of explicitly set target_size values; a managed instance group with 2 instances and 2 versions, one of which has a target_size.percent of 60 will create 2 instances of that version.`, - }, - }, - }, - }, - }, - }, - }, - - "name": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the instance group manager. 
Must be 1-63 characters long and comply with RFC1035. Supported characters include lowercase letters, numbers, and hyphens.`, - }, - - "zone": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The zone that instances in this group should be created in.`, - }, - - "description": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Optional: true, - Description: `An optional textual description of the instance group manager.`, - }, - - "fingerprint": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Computed: true, - Description: `The fingerprint of the instance group manager.`, - }, - - "instance_group": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Computed: true, - Description: `The full URL of the instance group created by the manager.`, - }, - - "named_port": { - Type: resource_compute_instance_group_manager_schema.TypeSet, - Optional: true, - Description: `The named port configuration.`, - Elem: &resource_compute_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_instance_group_manager_schema.Schema{ - "name": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Required: true, - Description: `The name of the port.`, - }, - - "port": { - Type: resource_compute_instance_group_manager_schema.TypeInt, - Required: true, - Description: `The port number.`, - }, - }, - }, - }, - - "project": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, - }, - - "self_link": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Computed: true, - Description: `The URL of the created resource.`, - }, - - "target_pools": { - Type: resource_compute_instance_group_manager_schema.TypeSet, - Optional: true, - Elem: &resource_compute_instance_group_manager_schema.Schema{ - Type: resource_compute_instance_group_manager_schema.TypeString, - }, - Set: selfLinkRelativePathHash, - Description: `The full URL of all target pools to which new instances in the group are added. Updating the target pools attribute does not affect existing instances.`, - }, - - "target_size": { - Type: resource_compute_instance_group_manager_schema.TypeInt, - Computed: true, - Optional: true, - Description: `The target number of running instances for this managed instance group. This value should always be explicitly set unless this resource is attached to an autoscaler, in which case it should never be set. Defaults to 0.`, - }, - - "auto_healing_policies": { - Type: resource_compute_instance_group_manager_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `The autohealing policies for this managed instance group. 
You can specify only one value.`, - Elem: &resource_compute_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_instance_group_manager_schema.Schema{ - "health_check": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The health check resource that signals autohealing.`, - }, - - "initial_delay_sec": { - Type: resource_compute_instance_group_manager_schema.TypeInt, - Required: true, - ValidateFunc: resource_compute_instance_group_manager_validation.IntBetween(0, 3600), - Description: `The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. Between 0 and 3600.`, - }, - }, - }, - }, - - "update_policy": { - Computed: true, - Type: resource_compute_instance_group_manager_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `The update policy for this managed instance group.`, - Elem: &resource_compute_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_instance_group_manager_schema.Schema{ - "minimal_action": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Required: true, - ValidateFunc: resource_compute_instance_group_manager_validation.StringInSlice([]string{"RESTART", "REPLACE"}, false), - Description: `Minimal action to be taken on an instance. You can specify either RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a RESTART, the Updater will attempt to perform that action only. 
However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action.`, - }, - - "type": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Required: true, - ValidateFunc: resource_compute_instance_group_manager_validation.StringInSlice([]string{"OPPORTUNISTIC", "PROACTIVE"}, false), - Description: `The type of update process. You can specify either PROACTIVE so that the instance group manager proactively executes actions in order to bring instances to their target versions or OPPORTUNISTIC so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls).`, - }, - - "max_surge_fixed": { - Type: resource_compute_instance_group_manager_schema.TypeInt, - Optional: true, - Computed: true, - ConflictsWith: []string{"update_policy.0.max_surge_percent"}, - Description: `The maximum number of instances that can be created above the specified targetSize during the update process. Conflicts with max_surge_percent. If neither is set, defaults to 1`, - }, - - "max_surge_percent": { - Type: resource_compute_instance_group_manager_schema.TypeInt, - Optional: true, - ConflictsWith: []string{"update_policy.0.max_surge_fixed"}, - ValidateFunc: resource_compute_instance_group_manager_validation.IntBetween(0, 100), - Description: `The maximum number of instances(calculated as percentage) that can be created above the specified targetSize during the update process. Conflicts with max_surge_fixed.`, - }, - - "max_unavailable_fixed": { - Type: resource_compute_instance_group_manager_schema.TypeInt, - Optional: true, - Computed: true, - ConflictsWith: []string{"update_policy.0.max_unavailable_percent"}, - Description: `The maximum number of instances that can be unavailable during the update process. Conflicts with max_unavailable_percent. 
If neither is set, defaults to 1.`, - }, - - "max_unavailable_percent": { - Type: resource_compute_instance_group_manager_schema.TypeInt, - Optional: true, - ConflictsWith: []string{"update_policy.0.max_unavailable_fixed"}, - ValidateFunc: resource_compute_instance_group_manager_validation.IntBetween(0, 100), - Description: `The maximum number of instances(calculated as percentage) that can be unavailable during the update process. Conflicts with max_unavailable_fixed.`, - }, - - "replacement_method": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_instance_group_manager_validation.StringInSlice([]string{"RECREATE", "SUBSTITUTE", ""}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("SUBSTITUTE"), - Description: `The instance replacement method for managed instance groups. Valid values are: "RECREATE", "SUBSTITUTE". If SUBSTITUTE (default), the group replaces VM instances with new instances that have randomly generated names. If RECREATE, instance names are preserved. You must also set max_unavailable_fixed or max_unavailable_percent to be greater than 0.`, - }, - }, - }, - }, - - "wait_for_instances": { - Type: resource_compute_instance_group_manager_schema.TypeBool, - Optional: true, - Default: false, - Description: `Whether to wait for all instances to be created/updated before returning. Note that if this is set to true and the operation does not succeed, Terraform will continue trying until it times out.`, - }, - "wait_for_instances_status": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Optional: true, - Default: "STABLE", - ValidateFunc: resource_compute_instance_group_manager_validation.StringInSlice([]string{"STABLE", "UPDATED"}, false), - Description: `When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. 
When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective as well as all instances to be stable before returning.`, - }, - "stateful_disk": { - Type: resource_compute_instance_group_manager_schema.TypeSet, - Optional: true, - Description: `Disks created on the instances that will be preserved on instance delete, update, etc.`, - Elem: &resource_compute_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_instance_group_manager_schema.Schema{ - "device_name": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Required: true, - Description: `The device name of the disk to be attached.`, - }, - - "delete_rule": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Default: "NEVER", - Optional: true, - ValidateFunc: resource_compute_instance_group_manager_validation.StringInSlice([]string{"NEVER", "ON_PERMANENT_INSTANCE_DELETION"}, true), - Description: `A value that prescribes what should happen to the stateful disk when the VM instance is deleted. The available options are NEVER and ON_PERMANENT_INSTANCE_DELETION. NEVER - detach the disk when the VM is deleted, but do not delete the disk. ON_PERMANENT_INSTANCE_DELETION will delete the stateful disk when the VM is permanently deleted from the instance group. The default is NEVER.`, - }, - }, - }, - }, - "operation": { - Type: resource_compute_instance_group_manager_schema.TypeString, - Computed: true, - }, - "status": { - Type: resource_compute_instance_group_manager_schema.TypeList, - Computed: true, - Description: `The status of this managed instance group.`, - Elem: &resource_compute_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_instance_group_manager_schema.Schema{ - "is_stable": { - Type: resource_compute_instance_group_manager_schema.TypeBool, - Computed: true, - Description: `A bit indicating whether the managed instance group is in a stable state. 
A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified.`, - }, - - "version_target": { - Type: resource_compute_instance_group_manager_schema.TypeList, - Computed: true, - Description: `A status of consistency of Instances' versions with their target version specified by version field on Instance Group Manager.`, - Elem: &resource_compute_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_instance_group_manager_schema.Schema{ - "is_reached": { - Type: resource_compute_instance_group_manager_schema.TypeBool, - Computed: true, - Description: `A bit indicating whether version target has been reached in this managed instance group, i.e. all instances are in their target version. Instances' target version are specified by version field on Instance Group Manager.`, - }, - }, - }, - }, - "stateful": { - Type: resource_compute_instance_group_manager_schema.TypeList, - Computed: true, - Description: `Stateful status of the given Instance Group Manager.`, - Elem: &resource_compute_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_instance_group_manager_schema.Schema{ - "has_stateful_config": { - Type: resource_compute_instance_group_manager_schema.TypeBool, - Computed: true, - Description: `A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. 
The group might report that it has no stateful config even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions.`, - }, - "per_instance_configs": { - Type: resource_compute_instance_group_manager_schema.TypeList, - Computed: true, - Description: `Status of per-instance configs on the instance.`, - Elem: &resource_compute_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_instance_group_manager_schema.Schema{ - "all_effective": { - Type: resource_compute_instance_group_manager_schema.TypeBool, - Computed: true, - Description: `A bit indicating if all of the group's per-instance configs (listed in the output of a listPerInstanceConfigs API call) have status EFFECTIVE or there are no per-instance-configs.`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - UseJSONNumber: true, - } -} - -func getNamedPorts(nps []interface{}) []*resource_compute_instance_group_manager_compute.NamedPort { - namedPorts := make([]*resource_compute_instance_group_manager_compute.NamedPort, 0, len(nps)) - for _, v := range nps { - np := v.(map[string]interface{}) - namedPorts = append(namedPorts, &resource_compute_instance_group_manager_compute.NamedPort{ - Name: np["name"].(string), - Port: int64(np["port"].(int)), - }) - } - - return namedPorts -} - -func getNamedPortsBeta(nps []interface{}) []*resource_compute_instance_group_manager_compute.NamedPort { - namedPorts := make([]*resource_compute_instance_group_manager_compute.NamedPort, 0, len(nps)) - for _, v := range nps { - np := v.(map[string]interface{}) - namedPorts = append(namedPorts, &resource_compute_instance_group_manager_compute.NamedPort{ - Name: np["name"].(string), - Port: int64(np["port"].(int)), - }) - } - - return namedPorts -} - -func resourceComputeInstanceGroupManagerCreate(d *resource_compute_instance_group_manager_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - 
userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone, err := getZone(d, config) - if err != nil { - return err - } - - manager := &resource_compute_instance_group_manager_compute.InstanceGroupManager{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - BaseInstanceName: d.Get("base_instance_name").(string), - TargetSize: int64(d.Get("target_size").(int)), - NamedPorts: getNamedPortsBeta(d.Get("named_port").(*resource_compute_instance_group_manager_schema.Set).List()), - TargetPools: convertStringSet(d.Get("target_pools").(*resource_compute_instance_group_manager_schema.Set)), - AutoHealingPolicies: expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})), - Versions: expandVersions(d.Get("version").([]interface{})), - UpdatePolicy: expandUpdatePolicy(d.Get("update_policy").([]interface{})), - StatefulPolicy: expandStatefulPolicy(d.Get("stateful_disk").(*resource_compute_instance_group_manager_schema.Set).List()), - - ForceSendFields: []string{"TargetSize"}, - } - - resource_compute_instance_group_manager_log.Printf("[DEBUG] InstanceGroupManager insert request: %#v", manager) - op, err := config.NewComputeClient(userAgent).InstanceGroupManagers.Insert( - project, zone, manager).Do() - - if err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error creating InstanceGroupManager: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{name}}") - if err != nil { - return err - } - d.SetId(id) - - err = computeOperationWaitTime(config, op, project, "Creating InstanceGroupManager", userAgent, d.Timeout(resource_compute_instance_group_manager_schema.TimeoutCreate)) - if err != nil { - - select { - case <-config.context.Done(): - resource_compute_instance_group_manager_log.Printf("[DEBUG] Persisting %s so this operation can 
be resumed \n", op.Name) - if err := d.Set("operation", op.Name); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting operation: %s", err) - } - return nil - default: - - } - return err - } - - if d.Get("wait_for_instances").(bool) { - err := computeIGMWaitForInstanceStatus(d, meta) - if err != nil { - return err - } - } - - return resourceComputeInstanceGroupManagerRead(d, meta) -} - -func flattenNamedPortsBeta(namedPorts []*resource_compute_instance_group_manager_compute.NamedPort) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(namedPorts)) - for _, namedPort := range namedPorts { - namedPortMap := make(map[string]interface{}) - namedPortMap["name"] = namedPort.Name - namedPortMap["port"] = namedPort.Port - result = append(result, namedPortMap) - } - return result - -} - -func flattenVersions(versions []*resource_compute_instance_group_manager_compute.InstanceGroupManagerVersion) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(versions)) - for _, version := range versions { - versionMap := make(map[string]interface{}) - versionMap["name"] = version.Name - versionMap["instance_template"] = ConvertSelfLinkToV1(version.InstanceTemplate) - versionMap["target_size"] = flattenFixedOrPercent(version.TargetSize) - result = append(result, versionMap) - } - - return result -} - -func flattenFixedOrPercent(fixedOrPercent *resource_compute_instance_group_manager_compute.FixedOrPercent) []map[string]interface{} { - result := make(map[string]interface{}) - if value := fixedOrPercent.Percent; value > 0 { - result["percent"] = value - } else if value := fixedOrPercent.Fixed; value > 0 { - result["fixed"] = fixedOrPercent.Fixed - } else { - return []map[string]interface{}{} - } - return []map[string]interface{}{result} -} - -func getManager(d *resource_compute_instance_group_manager_schema.ResourceData, meta interface{}) 
(*resource_compute_instance_group_manager_compute.InstanceGroupManager, error) { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - zone, _ := getZone(d, config) - name := d.Get("name").(string) - - manager, err := config.NewComputeClient(userAgent).InstanceGroupManagers.Get(project, zone, name).Do() - if err != nil { - return nil, handleNotFoundError(err, d, resource_compute_instance_group_manager_fmt.Sprintf("Instance Group Manager %q", name)) - } - - if manager == nil { - resource_compute_instance_group_manager_log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string)) - - d.SetId("") - return nil, nil - } - - return manager, nil -} - -func resourceComputeInstanceGroupManagerRead(d *resource_compute_instance_group_manager_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - operation := d.Get("operation").(string) - if operation != "" { - resource_compute_instance_group_manager_log.Printf("[DEBUG] in progress operation detected at %v, attempting to resume", operation) - zone, _ := getZone(d, config) - op := &resource_compute_instance_group_manager_compute.Operation{ - Name: operation, - Zone: zone, - } - if err := d.Set("operation", op.Name); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting operation: %s", err) - } - err = computeOperationWaitTime(config, op, project, "Creating InstanceGroupManager", userAgent, d.Timeout(resource_compute_instance_group_manager_schema.TimeoutCreate)) - if err != nil { - - resource_compute_instance_group_manager_log.Printf("[DEBUG] Resumed operation returned an error, removing from state: 
%s", err) - d.SetId("") - return nil - } - } - - manager, err := getManager(d, meta) - if err != nil { - return err - } - if manager == nil { - resource_compute_instance_group_manager_log.Printf("[WARN] Instance Group Manager %q not found, removing from state.", d.Id()) - d.SetId("") - return nil - } - - if err := d.Set("base_instance_name", manager.BaseInstanceName); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting base_instance_name: %s", err) - } - if err := d.Set("name", manager.Name); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("zone", GetResourceNameFromSelfLink(manager.Zone)); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("description", manager.Description); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("project", project); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("target_size", manager.TargetSize); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting target_size: %s", err) - } - if err = d.Set("target_pools", mapStringArr(manager.TargetPools, ConvertSelfLinkToV1)); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting target_pools in state: %s", err.Error()) - } - if err = d.Set("named_port", flattenNamedPortsBeta(manager.NamedPorts)); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting named_port in state: %s", err.Error()) - } - if err = d.Set("stateful_disk", flattenStatefulPolicy(manager.StatefulPolicy)); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting stateful_disk in state: %s", err.Error()) - } - if err := d.Set("fingerprint", 
manager.Fingerprint); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting fingerprint: %s", err) - } - if err := d.Set("instance_group", ConvertSelfLinkToV1(manager.InstanceGroup)); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting instance_group: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(manager.SelfLink)); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting self_link: %s", err) - } - - if err = d.Set("auto_healing_policies", flattenAutoHealingPolicies(manager.AutoHealingPolicies)); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting auto_healing_policies in state: %s", err.Error()) - } - if err := d.Set("version", flattenVersions(manager.Versions)); err != nil { - return err - } - if err = d.Set("update_policy", flattenUpdatePolicy(manager.UpdatePolicy)); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting update_policy in state: %s", err.Error()) - } - if err = d.Set("status", flattenStatus(manager.Status)); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting status in state: %s", err.Error()) - } - - if d.Get("wait_for_instances_status").(string) == "" { - if err := d.Set("wait_for_instances_status", "STABLE"); err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error setting wait_for_instances_status in state: %s", err.Error()) - } - } - - return nil -} - -func resourceComputeInstanceGroupManagerUpdate(d *resource_compute_instance_group_manager_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - if d.Get("wait_for_instances").(bool) { - err := computeIGMWaitForInstanceStatus(d, meta) - if err != nil { - return err - } - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != 
nil { - return err - } - - zone, err := getZone(d, config) - if err != nil { - return err - } - - updatedManager := &resource_compute_instance_group_manager_compute.InstanceGroupManager{ - Fingerprint: d.Get("fingerprint").(string), - } - var change bool - - if d.HasChange("description") { - updatedManager.Description = d.Get("description").(string) - updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "Description") - change = true - } - - if d.HasChange("target_pools") { - updatedManager.TargetPools = convertStringSet(d.Get("target_pools").(*resource_compute_instance_group_manager_schema.Set)) - updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "TargetPools") - change = true - } - - if d.HasChange("auto_healing_policies") { - updatedManager.AutoHealingPolicies = expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})) - updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "AutoHealingPolicies") - change = true - } - - if d.HasChange("version") { - updatedManager.Versions = expandVersions(d.Get("version").([]interface{})) - change = true - } - - if d.HasChange("update_policy") { - updatedManager.UpdatePolicy = expandUpdatePolicy(d.Get("update_policy").([]interface{})) - change = true - } - - if d.HasChange("stateful_disk") { - updatedManager.StatefulPolicy = expandStatefulPolicy(d.Get("stateful_disk").(*resource_compute_instance_group_manager_schema.Set).List()) - change = true - } - - if change { - op, err := config.NewComputeClient(userAgent).InstanceGroupManagers.Patch(project, zone, d.Get("name").(string), updatedManager).Do() - if err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error updating managed group instances: %s", err) - } - - err = computeOperationWaitTime(config, op, project, "Updating managed group instances", userAgent, d.Timeout(resource_compute_instance_group_manager_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - if 
d.HasChange("named_port") { - d.Partial(true) - - namedPorts := getNamedPortsBeta(d.Get("named_port").(*resource_compute_instance_group_manager_schema.Set).List()) - setNamedPorts := &resource_compute_instance_group_manager_compute.InstanceGroupsSetNamedPortsRequest{ - NamedPorts: namedPorts, - } - - op, err := config.NewComputeClient(userAgent).InstanceGroups.SetNamedPorts( - project, zone, d.Get("name").(string), setNamedPorts).Do() - - if err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error updating InstanceGroupManager: %s", err) - } - - err = computeOperationWaitTime(config, op, project, "Updating InstanceGroupManager", userAgent, d.Timeout(resource_compute_instance_group_manager_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - if d.HasChange("target_size") { - d.Partial(true) - - targetSize := int64(d.Get("target_size").(int)) - op, err := config.NewComputeClient(userAgent).InstanceGroupManagers.Resize( - project, zone, d.Get("name").(string), targetSize).Do() - - if err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error updating InstanceGroupManager: %s", err) - } - - err = computeOperationWaitTime(config, op, project, "Updating InstanceGroupManager", userAgent, d.Timeout(resource_compute_instance_group_manager_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - if d.Get("wait_for_instances").(bool) { - err := computeIGMWaitForInstanceStatus(d, meta) - if err != nil { - return err - } - } - - return resourceComputeInstanceGroupManagerRead(d, meta) -} - -func resourceComputeInstanceGroupManagerDelete(d *resource_compute_instance_group_manager_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - if d.Get("wait_for_instances").(bool) { - err := computeIGMWaitForInstanceStatus(d, meta) - if err != nil { - return err - } - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - 
project, err := getProject(d, config) - if err != nil { - return err - } - - zone, _ := getZone(d, config) - name := d.Get("name").(string) - - op, err := config.NewComputeClient(userAgent).InstanceGroupManagers.Delete(project, zone, name).Do() - attempt := 0 - for err != nil && attempt < 20 { - attempt++ - resource_compute_instance_group_manager_time.Sleep(2000 * resource_compute_instance_group_manager_time.Millisecond) - op, err = config.NewComputeClient(userAgent).InstanceGroupManagers.Delete(project, zone, name).Do() - } - - if err != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error deleting instance group manager: %s", err) - } - - currentSize := int64(d.Get("target_size").(int)) - - err = computeOperationWaitTime(config, op, project, "Deleting InstanceGroupManager", userAgent, d.Timeout(resource_compute_instance_group_manager_schema.TimeoutDelete)) - - for err != nil && currentSize > 0 { - if !resource_compute_instance_group_manager_strings.Contains(err.Error(), "timeout") { - return err - } - - instanceGroup, igErr := config.NewComputeClient(userAgent).InstanceGroups.Get( - project, zone, name).Do() - if igErr != nil { - return resource_compute_instance_group_manager_fmt.Errorf("Error getting instance group size: %s", err) - } - - instanceGroupSize := instanceGroup.Size - - if instanceGroupSize >= currentSize { - return resource_compute_instance_group_manager_fmt.Errorf("Error, instance group isn't shrinking during delete") - } - - resource_compute_instance_group_manager_log.Printf("[INFO] timeout occurred, but instance group is shrinking (%d < %d)", instanceGroupSize, currentSize) - currentSize = instanceGroupSize - err = computeOperationWaitTime(config, op, project, "Deleting InstanceGroupManager", userAgent, d.Timeout(resource_compute_instance_group_manager_schema.TimeoutDelete)) - } - - d.SetId("") - return nil -} - -func computeIGMWaitForInstanceStatus(d *resource_compute_instance_group_manager_schema.ResourceData, meta 
interface{}) error { - waitForUpdates := d.Get("wait_for_instances_status").(string) == "UPDATED" - conf := resource_compute_instance_group_manager_resource.StateChangeConf{ - Pending: []string{"creating", "error", "updating per instance configs", "reaching version target"}, - Target: []string{"created"}, - Refresh: waitForInstancesRefreshFunc(getManager, waitForUpdates, d, meta), - Timeout: d.Timeout(resource_compute_instance_group_manager_schema.TimeoutCreate), - } - _, err := conf.WaitForState() - if err != nil { - return err - } - return nil -} - -func expandAutoHealingPolicies(configured []interface{}) []*resource_compute_instance_group_manager_compute.InstanceGroupManagerAutoHealingPolicy { - autoHealingPolicies := make([]*resource_compute_instance_group_manager_compute.InstanceGroupManagerAutoHealingPolicy, 0, len(configured)) - for _, raw := range configured { - data := raw.(map[string]interface{}) - autoHealingPolicy := resource_compute_instance_group_manager_compute.InstanceGroupManagerAutoHealingPolicy{ - HealthCheck: data["health_check"].(string), - InitialDelaySec: int64(data["initial_delay_sec"].(int)), - } - - autoHealingPolicies = append(autoHealingPolicies, &autoHealingPolicy) - } - return autoHealingPolicies -} - -func expandStatefulPolicy(configured []interface{}) *resource_compute_instance_group_manager_compute.StatefulPolicy { - disks := make(map[string]resource_compute_instance_group_manager_compute.StatefulPolicyPreservedStateDiskDevice) - for _, raw := range configured { - data := raw.(map[string]interface{}) - disk := resource_compute_instance_group_manager_compute.StatefulPolicyPreservedStateDiskDevice{ - AutoDelete: data["delete_rule"].(string), - } - disks[data["device_name"].(string)] = disk - } - if len(disks) > 0 { - return &resource_compute_instance_group_manager_compute.StatefulPolicy{PreservedState: &resource_compute_instance_group_manager_compute.StatefulPolicyPreservedState{Disks: disks}} - } - return nil -} - -func 
expandVersions(configured []interface{}) []*resource_compute_instance_group_manager_compute.InstanceGroupManagerVersion { - versions := make([]*resource_compute_instance_group_manager_compute.InstanceGroupManagerVersion, 0, len(configured)) - for _, raw := range configured { - data := raw.(map[string]interface{}) - - version := resource_compute_instance_group_manager_compute.InstanceGroupManagerVersion{ - Name: data["name"].(string), - InstanceTemplate: data["instance_template"].(string), - TargetSize: expandFixedOrPercent(data["target_size"].([]interface{})), - } - - versions = append(versions, &version) - } - return versions -} - -func expandFixedOrPercent(configured []interface{}) *resource_compute_instance_group_manager_compute.FixedOrPercent { - fixedOrPercent := &resource_compute_instance_group_manager_compute.FixedOrPercent{} - - for _, raw := range configured { - if raw != nil { - data := raw.(map[string]interface{}) - if percent := data["percent"]; percent.(int) > 0 { - fixedOrPercent.Percent = int64(percent.(int)) - } else { - fixedOrPercent.Fixed = int64(data["fixed"].(int)) - fixedOrPercent.ForceSendFields = []string{"Fixed"} - } - } - } - return fixedOrPercent -} - -func expandUpdatePolicy(configured []interface{}) *resource_compute_instance_group_manager_compute.InstanceGroupManagerUpdatePolicy { - updatePolicy := &resource_compute_instance_group_manager_compute.InstanceGroupManagerUpdatePolicy{} - - for _, raw := range configured { - data := raw.(map[string]interface{}) - - updatePolicy.MinimalAction = data["minimal_action"].(string) - updatePolicy.Type = data["type"].(string) - updatePolicy.ReplacementMethod = data["replacement_method"].(string) - - if v := data["max_surge_percent"]; v.(int) > 0 { - updatePolicy.MaxSurge = &resource_compute_instance_group_manager_compute.FixedOrPercent{ - Percent: int64(v.(int)), - NullFields: []string{"Fixed"}, - } - } else { - updatePolicy.MaxSurge = &resource_compute_instance_group_manager_compute.FixedOrPercent{ 
- Fixed: int64(data["max_surge_fixed"].(int)), - - ForceSendFields: []string{"Fixed"}, - NullFields: []string{"Percent"}, - } - } - - if v := data["max_unavailable_percent"]; v.(int) > 0 { - updatePolicy.MaxUnavailable = &resource_compute_instance_group_manager_compute.FixedOrPercent{ - Percent: int64(v.(int)), - NullFields: []string{"Fixed"}, - } - } else { - updatePolicy.MaxUnavailable = &resource_compute_instance_group_manager_compute.FixedOrPercent{ - Fixed: int64(data["max_unavailable_fixed"].(int)), - - ForceSendFields: []string{"Fixed"}, - NullFields: []string{"Percent"}, - } - } - } - return updatePolicy -} - -func flattenAutoHealingPolicies(autoHealingPolicies []*resource_compute_instance_group_manager_compute.InstanceGroupManagerAutoHealingPolicy) []map[string]interface{} { - autoHealingPoliciesSchema := make([]map[string]interface{}, 0, len(autoHealingPolicies)) - for _, autoHealingPolicy := range autoHealingPolicies { - data := map[string]interface{}{ - "health_check": autoHealingPolicy.HealthCheck, - "initial_delay_sec": autoHealingPolicy.InitialDelaySec, - } - - autoHealingPoliciesSchema = append(autoHealingPoliciesSchema, data) - } - return autoHealingPoliciesSchema -} - -func flattenStatefulPolicy(statefulPolicy *resource_compute_instance_group_manager_compute.StatefulPolicy) []map[string]interface{} { - if statefulPolicy == nil || statefulPolicy.PreservedState == nil || statefulPolicy.PreservedState.Disks == nil { - return make([]map[string]interface{}, 0, 0) - } - result := make([]map[string]interface{}, 0, len(statefulPolicy.PreservedState.Disks)) - for deviceName, disk := range statefulPolicy.PreservedState.Disks { - data := map[string]interface{}{ - "device_name": deviceName, - "delete_rule": disk.AutoDelete, - } - - result = append(result, data) - } - return result -} - -func flattenUpdatePolicy(updatePolicy *resource_compute_instance_group_manager_compute.InstanceGroupManagerUpdatePolicy) []map[string]interface{} { - results := 
[]map[string]interface{}{} - if updatePolicy != nil { - up := map[string]interface{}{} - if updatePolicy.MaxSurge != nil { - up["max_surge_fixed"] = updatePolicy.MaxSurge.Fixed - up["max_surge_percent"] = updatePolicy.MaxSurge.Percent - } else { - up["max_surge_fixed"] = 0 - up["max_surge_percent"] = 0 - } - if updatePolicy.MaxUnavailable != nil { - up["max_unavailable_fixed"] = updatePolicy.MaxUnavailable.Fixed - up["max_unavailable_percent"] = updatePolicy.MaxUnavailable.Percent - } else { - up["max_unavailable_fixed"] = 0 - up["max_unavailable_percent"] = 0 - } - up["minimal_action"] = updatePolicy.MinimalAction - up["type"] = updatePolicy.Type - up["replacement_method"] = updatePolicy.ReplacementMethod - results = append(results, up) - } - return results -} - -func flattenStatus(status *resource_compute_instance_group_manager_compute.InstanceGroupManagerStatus) []map[string]interface{} { - results := []map[string]interface{}{} - data := map[string]interface{}{ - "is_stable": status.IsStable, - "stateful": flattenStatusStateful(status.Stateful), - "version_target": flattenStatusVersionTarget(status.VersionTarget), - } - results = append(results, data) - return results -} - -func flattenStatusStateful(stateful *resource_compute_instance_group_manager_compute.InstanceGroupManagerStatusStateful) []map[string]interface{} { - results := []map[string]interface{}{} - data := map[string]interface{}{ - "has_stateful_config": stateful.HasStatefulConfig, - "per_instance_configs": flattenStatusStatefulConfigs(stateful.PerInstanceConfigs), - } - results = append(results, data) - return results -} - -func flattenStatusStatefulConfigs(statefulConfigs *resource_compute_instance_group_manager_compute.InstanceGroupManagerStatusStatefulPerInstanceConfigs) []map[string]interface{} { - results := []map[string]interface{}{} - data := map[string]interface{}{ - "all_effective": statefulConfigs.AllEffective, - } - results = append(results, data) - return results -} - -func 
flattenStatusVersionTarget(versionTarget *resource_compute_instance_group_manager_compute.InstanceGroupManagerStatusVersionTarget) []map[string]interface{} { - results := []map[string]interface{}{} - data := map[string]interface{}{ - "is_reached": versionTarget.IsReached, - } - results = append(results, data) - return results -} - -func resourceInstanceGroupManagerStateImporter(d *resource_compute_instance_group_manager_schema.ResourceData, meta interface{}) ([]*resource_compute_instance_group_manager_schema.ResourceData, error) { - if err := d.Set("wait_for_instances", false); err != nil { - return nil, resource_compute_instance_group_manager_fmt.Errorf("Error setting wait_for_instances: %s", err) - } - if err := d.Set("wait_for_instances_status", "STABLE"); err != nil { - return nil, resource_compute_instance_group_manager_fmt.Errorf("Error setting wait_for_instances_status: %s", err) - } - config := meta.(*Config) - if err := parseImportId([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{name}}") - if err != nil { - return nil, resource_compute_instance_group_manager_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_instance_group_manager_schema.ResourceData{d}, nil -} - -func resourceComputeInstanceGroupMigrateState( - v int, is *resource_compute_instance_group_migrate_terraform.InstanceState, meta interface{}) (*resource_compute_instance_group_migrate_terraform.InstanceState, error) { - if is.Empty() { - resource_compute_instance_group_migrate_log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - switch v { - case 0: - resource_compute_instance_group_migrate_log.Println("[INFO] Found Compute Instance Group State v0; migrating to v1") - is, 
err := migrateInstanceGroupStateV0toV1(is) - if err != nil { - return is, err - } - fallthrough - case 1: - resource_compute_instance_group_migrate_log.Println("[INFO] Found Compute Instance Group State v1; migrating to v2") - is, err := migrateInstanceGroupStateV1toV2(is) - if err != nil { - return is, err - } - return is, nil - default: - return is, resource_compute_instance_group_migrate_fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateInstanceGroupStateV0toV1(is *resource_compute_instance_group_migrate_terraform.InstanceState) (*resource_compute_instance_group_migrate_terraform.InstanceState, error) { - resource_compute_instance_group_migrate_log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - newInstances := []string{} - - for k, v := range is.Attributes { - if !resource_compute_instance_group_migrate_strings.HasPrefix(k, "instances.") { - continue - } - - if k == "instances.#" { - continue - } - - kParts := resource_compute_instance_group_migrate_strings.Split(k, ".") - - badFormat := false - if len(kParts) != 2 { - badFormat = true - } else if _, err := resource_compute_instance_group_migrate_strconv.Atoi(kParts[1]); err != nil { - badFormat = true - } - - if badFormat { - return is, resource_compute_instance_group_migrate_fmt.Errorf("migration error: found instances key in unexpected format: %s", k) - } - - newInstances = append(newInstances, v) - delete(is.Attributes, k) - } - - for _, v := range newInstances { - hash := resource_compute_instance_group_migrate_schema.HashString(v) - newKey := resource_compute_instance_group_migrate_fmt.Sprintf("instances.%d", hash) - is.Attributes[newKey] = v - } - - resource_compute_instance_group_migrate_log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} - -func migrateInstanceGroupStateV1toV2(is *resource_compute_instance_group_migrate_terraform.InstanceState) (*resource_compute_instance_group_migrate_terraform.InstanceState, error) { - 
resource_compute_instance_group_migrate_log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - is.ID = resource_compute_instance_group_migrate_fmt.Sprintf("%s/%s", is.Attributes["zone"], is.Attributes["name"]) - - return is, nil -} - -func resourceComputeInstanceGroupNamedPort() *resource_compute_instance_group_named_port_schema.Resource { - return &resource_compute_instance_group_named_port_schema.Resource{ - Create: resourceComputeInstanceGroupNamedPortCreate, - Read: resourceComputeInstanceGroupNamedPortRead, - Delete: resourceComputeInstanceGroupNamedPortDelete, - - Importer: &resource_compute_instance_group_named_port_schema.ResourceImporter{ - State: resourceComputeInstanceGroupNamedPortImport, - }, - - Timeouts: &resource_compute_instance_group_named_port_schema.ResourceTimeout{ - Create: resource_compute_instance_group_named_port_schema.DefaultTimeout(6 * resource_compute_instance_group_named_port_time.Minute), - Delete: resource_compute_instance_group_named_port_schema.DefaultTimeout(6 * resource_compute_instance_group_named_port_time.Minute), - }, - - Schema: map[string]*resource_compute_instance_group_named_port_schema.Schema{ - "group": { - Type: resource_compute_instance_group_named_port_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareResourceNames, - Description: `The name of the instance group.`, - }, - "name": { - Type: resource_compute_instance_group_named_port_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name for this named port. 
The name must be 1-63 characters -long, and comply with RFC1035.`, - }, - "port": { - Type: resource_compute_instance_group_named_port_schema.TypeInt, - Required: true, - ForceNew: true, - Description: `The port number, which can be a value between 1 and 65535.`, - }, - "zone": { - Type: resource_compute_instance_group_named_port_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The zone of the instance group.`, - }, - "project": { - Type: resource_compute_instance_group_named_port_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeInstanceGroupNamedPortCreate(d *resource_compute_instance_group_named_port_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputeInstanceGroupNamedPortName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_instance_group_named_port_reflect.ValueOf(nameProp)) && (ok || !resource_compute_instance_group_named_port_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - portProp, err := expandNestedComputeInstanceGroupNamedPortPort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(resource_compute_instance_group_named_port_reflect.ValueOf(portProp)) && (ok || !resource_compute_instance_group_named_port_reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - - obj, err = resourceComputeInstanceGroupNamedPortEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}") - if err != nil { - return err - } - 
mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/setNamedPorts") - if err != nil { - return err - } - - resource_compute_instance_group_named_port_log.Printf("[DEBUG] Creating new InstanceGroupNamedPort: %#v", obj) - - obj, err = resourceComputeInstanceGroupNamedPortPatchCreateEncoder(d, meta, obj) - if err != nil { - return err - } - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_instance_group_named_port_fmt.Errorf("Error fetching project for InstanceGroupNamedPort: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_instance_group_named_port_schema.TimeoutCreate)) - if err != nil { - return resource_compute_instance_group_named_port_fmt.Errorf("Error creating InstanceGroupNamedPort: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/{{port}}/{{name}}") - if err != nil { - return resource_compute_instance_group_named_port_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating InstanceGroupNamedPort", userAgent, - d.Timeout(resource_compute_instance_group_named_port_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_instance_group_named_port_fmt.Errorf("Error waiting to create InstanceGroupNamedPort: %s", err) - } - - resource_compute_instance_group_named_port_log.Printf("[DEBUG] Finished creating InstanceGroupNamedPort %q: %#v", d.Id(), res) - - return resourceComputeInstanceGroupNamedPortRead(d, meta) -} - -func resourceComputeInstanceGroupNamedPortRead(d 
*resource_compute_instance_group_named_port_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_instance_group_named_port_fmt.Errorf("Error fetching project for InstanceGroupNamedPort: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_instance_group_named_port_fmt.Sprintf("ComputeInstanceGroupNamedPort %q", d.Id())) - } - - res, err = flattenNestedComputeInstanceGroupNamedPort(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_instance_group_named_port_log.Printf("[DEBUG] Removing ComputeInstanceGroupNamedPort because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_compute_instance_group_named_port_fmt.Errorf("Error reading InstanceGroupNamedPort: %s", err) - } - - if err := d.Set("name", flattenNestedComputeInstanceGroupNamedPortName(res["name"], d, config)); err != nil { - return resource_compute_instance_group_named_port_fmt.Errorf("Error reading InstanceGroupNamedPort: %s", err) - } - if err := d.Set("port", flattenNestedComputeInstanceGroupNamedPortPort(res["port"], d, config)); err != nil { - return resource_compute_instance_group_named_port_fmt.Errorf("Error reading InstanceGroupNamedPort: %s", err) - } - - return nil -} - -func resourceComputeInstanceGroupNamedPortDelete(d 
*resource_compute_instance_group_named_port_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_instance_group_named_port_fmt.Errorf("Error fetching project for InstanceGroupNamedPort: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/setNamedPorts") - if err != nil { - return err - } - - var obj map[string]interface{} - - obj, err = resourceComputeInstanceGroupNamedPortPatchDeleteEncoder(d, meta, obj) - if err != nil { - return handleNotFoundError(err, d, "InstanceGroupNamedPort") - } - resource_compute_instance_group_named_port_log.Printf("[DEBUG] Deleting InstanceGroupNamedPort %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_instance_group_named_port_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "InstanceGroupNamedPort") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting InstanceGroupNamedPort", userAgent, - d.Timeout(resource_compute_instance_group_named_port_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_instance_group_named_port_log.Printf("[DEBUG] Finished deleting InstanceGroupNamedPort %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeInstanceGroupNamedPortImport(d *resource_compute_instance_group_named_port_schema.ResourceData, meta interface{}) 
([]*resource_compute_instance_group_named_port_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroups/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/{{port}}/{{name}}") - if err != nil { - return nil, resource_compute_instance_group_named_port_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_instance_group_named_port_schema.ResourceData{d}, nil -} - -func flattenNestedComputeInstanceGroupNamedPortName(v interface{}, d *resource_compute_instance_group_named_port_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeInstanceGroupNamedPortPort(v interface{}, d *resource_compute_instance_group_named_port_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_instance_group_named_port_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func expandNestedComputeInstanceGroupNamedPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeInstanceGroupNamedPortPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeInstanceGroupNamedPortEncoder(d *resource_compute_instance_group_named_port_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - ig, err := ParseInstanceGroupFieldValue(d.Get("group").(string), d, config) - if err 
!= nil { - return nil, err - } - - if err := d.Set("group", ig.Name); err != nil { - return nil, resource_compute_instance_group_named_port_fmt.Errorf("Error setting group: %s", err) - } - if err := d.Set("zone", ig.Zone); err != nil { - return nil, resource_compute_instance_group_named_port_fmt.Errorf("Error setting zone: %s", err) - } - if err := d.Set("project", ig.Project); err != nil { - return nil, resource_compute_instance_group_named_port_fmt.Errorf("Error setting project: %s", err) - } - - return obj, nil -} - -func flattenNestedComputeInstanceGroupNamedPort(d *resource_compute_instance_group_named_port_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["namedPorts"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_compute_instance_group_named_port_fmt.Errorf("expected list or map for value namedPorts. 
Actual value: %v", v) - } - - _, item, err := resourceComputeInstanceGroupNamedPortFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeInstanceGroupNamedPortFindNestedObjectInList(d *resource_compute_instance_group_named_port_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedPort, err := expandNestedComputeInstanceGroupNamedPortPort(d.Get("port"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedPort := flattenNestedComputeInstanceGroupNamedPortPort(expectedPort, d, meta.(*Config)) - expectedName, err := expandNestedComputeInstanceGroupNamedPortName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputeInstanceGroupNamedPortName(expectedName, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemPort := flattenNestedComputeInstanceGroupNamedPortPort(item["port"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_instance_group_named_port_reflect.ValueOf(itemPort)) && isEmptyValue(resource_compute_instance_group_named_port_reflect.ValueOf(expectedFlattenedPort))) && !resource_compute_instance_group_named_port_reflect.DeepEqual(itemPort, expectedFlattenedPort) { - resource_compute_instance_group_named_port_log.Printf("[DEBUG] Skipping item with port= %#v, looking for %#v)", itemPort, expectedFlattenedPort) - continue - } - itemName := flattenNestedComputeInstanceGroupNamedPortName(item["name"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_instance_group_named_port_reflect.ValueOf(itemName)) && isEmptyValue(resource_compute_instance_group_named_port_reflect.ValueOf(expectedFlattenedName))) && !resource_compute_instance_group_named_port_reflect.DeepEqual(itemName, expectedFlattenedName) { - 
resource_compute_instance_group_named_port_log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - resource_compute_instance_group_named_port_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func resourceComputeInstanceGroupNamedPortPatchCreateEncoder(d *resource_compute_instance_group_named_port_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceComputeInstanceGroupNamedPortListForPatch(d, meta) - if err != nil { - return nil, err - } - - _, found, err := resourceComputeInstanceGroupNamedPortFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - - if found != nil { - return nil, resource_compute_instance_group_named_port_fmt.Errorf("Unable to create InstanceGroupNamedPort, existing object already found: %+v", found) - } - - res := map[string]interface{}{ - "namedPorts": append(currItems, obj), - } - - return res, nil -} - -func resourceComputeInstanceGroupNamedPortPatchDeleteEncoder(d *resource_compute_instance_group_named_port_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceComputeInstanceGroupNamedPortListForPatch(d, meta) - if err != nil { - return nil, err - } - - idx, item, err := resourceComputeInstanceGroupNamedPortFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - if item == nil { - - return nil, &resource_compute_instance_group_named_port_googleapi.Error{ - Code: 404, - Message: "InstanceGroupNamedPort not found in list", - } - } - - updatedItems := append(currItems[:idx], currItems[idx+1:]...) 
- res := map[string]interface{}{ - "namedPorts": updatedItems, - } - - return res, nil -} - -func resourceComputeInstanceGroupNamedPortListForPatch(d *resource_compute_instance_group_named_port_schema.ResourceData, meta interface{}) ([]interface{}, error) { - config := meta.(*Config) - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}") - if err != nil { - return nil, err - } - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return nil, err - } - - var v interface{} - var ok bool - - v, ok = res["namedPorts"] - if ok && v != nil { - ls, lsOk := v.([]interface{}) - if !lsOk { - return nil, resource_compute_instance_group_named_port_fmt.Errorf(`expected list for nested field "namedPorts"`) - } - return ls, nil - } - return nil, nil -} - -func resourceComputeInstanceMigrateState( - v int, is *resource_compute_instance_migrate_terraform.InstanceState, meta interface{}) (*resource_compute_instance_migrate_terraform.InstanceState, error) { - if is.Empty() { - resource_compute_instance_migrate_log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - var err error - - switch v { - case 0: - resource_compute_instance_migrate_log.Println("[INFO] Found Compute Instance State v0; migrating to v1") - is, err = migrateStateV0toV1(is) - if err != nil { - return is, err - } - fallthrough - case 1: - resource_compute_instance_migrate_log.Println("[INFO] Found Compute Instance State v1; migrating to v2") - is, err = migrateStateV1toV2(is) - if err != nil { - return is, err - } - fallthrough - case 2: - resource_compute_instance_migrate_log.Println("[INFO] Found Compute Instance State v2; migrating to v3") - is, err = migrateStateV2toV3(is) - if err != nil { - 
return is, err - } - fallthrough - case 3: - resource_compute_instance_migrate_log.Println("[INFO] Found Compute Instance State v3; migrating to v4") - is, err = migrateStateV3toV4(is, meta) - if err != nil { - return is, err - } - fallthrough - case 4: - resource_compute_instance_migrate_log.Println("[INFO] Found Compute Instance State v4; migrating to v5") - is, err = migrateStateV4toV5(is, meta) - if err != nil { - return is, err - } - fallthrough - case 5: - resource_compute_instance_migrate_log.Println("[INFO] Found Compute Instance State v5; migrating to v6") - is, err = migrateStateV5toV6(is) - if err != nil { - return is, err - } - - return is, err - default: - return is, resource_compute_instance_migrate_fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateStateV0toV1(is *resource_compute_instance_migrate_terraform.InstanceState) (*resource_compute_instance_migrate_terraform.InstanceState, error) { - resource_compute_instance_migrate_log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - delete(is.Attributes, "metadata.#") - - newMetadata := make(map[string]string) - - for k, v := range is.Attributes { - if !resource_compute_instance_migrate_strings.HasPrefix(k, "metadata.") { - continue - } - - kParts := resource_compute_instance_migrate_strings.SplitN(k, ".", 3) - - badFormat := false - if len(kParts) != 3 { - badFormat = true - } else if _, err := resource_compute_instance_migrate_strconv.Atoi(kParts[1]); err != nil { - badFormat = true - } - - if badFormat { - return is, resource_compute_instance_migrate_fmt.Errorf( - "migration error: found metadata key in unexpected format: %s", k) - } - - newK := resource_compute_instance_migrate_strings.Join([]string{kParts[0], kParts[2]}, ".") - newMetadata[newK] = v - delete(is.Attributes, k) - } - - for k, v := range newMetadata { - is.Attributes[k] = v - } - - resource_compute_instance_migrate_log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, 
nil -} - -func migrateStateV1toV2(is *resource_compute_instance_migrate_terraform.InstanceState) (*resource_compute_instance_migrate_terraform.InstanceState, error) { - resource_compute_instance_migrate_log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - newScopesMap := make(map[string][]string) - - for k, v := range is.Attributes { - if !resource_compute_instance_migrate_strings.HasPrefix(k, "service_account.") { - continue - } - - if k == "service_account.#" { - continue - } - - if resource_compute_instance_migrate_strings.HasSuffix(k, ".scopes.#") { - continue - } - - if resource_compute_instance_migrate_strings.HasSuffix(k, ".email") { - continue - } - - kParts := resource_compute_instance_migrate_strings.Split(k, ".") - - badFormat := false - if len(kParts) != 4 { - badFormat = true - } else if _, err := resource_compute_instance_migrate_strconv.Atoi(kParts[1]); err != nil { - badFormat = true - } - - if badFormat { - return is, resource_compute_instance_migrate_fmt.Errorf( - "migration error: found scope key in unexpected format: %s", k) - } - - newScopesMap[kParts[1]] = append(newScopesMap[kParts[1]], v) - - delete(is.Attributes, k) - } - - for service_acct_index, newScopes := range newScopesMap { - for _, newScope := range newScopes { - hash := hashcode(canonicalizeServiceScope(newScope)) - newKey := resource_compute_instance_migrate_fmt.Sprintf("service_account.%s.scopes.%d", service_acct_index, hash) - is.Attributes[newKey] = newScope - } - } - - resource_compute_instance_migrate_log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} - -func migrateStateV2toV3(is *resource_compute_instance_migrate_terraform.InstanceState) (*resource_compute_instance_migrate_terraform.InstanceState, error) { - resource_compute_instance_migrate_log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - is.Attributes["create_timeout"] = "4" - resource_compute_instance_migrate_log.Printf("[DEBUG] Attributes 
after migration: %#v", is.Attributes) - return is, nil -} - -func migrateStateV3toV4(is *resource_compute_instance_migrate_terraform.InstanceState, meta interface{}) (*resource_compute_instance_migrate_terraform.InstanceState, error) { - resource_compute_instance_migrate_log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - config := meta.(*Config) - instance, err := getInstanceFromInstanceState(config, is) - if err != nil { - return is, resource_compute_instance_migrate_fmt.Errorf("migration error: %s", err) - } - diskList, err := getAllDisksFromInstanceState(config, is) - if err != nil { - return is, resource_compute_instance_migrate_fmt.Errorf("migration error: %s", err) - } - allDisks := make(map[string]*resource_compute_instance_migrate_compute.Disk) - for _, disk := range diskList { - allDisks[disk.Name] = disk - } - - hasBootDisk := is.Attributes["boot_disk.#"] == "1" - - scratchDisks := 0 - if v := is.Attributes["scratch_disk.#"]; v != "" { - scratchDisks, err = resource_compute_instance_migrate_strconv.Atoi(v) - if err != nil { - return is, resource_compute_instance_migrate_fmt.Errorf("migration error: found scratch_disk.# value in unexpected format: %s", err) - } - } - - attachedDisks := 0 - if v := is.Attributes["attached_disk.#"]; v != "" { - attachedDisks, err = resource_compute_instance_migrate_strconv.Atoi(v) - if err != nil { - return is, resource_compute_instance_migrate_fmt.Errorf("migration error: found attached_disk.# value in unexpected format: %s", err) - } - } - - disks := 0 - if v := is.Attributes["disk.#"]; v != "" { - disks, err = resource_compute_instance_migrate_strconv.Atoi(is.Attributes["disk.#"]) - if err != nil { - return is, resource_compute_instance_migrate_fmt.Errorf("migration error: found disk.# value in unexpected format: %s", err) - } - } - - for i := 0; i < disks; i++ { - if !hasBootDisk && i == 0 { - is.Attributes["boot_disk.#"] = "1" - - if is.Attributes["disk.0.scratch_disk"] == "true" { - return is, 
resource_compute_instance_migrate_fmt.Errorf("migration error: found scratch disk at index 0") - } - - for _, disk := range instance.Disks { - if disk.Boot { - is.Attributes["boot_disk.0.source"] = GetResourceNameFromSelfLink(disk.Source) - is.Attributes["boot_disk.0.device_name"] = disk.DeviceName - break - } - } - is.Attributes["boot_disk.0.auto_delete"] = is.Attributes["disk.0.auto_delete"] - is.Attributes["boot_disk.0.disk_encryption_key_raw"] = is.Attributes["disk.0.disk_encryption_key_raw"] - is.Attributes["boot_disk.0.disk_encryption_key_sha256"] = is.Attributes["disk.0.disk_encryption_key_sha256"] - - if is.Attributes["disk.0.size"] != "" && is.Attributes["disk.0.size"] != "0" { - is.Attributes["boot_disk.0.initialize_params.#"] = "1" - is.Attributes["boot_disk.0.initialize_params.0.size"] = is.Attributes["disk.0.size"] - } - if is.Attributes["disk.0.type"] != "" { - is.Attributes["boot_disk.0.initialize_params.#"] = "1" - is.Attributes["boot_disk.0.initialize_params.0.type"] = is.Attributes["disk.0.type"] - } - if is.Attributes["disk.0.image"] != "" { - is.Attributes["boot_disk.0.initialize_params.#"] = "1" - is.Attributes["boot_disk.0.initialize_params.0.image"] = is.Attributes["disk.0.image"] - } - } else if is.Attributes[resource_compute_instance_migrate_fmt.Sprintf("disk.%d.scratch", i)] == "true" { - - if is.Attributes[resource_compute_instance_migrate_fmt.Sprintf("disk.%d.auto_delete", i)] != "true" { - return is, resource_compute_instance_migrate_fmt.Errorf("migration error: attempted to migrate scratch disk where auto_delete is not true") - } - - is.Attributes[resource_compute_instance_migrate_fmt.Sprintf("scratch_disk.%d.interface", scratchDisks)] = "SCSI" - - scratchDisks++ - } else { - - disk, err := getDiskFromAttributes(config, instance, allDisks, is.Attributes, i) - if err != nil { - return is, resource_compute_instance_migrate_fmt.Errorf("migration error: %s", err) - } - - 
is.Attributes[resource_compute_instance_migrate_fmt.Sprintf("attached_disk.%d.source", attachedDisks)] = disk.Source - is.Attributes[resource_compute_instance_migrate_fmt.Sprintf("attached_disk.%d.device_name", attachedDisks)] = disk.DeviceName - is.Attributes[resource_compute_instance_migrate_fmt.Sprintf("attached_disk.%d.disk_encryption_key_raw", attachedDisks)] = is.Attributes[resource_compute_instance_migrate_fmt.Sprintf("disk.%d.disk_encryption_key_raw", i)] - is.Attributes[resource_compute_instance_migrate_fmt.Sprintf("attached_disk.%d.disk_encryption_key_sha256", attachedDisks)] = is.Attributes[resource_compute_instance_migrate_fmt.Sprintf("disk.%d.disk_encryption_key_sha256", i)] - - attachedDisks++ - } - } - - for k := range is.Attributes { - if !resource_compute_instance_migrate_strings.HasPrefix(k, "disk.") { - continue - } - - delete(is.Attributes, k) - } - if scratchDisks > 0 { - is.Attributes["scratch_disk.#"] = resource_compute_instance_migrate_strconv.Itoa(scratchDisks) - } - if attachedDisks > 0 { - is.Attributes["attached_disk.#"] = resource_compute_instance_migrate_strconv.Itoa(attachedDisks) - } - - resource_compute_instance_migrate_log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} - -func migrateStateV4toV5(is *resource_compute_instance_migrate_terraform.InstanceState, meta interface{}) (*resource_compute_instance_migrate_terraform.InstanceState, error) { - if v := is.Attributes["disk.#"]; v != "" { - return migrateStateV3toV4(is, meta) - } - return is, nil -} - -func getInstanceFromInstanceState(config *Config, is *resource_compute_instance_migrate_terraform.InstanceState) (*resource_compute_instance_migrate_compute.Instance, error) { - project, ok := is.Attributes["project"] - if !ok { - if config.Project == "" { - return nil, resource_compute_instance_migrate_fmt.Errorf("could not determine 'project'") - } else { - project = config.Project - } - } - - zone, ok := is.Attributes["zone"] - if !ok { - if 
config.Zone == "" { - return nil, resource_compute_instance_migrate_fmt.Errorf("could not determine 'zone'") - } else { - zone = config.Zone - } - } - - instance, err := config.NewComputeClient(config.userAgent).Instances.Get( - project, zone, is.ID).Do() - if err != nil { - return nil, resource_compute_instance_migrate_fmt.Errorf("error reading instance: %s", err) - } - - return instance, nil -} - -func getAllDisksFromInstanceState(config *Config, is *resource_compute_instance_migrate_terraform.InstanceState) ([]*resource_compute_instance_migrate_compute.Disk, error) { - project, ok := is.Attributes["project"] - if !ok { - if config.Project == "" { - return nil, resource_compute_instance_migrate_fmt.Errorf("could not determine 'project'") - } else { - project = config.Project - } - } - - zone, ok := is.Attributes["zone"] - if !ok { - if config.Zone == "" { - return nil, resource_compute_instance_migrate_fmt.Errorf("could not determine 'zone'") - } else { - zone = config.Zone - } - } - - diskList := []*resource_compute_instance_migrate_compute.Disk{} - token := "" - for { - disks, err := config.NewComputeClient(config.userAgent).Disks.List(project, zone).PageToken(token).Do() - if err != nil { - return nil, resource_compute_instance_migrate_fmt.Errorf("error reading disks: %s", err) - } - diskList = append(diskList, disks.Items...) 
- token = disks.NextPageToken - if token == "" { - break - } - } - - return diskList, nil -} - -func getDiskFromAttributes(config *Config, instance *resource_compute_instance_migrate_compute.Instance, allDisks map[string]*resource_compute_instance_migrate_compute.Disk, attributes map[string]string, i int) (*resource_compute_instance_migrate_compute.AttachedDisk, error) { - if diskSource := attributes[resource_compute_instance_migrate_fmt.Sprintf("disk.%d.disk", i)]; diskSource != "" { - return getDiskFromSource(instance, diskSource) - } - - if deviceName := attributes[resource_compute_instance_migrate_fmt.Sprintf("disk.%d.device_name", i)]; deviceName != "" { - return getDiskFromDeviceName(instance, deviceName) - } - - if encryptionKey := attributes[resource_compute_instance_migrate_fmt.Sprintf("disk.%d.disk_encryption_key_raw", i)]; encryptionKey != "" { - return getDiskFromEncryptionKey(instance, encryptionKey) - } - - autoDelete, err := resource_compute_instance_migrate_strconv.ParseBool(attributes[resource_compute_instance_migrate_fmt.Sprintf("disk.%d.auto_delete", i)]) - if err != nil { - return nil, resource_compute_instance_migrate_fmt.Errorf("error parsing auto_delete attribute of disk %d", i) - } - image := attributes[resource_compute_instance_migrate_fmt.Sprintf("disk.%d.image", i)] - - project, ok := attributes["project"] - if !ok { - project = config.Project - } - zone := attributes["zone"] - return getDiskFromAutoDeleteAndImage(config, instance, allDisks, autoDelete, image, project, zone) -} - -func getDiskFromSource(instance *resource_compute_instance_migrate_compute.Instance, source string) (*resource_compute_instance_migrate_compute.AttachedDisk, error) { - for _, disk := range instance.Disks { - if disk.Boot || disk.Type == "SCRATCH" { - - continue - } - - if resource_compute_instance_migrate_strings.HasSuffix(disk.Source, "/"+source) { - return disk, nil - } - } - return nil, resource_compute_instance_migrate_fmt.Errorf("could not find attached 
disk with source %q", source) -} - -func getDiskFromDeviceName(instance *resource_compute_instance_migrate_compute.Instance, deviceName string) (*resource_compute_instance_migrate_compute.AttachedDisk, error) { - for _, disk := range instance.Disks { - if disk.Boot || disk.Type == "SCRATCH" { - - continue - } - if disk.DeviceName == deviceName { - return disk, nil - } - } - return nil, resource_compute_instance_migrate_fmt.Errorf("could not find attached disk with deviceName %q", deviceName) -} - -func getDiskFromEncryptionKey(instance *resource_compute_instance_migrate_compute.Instance, encryptionKey string) (*resource_compute_instance_migrate_compute.AttachedDisk, error) { - encryptionSha, err := hash256(encryptionKey) - if err != nil { - return nil, err - } - for _, disk := range instance.Disks { - if disk.Boot || disk.Type == "SCRATCH" { - - continue - } - if disk.DiskEncryptionKey.Sha256 == encryptionSha { - return disk, nil - } - } - return nil, resource_compute_instance_migrate_fmt.Errorf("could not find attached disk with encryption hash %q", encryptionSha) -} - -func getDiskFromAutoDeleteAndImage(config *Config, instance *resource_compute_instance_migrate_compute.Instance, allDisks map[string]*resource_compute_instance_migrate_compute.Disk, autoDelete bool, image, project, zone string) (*resource_compute_instance_migrate_compute.AttachedDisk, error) { - img, err := resolveImage(config, project, image, config.userAgent) - if err != nil { - return nil, err - } - imgParts := resource_compute_instance_migrate_strings.Split(img, "/projects/") - canonicalImage := imgParts[len(imgParts)-1] - - for i, disk := range instance.Disks { - if disk.Boot || disk.Type == "SCRATCH" { - - continue - } - if disk.AutoDelete == autoDelete { - - fullDisk := allDisks[GetResourceNameFromSelfLink(disk.Source)] - sourceImage, err := getRelativePath(fullDisk.SourceImage) - if err != nil { - return nil, err - } - if canonicalImage == sourceImage { - - instance.Disks = 
append(instance.Disks[:i], instance.Disks[i+1:]...) - return disk, nil - } - } - } - - canonicalImage = resource_compute_instance_migrate_strings.Replace(canonicalImage, "/family/", "/", -1) - for i, disk := range instance.Disks { - if disk.Boot || disk.Type == "SCRATCH" { - - continue - } - if disk.AutoDelete == autoDelete { - - fullDisk := allDisks[GetResourceNameFromSelfLink(disk.Source)] - sourceImage, err := getRelativePath(fullDisk.SourceImage) - if err != nil { - return nil, err - } - - if resource_compute_instance_migrate_strings.Contains(sourceImage, "/"+canonicalImage+"-") { - - instance.Disks = append(instance.Disks[:i], instance.Disks[i+1:]...) - return disk, nil - } - } - } - - return nil, resource_compute_instance_migrate_fmt.Errorf("could not find attached disk with image %q", image) -} - -func migrateStateV5toV6(is *resource_compute_instance_migrate_terraform.InstanceState) (*resource_compute_instance_migrate_terraform.InstanceState, error) { - resource_compute_instance_migrate_log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - if is.Attributes["boot_disk.0.initialize_params.#"] == "1" { - if (is.Attributes["boot_disk.0.initialize_params.0.size"] == "0" || - is.Attributes["boot_disk.0.initialize_params.0.size"] == "") && - is.Attributes["boot_disk.0.initialize_params.0.type"] == "" && - is.Attributes["boot_disk.0.initialize_params.0.image"] == "" { - is.Attributes["boot_disk.0.initialize_params.#"] = "0" - delete(is.Attributes, "boot_disk.0.initialize_params.0.size") - delete(is.Attributes, "boot_disk.0.initialize_params.0.type") - delete(is.Attributes, "boot_disk.0.initialize_params.0.image") - } - } - resource_compute_instance_migrate_log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} - -var ( - schedulingInstTemplateKeys = []string{ - "scheduling.0.on_host_maintenance", - "scheduling.0.automatic_restart", - "scheduling.0.preemptible", - "scheduling.0.node_affinities", - 
"scheduling.0.min_node_cpus", - } - - shieldedInstanceTemplateConfigKeys = []string{ - "shielded_instance_config.0.enable_secure_boot", - "shielded_instance_config.0.enable_vtpm", - "shielded_instance_config.0.enable_integrity_monitoring", - } -) - -var REQUIRED_SCRATCH_DISK_SIZE_GB = 375 - -func resourceComputeInstanceTemplate() *resource_compute_instance_template_schema.Resource { - return &resource_compute_instance_template_schema.Resource{ - Create: resourceComputeInstanceTemplateCreate, - Read: resourceComputeInstanceTemplateRead, - Delete: resourceComputeInstanceTemplateDelete, - Importer: &resource_compute_instance_template_schema.ResourceImporter{ - State: resourceComputeInstanceTemplateImportState, - }, - SchemaVersion: 1, - CustomizeDiff: resource_compute_instance_template_customdiff.All( - resourceComputeInstanceTemplateSourceImageCustomizeDiff, - resourceComputeInstanceTemplateScratchDiskCustomizeDiff, - resourceComputeInstanceTemplateBootDiskCustomizeDiff, - ), - MigrateState: resourceComputeInstanceTemplateMigrateState, - - Timeouts: &resource_compute_instance_template_schema.ResourceTimeout{ - Create: resource_compute_instance_template_schema.DefaultTimeout(4 * resource_compute_instance_template_time.Minute), - Delete: resource_compute_instance_template_schema.DefaultTimeout(4 * resource_compute_instance_template_time.Minute), - }, - - Schema: map[string]*resource_compute_instance_template_schema.Schema{ - "name": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name_prefix"}, - ValidateFunc: validateGCPName, - Description: `The name of the instance template. If you leave this blank, Terraform will auto-generate a unique name.`, - }, - - "name_prefix": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `Creates a unique name beginning with the specified prefix. 
Conflicts with name.`, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - - value := v.(string) - if len(value) > 37 { - errors = append(errors, resource_compute_instance_template_fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) - } - return - }, - }, - - "disk": { - Type: resource_compute_instance_template_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Disks to attach to instances created from this template. This can be specified multiple times for multiple disks.`, - Elem: &resource_compute_instance_template_schema.Resource{ - Schema: map[string]*resource_compute_instance_template_schema.Schema{ - "auto_delete": { - Type: resource_compute_instance_template_schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, - Description: `Whether or not the disk should be auto-deleted. This defaults to true.`, - }, - - "boot": { - Type: resource_compute_instance_template_schema.TypeBool, - Optional: true, - ForceNew: true, - Computed: true, - Description: `Indicates that this is a boot disk.`, - }, - - "device_name": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `A unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk.`, - }, - - "disk_name": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Name of the disk. When not provided, this defaults to the name of the instance.`, - }, - - "disk_size_gb": { - Type: resource_compute_instance_template_schema.TypeInt, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The size of the image in gigabytes. If not specified, it will inherit the size of its base image. 
For SCRATCH disks, the size must be exactly 375GB.`, - }, - - "disk_type": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The Google Compute Engine disk type. Can be either "pd-ssd", "local-ssd", "pd-balanced" or "pd-standard".`, - }, - - "labels": { - Type: resource_compute_instance_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Elem: &resource_compute_instance_template_schema.Schema{ - Type: resource_compute_instance_template_schema.TypeString, - }, - Description: `A set of key/value label pairs to assign to disks,`, - }, - - "source_image": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The image from which to initialize this disk. This can be one of: the image's self_link, projects/{project}/global/images/{image}, projects/{project}/global/images/family/{family}, global/images/{image}, global/images/family/{family}, family/{family}, {project}/{family}, {project}/{image}, {family}, or {image}. ~> Note: Either source or source_image is required when creating a new instance except for when creating a local SSD.`, - }, - - "interface": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: `Specifies the disk interface to use for attaching this disk.`, - }, - - "mode": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If you are attaching or creating a boot disk, this must read-write mode.`, - }, - - "source": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The name (not self_link) of the disk (such as those managed by google_compute_disk) to attach. 
~> Note: Either source or source_image is required when creating a new instance except for when creating a local SSD.`, - }, - - "type": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The type of Google Compute Engine disk, can be either "SCRATCH" or "PERSISTENT".`, - }, - - "disk_encryption_key": { - Type: resource_compute_instance_template_schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Description: `Encrypts or decrypts a disk using a customer-supplied encryption key.`, - Elem: &resource_compute_instance_template_schema.Resource{ - Schema: map[string]*resource_compute_instance_template_schema.Schema{ - "kms_key_self_link": { - Type: resource_compute_instance_template_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The self link of the encryption key that is stored in Google Cloud KMS.`, - }, - }, - }, - }, - - "resource_policies": { - Type: resource_compute_instance_template_schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Description: `A list (short name or id) of resource policies to attach to this disk. Currently a max of 1 resource policy is supported.`, - Elem: &resource_compute_instance_template_schema.Schema{ - Type: resource_compute_instance_template_schema.TypeString, - DiffSuppressFunc: compareResourceNames, - }, - }, - }, - }, - }, - - "machine_type": { - Type: resource_compute_instance_template_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The machine type to create. 
To create a machine with a custom type (such as extended memory), format the value like custom-VCPUS-MEM_IN_MB like custom-6-20480 for 6 vCPU and 20GB of RAM.`, - }, - - "can_ip_forward": { - Type: resource_compute_instance_template_schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - Description: `Whether to allow sending and receiving of packets with non-matching source or destination IPs. This defaults to false.`, - }, - - "description": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A brief description of this resource.`, - }, - - "instance_description": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A description of the instance.`, - }, - - "metadata": { - Type: resource_compute_instance_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Metadata key/value pairs to make available from within instances created from this template.`, - }, - - "metadata_startup_script": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An alternative to using the startup-script metadata key, mostly to match the compute_instance resource. This replaces the startup-script metadata key on the created instance and thus the two mechanisms are not allowed to be used simultaneously.`, - }, - - "metadata_fingerprint": { - Type: resource_compute_instance_template_schema.TypeString, - Computed: true, - Description: `The unique fingerprint of the metadata.`, - }, - "network_interface": { - Type: resource_compute_instance_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Networks to attach to instances created from this template. 
This can be specified multiple times for multiple networks.`, - Elem: &resource_compute_instance_template_schema.Resource{ - Schema: map[string]*resource_compute_instance_template_schema.Schema{ - "network": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name or self_link of the network to attach this interface to. Use network attribute for Legacy or Auto subnetted networks and subnetwork for custom subnetted networks.`, - }, - - "subnetwork": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the subnetwork to attach this interface to. The subnetwork must exist in the same region this instance will be created in. Either network or subnetwork must be provided.`, - }, - - "subnetwork_project": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The ID of the project in which the subnetwork belongs. If it is not provided, the provider project is used.`, - }, - - "network_ip": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The private IP address to assign to the instance. If empty, the address will be automatically assigned.`, - }, - - "name": { - Type: resource_compute_instance_template_schema.TypeString, - Computed: true, - Description: `The name of the network_interface.`, - }, - "nic_type": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_instance_template_validation.StringInSlice([]string{"GVNIC", "VIRTIO_NET"}, false), - Description: `The type of vNIC to be used on this interface. 
Possible values:GVNIC, VIRTIO_NET`, - }, - "access_config": { - Type: resource_compute_instance_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Access configurations, i.e. IPs via which this instance can be accessed via the Internet. Omit to ensure that the instance is not accessible from the Internet (this means that ssh provisioners will not work unless you are running Terraform can send traffic to the instance's network (e.g. via tunnel or because it is running on another cloud instance on that network). This block can be repeated multiple times.`, - Elem: &resource_compute_instance_template_schema.Resource{ - Schema: map[string]*resource_compute_instance_template_schema.Schema{ - "nat_ip": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The IP address that will be 1:1 mapped to the instance's network ip. If not given, one will be generated.`, - }, - "network_tier": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The networking tier used for configuring this instance template. This field can take the following values: PREMIUM or STANDARD. If this field is not specified, it is assumed to be PREMIUM.`, - ValidateFunc: resource_compute_instance_template_validation.StringInSlice([]string{"PREMIUM", "STANDARD"}, false), - }, - - "public_ptr_domain_name": { - Type: resource_compute_instance_template_schema.TypeString, - Computed: true, - Description: `The DNS domain name for the public PTR record.The DNS domain name for the public PTR record.`, - }, - }, - }, - }, - - "alias_ip_range": { - Type: resource_compute_instance_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `An array of alias IP ranges for this network interface. 
Can only be specified for network interfaces on subnet-mode networks.`, - Elem: &resource_compute_instance_template_schema.Resource{ - Schema: map[string]*resource_compute_instance_template_schema.Schema{ - "ip_cidr_range": { - Type: resource_compute_instance_template_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: ipCidrRangeDiffSuppress, - Description: `The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. At the time of writing only a netmask (e.g. /24) may be supplied, with a CIDR format resulting in an API error.`, - }, - "subnetwork_range_name": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range. If left unspecified, the primary range of the subnetwork will be used.`, - }, - }, - }, - }, - - "stack_type": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: resource_compute_instance_template_validation.StringInSlice([]string{"IPV4_ONLY", "IPV4_IPV6", ""}, false), - Description: `The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used.`, - }, - - "ipv6_access_type": { - Type: resource_compute_instance_template_schema.TypeString, - Computed: true, - Description: `One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork.`, - }, - - "ipv6_access_config": { - Type: resource_compute_instance_template_schema.TypeList, - Optional: true, - Description: `An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. 
If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access.`, - Elem: &resource_compute_instance_template_schema.Resource{ - Schema: map[string]*resource_compute_instance_template_schema.Schema{ - "network_tier": { - Type: resource_compute_instance_template_schema.TypeString, - Required: true, - ValidateFunc: resource_compute_instance_template_validation.StringInSlice([]string{"PREMIUM"}, false), - Description: `The service-level to be provided for IPv6 traffic when the subnet has an external subnet. Only PREMIUM tier is valid for IPv6`, - }, - - "public_ptr_domain_name": { - Type: resource_compute_instance_template_schema.TypeString, - Computed: true, - Description: `The domain name to be used when creating DNSv6 records for the external IPv6 ranges.`, - }, - "external_ipv6": { - Type: resource_compute_instance_template_schema.TypeString, - Computed: true, - Description: `The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically.`, - }, - "external_ipv6_prefix_length": { - Type: resource_compute_instance_template_schema.TypeString, - Computed: true, - Description: `The prefix length of the external IPv6 range.`, - }, - }, - }, - }, - "queue_count": { - Type: resource_compute_instance_template_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified.`, - }, - }, - }, - }, - - "project": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, - }, - - "region": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: `An instance template is a global resource that is not bound to a zone or a region. However, you can still specify some regional resources in an instance template, which restricts the template to the region where that resource resides. For example, a custom subnetwork resource is tied to a specific region. Defaults to the region of the Provider if no value is given.`, - }, - - "scheduling": { - Type: resource_compute_instance_template_schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - MaxItems: 1, - Description: `The scheduling strategy to use.`, - Elem: &resource_compute_instance_template_schema.Resource{ - Schema: map[string]*resource_compute_instance_template_schema.Schema{ - "preemptible": { - Type: resource_compute_instance_template_schema.TypeBool, - Optional: true, - AtLeastOneOf: schedulingInstTemplateKeys, - Default: false, - ForceNew: true, - Description: `Allows instance to be preempted. This defaults to false.`, - }, - - "automatic_restart": { - Type: resource_compute_instance_template_schema.TypeBool, - Optional: true, - AtLeastOneOf: schedulingInstTemplateKeys, - Default: true, - ForceNew: true, - Description: `Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). 
This defaults to true.`, - }, - - "on_host_maintenance": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - Computed: true, - AtLeastOneOf: schedulingInstTemplateKeys, - ForceNew: true, - Description: `Defines the maintenance behavior for this instance.`, - }, - - "node_affinities": { - Type: resource_compute_instance_template_schema.TypeSet, - Optional: true, - AtLeastOneOf: schedulingInstTemplateKeys, - ForceNew: true, - Elem: instanceSchedulingNodeAffinitiesElemSchema(), - DiffSuppressFunc: emptyOrDefaultStringSuppress(""), - Description: `Specifies node affinities or anti-affinities to determine which sole-tenant nodes your instances and managed instance groups will use as host systems.`, - }, - "min_node_cpus": { - Type: resource_compute_instance_template_schema.TypeInt, - Optional: true, - AtLeastOneOf: schedulingInstTemplateKeys, - Description: `Minimum number of cpus for the instance.`, - }, - }, - }, - }, - - "self_link": { - Type: resource_compute_instance_template_schema.TypeString, - Computed: true, - Description: `The URI of the created resource.`, - }, - - "service_account": { - Type: resource_compute_instance_template_schema.TypeList, - MaxItems: 1, - Optional: true, - ForceNew: true, - Description: `Service account to attach to the instance.`, - Elem: &resource_compute_instance_template_schema.Resource{ - Schema: map[string]*resource_compute_instance_template_schema.Schema{ - "email": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The service account e-mail address. If not given, the default Google Compute Engine service account is used.`, - }, - - "scopes": { - Type: resource_compute_instance_template_schema.TypeSet, - Required: true, - ForceNew: true, - Description: `A list of service scopes. Both OAuth2 URLs and gcloud short names are supported. 
To allow full access to all Cloud APIs, use the cloud-platform scope.`, - Elem: &resource_compute_instance_template_schema.Schema{ - Type: resource_compute_instance_template_schema.TypeString, - StateFunc: func(v interface{}) string { - return canonicalizeServiceScope(v.(string)) - }, - }, - Set: stringScopeHashcode, - }, - }, - }, - }, - - "shielded_instance_config": { - Type: resource_compute_instance_template_schema.TypeList, - MaxItems: 1, - Optional: true, - ForceNew: true, - Description: `Enable Shielded VM on this instance. Shielded VM provides verifiable integrity to prevent against malware and rootkits. Defaults to disabled. Note: shielded_instance_config can only be used with boot images with shielded vm support.`, - - Computed: true, - DiffSuppressFunc: emptyOrDefaultStringSuppress(""), - Elem: &resource_compute_instance_template_schema.Resource{ - Schema: map[string]*resource_compute_instance_template_schema.Schema{ - "enable_secure_boot": { - Type: resource_compute_instance_template_schema.TypeBool, - Optional: true, - AtLeastOneOf: shieldedInstanceTemplateConfigKeys, - Default: false, - ForceNew: true, - Description: `Verify the digital signature of all boot components, and halt the boot process if signature verification fails. Defaults to false.`, - }, - - "enable_vtpm": { - Type: resource_compute_instance_template_schema.TypeBool, - Optional: true, - AtLeastOneOf: shieldedInstanceTemplateConfigKeys, - Default: true, - ForceNew: true, - Description: `Use a virtualized trusted platform module, which is a specialized computer chip you can use to encrypt objects like keys and certificates. 
Defaults to true.`, - }, - - "enable_integrity_monitoring": { - Type: resource_compute_instance_template_schema.TypeBool, - Optional: true, - AtLeastOneOf: shieldedInstanceTemplateConfigKeys, - Default: true, - ForceNew: true, - Description: `Compare the most recent boot measurements to the integrity policy baseline and return a pair of pass/fail results depending on whether they match or not. Defaults to true.`, - }, - }, - }, - }, - "confidential_instance_config": { - Type: resource_compute_instance_template_schema.TypeList, - MaxItems: 1, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The Confidential VM config being used by the instance. on_host_maintenance has to be set to TERMINATE or this will fail to create.`, - Elem: &resource_compute_instance_template_schema.Resource{ - Schema: map[string]*resource_compute_instance_template_schema.Schema{ - "enable_confidential_compute": { - Type: resource_compute_instance_template_schema.TypeBool, - Required: true, - ForceNew: true, - Description: `Defines whether the instance should have confidential compute enabled.`, - }, - }, - }, - }, - "advanced_machine_features": { - Type: resource_compute_instance_template_schema.TypeList, - MaxItems: 1, - Optional: true, - ForceNew: true, - Description: `Controls for advanced machine-related behavior features.`, - Elem: &resource_compute_instance_template_schema.Resource{ - Schema: map[string]*resource_compute_instance_template_schema.Schema{ - "enable_nested_virtualization": { - Type: resource_compute_instance_template_schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - Description: `Whether to enable nested virtualization or not.`, - }, - "threads_per_core": { - Type: resource_compute_instance_template_schema.TypeInt, - Optional: true, - Computed: false, - ForceNew: true, - Description: `The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. 
If unset, the maximum number of threads supported per core by the underlying processor is assumed.`, - }, - }, - }, - }, - "guest_accelerator": { - Type: resource_compute_instance_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `List of the type and count of accelerator cards attached to the instance.`, - Elem: &resource_compute_instance_template_schema.Resource{ - Schema: map[string]*resource_compute_instance_template_schema.Schema{ - "count": { - Type: resource_compute_instance_template_schema.TypeInt, - Required: true, - ForceNew: true, - Description: `The number of the guest accelerator cards exposed to this instance.`, - }, - "type": { - Type: resource_compute_instance_template_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The accelerator type resource to expose to this instance. E.g. nvidia-tesla-k80.`, - }, - }, - }, - }, - - "min_cpu_platform": { - Type: resource_compute_instance_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a minimum CPU platform. 
Applicable values are the friendly names of CPU platforms, such as Intel Haswell or Intel Skylake.`, - }, - - "tags": { - Type: resource_compute_instance_template_schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &resource_compute_instance_template_schema.Schema{Type: resource_compute_instance_template_schema.TypeString}, - Set: resource_compute_instance_template_schema.HashString, - Description: `Tags to attach to the instance.`, - }, - - "tags_fingerprint": { - Type: resource_compute_instance_template_schema.TypeString, - Computed: true, - Description: `The unique fingerprint of the tags.`, - }, - - "labels": { - Type: resource_compute_instance_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Elem: &resource_compute_instance_template_schema.Schema{Type: resource_compute_instance_template_schema.TypeString}, - Set: resource_compute_instance_template_schema.HashString, - Description: `A set of key/value label pairs to assign to instances created from this template,`, - }, - - "reservation_affinity": { - Type: resource_compute_instance_template_schema.TypeList, - MaxItems: 1, - Optional: true, - ForceNew: true, - Description: `Specifies the reservations that this instance can consume from.`, - Elem: &resource_compute_instance_template_schema.Resource{ - Schema: map[string]*resource_compute_instance_template_schema.Schema{ - "type": { - Type: resource_compute_instance_template_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_compute_instance_template_validation.StringInSlice([]string{"ANY_RESERVATION", "SPECIFIC_RESERVATION", "NO_RESERVATION"}, false), - Description: `The type of reservation from which this instance can consume resources.`, - }, - - "specific_reservation": { - Type: resource_compute_instance_template_schema.TypeList, - MaxItems: 1, - Optional: true, - ForceNew: true, - Description: `Specifies the label selector for the reservation to use.`, - - Elem: 
&resource_compute_instance_template_schema.Resource{ - Schema: map[string]*resource_compute_instance_template_schema.Schema{ - "key": { - Type: resource_compute_instance_template_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Corresponds to the label key of a reservation resource. To target a SPECIFIC_RESERVATION by name, specify compute.googleapis.com/reservation-name as the key and specify the name of your reservation as the only value.`, - }, - "values": { - Type: resource_compute_instance_template_schema.TypeList, - Elem: &resource_compute_instance_template_schema.Schema{Type: resource_compute_instance_template_schema.TypeString}, - Required: true, - ForceNew: true, - Description: `Corresponds to the label values of a reservation resource.`, - }, - }, - }, - }, - }, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeInstanceTemplateSourceImageCustomizeDiff(_ resource_compute_instance_template_context.Context, diff *resource_compute_instance_template_schema.ResourceDiff, meta interface{}) error { - config := meta.(*Config) - - numDisks := diff.Get("disk.#").(int) - for i := 0; i < numDisks; i++ { - key := resource_compute_instance_template_fmt.Sprintf("disk.%d.source_image", i) - if diff.HasChange(key) { - var err error - old, new := diff.GetChange(key) - if old == "" || new == "" { - continue - } - - project, err := getProjectFromDiff(diff, config) - if err != nil { - return err - } - oldResolved, err := resolveImage(config, project, old.(string), config.userAgent) - if err != nil { - return err - } - oldResolved, err = resolveImageRefToRelativeURI(project, oldResolved) - if err != nil { - return err - } - newResolved, err := resolveImage(config, project, new.(string), config.userAgent) - if err != nil { - return err - } - newResolved, err = resolveImageRefToRelativeURI(project, newResolved) - if err != nil { - return err - } - if oldResolved != newResolved { - continue - } - err = diff.Clear(key) - if err != nil { - 
return err - } - } - } - return nil -} - -func resourceComputeInstanceTemplateScratchDiskCustomizeDiff(_ resource_compute_instance_template_context.Context, diff *resource_compute_instance_template_schema.ResourceDiff, meta interface{}) error { - - return resourceComputeInstanceTemplateScratchDiskCustomizeDiffFunc(diff) -} - -func resourceComputeInstanceTemplateScratchDiskCustomizeDiffFunc(diff TerraformResourceDiff) error { - numDisks := diff.Get("disk.#").(int) - for i := 0; i < numDisks; i++ { - - typee := diff.Get(resource_compute_instance_template_fmt.Sprintf("disk.%d.type", i)).(string) - diskType := diff.Get(resource_compute_instance_template_fmt.Sprintf("disk.%d.disk_type", i)).(string) - if typee == "SCRATCH" && diskType != "local-ssd" { - return resource_compute_instance_template_fmt.Errorf("SCRATCH disks must have a disk_type of local-ssd. disk %d has disk_type %s", i, diskType) - } - - if diskType == "local-ssd" && typee != "SCRATCH" { - return resource_compute_instance_template_fmt.Errorf("disks with a disk_type of local-ssd must be SCRATCH disks. 
disk %d is a %s disk", i, typee) - } - - diskSize := diff.Get(resource_compute_instance_template_fmt.Sprintf("disk.%d.disk_size_gb", i)).(int) - if typee == "SCRATCH" && diskSize != REQUIRED_SCRATCH_DISK_SIZE_GB { - return resource_compute_instance_template_fmt.Errorf("SCRATCH disks must be exactly %dGB, disk %d is %d", REQUIRED_SCRATCH_DISK_SIZE_GB, i, diskSize) - } - } - - return nil -} - -func resourceComputeInstanceTemplateBootDiskCustomizeDiff(_ resource_compute_instance_template_context.Context, diff *resource_compute_instance_template_schema.ResourceDiff, meta interface{}) error { - numDisks := diff.Get("disk.#").(int) - - for i := 1; i < numDisks; i++ { - key := resource_compute_instance_template_fmt.Sprintf("disk.%d.boot", i) - if v, ok := diff.GetOk(key); ok { - if v.(bool) { - return resource_compute_instance_template_fmt.Errorf("Only the first disk specified in instance_template can be the boot disk. %s was true", key) - } - } - } - return nil -} - -func buildDisks(d *resource_compute_instance_template_schema.ResourceData, config *Config) ([]*resource_compute_instance_template_compute.AttachedDisk, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - disksCount := d.Get("disk.#").(int) - - disks := make([]*resource_compute_instance_template_compute.AttachedDisk, 0, disksCount) - for i := 0; i < disksCount; i++ { - prefix := resource_compute_instance_template_fmt.Sprintf("disk.%d", i) - - var disk resource_compute_instance_template_compute.AttachedDisk - disk.Type = "PERSISTENT" - disk.Mode = "READ_WRITE" - disk.Interface = "SCSI" - disk.Boot = i == 0 - disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool) - - if v, ok := d.GetOk(prefix + ".boot"); ok { - disk.Boot = v.(bool) - } - - if v, ok := d.GetOk(prefix + ".device_name"); ok { - disk.DeviceName = v.(string) - } - - if _, ok := d.GetOk(prefix + 
".disk_encryption_key"); ok { - disk.DiskEncryptionKey = &resource_compute_instance_template_compute.CustomerEncryptionKey{} - if v, ok := d.GetOk(prefix + ".disk_encryption_key.0.kms_key_self_link"); ok { - disk.DiskEncryptionKey.KmsKeyName = v.(string) - } - } - - if v, ok := d.GetOk(prefix + ".source"); ok { - disk.Source = v.(string) - conflicts := []string{"disk_size_gb", "disk_name", "disk_type", "source_image", "labels"} - for _, conflict := range conflicts { - if _, ok := d.GetOk(prefix + "." + conflict); ok { - return nil, resource_compute_instance_template_fmt.Errorf("Cannot use `source` with any of the fields in %s", conflicts) - } - } - } else { - disk.InitializeParams = &resource_compute_instance_template_compute.AttachedDiskInitializeParams{} - - if v, ok := d.GetOk(prefix + ".disk_name"); ok { - disk.InitializeParams.DiskName = v.(string) - } - if v, ok := d.GetOk(prefix + ".disk_size_gb"); ok { - disk.InitializeParams.DiskSizeGb = int64(v.(int)) - } - disk.InitializeParams.DiskType = "pd-standard" - if v, ok := d.GetOk(prefix + ".disk_type"); ok { - disk.InitializeParams.DiskType = v.(string) - } - - if v, ok := d.GetOk(prefix + ".source_image"); ok { - imageName := v.(string) - imageUrl, err := resolveImage(config, project, imageName, userAgent) - if err != nil { - return nil, resource_compute_instance_template_fmt.Errorf( - "Error resolving image name '%s': %s", - imageName, err) - } - disk.InitializeParams.SourceImage = imageUrl - } - - disk.InitializeParams.Labels = expandStringMap(d, prefix+".labels") - - if _, ok := d.GetOk(prefix + ".resource_policies"); ok { - - disk.InitializeParams.ResourcePolicies = convertAndMapStringArr(d.Get(prefix+".resource_policies").([]interface{}), GetResourceNameFromSelfLink) - } - } - - if v, ok := d.GetOk(prefix + ".interface"); ok { - disk.Interface = v.(string) - } - - if v, ok := d.GetOk(prefix + ".mode"); ok { - disk.Mode = v.(string) - } - - if v, ok := d.GetOk(prefix + ".type"); ok { - disk.Type = 
v.(string) - } - - disks = append(disks, &disk) - } - - return disks, nil -} - -func expandInstanceTemplateGuestAccelerators(d TerraformResourceData, config *Config) []*resource_compute_instance_template_compute.AcceleratorConfig { - configs, ok := d.GetOk("guest_accelerator") - if !ok { - return nil - } - accels := configs.([]interface{}) - guestAccelerators := make([]*resource_compute_instance_template_compute.AcceleratorConfig, 0, len(accels)) - for _, raw := range accels { - data := raw.(map[string]interface{}) - if data["count"].(int) == 0 { - continue - } - guestAccelerators = append(guestAccelerators, &resource_compute_instance_template_compute.AcceleratorConfig{ - AcceleratorCount: int64(data["count"].(int)), - - AcceleratorType: data["type"].(string), - }) - } - - return guestAccelerators -} - -func resourceComputeInstanceTemplateCreate(d *resource_compute_instance_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - disks, err := buildDisks(d, config) - if err != nil { - return err - } - - metadata, err := resourceInstanceMetadata(d) - if err != nil { - return err - } - - networks, err := expandNetworkInterfaces(d, config) - if err != nil { - return err - } - - scheduling, err := expandResourceComputeInstanceTemplateScheduling(d, config) - if err != nil { - return err - } - reservationAffinity, err := expandReservationAffinity(d) - if err != nil { - return err - } - - instanceProperties := &resource_compute_instance_template_compute.InstanceProperties{ - CanIpForward: d.Get("can_ip_forward").(bool), - Description: d.Get("instance_description").(string), - GuestAccelerators: expandInstanceTemplateGuestAccelerators(d, config), - MachineType: d.Get("machine_type").(string), - MinCpuPlatform: d.Get("min_cpu_platform").(string), - Disks: disks, - 
Metadata: metadata, - NetworkInterfaces: networks, - Scheduling: scheduling, - ServiceAccounts: expandServiceAccounts(d.Get("service_account").([]interface{})), - Tags: resourceInstanceTags(d), - ConfidentialInstanceConfig: expandConfidentialInstanceConfig(d), - ShieldedInstanceConfig: expandShieldedVmConfigs(d), - AdvancedMachineFeatures: expandAdvancedMachineFeatures(d), - ReservationAffinity: reservationAffinity, - } - - if _, ok := d.GetOk("labels"); ok { - instanceProperties.Labels = expandLabels(d) - } - - var itName string - if v, ok := d.GetOk("name"); ok { - itName = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - itName = resource_compute_instance_template_resource.PrefixedUniqueId(v.(string)) - } else { - itName = resource_compute_instance_template_resource.UniqueId() - } - instanceTemplate := &resource_compute_instance_template_compute.InstanceTemplate{ - Description: d.Get("description").(string), - Properties: instanceProperties, - Name: itName, - } - - op, err := config.NewComputeClient(userAgent).InstanceTemplates.Insert(project, instanceTemplate).Do() - if err != nil { - return resource_compute_instance_template_fmt.Errorf("Error creating instance template: %s", err) - } - - d.SetId(resource_compute_instance_template_fmt.Sprintf("projects/%s/global/instanceTemplates/%s", project, instanceTemplate.Name)) - - err = computeOperationWaitTime(config, op, project, "Creating Instance Template", userAgent, d.Timeout(resource_compute_instance_template_schema.TimeoutCreate)) - if err != nil { - return err - } - - return resourceComputeInstanceTemplateRead(d, meta) -} - -type diskCharacteristics struct { - mode string - diskType string - diskSizeGb string - autoDelete bool - sourceImage string -} - -func diskCharacteristicsFromMap(m map[string]interface{}) diskCharacteristics { - dc := diskCharacteristics{} - if v := m["mode"]; v == nil || v.(string) == "" { - - dc.mode = "READ_WRITE" - } else { - dc.mode = v.(string) - } - - if v := 
m["disk_type"]; v != nil { - dc.diskType = v.(string) - } - - if v := m["disk_size_gb"]; v != nil { - - dc.diskSizeGb = resource_compute_instance_template_fmt.Sprintf("%v", v) - } - - if v := m["auto_delete"]; v != nil { - dc.autoDelete = v.(bool) - } - - if v := m["source_image"]; v != nil { - dc.sourceImage = v.(string) - } - return dc -} - -func flattenDisk(disk *resource_compute_instance_template_compute.AttachedDisk, defaultProject string) (map[string]interface{}, error) { - diskMap := make(map[string]interface{}) - if disk.InitializeParams != nil { - if disk.InitializeParams.SourceImage != "" { - path, err := resolveImageRefToRelativeURI(defaultProject, disk.InitializeParams.SourceImage) - if err != nil { - return nil, resource_compute_instance_template_errwrap.Wrapf("Error expanding source image input to relative URI: {{err}}", err) - } - diskMap["source_image"] = path - } else { - diskMap["source_image"] = "" - } - diskMap["disk_type"] = disk.InitializeParams.DiskType - diskMap["disk_name"] = disk.InitializeParams.DiskName - diskMap["labels"] = disk.InitializeParams.Labels - - if disk.InitializeParams.DiskSizeGb == 0 && disk.Type == "SCRATCH" { - diskMap["disk_size_gb"] = REQUIRED_SCRATCH_DISK_SIZE_GB - } else { - diskMap["disk_size_gb"] = disk.InitializeParams.DiskSizeGb - } - - diskMap["resource_policies"] = disk.InitializeParams.ResourcePolicies - } - - if disk.DiskEncryptionKey != nil { - encryption := make([]map[string]interface{}, 1) - encryption[0] = make(map[string]interface{}) - encryption[0]["kms_key_self_link"] = disk.DiskEncryptionKey.KmsKeyName - diskMap["disk_encryption_key"] = encryption - } - - diskMap["auto_delete"] = disk.AutoDelete - diskMap["boot"] = disk.Boot - diskMap["device_name"] = disk.DeviceName - diskMap["interface"] = disk.Interface - diskMap["source"] = ConvertSelfLinkToV1(disk.Source) - diskMap["mode"] = disk.Mode - diskMap["type"] = disk.Type - - return diskMap, nil -} - -func reorderDisks(configDisks []interface{}, apiDisks 
[]map[string]interface{}) []map[string]interface{} { - if len(apiDisks) != len(configDisks) { - - return apiDisks - } - - result := make([]map[string]interface{}, len(apiDisks)) - - disksByDeviceName := map[string]int{} - scratchDisksByInterface := map[string][]int{} - attachedDisksBySource := map[string]int{} - attachedDisksByDiskName := map[string]int{} - attachedDisksByCharacteristics := []int{} - - for i, d := range configDisks { - if i == 0 { - - continue - } - disk := d.(map[string]interface{}) - if v := disk["device_name"]; v.(string) != "" { - disksByDeviceName[v.(string)] = i - } else if v := disk["type"]; v.(string) == "SCRATCH" { - iface := disk["interface"].(string) - if iface == "" { - - iface = "SCSI" - } - scratchDisksByInterface[iface] = append(scratchDisksByInterface[iface], i) - } else if v := disk["source"]; v.(string) != "" { - attachedDisksBySource[v.(string)] = i - } else if v := disk["disk_name"]; v.(string) != "" { - attachedDisksByDiskName[v.(string)] = i - } else { - attachedDisksByCharacteristics = append(attachedDisksByCharacteristics, i) - } - } - - for _, apiDisk := range apiDisks { - - if apiDisk["boot"].(bool) { - result[0] = apiDisk - - } else if i, ok := disksByDeviceName[apiDisk["device_name"].(string)]; ok { - result[i] = apiDisk - - } else if apiDisk["type"].(string) == "SCRATCH" { - iface := apiDisk["interface"].(string) - indexes := scratchDisksByInterface[iface] - if len(indexes) > 0 { - result[indexes[0]] = apiDisk - scratchDisksByInterface[iface] = indexes[1:] - } else { - result = append(result, apiDisk) - } - - } else if i, ok := attachedDisksBySource[apiDisk["source"].(string)]; ok { - result[i] = apiDisk - - } else if v, ok := apiDisk["disk_name"]; ok && attachedDisksByDiskName[v.(string)] != 0 { - result[attachedDisksByDiskName[v.(string)]] = apiDisk - - } else { - found := false - for arrayIndex, i := range attachedDisksByCharacteristics { - configDisk := configDisks[i].(map[string]interface{}) - stateDc := 
diskCharacteristicsFromMap(configDisk) - readDc := diskCharacteristicsFromMap(apiDisk) - if resource_compute_instance_template_reflect.DeepEqual(stateDc, readDc) { - result[i] = apiDisk - attachedDisksByCharacteristics = append(attachedDisksByCharacteristics[:arrayIndex], attachedDisksByCharacteristics[arrayIndex+1:]...) - found = true - break - } - } - if !found { - result = append(result, apiDisk) - } - } - } - - ds := []map[string]interface{}{} - for _, d := range result { - if d != nil { - ds = append(ds, d) - } - } - return ds -} - -func flattenDisks(disks []*resource_compute_instance_template_compute.AttachedDisk, d *resource_compute_instance_template_schema.ResourceData, defaultProject string) ([]map[string]interface{}, error) { - apiDisks := make([]map[string]interface{}, len(disks)) - - for i, disk := range disks { - d, err := flattenDisk(disk, defaultProject) - if err != nil { - return nil, err - } - apiDisks[i] = d - } - - return reorderDisks(d.Get("disk").([]interface{}), apiDisks), nil -} - -func resourceComputeInstanceTemplateRead(d *resource_compute_instance_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - splits := resource_compute_instance_template_strings.Split(d.Id(), "/") - instanceTemplate, err := config.NewComputeClient(userAgent).InstanceTemplates.Get(project, splits[len(splits)-1]).Do() - if err != nil { - return handleNotFoundError(err, d, resource_compute_instance_template_fmt.Sprintf("Instance Template %q", d.Get("name").(string))) - } - - if instanceTemplate.Properties.Metadata != nil { - if err = d.Set("metadata_fingerprint", instanceTemplate.Properties.Metadata.Fingerprint); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting metadata_fingerprint: %s", err) - } - - md := 
instanceTemplate.Properties.Metadata - - _md := flattenMetadataBeta(md) - - if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { - if err = d.Set("metadata_startup_script", script); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting metadata_startup_script: %s", err) - } - - delete(_md, "startup-script") - } - - if err = d.Set("metadata", _md); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting metadata: %s", err) - } - } - - if instanceTemplate.Properties.Tags != nil { - if err = d.Set("tags_fingerprint", instanceTemplate.Properties.Tags.Fingerprint); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting tags_fingerprint: %s", err) - } - } else { - if err := d.Set("tags_fingerprint", ""); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting tags_fingerprint: %s", err) - } - } - if instanceTemplate.Properties.Labels != nil { - if err := d.Set("labels", instanceTemplate.Properties.Labels); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting labels: %s", err) - } - } - if err = d.Set("self_link", instanceTemplate.SelfLink); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting self_link: %s", err) - } - if err = d.Set("name", instanceTemplate.Name); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting name: %s", err) - } - if instanceTemplate.Properties.Disks != nil { - disks, err := flattenDisks(instanceTemplate.Properties.Disks, d, project) - if err != nil { - return resource_compute_instance_template_fmt.Errorf("error flattening disks: %s", err) - } - if err = d.Set("disk", disks); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting disk: %s", err) - } - } - if err = d.Set("description", instanceTemplate.Description); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting 
description: %s", err) - } - if err = d.Set("machine_type", instanceTemplate.Properties.MachineType); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting machine_type: %s", err) - } - if err = d.Set("min_cpu_platform", instanceTemplate.Properties.MinCpuPlatform); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting min_cpu_platform: %s", err) - } - - if err = d.Set("can_ip_forward", instanceTemplate.Properties.CanIpForward); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting can_ip_forward: %s", err) - } - - if err = d.Set("instance_description", instanceTemplate.Properties.Description); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting instance_description: %s", err) - } - if err = d.Set("project", project); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting project: %s", err) - } - if instanceTemplate.Properties.NetworkInterfaces != nil { - networkInterfaces, region, _, _, err := flattenNetworkInterfaces(d, config, instanceTemplate.Properties.NetworkInterfaces) - if err != nil { - return err - } - if err = d.Set("network_interface", networkInterfaces); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting network_interface: %s", err) - } - - if region != "" { - if err = d.Set("region", region); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting region: %s", err) - } - } - } - if instanceTemplate.Properties.Scheduling != nil { - scheduling := flattenScheduling(instanceTemplate.Properties.Scheduling) - if err = d.Set("scheduling", scheduling); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting scheduling: %s", err) - } - } - if instanceTemplate.Properties.Tags != nil { - if err = d.Set("tags", instanceTemplate.Properties.Tags.Items); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting 
tags: %s", err) - } - } else { - if err = d.Set("tags", nil); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting empty tags: %s", err) - } - } - if instanceTemplate.Properties.ServiceAccounts != nil { - if err = d.Set("service_account", flattenServiceAccounts(instanceTemplate.Properties.ServiceAccounts)); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting service_account: %s", err) - } - } - if instanceTemplate.Properties.GuestAccelerators != nil { - if err = d.Set("guest_accelerator", flattenGuestAccelerators(instanceTemplate.Properties.GuestAccelerators)); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting guest_accelerator: %s", err) - } - } - if instanceTemplate.Properties.ShieldedInstanceConfig != nil { - if err = d.Set("shielded_instance_config", flattenShieldedVmConfig(instanceTemplate.Properties.ShieldedInstanceConfig)); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting shielded_instance_config: %s", err) - } - } - - if instanceTemplate.Properties.ConfidentialInstanceConfig != nil { - if err = d.Set("confidential_instance_config", flattenConfidentialInstanceConfig(instanceTemplate.Properties.ConfidentialInstanceConfig)); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting confidential_instance_config: %s", err) - } - } - if instanceTemplate.Properties.AdvancedMachineFeatures != nil { - if err = d.Set("advanced_machine_features", flattenAdvancedMachineFeatures(instanceTemplate.Properties.AdvancedMachineFeatures)); err != nil { - return resource_compute_instance_template_fmt.Errorf("Error setting advanced_machine_features: %s", err) - } - } - - if reservationAffinity := instanceTemplate.Properties.ReservationAffinity; reservationAffinity != nil { - if err = d.Set("reservation_affinity", flattenReservationAffinity(reservationAffinity)); err != nil { - return 
resource_compute_instance_template_fmt.Errorf("Error setting reservation_affinity: %s", err) - } - } - - return nil -} - -func resourceComputeInstanceTemplateDelete(d *resource_compute_instance_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - splits := resource_compute_instance_template_strings.Split(d.Id(), "/") - op, err := config.NewComputeClient(userAgent).InstanceTemplates.Delete( - project, splits[len(splits)-1]).Do() - if err != nil { - return resource_compute_instance_template_fmt.Errorf("Error deleting instance template: %s", err) - } - - err = computeOperationWaitTime(config, op, project, "Deleting Instance Template", userAgent, d.Timeout(resource_compute_instance_template_schema.TimeoutDelete)) - if err != nil { - return err - } - - d.SetId("") - return nil -} - -func expandResourceComputeInstanceTemplateScheduling(d *resource_compute_instance_template_schema.ResourceData, meta interface{}) (*resource_compute_instance_template_compute.Scheduling, error) { - v, ok := d.GetOk("scheduling") - if !ok || v == nil { - - return &resource_compute_instance_template_compute.Scheduling{ - OnHostMaintenance: "MIGRATE", - }, nil - } - - expanded, err := expandScheduling(v) - if err != nil { - return nil, err - } - - if expanded.Preemptible && expanded.OnHostMaintenance == "" { - expanded.OnHostMaintenance = "TERMINATE" - } - return expanded, nil -} - -func resourceComputeInstanceTemplateImportState(d *resource_compute_instance_template_schema.ResourceData, meta interface{}) ([]*resource_compute_instance_template_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{"projects/(?P[^/]+)/global/instanceTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { - return nil, err - } - - id, err := 
replaceVars(d, config, "projects/{{project}}/global/instanceTemplates/{{name}}") - if err != nil { - return nil, resource_compute_instance_template_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_instance_template_schema.ResourceData{d}, nil -} - -func resourceComputeInstanceTemplateMigrateState( - v int, is *resource_compute_instance_template_migrate_terraform.InstanceState, meta interface{}) (*resource_compute_instance_template_migrate_terraform.InstanceState, error) { - if is.Empty() { - resource_compute_instance_template_migrate_log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - switch v { - case 0: - resource_compute_instance_template_migrate_log.Println("[INFO] Found Compute Instance Template State v0; migrating to v1") - return migrateComputeInstanceTemplateStateV0toV1(is) - default: - return is, resource_compute_instance_template_migrate_fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateComputeInstanceTemplateStateV0toV1(is *resource_compute_instance_template_migrate_terraform.InstanceState) (*resource_compute_instance_template_migrate_terraform.InstanceState, error) { - resource_compute_instance_template_migrate_log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - ar := is.Attributes["automatic_restart"] - delete(is.Attributes, "automatic_restart") - - schedulingCount, ok := is.Attributes["scheduling.#"] - if ok && schedulingCount != "0" && schedulingCount != "1" { - return nil, resource_compute_instance_template_migrate_fmt.Errorf("Found multiple scheduling blocks when there should only be one") - } - - if !ok || schedulingCount == "0" { - - is.Attributes["scheduling.#"] = "1" - is.Attributes["scheduling.0.automatic_restart"] = ar - } - - schedAr := is.Attributes["scheduling.0.automatic_restart"] - if ar != schedAr { - - return nil, resource_compute_instance_template_migrate_fmt.Errorf("Found differing values for automatic_restart in 
state, unsure how to proceed. automatic_restart = %#v, scheduling.0.automatic_restart = %#v", ar, schedAr) - } - - delete(is.Attributes, "on_host_maintenance") - - resource_compute_instance_template_migrate_log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} - -func waitForAttachmentToBeProvisioned(d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config, timeout resource_compute_interconnect_attachment_time.Duration) error { - return resource_compute_interconnect_attachment_resource.Retry(timeout, func() *resource_compute_interconnect_attachment_resource.RetryError { - if err := resourceComputeInterconnectAttachmentRead(d, config); err != nil { - return resource_compute_interconnect_attachment_resource.NonRetryableError(err) - } - - name := d.Get("name").(string) - state := d.Get("state").(string) - if state == "UNPROVISIONED" { - return resource_compute_interconnect_attachment_resource.RetryableError(resource_compute_interconnect_attachment_fmt.Errorf("InterconnectAttachment %q has state %q.", name, state)) - } - resource_compute_interconnect_attachment_log.Printf("InterconnectAttachment %q has state %q.", name, state) - return nil - }) -} - -func resourceComputeInterconnectAttachment() *resource_compute_interconnect_attachment_schema.Resource { - return &resource_compute_interconnect_attachment_schema.Resource{ - Create: resourceComputeInterconnectAttachmentCreate, - Read: resourceComputeInterconnectAttachmentRead, - Update: resourceComputeInterconnectAttachmentUpdate, - Delete: resourceComputeInterconnectAttachmentDelete, - - Importer: &resource_compute_interconnect_attachment_schema.ResourceImporter{ - State: resourceComputeInterconnectAttachmentImport, - }, - - Timeouts: &resource_compute_interconnect_attachment_schema.ResourceTimeout{ - Create: resource_compute_interconnect_attachment_schema.DefaultTimeout(10 * resource_compute_interconnect_attachment_time.Minute), - Update: 
resource_compute_interconnect_attachment_schema.DefaultTimeout(4 * resource_compute_interconnect_attachment_time.Minute), - Delete: resource_compute_interconnect_attachment_schema.DefaultTimeout(10 * resource_compute_interconnect_attachment_time.Minute), - }, - - Schema: map[string]*resource_compute_interconnect_attachment_schema.Schema{ - "name": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[a-z]([-a-z0-9]*[a-z0-9])?$`), - Description: `Name of the resource. Provided by the client when the resource is created. The -name must be 1-63 characters long, and comply with RFC1035. Specifically, the -name must be 1-63 characters long and match the regular expression -'[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a -lowercase letter, and all following characters must be a dash, lowercase -letter, or digit, except the last character, which cannot be a dash.`, - }, - "router": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the cloud router to be used for dynamic routing. This router must be in -the same region as this InterconnectAttachment. The InterconnectAttachment will -automatically connect the Interconnect to the network & region within which the -Cloud Router is configured.`, - }, - "admin_enabled": { - Type: resource_compute_interconnect_attachment_schema.TypeBool, - Optional: true, - Description: `Whether the VLAN attachment is enabled or disabled. 
When using -PARTNER type this will Pre-Activate the interconnect attachment`, - Default: true, - }, - "bandwidth": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_compute_interconnect_attachment_validation.StringInSlice([]string{"BPS_50M", "BPS_100M", "BPS_200M", "BPS_300M", "BPS_400M", "BPS_500M", "BPS_1G", "BPS_2G", "BPS_5G", "BPS_10G", "BPS_20G", "BPS_50G", ""}, false), - Description: `Provisioned bandwidth capacity for the interconnect attachment. -For attachments of type DEDICATED, the user can set the bandwidth. -For attachments of type PARTNER, the Google Partner that is operating the interconnect must set the bandwidth. -Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, -Defaults to BPS_10G Possible values: ["BPS_50M", "BPS_100M", "BPS_200M", "BPS_300M", "BPS_400M", "BPS_500M", "BPS_1G", "BPS_2G", "BPS_5G", "BPS_10G", "BPS_20G", "BPS_50G"]`, - }, - "candidate_subnets": { - Type: resource_compute_interconnect_attachment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Up to 16 candidate prefixes that can be used to restrict the allocation -of cloudRouterIpAddress and customerRouterIpAddress for this attachment. -All prefixes must be within link-local address space (169.254.0.0/16) -and must be /29 or shorter (/28, /27, etc). Google will attempt to select -an unused /29 from the supplied candidate prefix(es). The request will -fail if all possible /29s are in use on Google's edge. 
If not supplied, -Google will randomly select an unused /29 from all of link-local space.`, - Elem: &resource_compute_interconnect_attachment_schema.Schema{ - Type: resource_compute_interconnect_attachment_schema.TypeString, - }, - }, - "description": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Optional: true, - Description: `An optional description of this resource.`, - }, - "edge_availability_domain": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Desired availability domain for the attachment. Only available for type -PARTNER, at creation time. For improved reliability, customers should -configure a pair of attachments with one per availability domain. The -selected availability domain will be provided to the Partner via the -pairing key so that the provisioned circuit will lie in the specified -domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY.`, - }, - "encryption": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_interconnect_attachment_validation.StringInSlice([]string{"NONE", "IPSEC", ""}, false), - Description: `Indicates the user-supplied encryption option of this interconnect -attachment: - -NONE is the default value, which means that the attachment carries -unencrypted traffic. VMs can send traffic to, or receive traffic -from, this type of attachment. - -IPSEC indicates that the attachment carries only traffic encrypted by -an IPsec device such as an HA VPN gateway. VMs cannot directly send -traffic to, or receive traffic from, such an attachment. To use -IPsec-encrypted Cloud Interconnect create the attachment using this -option. - -Not currently available publicly. 
Default value: "NONE" Possible values: ["NONE", "IPSEC"]`, - Default: "NONE", - }, - "interconnect": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the underlying Interconnect object that this attachment's -traffic will traverse through. Required if type is DEDICATED, must not -be set if type is PARTNER.`, - }, - "ipsec_internal_addresses": { - Type: resource_compute_interconnect_attachment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `URL of addresses that have been reserved for the interconnect -attachment, Used only for interconnect attachment that has the -encryption option as IPSEC. - -The addresses must be RFC 1918 IP address ranges. When creating HA -VPN gateway over the interconnect attachment, if the attachment is -configured to use an RFC 1918 IP address, then the VPN gateway's IP -address will be allocated from the IP address range specified -here. - -For example, if the HA VPN gateway's interface 0 is paired to this -interconnect attachment, then an RFC 1918 IP address for the VPN -gateway interface 0 will be allocated from the IP address specified -for this interconnect attachment. - -If this field is not specified for interconnect attachment that has -encryption option as IPSEC, later on when creating HA VPN gateway on -this interconnect attachment, the HA VPN gateway's IP address will be -allocated from regional external IP address pool.`, - Elem: &resource_compute_interconnect_attachment_schema.Schema{ - Type: resource_compute_interconnect_attachment_schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "mtu": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Computed: true, - Optional: true, - Description: `Maximum Transmission Unit (MTU), in bytes, of packets passing through -this interconnect attachment. 
Currently, only 1440 and 1500 are allowed. If not specified, the value will default to 1440.`, - }, - "region": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Computed: true, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Region where the regional interconnect attachment resides.`, - }, - "type": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_interconnect_attachment_validation.StringInSlice([]string{"DEDICATED", "PARTNER", "PARTNER_PROVIDER", ""}, false), - Description: `The type of InterconnectAttachment you wish to create. Defaults to -DEDICATED. Possible values: ["DEDICATED", "PARTNER", "PARTNER_PROVIDER"]`, - }, - "vlan_tag8021q": { - Type: resource_compute_interconnect_attachment_schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. 
When -using PARTNER type this will be managed upstream.`, - }, - "cloud_router_ip_address": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Computed: true, - Description: `IPv4 address + prefix length to be configured on Cloud Router -Interface for this interconnect attachment.`, - }, - "creation_timestamp": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "customer_router_ip_address": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Computed: true, - Description: `IPv4 address + prefix length to be configured on the customer -router subinterface for this interconnect attachment.`, - }, - "google_reference_id": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Computed: true, - Description: `Google reference ID, to be used when raising support tickets with -Google or otherwise to debug backend connectivity issues.`, - }, - "pairing_key": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Computed: true, - Description: `[Output only for type PARTNER. Not present for DEDICATED]. The opaque -identifier of an PARTNER attachment used to initiate provisioning with -a selected partner. Of the form "XXXXX/region/domain"`, - }, - "partner_asn": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Computed: true, - Description: `[Output only for type PARTNER. Not present for DEDICATED]. Optional -BGP ASN for the router that should be supplied by a layer 3 Partner if -they configured BGP on behalf of the customer.`, - }, - "private_interconnect_info": { - Type: resource_compute_interconnect_attachment_schema.TypeList, - Computed: true, - Description: `Information specific to an InterconnectAttachment. 
This property -is populated if the interconnect that this is attached to is of type DEDICATED.`, - Elem: &resource_compute_interconnect_attachment_schema.Resource{ - Schema: map[string]*resource_compute_interconnect_attachment_schema.Schema{ - "tag8021q": { - Type: resource_compute_interconnect_attachment_schema.TypeInt, - Computed: true, - Description: `802.1q encapsulation tag to be used for traffic between -Google and the customer, going to and from this network and region.`, - }, - }, - }, - }, - "state": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Computed: true, - Description: `[Output Only] The current state of this attachment's functionality.`, - }, - "project": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_interconnect_attachment_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeInterconnectAttachmentCreate(d *resource_compute_interconnect_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - adminEnabledProp, err := expandComputeInterconnectAttachmentAdminEnabled(d.Get("admin_enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("admin_enabled"); ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, adminEnabledProp) { - obj["adminEnabled"] = adminEnabledProp - } - interconnectProp, err := expandComputeInterconnectAttachmentInterconnect(d.Get("interconnect"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("interconnect"); !isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(interconnectProp)) && (ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, interconnectProp)) { - 
obj["interconnect"] = interconnectProp - } - descriptionProp, err := expandComputeInterconnectAttachmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - mtuProp, err := expandComputeInterconnectAttachmentMtu(d.Get("mtu"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("mtu"); !isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(mtuProp)) && (ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, mtuProp)) { - obj["mtu"] = mtuProp - } - bandwidthProp, err := expandComputeInterconnectAttachmentBandwidth(d.Get("bandwidth"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bandwidth"); !isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(bandwidthProp)) && (ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, bandwidthProp)) { - obj["bandwidth"] = bandwidthProp - } - edgeAvailabilityDomainProp, err := expandComputeInterconnectAttachmentEdgeAvailabilityDomain(d.Get("edge_availability_domain"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("edge_availability_domain"); !isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(edgeAvailabilityDomainProp)) && (ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, edgeAvailabilityDomainProp)) { - obj["edgeAvailabilityDomain"] = edgeAvailabilityDomainProp - } - typeProp, err := expandComputeInterconnectAttachmentType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(typeProp)) && (ok || 
!resource_compute_interconnect_attachment_reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - routerProp, err := expandComputeInterconnectAttachmentRouter(d.Get("router"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("router"); !isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(routerProp)) && (ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, routerProp)) { - obj["router"] = routerProp - } - nameProp, err := expandComputeInterconnectAttachmentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(nameProp)) && (ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - candidateSubnetsProp, err := expandComputeInterconnectAttachmentCandidateSubnets(d.Get("candidate_subnets"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("candidate_subnets"); !isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(candidateSubnetsProp)) && (ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, candidateSubnetsProp)) { - obj["candidateSubnets"] = candidateSubnetsProp - } - vlanTag8021qProp, err := expandComputeInterconnectAttachmentVlanTag8021q(d.Get("vlan_tag8021q"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vlan_tag8021q"); !isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(vlanTag8021qProp)) && (ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, vlanTag8021qProp)) { - obj["vlanTag8021q"] = vlanTag8021qProp - } - ipsecInternalAddressesProp, err := expandComputeInterconnectAttachmentIpsecInternalAddresses(d.Get("ipsec_internal_addresses"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ipsec_internal_addresses"); 
!isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(ipsecInternalAddressesProp)) && (ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, ipsecInternalAddressesProp)) { - obj["ipsecInternalAddresses"] = ipsecInternalAddressesProp - } - encryptionProp, err := expandComputeInterconnectAttachmentEncryption(d.Get("encryption"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("encryption"); !isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(encryptionProp)) && (ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, encryptionProp)) { - obj["encryption"] = encryptionProp - } - regionProp, err := expandComputeInterconnectAttachmentRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(regionProp)) && (ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/interconnectAttachments") - if err != nil { - return err - } - - resource_compute_interconnect_attachment_log.Printf("[DEBUG] Creating new InterconnectAttachment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error fetching project for InterconnectAttachment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_interconnect_attachment_schema.TimeoutCreate)) - if err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error creating InterconnectAttachment: %s", err) - } - - id, err := replaceVars(d, config, 
"projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}") - if err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating InterconnectAttachment", userAgent, - d.Timeout(resource_compute_interconnect_attachment_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_interconnect_attachment_fmt.Errorf("Error waiting to create InterconnectAttachment: %s", err) - } - - if err := waitForAttachmentToBeProvisioned(d, config, d.Timeout(resource_compute_interconnect_attachment_schema.TimeoutCreate)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error waiting for InterconnectAttachment %q to be provisioned: %q", d.Get("name").(string), err) - } - - resource_compute_interconnect_attachment_log.Printf("[DEBUG] Finished creating InterconnectAttachment %q: %#v", d.Id(), res) - - return resourceComputeInterconnectAttachmentRead(d, meta) -} - -func resourceComputeInterconnectAttachmentRead(d *resource_compute_interconnect_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error fetching project for InterconnectAttachment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, 
resource_compute_interconnect_attachment_fmt.Sprintf("ComputeInterconnectAttachment %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - - if err := d.Set("admin_enabled", flattenComputeInterconnectAttachmentAdminEnabled(res["adminEnabled"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("cloud_router_ip_address", flattenComputeInterconnectAttachmentCloudRouterIpAddress(res["cloudRouterIpAddress"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("customer_router_ip_address", flattenComputeInterconnectAttachmentCustomerRouterIpAddress(res["customerRouterIpAddress"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("interconnect", flattenComputeInterconnectAttachmentInterconnect(res["interconnect"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("description", flattenComputeInterconnectAttachmentDescription(res["description"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("mtu", flattenComputeInterconnectAttachmentMtu(res["mtu"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("bandwidth", flattenComputeInterconnectAttachmentBandwidth(res["bandwidth"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", 
err) - } - if err := d.Set("edge_availability_domain", flattenComputeInterconnectAttachmentEdgeAvailabilityDomain(res["edgeAvailabilityDomain"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("pairing_key", flattenComputeInterconnectAttachmentPairingKey(res["pairingKey"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("partner_asn", flattenComputeInterconnectAttachmentPartnerAsn(res["partnerAsn"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("private_interconnect_info", flattenComputeInterconnectAttachmentPrivateInterconnectInfo(res["privateInterconnectInfo"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("type", flattenComputeInterconnectAttachmentType(res["type"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("state", flattenComputeInterconnectAttachmentState(res["state"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("google_reference_id", flattenComputeInterconnectAttachmentGoogleReferenceId(res["googleReferenceId"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("router", flattenComputeInterconnectAttachmentRouter(res["router"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := 
d.Set("creation_timestamp", flattenComputeInterconnectAttachmentCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("name", flattenComputeInterconnectAttachmentName(res["name"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("vlan_tag8021q", flattenComputeInterconnectAttachmentVlanTag8021q(res["vlanTag8021q"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("ipsec_internal_addresses", flattenComputeInterconnectAttachmentIpsecInternalAddresses(res["ipsecInternalAddresses"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("encryption", flattenComputeInterconnectAttachmentEncryption(res["encryption"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("region", flattenComputeInterconnectAttachmentRegion(res["region"], d, config)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error reading InterconnectAttachment: %s", err) - } - - return nil -} - -func resourceComputeInterconnectAttachmentUpdate(d *resource_compute_interconnect_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := 
getProject(d, config) - if err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error fetching project for InterconnectAttachment: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - adminEnabledProp, err := expandComputeInterconnectAttachmentAdminEnabled(d.Get("admin_enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("admin_enabled"); ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, adminEnabledProp) { - obj["adminEnabled"] = adminEnabledProp - } - descriptionProp, err := expandComputeInterconnectAttachmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(v)) && (ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - mtuProp, err := expandComputeInterconnectAttachmentMtu(d.Get("mtu"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("mtu"); !isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(v)) && (ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, mtuProp)) { - obj["mtu"] = mtuProp - } - bandwidthProp, err := expandComputeInterconnectAttachmentBandwidth(d.Get("bandwidth"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bandwidth"); !isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(v)) && (ok || !resource_compute_interconnect_attachment_reflect.DeepEqual(v, bandwidthProp)) { - obj["bandwidth"] = bandwidthProp - } - regionProp, err := expandComputeInterconnectAttachmentRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(v)) && (ok || 
!resource_compute_interconnect_attachment_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}") - if err != nil { - return err - } - - resource_compute_interconnect_attachment_log.Printf("[DEBUG] Updating InterconnectAttachment %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_interconnect_attachment_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error updating InterconnectAttachment %q: %s", d.Id(), err) - } else { - resource_compute_interconnect_attachment_log.Printf("[DEBUG] Finished updating InterconnectAttachment %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating InterconnectAttachment", userAgent, - d.Timeout(resource_compute_interconnect_attachment_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeInterconnectAttachmentRead(d, meta) -} - -func resourceComputeInterconnectAttachmentDelete(d *resource_compute_interconnect_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error fetching project for InterconnectAttachment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - if err := waitForAttachmentToBeProvisioned(d, config, 
d.Timeout(resource_compute_interconnect_attachment_schema.TimeoutCreate)); err != nil { - return resource_compute_interconnect_attachment_fmt.Errorf("Error waiting for InterconnectAttachment %q to be provisioned: %q", d.Get("name").(string), err) - } - resource_compute_interconnect_attachment_log.Printf("[DEBUG] Deleting InterconnectAttachment %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_interconnect_attachment_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "InterconnectAttachment") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting InterconnectAttachment", userAgent, - d.Timeout(resource_compute_interconnect_attachment_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_interconnect_attachment_log.Printf("[DEBUG] Finished deleting InterconnectAttachment %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeInterconnectAttachmentImport(d *resource_compute_interconnect_attachment_schema.ResourceData, meta interface{}) ([]*resource_compute_interconnect_attachment_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/interconnectAttachments/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/interconnectAttachments/{{name}}") - if err != nil { - return nil, resource_compute_interconnect_attachment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_interconnect_attachment_schema.ResourceData{d}, nil -} - -func flattenComputeInterconnectAttachmentAdminEnabled(v interface{}, d 
*resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentCloudRouterIpAddress(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentCustomerRouterIpAddress(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentInterconnect(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentDescription(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentMtu(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - - if floatVal, ok := v.(float64); ok { - return resource_compute_interconnect_attachment_fmt.Sprintf("%d", int(floatVal)) - } - return v -} - -func flattenComputeInterconnectAttachmentBandwidth(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentEdgeAvailabilityDomain(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentPairingKey(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentPartnerAsn(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentPrivateInterconnectInfo(v interface{}, d 
*resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["tag8021q"] = - flattenComputeInterconnectAttachmentPrivateInterconnectInfoTag8021q(original["tag8021q"], d, config) - return []interface{}{transformed} -} - -func flattenComputeInterconnectAttachmentPrivateInterconnectInfoTag8021q(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_interconnect_attachment_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeInterconnectAttachmentType(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentState(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentGoogleReferenceId(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentRouter(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeInterconnectAttachmentCreationTimestamp(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeInterconnectAttachmentName(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { 
- return v -} - -func flattenComputeInterconnectAttachmentVlanTag8021q(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_interconnect_attachment_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeInterconnectAttachmentIpsecInternalAddresses(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeInterconnectAttachmentEncryption(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(resource_compute_interconnect_attachment_reflect.ValueOf(v)) { - return "NONE" - } - - return v -} - -func flattenComputeInterconnectAttachmentRegion(v interface{}, d *resource_compute_interconnect_attachment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeInterconnectAttachmentAdminEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentInterconnect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentMtu(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentBandwidth(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} - -func expandComputeInterconnectAttachmentEdgeAvailabilityDomain(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentRouter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("routers", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_interconnect_attachment_fmt.Errorf("Invalid value for router: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeInterconnectAttachmentName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentCandidateSubnets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentVlanTag8021q(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentIpsecInternalAddresses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - return nil, resource_compute_interconnect_attachment_fmt.Errorf("Invalid value for ipsec_internal_addresses: nil") - } - f, err := parseRegionalFieldValue("addresses", raw.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_interconnect_attachment_fmt.Errorf("Invalid value for ipsec_internal_addresses: %s", err) - } - req = append(req, f.RelativeLink()) - } - return req, nil -} - -func expandComputeInterconnectAttachmentEncryption(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandComputeInterconnectAttachmentRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_interconnect_attachment_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeManagedSslCertificate() *resource_compute_managed_ssl_certificate_schema.Resource { - return &resource_compute_managed_ssl_certificate_schema.Resource{ - Create: resourceComputeManagedSslCertificateCreate, - Read: resourceComputeManagedSslCertificateRead, - Delete: resourceComputeManagedSslCertificateDelete, - - Importer: &resource_compute_managed_ssl_certificate_schema.ResourceImporter{ - State: resourceComputeManagedSslCertificateImport, - }, - - Timeouts: &resource_compute_managed_ssl_certificate_schema.ResourceTimeout{ - Create: resource_compute_managed_ssl_certificate_schema.DefaultTimeout(6 * resource_compute_managed_ssl_certificate_time.Minute), - Delete: resource_compute_managed_ssl_certificate_schema.DefaultTimeout(30 * resource_compute_managed_ssl_certificate_time.Minute), - }, - - Schema: map[string]*resource_compute_managed_ssl_certificate_schema.Schema{ - "description": { - Type: resource_compute_managed_ssl_certificate_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "managed": { - Type: resource_compute_managed_ssl_certificate_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Properties relevant to a managed certificate. 
These will be used if the -certificate is managed (as indicated by a value of 'MANAGED' in 'type').`, - MaxItems: 1, - Elem: &resource_compute_managed_ssl_certificate_schema.Resource{ - Schema: map[string]*resource_compute_managed_ssl_certificate_schema.Schema{ - "domains": { - Type: resource_compute_managed_ssl_certificate_schema.TypeList, - Required: true, - ForceNew: true, - DiffSuppressFunc: absoluteDomainSuppress, - Description: `Domains for which a managed SSL certificate will be valid. Currently, -there can be up to 100 domains in this list.`, - MaxItems: 100, - Elem: &resource_compute_managed_ssl_certificate_schema.Schema{ - Type: resource_compute_managed_ssl_certificate_schema.TypeString, - }, - }, - }, - }, - }, - "name": { - Type: resource_compute_managed_ssl_certificate_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash. - - -These are in the same namespace as the managed SSL certificates.`, - }, - "type": { - Type: resource_compute_managed_ssl_certificate_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_managed_ssl_certificate_validation.StringInSlice([]string{"MANAGED", ""}, false), - Description: `Enum field whose value is always 'MANAGED' - used to signal to the API -which type this is. 
Default value: "MANAGED" Possible values: ["MANAGED"]`, - Default: "MANAGED", - }, - "certificate_id": { - Type: resource_compute_managed_ssl_certificate_schema.TypeInt, - Computed: true, - Optional: true, - Description: `The unique identifier for the resource.`, - }, - "creation_timestamp": { - Type: resource_compute_managed_ssl_certificate_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "expire_time": { - Type: resource_compute_managed_ssl_certificate_schema.TypeString, - Computed: true, - Description: `Expire time of the certificate.`, - }, - "subject_alternative_names": { - Type: resource_compute_managed_ssl_certificate_schema.TypeList, - Computed: true, - Description: `Domains associated with the certificate via Subject Alternative Name.`, - Elem: &resource_compute_managed_ssl_certificate_schema.Schema{ - Type: resource_compute_managed_ssl_certificate_schema.TypeString, - }, - }, - "project": { - Type: resource_compute_managed_ssl_certificate_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_managed_ssl_certificate_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeManagedSslCertificateCreate(d *resource_compute_managed_ssl_certificate_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeManagedSslCertificateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_managed_ssl_certificate_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_managed_ssl_certificate_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := 
expandComputeManagedSslCertificateName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_managed_ssl_certificate_reflect.ValueOf(nameProp)) && (ok || !resource_compute_managed_ssl_certificate_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - managedProp, err := expandComputeManagedSslCertificateManaged(d.Get("managed"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("managed"); !isEmptyValue(resource_compute_managed_ssl_certificate_reflect.ValueOf(managedProp)) && (ok || !resource_compute_managed_ssl_certificate_reflect.DeepEqual(v, managedProp)) { - obj["managed"] = managedProp - } - typeProp, err := expandComputeManagedSslCertificateType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(resource_compute_managed_ssl_certificate_reflect.ValueOf(typeProp)) && (ok || !resource_compute_managed_ssl_certificate_reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates") - if err != nil { - return err - } - - resource_compute_managed_ssl_certificate_log.Printf("[DEBUG] Creating new ManagedSslCertificate: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_managed_ssl_certificate_fmt.Errorf("Error fetching project for ManagedSslCertificate: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_managed_ssl_certificate_schema.TimeoutCreate)) - if err != nil { - return resource_compute_managed_ssl_certificate_fmt.Errorf("Error creating ManagedSslCertificate: %s", err) - } - - id, err := replaceVars(d, config, 
"projects/{{project}}/global/sslCertificates/{{name}}") - if err != nil { - return resource_compute_managed_ssl_certificate_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating ManagedSslCertificate", userAgent, - d.Timeout(resource_compute_managed_ssl_certificate_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_managed_ssl_certificate_fmt.Errorf("Error waiting to create ManagedSslCertificate: %s", err) - } - - resource_compute_managed_ssl_certificate_log.Printf("[DEBUG] Finished creating ManagedSslCertificate %q: %#v", d.Id(), res) - - return resourceComputeManagedSslCertificateRead(d, meta) -} - -func resourceComputeManagedSslCertificateRead(d *resource_compute_managed_ssl_certificate_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_managed_ssl_certificate_fmt.Errorf("Error fetching project for ManagedSslCertificate: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_managed_ssl_certificate_fmt.Sprintf("ComputeManagedSslCertificate %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_managed_ssl_certificate_fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeManagedSslCertificateCreationTimestamp(res["creationTimestamp"], d, config)); 
err != nil { - return resource_compute_managed_ssl_certificate_fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - if err := d.Set("description", flattenComputeManagedSslCertificateDescription(res["description"], d, config)); err != nil { - return resource_compute_managed_ssl_certificate_fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - if err := d.Set("certificate_id", flattenComputeManagedSslCertificateCertificateId(res["id"], d, config)); err != nil { - return resource_compute_managed_ssl_certificate_fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - if err := d.Set("name", flattenComputeManagedSslCertificateName(res["name"], d, config)); err != nil { - return resource_compute_managed_ssl_certificate_fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - if err := d.Set("managed", flattenComputeManagedSslCertificateManaged(res["managed"], d, config)); err != nil { - return resource_compute_managed_ssl_certificate_fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - if err := d.Set("type", flattenComputeManagedSslCertificateType(res["type"], d, config)); err != nil { - return resource_compute_managed_ssl_certificate_fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - if err := d.Set("subject_alternative_names", flattenComputeManagedSslCertificateSubjectAlternativeNames(res["subjectAlternativeNames"], d, config)); err != nil { - return resource_compute_managed_ssl_certificate_fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - if err := d.Set("expire_time", flattenComputeManagedSslCertificateExpireTime(res["expireTime"], d, config)); err != nil { - return resource_compute_managed_ssl_certificate_fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_managed_ssl_certificate_fmt.Errorf("Error reading ManagedSslCertificate: %s", err) - } - - return nil -} 
- -func resourceComputeManagedSslCertificateDelete(d *resource_compute_managed_ssl_certificate_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_managed_ssl_certificate_fmt.Errorf("Error fetching project for ManagedSslCertificate: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_managed_ssl_certificate_log.Printf("[DEBUG] Deleting ManagedSslCertificate %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_managed_ssl_certificate_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ManagedSslCertificate") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting ManagedSslCertificate", userAgent, - d.Timeout(resource_compute_managed_ssl_certificate_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_managed_ssl_certificate_log.Printf("[DEBUG] Finished deleting ManagedSslCertificate %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeManagedSslCertificateImport(d *resource_compute_managed_ssl_certificate_schema.ResourceData, meta interface{}) ([]*resource_compute_managed_ssl_certificate_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/sslCertificates/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, 
"projects/{{project}}/global/sslCertificates/{{name}}") - if err != nil { - return nil, resource_compute_managed_ssl_certificate_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_managed_ssl_certificate_schema.ResourceData{d}, nil -} - -func flattenComputeManagedSslCertificateCreationTimestamp(v interface{}, d *resource_compute_managed_ssl_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeManagedSslCertificateDescription(v interface{}, d *resource_compute_managed_ssl_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeManagedSslCertificateCertificateId(v interface{}, d *resource_compute_managed_ssl_certificate_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_managed_ssl_certificate_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeManagedSslCertificateName(v interface{}, d *resource_compute_managed_ssl_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeManagedSslCertificateManaged(v interface{}, d *resource_compute_managed_ssl_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["domains"] = - flattenComputeManagedSslCertificateManagedDomains(original["domains"], d, config) - return []interface{}{transformed} -} - -func flattenComputeManagedSslCertificateManagedDomains(v interface{}, d *resource_compute_managed_ssl_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeManagedSslCertificateType(v interface{}, d 
*resource_compute_managed_ssl_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeManagedSslCertificateSubjectAlternativeNames(v interface{}, d *resource_compute_managed_ssl_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeManagedSslCertificateExpireTime(v interface{}, d *resource_compute_managed_ssl_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeManagedSslCertificateDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeManagedSslCertificateName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeManagedSslCertificateManaged(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDomains, err := expandComputeManagedSslCertificateManagedDomains(original["domains"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_managed_ssl_certificate_reflect.ValueOf(transformedDomains); val.IsValid() && !isEmptyValue(val) { - transformed["domains"] = transformedDomains - } - - return transformed, nil -} - -func expandComputeManagedSslCertificateManagedDomains(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeManagedSslCertificateType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeNetwork() *resource_compute_network_schema.Resource { - return &resource_compute_network_schema.Resource{ - Create: resourceComputeNetworkCreate, - Read: resourceComputeNetworkRead, - Update: resourceComputeNetworkUpdate, - 
Delete: resourceComputeNetworkDelete, - - Importer: &resource_compute_network_schema.ResourceImporter{ - State: resourceComputeNetworkImport, - }, - - Timeouts: &resource_compute_network_schema.ResourceTimeout{ - Create: resource_compute_network_schema.DefaultTimeout(4 * resource_compute_network_time.Minute), - Update: resource_compute_network_schema.DefaultTimeout(4 * resource_compute_network_time.Minute), - Delete: resource_compute_network_schema.DefaultTimeout(4 * resource_compute_network_time.Minute), - }, - - Schema: map[string]*resource_compute_network_schema.Schema{ - "name": { - Type: resource_compute_network_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "auto_create_subnetworks": { - Type: resource_compute_network_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `When set to 'true', the network is created in "auto subnet mode" and -it will create a subnet for each region automatically across the -'10.128.0.0/9' address range. - -When set to 'false', the network is created in "custom subnet mode" so -the user can explicitly connect subnetwork resources.`, - Default: true, - }, - "description": { - Type: resource_compute_network_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. The resource must be -recreated to modify this field.`, - }, - "mtu": { - Type: resource_compute_network_schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Maximum Transmission Unit in bytes. 
The minimum value for this field is 1460 -and the maximum value is 1500 bytes.`, - }, - "routing_mode": { - Type: resource_compute_network_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_compute_network_validation.StringInSlice([]string{"REGIONAL", "GLOBAL", ""}, false), - Description: `The network-wide routing mode to use. If set to 'REGIONAL', this -network's cloud routers will only advertise routes with subnetworks -of this network in the same region as the router. If set to 'GLOBAL', -this network's cloud routers will advertise routes with all -subnetworks of this network, across regions. Possible values: ["REGIONAL", "GLOBAL"]`, - }, - - "gateway_ipv4": { - Type: resource_compute_network_schema.TypeString, - Computed: true, - Description: `The gateway address for default routing out of the network. This value -is selected by GCP.`, - }, - "delete_default_routes_on_create": { - Type: resource_compute_network_schema.TypeBool, - Optional: true, - Default: false, - }, - "project": { - Type: resource_compute_network_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_network_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeNetworkCreate(d *resource_compute_network_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeNetworkDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_network_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_network_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeNetworkName(d.Get("name"), d, config) - if err != nil 
{ - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_network_reflect.ValueOf(nameProp)) && (ok || !resource_compute_network_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - autoCreateSubnetworksProp, err := expandComputeNetworkAutoCreateSubnetworks(d.Get("auto_create_subnetworks"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("auto_create_subnetworks"); ok || !resource_compute_network_reflect.DeepEqual(v, autoCreateSubnetworksProp) { - obj["autoCreateSubnetworks"] = autoCreateSubnetworksProp - } - routingConfigProp, err := expandComputeNetworkRoutingConfig(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(resource_compute_network_reflect.ValueOf(routingConfigProp)) { - obj["routingConfig"] = routingConfigProp - } - mtuProp, err := expandComputeNetworkMtu(d.Get("mtu"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("mtu"); !isEmptyValue(resource_compute_network_reflect.ValueOf(mtuProp)) && (ok || !resource_compute_network_reflect.DeepEqual(v, mtuProp)) { - obj["mtu"] = mtuProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks") - if err != nil { - return err - } - - resource_compute_network_log.Printf("[DEBUG] Creating new Network: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_network_fmt.Errorf("Error fetching project for Network: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_network_schema.TimeoutCreate)) - if err != nil { - return resource_compute_network_fmt.Errorf("Error creating Network: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{name}}") - if err != nil { - 
return resource_compute_network_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating Network", userAgent, - d.Timeout(resource_compute_network_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_network_fmt.Errorf("Error waiting to create Network: %s", err) - } - - if d.Get("delete_default_routes_on_create").(bool) { - token := "" - for paginate := true; paginate; { - network, err := config.NewComputeClient(userAgent).Networks.Get(project, d.Get("name").(string)).Do() - if err != nil { - return resource_compute_network_fmt.Errorf("Error finding network in proj: %s", err) - } - filter := resource_compute_network_fmt.Sprintf("(network=\"%s\") AND (destRange=\"0.0.0.0/0\")", network.SelfLink) - resource_compute_network_log.Printf("[DEBUG] Getting routes for network %q with filter '%q'", d.Get("name").(string), filter) - resp, err := config.NewComputeClient(userAgent).Routes.List(project).Filter(filter).Do() - if err != nil { - return resource_compute_network_fmt.Errorf("Error listing routes in proj: %s", err) - } - - resource_compute_network_log.Printf("[DEBUG] Found %d routes rules in %q network", len(resp.Items), d.Get("name").(string)) - - for _, route := range resp.Items { - op, err := config.NewComputeClient(userAgent).Routes.Delete(project, route.Name).Do() - if err != nil { - return resource_compute_network_fmt.Errorf("Error deleting route: %s", err) - } - err = computeOperationWaitTime(config, op, project, "Deleting Route", userAgent, d.Timeout(resource_compute_network_schema.TimeoutCreate)) - if err != nil { - return err - } - } - - token = resp.NextPageToken - paginate = token != "" - } - } - - resource_compute_network_log.Printf("[DEBUG] Finished creating Network %q: %#v", d.Id(), res) - - return resourceComputeNetworkRead(d, meta) -} - -func resourceComputeNetworkRead(d *resource_compute_network_schema.ResourceData, meta interface{}) error { - 
config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_network_fmt.Errorf("Error fetching project for Network: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_network_fmt.Sprintf("ComputeNetwork %q", d.Id())) - } - - if _, ok := d.GetOkExists("delete_default_routes_on_create"); !ok { - if err := d.Set("delete_default_routes_on_create", false); err != nil { - return resource_compute_network_fmt.Errorf("Error setting delete_default_routes_on_create: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return resource_compute_network_fmt.Errorf("Error reading Network: %s", err) - } - - if err := d.Set("description", flattenComputeNetworkDescription(res["description"], d, config)); err != nil { - return resource_compute_network_fmt.Errorf("Error reading Network: %s", err) - } - if err := d.Set("gateway_ipv4", flattenComputeNetworkGatewayIpv4(res["gatewayIPv4"], d, config)); err != nil { - return resource_compute_network_fmt.Errorf("Error reading Network: %s", err) - } - if err := d.Set("name", flattenComputeNetworkName(res["name"], d, config)); err != nil { - return resource_compute_network_fmt.Errorf("Error reading Network: %s", err) - } - if err := d.Set("auto_create_subnetworks", flattenComputeNetworkAutoCreateSubnetworks(res["autoCreateSubnetworks"], d, config)); err != nil { - return resource_compute_network_fmt.Errorf("Error reading Network: %s", err) - } - - if flattenedProp := 
flattenComputeNetworkRoutingConfig(res["routingConfig"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*resource_compute_network_googleapi.Error); ok { - return resource_compute_network_fmt.Errorf("Error reading Network: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return resource_compute_network_fmt.Errorf("Error setting %s: %s", k, err) - } - } - } - } - if err := d.Set("mtu", flattenComputeNetworkMtu(res["mtu"], d, config)); err != nil { - return resource_compute_network_fmt.Errorf("Error reading Network: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_network_fmt.Errorf("Error reading Network: %s", err) - } - - return nil -} - -func resourceComputeNetworkUpdate(d *resource_compute_network_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_network_fmt.Errorf("Error fetching project for Network: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("routing_mode") { - obj := make(map[string]interface{}) - - routingConfigProp, err := expandComputeNetworkRoutingConfig(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(resource_compute_network_reflect.ValueOf(routingConfigProp)) { - obj["routingConfig"] = routingConfigProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{name}}") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, 
d.Timeout(resource_compute_network_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_network_fmt.Errorf("Error updating Network %q: %s", d.Id(), err) - } else { - resource_compute_network_log.Printf("[DEBUG] Finished updating Network %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating Network", userAgent, - d.Timeout(resource_compute_network_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeNetworkRead(d, meta) -} - -func resourceComputeNetworkDelete(d *resource_compute_network_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_network_fmt.Errorf("Error fetching project for Network: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_network_log.Printf("[DEBUG] Deleting Network %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_network_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Network") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting Network", userAgent, - d.Timeout(resource_compute_network_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_network_log.Printf("[DEBUG] Finished deleting Network %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeNetworkImport(d *resource_compute_network_schema.ResourceData, meta interface{}) 
([]*resource_compute_network_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/networks/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{name}}") - if err != nil { - return nil, resource_compute_network_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if err := d.Set("delete_default_routes_on_create", false); err != nil { - return nil, resource_compute_network_fmt.Errorf("Error setting delete_default_routes_on_create: %s", err) - } - - return []*resource_compute_network_schema.ResourceData{d}, nil -} - -func flattenComputeNetworkDescription(v interface{}, d *resource_compute_network_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNetworkGatewayIpv4(v interface{}, d *resource_compute_network_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNetworkName(v interface{}, d *resource_compute_network_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNetworkAutoCreateSubnetworks(v interface{}, d *resource_compute_network_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNetworkRoutingConfig(v interface{}, d *resource_compute_network_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["routing_mode"] = - flattenComputeNetworkRoutingConfigRoutingMode(original["routingMode"], d, config) - return []interface{}{transformed} -} - -func flattenComputeNetworkRoutingConfigRoutingMode(v interface{}, d *resource_compute_network_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNetworkMtu(v interface{}, d 
*resource_compute_network_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_network_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func expandComputeNetworkDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkAutoCreateSubnetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkRoutingConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - transformed := make(map[string]interface{}) - transformedRoutingMode, err := expandComputeNetworkRoutingConfigRoutingMode(d.Get("routing_mode"), d, config) - if err != nil { - return nil, err - } else if val := resource_compute_network_reflect.ValueOf(transformedRoutingMode); val.IsValid() && !isEmptyValue(val) { - transformed["routingMode"] = transformedRoutingMode - } - - return transformed, nil -} - -func expandComputeNetworkRoutingConfigRoutingMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkMtu(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeNetworkEndpoint() *resource_compute_network_endpoint_schema.Resource { - return &resource_compute_network_endpoint_schema.Resource{ - Create: resourceComputeNetworkEndpointCreate, - Read: resourceComputeNetworkEndpointRead, - Delete: resourceComputeNetworkEndpointDelete, - - Importer: &resource_compute_network_endpoint_schema.ResourceImporter{ - State: resourceComputeNetworkEndpointImport, - }, - - Timeouts: 
&resource_compute_network_endpoint_schema.ResourceTimeout{ - Create: resource_compute_network_endpoint_schema.DefaultTimeout(6 * resource_compute_network_endpoint_time.Minute), - Delete: resource_compute_network_endpoint_schema.DefaultTimeout(6 * resource_compute_network_endpoint_time.Minute), - }, - - Schema: map[string]*resource_compute_network_endpoint_schema.Schema{ - "instance": { - Type: resource_compute_network_endpoint_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name for a specific VM instance that the IP address belongs to. -This is required for network endpoints of type GCE_VM_IP_PORT. -The instance must be in the same zone of network endpoint group.`, - }, - "ip_address": { - Type: resource_compute_network_endpoint_schema.TypeString, - Required: true, - ForceNew: true, - Description: `IPv4 address of network endpoint. The IP address must belong -to a VM in GCE (either the primary IP or as part of an aliased IP -range).`, - }, - "network_endpoint_group": { - Type: resource_compute_network_endpoint_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareResourceNames, - Description: `The network endpoint group this endpoint is part of.`, - }, - "port": { - Type: resource_compute_network_endpoint_schema.TypeInt, - Required: true, - ForceNew: true, - Description: `Port number of network endpoint.`, - }, - "zone": { - Type: resource_compute_network_endpoint_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Zone where the containing network endpoint group is located.`, - }, - "project": { - Type: resource_compute_network_endpoint_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeNetworkEndpointCreate(d *resource_compute_network_endpoint_schema.ResourceData, meta interface{}) error 
{ - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - instanceProp, err := expandNestedComputeNetworkEndpointInstance(d.Get("instance"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance"); !isEmptyValue(resource_compute_network_endpoint_reflect.ValueOf(instanceProp)) && (ok || !resource_compute_network_endpoint_reflect.DeepEqual(v, instanceProp)) { - obj["instance"] = instanceProp - } - portProp, err := expandNestedComputeNetworkEndpointPort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(resource_compute_network_endpoint_reflect.ValueOf(portProp)) && (ok || !resource_compute_network_endpoint_reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - ipAddressProp, err := expandNestedComputeNetworkEndpointIpAddress(d.Get("ip_address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_address"); !isEmptyValue(resource_compute_network_endpoint_reflect.ValueOf(ipAddressProp)) && (ok || !resource_compute_network_endpoint_reflect.DeepEqual(v, ipAddressProp)) { - obj["ipAddress"] = ipAddressProp - } - - obj, err = resourceComputeNetworkEndpointEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "networkEndpoint/{{project}}/{{zone}}/{{network_endpoint_group}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/attachNetworkEndpoints") - if err != nil { - return err - } - - resource_compute_network_endpoint_log.Printf("[DEBUG] Creating new NetworkEndpoint: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return 
resource_compute_network_endpoint_fmt.Errorf("Error fetching project for NetworkEndpoint: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_network_endpoint_schema.TimeoutCreate)) - if err != nil { - return resource_compute_network_endpoint_fmt.Errorf("Error creating NetworkEndpoint: %s", err) - } - - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{network_endpoint_group}}/{{instance}}/{{ip_address}}/{{port}}") - if err != nil { - return resource_compute_network_endpoint_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating NetworkEndpoint", userAgent, - d.Timeout(resource_compute_network_endpoint_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_network_endpoint_fmt.Errorf("Error waiting to create NetworkEndpoint: %s", err) - } - - resource_compute_network_endpoint_log.Printf("[DEBUG] Finished creating NetworkEndpoint %q: %#v", d.Id(), res) - - return resourceComputeNetworkEndpointRead(d, meta) -} - -func resourceComputeNetworkEndpointRead(d *resource_compute_network_endpoint_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/listNetworkEndpoints") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_network_endpoint_fmt.Errorf("Error fetching project for NetworkEndpoint: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - 
} - - res, err := sendRequest(config, "POST", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_network_endpoint_fmt.Sprintf("ComputeNetworkEndpoint %q", d.Id())) - } - - res, err = flattenNestedComputeNetworkEndpoint(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_network_endpoint_log.Printf("[DEBUG] Removing ComputeNetworkEndpoint because it couldn't be matched.") - d.SetId("") - return nil - } - - res, err = resourceComputeNetworkEndpointDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_network_endpoint_log.Printf("[DEBUG] Removing ComputeNetworkEndpoint because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_compute_network_endpoint_fmt.Errorf("Error reading NetworkEndpoint: %s", err) - } - - if err := d.Set("instance", flattenNestedComputeNetworkEndpointInstance(res["instance"], d, config)); err != nil { - return resource_compute_network_endpoint_fmt.Errorf("Error reading NetworkEndpoint: %s", err) - } - if err := d.Set("port", flattenNestedComputeNetworkEndpointPort(res["port"], d, config)); err != nil { - return resource_compute_network_endpoint_fmt.Errorf("Error reading NetworkEndpoint: %s", err) - } - if err := d.Set("ip_address", flattenNestedComputeNetworkEndpointIpAddress(res["ipAddress"], d, config)); err != nil { - return resource_compute_network_endpoint_fmt.Errorf("Error reading NetworkEndpoint: %s", err) - } - - return nil -} - -func resourceComputeNetworkEndpointDelete(d *resource_compute_network_endpoint_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_network_endpoint_fmt.Errorf("Error 
fetching project for NetworkEndpoint: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "networkEndpoint/{{project}}/{{zone}}/{{network_endpoint_group}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/detachNetworkEndpoints") - if err != nil { - return err - } - - var obj map[string]interface{} - toDelete := make(map[string]interface{}) - instanceProp, err := expandNestedComputeNetworkEndpointInstance(d.Get("instance"), d, config) - if err != nil { - return err - } - toDelete["instance"] = instanceProp - - portProp, err := expandNestedComputeNetworkEndpointPort(d.Get("port"), d, config) - if err != nil { - return err - } - toDelete["port"] = portProp - - ipAddressProp, err := expandNestedComputeNetworkEndpointIpAddress(d.Get("ip_address"), d, config) - if err != nil { - return err - } - toDelete["ipAddress"] = ipAddressProp - - obj = map[string]interface{}{ - "networkEndpoints": []map[string]interface{}{toDelete}, - } - resource_compute_network_endpoint_log.Printf("[DEBUG] Deleting NetworkEndpoint %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_network_endpoint_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "NetworkEndpoint") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting NetworkEndpoint", userAgent, - d.Timeout(resource_compute_network_endpoint_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_network_endpoint_log.Printf("[DEBUG] Finished deleting NetworkEndpoint %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeNetworkEndpointImport(d 
*resource_compute_network_endpoint_schema.ResourceData, meta interface{}) ([]*resource_compute_network_endpoint_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/networkEndpointGroups/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{network_endpoint_group}}/{{instance}}/{{ip_address}}/{{port}}") - if err != nil { - return nil, resource_compute_network_endpoint_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_network_endpoint_schema.ResourceData{d}, nil -} - -func flattenNestedComputeNetworkEndpointInstance(v interface{}, d *resource_compute_network_endpoint_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenNestedComputeNetworkEndpointPort(v interface{}, d *resource_compute_network_endpoint_schema.ResourceData, config *Config) interface{} { - - if floatVal, ok := v.(float64); ok { - return int(floatVal) - } - return v -} - -func flattenNestedComputeNetworkEndpointIpAddress(v interface{}, d *resource_compute_network_endpoint_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedComputeNetworkEndpointInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return GetResourceNameFromSelfLink(v.(string)), nil -} - -func expandNestedComputeNetworkEndpointPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeNetworkEndpointIpAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
resourceComputeNetworkEndpointEncoder(d *resource_compute_network_endpoint_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - if err := d.Set("network_endpoint_group", GetResourceNameFromSelfLink(d.Get("network_endpoint_group").(string))); err != nil { - return nil, resource_compute_network_endpoint_fmt.Errorf("Error setting network_endpoint_group: %s", err) - } - - wrappedReq := map[string]interface{}{ - "networkEndpoints": []interface{}{obj}, - } - return wrappedReq, nil -} - -func flattenNestedComputeNetworkEndpoint(d *resource_compute_network_endpoint_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["items"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_compute_network_endpoint_fmt.Errorf("expected list or map for value items. 
Actual value: %v", v) - } - - _, item, err := resourceComputeNetworkEndpointFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeNetworkEndpointFindNestedObjectInList(d *resource_compute_network_endpoint_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedInstance, err := expandNestedComputeNetworkEndpointInstance(d.Get("instance"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedInstance := flattenNestedComputeNetworkEndpointInstance(expectedInstance, d, meta.(*Config)) - expectedIpAddress, err := expandNestedComputeNetworkEndpointIpAddress(d.Get("ip_address"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedIpAddress := flattenNestedComputeNetworkEndpointIpAddress(expectedIpAddress, d, meta.(*Config)) - expectedPort, err := expandNestedComputeNetworkEndpointPort(d.Get("port"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedPort := flattenNestedComputeNetworkEndpointPort(expectedPort, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - item, err := resourceComputeNetworkEndpointDecoder(d, meta, item) - if err != nil { - return -1, nil, err - } - - itemInstance := flattenNestedComputeNetworkEndpointInstance(item["instance"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_network_endpoint_reflect.ValueOf(itemInstance)) && isEmptyValue(resource_compute_network_endpoint_reflect.ValueOf(expectedFlattenedInstance))) && !resource_compute_network_endpoint_reflect.DeepEqual(itemInstance, expectedFlattenedInstance) { - resource_compute_network_endpoint_log.Printf("[DEBUG] Skipping item with instance= %#v, looking for %#v)", itemInstance, expectedFlattenedInstance) - continue - } - itemIpAddress := 
flattenNestedComputeNetworkEndpointIpAddress(item["ipAddress"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_network_endpoint_reflect.ValueOf(itemIpAddress)) && isEmptyValue(resource_compute_network_endpoint_reflect.ValueOf(expectedFlattenedIpAddress))) && !resource_compute_network_endpoint_reflect.DeepEqual(itemIpAddress, expectedFlattenedIpAddress) { - resource_compute_network_endpoint_log.Printf("[DEBUG] Skipping item with ipAddress= %#v, looking for %#v)", itemIpAddress, expectedFlattenedIpAddress) - continue - } - itemPort := flattenNestedComputeNetworkEndpointPort(item["port"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_network_endpoint_reflect.ValueOf(itemPort)) && isEmptyValue(resource_compute_network_endpoint_reflect.ValueOf(expectedFlattenedPort))) && !resource_compute_network_endpoint_reflect.DeepEqual(itemPort, expectedFlattenedPort) { - resource_compute_network_endpoint_log.Printf("[DEBUG] Skipping item with port= %#v, looking for %#v)", itemPort, expectedFlattenedPort) - continue - } - resource_compute_network_endpoint_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func resourceComputeNetworkEndpointDecoder(d *resource_compute_network_endpoint_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - v, ok := res["networkEndpoint"] - if !ok || v == nil { - return res, nil - } - - return v.(map[string]interface{}), nil -} - -func resourceComputeNetworkEndpointGroup() *resource_compute_network_endpoint_group_schema.Resource { - return &resource_compute_network_endpoint_group_schema.Resource{ - Create: resourceComputeNetworkEndpointGroupCreate, - Read: resourceComputeNetworkEndpointGroupRead, - Delete: resourceComputeNetworkEndpointGroupDelete, - - Importer: &resource_compute_network_endpoint_group_schema.ResourceImporter{ - State: resourceComputeNetworkEndpointGroupImport, - }, - - Timeouts: 
&resource_compute_network_endpoint_group_schema.ResourceTimeout{ - Create: resource_compute_network_endpoint_group_schema.DefaultTimeout(4 * resource_compute_network_endpoint_group_time.Minute), - Delete: resource_compute_network_endpoint_group_schema.DefaultTimeout(4 * resource_compute_network_endpoint_group_time.Minute), - }, - - Schema: map[string]*resource_compute_network_endpoint_group_schema.Schema{ - "name": { - Type: resource_compute_network_endpoint_group_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCPName, - Description: `Name of the resource; provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "network": { - Type: resource_compute_network_endpoint_group_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The network to which all network endpoints in the NEG belong. -Uses "default" project network if unspecified.`, - }, - "default_port": { - Type: resource_compute_network_endpoint_group_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The default port used if the port number is not specified in the -network endpoint.`, - }, - "description": { - Type: resource_compute_network_endpoint_group_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. 
Provide this property when -you create the resource.`, - }, - "network_endpoint_type": { - Type: resource_compute_network_endpoint_group_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_network_endpoint_group_validation.StringInSlice([]string{"GCE_VM_IP_PORT", ""}, false), - Description: `Type of network endpoints in this network endpoint group. Default value: "GCE_VM_IP_PORT" Possible values: ["GCE_VM_IP_PORT"]`, - Default: "GCE_VM_IP_PORT", - }, - "subnetwork": { - Type: resource_compute_network_endpoint_group_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareOptionalSubnet, - Description: `Optional subnetwork to which all network endpoints in the NEG belong.`, - }, - "zone": { - Type: resource_compute_network_endpoint_group_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Zone where the network endpoint group is located.`, - }, - "size": { - Type: resource_compute_network_endpoint_group_schema.TypeInt, - Computed: true, - Description: `Number of network endpoints in the network endpoint group.`, - }, - "project": { - Type: resource_compute_network_endpoint_group_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_network_endpoint_group_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeNetworkEndpointGroupCreate(d *resource_compute_network_endpoint_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeNetworkEndpointGroupName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); 
!isEmptyValue(resource_compute_network_endpoint_group_reflect.ValueOf(nameProp)) && (ok || !resource_compute_network_endpoint_group_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeNetworkEndpointGroupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_network_endpoint_group_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_network_endpoint_group_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - networkEndpointTypeProp, err := expandComputeNetworkEndpointGroupNetworkEndpointType(d.Get("network_endpoint_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network_endpoint_type"); !isEmptyValue(resource_compute_network_endpoint_group_reflect.ValueOf(networkEndpointTypeProp)) && (ok || !resource_compute_network_endpoint_group_reflect.DeepEqual(v, networkEndpointTypeProp)) { - obj["networkEndpointType"] = networkEndpointTypeProp - } - networkProp, err := expandComputeNetworkEndpointGroupNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_compute_network_endpoint_group_reflect.ValueOf(networkProp)) && (ok || !resource_compute_network_endpoint_group_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - subnetworkProp, err := expandComputeNetworkEndpointGroupSubnetwork(d.Get("subnetwork"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("subnetwork"); !isEmptyValue(resource_compute_network_endpoint_group_reflect.ValueOf(subnetworkProp)) && (ok || !resource_compute_network_endpoint_group_reflect.DeepEqual(v, subnetworkProp)) { - obj["subnetwork"] = subnetworkProp - } - defaultPortProp, err := expandComputeNetworkEndpointGroupDefaultPort(d.Get("default_port"), d, config) - if err != nil { - return err - } 
else if v, ok := d.GetOkExists("default_port"); !isEmptyValue(resource_compute_network_endpoint_group_reflect.ValueOf(defaultPortProp)) && (ok || !resource_compute_network_endpoint_group_reflect.DeepEqual(v, defaultPortProp)) { - obj["defaultPort"] = defaultPortProp - } - zoneProp, err := expandComputeNetworkEndpointGroupZone(d.Get("zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(resource_compute_network_endpoint_group_reflect.ValueOf(zoneProp)) && (ok || !resource_compute_network_endpoint_group_reflect.DeepEqual(v, zoneProp)) { - obj["zone"] = zoneProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups") - if err != nil { - return err - } - - resource_compute_network_endpoint_group_log.Printf("[DEBUG] Creating new NetworkEndpointGroup: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_network_endpoint_group_fmt.Errorf("Error fetching project for NetworkEndpointGroup: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_network_endpoint_group_schema.TimeoutCreate)) - if err != nil { - return resource_compute_network_endpoint_group_fmt.Errorf("Error creating NetworkEndpointGroup: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{name}}") - if err != nil { - return resource_compute_network_endpoint_group_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating NetworkEndpointGroup", userAgent, - d.Timeout(resource_compute_network_endpoint_group_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return 
resource_compute_network_endpoint_group_fmt.Errorf("Error waiting to create NetworkEndpointGroup: %s", err) - } - - resource_compute_network_endpoint_group_log.Printf("[DEBUG] Finished creating NetworkEndpointGroup %q: %#v", d.Id(), res) - - return resourceComputeNetworkEndpointGroupRead(d, meta) -} - -func resourceComputeNetworkEndpointGroupRead(d *resource_compute_network_endpoint_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_network_endpoint_group_fmt.Errorf("Error fetching project for NetworkEndpointGroup: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_network_endpoint_group_fmt.Sprintf("ComputeNetworkEndpointGroup %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_network_endpoint_group_fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - - if err := d.Set("name", flattenComputeNetworkEndpointGroupName(res["name"], d, config)); err != nil { - return resource_compute_network_endpoint_group_fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - if err := d.Set("description", flattenComputeNetworkEndpointGroupDescription(res["description"], d, config)); err != nil { - return resource_compute_network_endpoint_group_fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - if err := d.Set("network_endpoint_type", 
flattenComputeNetworkEndpointGroupNetworkEndpointType(res["networkEndpointType"], d, config)); err != nil { - return resource_compute_network_endpoint_group_fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - if err := d.Set("size", flattenComputeNetworkEndpointGroupSize(res["size"], d, config)); err != nil { - return resource_compute_network_endpoint_group_fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - if err := d.Set("network", flattenComputeNetworkEndpointGroupNetwork(res["network"], d, config)); err != nil { - return resource_compute_network_endpoint_group_fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - if err := d.Set("subnetwork", flattenComputeNetworkEndpointGroupSubnetwork(res["subnetwork"], d, config)); err != nil { - return resource_compute_network_endpoint_group_fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - if err := d.Set("default_port", flattenComputeNetworkEndpointGroupDefaultPort(res["defaultPort"], d, config)); err != nil { - return resource_compute_network_endpoint_group_fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - if err := d.Set("zone", flattenComputeNetworkEndpointGroupZone(res["zone"], d, config)); err != nil { - return resource_compute_network_endpoint_group_fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_network_endpoint_group_fmt.Errorf("Error reading NetworkEndpointGroup: %s", err) - } - - return nil -} - -func resourceComputeNetworkEndpointGroupDelete(d *resource_compute_network_endpoint_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_network_endpoint_group_fmt.Errorf("Error fetching project 
for NetworkEndpointGroup: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_network_endpoint_group_log.Printf("[DEBUG] Deleting NetworkEndpointGroup %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_network_endpoint_group_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "NetworkEndpointGroup") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting NetworkEndpointGroup", userAgent, - d.Timeout(resource_compute_network_endpoint_group_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_network_endpoint_group_log.Printf("[DEBUG] Finished deleting NetworkEndpointGroup %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeNetworkEndpointGroupImport(d *resource_compute_network_endpoint_group_schema.ResourceData, meta interface{}) ([]*resource_compute_network_endpoint_group_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/networkEndpointGroups/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{name}}") - if err != nil { - return nil, resource_compute_network_endpoint_group_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_network_endpoint_group_schema.ResourceData{d}, nil -} - -func flattenComputeNetworkEndpointGroupName(v interface{}, d *resource_compute_network_endpoint_group_schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenComputeNetworkEndpointGroupDescription(v interface{}, d *resource_compute_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNetworkEndpointGroupNetworkEndpointType(v interface{}, d *resource_compute_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNetworkEndpointGroupSize(v interface{}, d *resource_compute_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_network_endpoint_group_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeNetworkEndpointGroupNetwork(v interface{}, d *resource_compute_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeNetworkEndpointGroupSubnetwork(v interface{}, d *resource_compute_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeNetworkEndpointGroupDefaultPort(v interface{}, d *resource_compute_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_network_endpoint_group_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeNetworkEndpointGroupZone(v interface{}, d *resource_compute_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func 
expandComputeNetworkEndpointGroupName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkEndpointGroupDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkEndpointGroupNetworkEndpointType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkEndpointGroupNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_network_endpoint_group_fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeNetworkEndpointGroupSubnetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_network_endpoint_group_fmt.Errorf("Invalid value for subnetwork: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeNetworkEndpointGroupDefaultPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNetworkEndpointGroupZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_network_endpoint_group_fmt.Errorf("Invalid value for zone: %s", err) - } - return f.RelativeLink(), nil -} - -const peerNetworkLinkRegex = "projects/(" + ProjectRegex + ")/global/networks/((?:[a-z](?:[-a-z0-9]*[a-z0-9])?))$" - -func resourceComputeNetworkPeering() *resource_compute_network_peering_schema.Resource { - return 
&resource_compute_network_peering_schema.Resource{ - Create: resourceComputeNetworkPeeringCreate, - Read: resourceComputeNetworkPeeringRead, - Update: resourceComputeNetworkPeeringUpdate, - Delete: resourceComputeNetworkPeeringDelete, - Importer: &resource_compute_network_peering_schema.ResourceImporter{ - State: resourceComputeNetworkPeeringImport, - }, - - Timeouts: &resource_compute_network_peering_schema.ResourceTimeout{ - Create: resource_compute_network_peering_schema.DefaultTimeout(4 * resource_compute_network_peering_time.Minute), - Update: resource_compute_network_peering_schema.DefaultTimeout(4 * resource_compute_network_peering_time.Minute), - Delete: resource_compute_network_peering_schema.DefaultTimeout(4 * resource_compute_network_peering_time.Minute), - }, - - Schema: map[string]*resource_compute_network_peering_schema.Schema{ - "name": { - Type: resource_compute_network_peering_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCPName, - Description: `Name of the peering.`, - }, - - "network": { - Type: resource_compute_network_peering_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(peerNetworkLinkRegex), - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The primary network of the peering.`, - }, - - "peer_network": { - Type: resource_compute_network_peering_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(peerNetworkLinkRegex), - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The peer network in the peering. The peer network may belong to a different project.`, - }, - - "export_custom_routes": { - Type: resource_compute_network_peering_schema.TypeBool, - Optional: true, - Default: false, - Description: `Whether to export the custom routes to the peer network. 
Defaults to false.`, - }, - - "import_custom_routes": { - Type: resource_compute_network_peering_schema.TypeBool, - Optional: true, - Default: false, - Description: `Whether to export the custom routes from the peer network. Defaults to false.`, - }, - - "export_subnet_routes_with_public_ip": { - Type: resource_compute_network_peering_schema.TypeBool, - ForceNew: true, - Optional: true, - Default: true, - }, - - "import_subnet_routes_with_public_ip": { - Type: resource_compute_network_peering_schema.TypeBool, - ForceNew: true, - Optional: true, - }, - - "state": { - Type: resource_compute_network_peering_schema.TypeString, - Computed: true, - Description: `State for the peering, either ACTIVE or INACTIVE. The peering is ACTIVE when there's a matching configuration in the peer network.`, - }, - - "state_details": { - Type: resource_compute_network_peering_schema.TypeString, - Computed: true, - Description: `Details about the current state of the peering.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeNetworkPeeringCreate(d *resource_compute_network_peering_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - networkFieldValue, err := ParseNetworkFieldValue(d.Get("network").(string), d, config) - if err != nil { - return err - } - peerNetworkFieldValue, err := ParseNetworkFieldValue(d.Get("peer_network").(string), d, config) - if err != nil { - return err - } - - request := &resource_compute_network_peering_compute.NetworksAddPeeringRequest{} - request.NetworkPeering = expandNetworkPeering(d) - - peeringLockNames := sortedNetworkPeeringMutexKeys(networkFieldValue, peerNetworkFieldValue) - for _, kn := range peeringLockNames { - mutexKV.Lock(kn) - defer mutexKV.Unlock(kn) - } - - addOp, err := config.NewComputeClient(userAgent).Networks.AddPeering(networkFieldValue.Project, networkFieldValue.Name, request).Do() - if err != nil 
{ - return resource_compute_network_peering_fmt.Errorf("Error adding network peering: %s", err) - } - - err = computeOperationWaitTime(config, addOp, networkFieldValue.Project, "Adding Network Peering", userAgent, d.Timeout(resource_compute_network_peering_schema.TimeoutCreate)) - if err != nil { - return err - } - - d.SetId(resource_compute_network_peering_fmt.Sprintf("%s/%s", networkFieldValue.Name, d.Get("name").(string))) - - return resourceComputeNetworkPeeringRead(d, meta) -} - -func resourceComputeNetworkPeeringRead(d *resource_compute_network_peering_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - peeringName := d.Get("name").(string) - networkFieldValue, err := ParseNetworkFieldValue(d.Get("network").(string), d, config) - if err != nil { - return err - } - - network, err := config.NewComputeClient(userAgent).Networks.Get(networkFieldValue.Project, networkFieldValue.Name).Do() - if err != nil { - return handleNotFoundError(err, d, resource_compute_network_peering_fmt.Sprintf("Network %q", networkFieldValue.Name)) - } - - peering := findPeeringFromNetwork(network, peeringName) - if peering == nil { - resource_compute_network_peering_log.Printf("[WARN] Removing network peering %s from network %s because it's gone", peeringName, network.Name) - d.SetId("") - return nil - } - - if err := d.Set("peer_network", peering.Network); err != nil { - return resource_compute_network_peering_fmt.Errorf("Error setting peer_network: %s", err) - } - if err := d.Set("name", peering.Name); err != nil { - return resource_compute_network_peering_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("import_custom_routes", peering.ImportCustomRoutes); err != nil { - return resource_compute_network_peering_fmt.Errorf("Error setting import_custom_routes: %s", err) - } - if err := d.Set("export_custom_routes", peering.ExportCustomRoutes); err 
!= nil { - return resource_compute_network_peering_fmt.Errorf("Error setting export_custom_routes: %s", err) - } - if err := d.Set("import_subnet_routes_with_public_ip", peering.ImportSubnetRoutesWithPublicIp); err != nil { - return resource_compute_network_peering_fmt.Errorf("Error setting import_subnet_routes_with_public_ip: %s", err) - } - if err := d.Set("export_subnet_routes_with_public_ip", peering.ExportSubnetRoutesWithPublicIp); err != nil { - return resource_compute_network_peering_fmt.Errorf("Error setting export_subnet_routes_with_public_ip: %s", err) - } - if err := d.Set("state", peering.State); err != nil { - return resource_compute_network_peering_fmt.Errorf("Error setting state: %s", err) - } - if err := d.Set("state_details", peering.StateDetails); err != nil { - return resource_compute_network_peering_fmt.Errorf("Error setting state_details: %s", err) - } - - return nil -} - -func resourceComputeNetworkPeeringUpdate(d *resource_compute_network_peering_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - networkFieldValue, err := ParseNetworkFieldValue(d.Get("network").(string), d, config) - if err != nil { - return err - } - peerNetworkFieldValue, err := ParseNetworkFieldValue(d.Get("peer_network").(string), d, config) - if err != nil { - return err - } - - request := &resource_compute_network_peering_compute.NetworksUpdatePeeringRequest{} - request.NetworkPeering = expandNetworkPeering(d) - - peeringLockNames := sortedNetworkPeeringMutexKeys(networkFieldValue, peerNetworkFieldValue) - for _, kn := range peeringLockNames { - mutexKV.Lock(kn) - defer mutexKV.Unlock(kn) - } - - updateOp, err := config.NewComputeClient(userAgent).Networks.UpdatePeering(networkFieldValue.Project, networkFieldValue.Name, request).Do() - if err != nil { - return resource_compute_network_peering_fmt.Errorf("Error updating network peering: %s", err) 
- } - - err = computeOperationWaitTime(config, updateOp, networkFieldValue.Project, "Updating Network Peering", userAgent, d.Timeout(resource_compute_network_peering_schema.TimeoutUpdate)) - if err != nil { - return err - } - - return resourceComputeNetworkPeeringRead(d, meta) -} - -func resourceComputeNetworkPeeringDelete(d *resource_compute_network_peering_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - name := d.Get("name").(string) - networkFieldValue, err := ParseNetworkFieldValue(d.Get("network").(string), d, config) - if err != nil { - return err - } - peerNetworkFieldValue, err := ParseNetworkFieldValue(d.Get("peer_network").(string), d, config) - if err != nil { - return err - } - - request := &resource_compute_network_peering_compute.NetworksRemovePeeringRequest{ - Name: name, - } - - peeringLockNames := sortedNetworkPeeringMutexKeys(networkFieldValue, peerNetworkFieldValue) - for _, kn := range peeringLockNames { - mutexKV.Lock(kn) - defer mutexKV.Unlock(kn) - } - - removeOp, err := config.NewComputeClient(userAgent).Networks.RemovePeering(networkFieldValue.Project, networkFieldValue.Name, request).Do() - if err != nil { - if gerr, ok := err.(*resource_compute_network_peering_googleapi.Error); ok && gerr.Code == 404 { - resource_compute_network_peering_log.Printf("[WARN] Peering `%s` already removed from network `%s`", name, networkFieldValue.Name) - } else { - return resource_compute_network_peering_fmt.Errorf("Error removing peering `%s` from network `%s`: %s", name, networkFieldValue.Name, err) - } - } else { - err = computeOperationWaitTime(config, removeOp, networkFieldValue.Project, "Removing Network Peering", userAgent, d.Timeout(resource_compute_network_peering_schema.TimeoutDelete)) - if err != nil { - return err - } - } - - return nil -} - -func findPeeringFromNetwork(network 
*resource_compute_network_peering_compute.Network, peeringName string) *resource_compute_network_peering_compute.NetworkPeering { - for _, p := range network.Peerings { - if p.Name == peeringName { - return p - } - } - return nil -} - -func expandNetworkPeering(d *resource_compute_network_peering_schema.ResourceData) *resource_compute_network_peering_compute.NetworkPeering { - return &resource_compute_network_peering_compute.NetworkPeering{ - ExchangeSubnetRoutes: true, - Name: d.Get("name").(string), - Network: d.Get("peer_network").(string), - ExportCustomRoutes: d.Get("export_custom_routes").(bool), - ImportCustomRoutes: d.Get("import_custom_routes").(bool), - ExportSubnetRoutesWithPublicIp: d.Get("export_subnet_routes_with_public_ip").(bool), - ImportSubnetRoutesWithPublicIp: d.Get("import_subnet_routes_with_public_ip").(bool), - ForceSendFields: []string{"ExportSubnetRoutesWithPublicIp", "ImportCustomRoutes", "ExportCustomRoutes"}, - } -} - -func sortedNetworkPeeringMutexKeys(networkName, peerNetworkName *GlobalFieldValue) []string { - - networks := []string{ - resource_compute_network_peering_fmt.Sprintf("%s/peerings", networkName.RelativeLink()), - resource_compute_network_peering_fmt.Sprintf("%s/peerings", peerNetworkName.RelativeLink()), - } - resource_compute_network_peering_sort.Strings(networks) - return networks -} - -func resourceComputeNetworkPeeringImport(d *resource_compute_network_peering_schema.ResourceData, meta interface{}) ([]*resource_compute_network_peering_schema.ResourceData, error) { - config := meta.(*Config) - splits := resource_compute_network_peering_strings.Split(d.Id(), "/") - if len(splits) != 3 { - return nil, resource_compute_network_peering_fmt.Errorf("Error parsing network peering import format, expected: {project}/{network}/{name}") - } - project := splits[0] - network := splits[1] - name := splits[2] - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - net, err := 
config.NewComputeClient(userAgent).Networks.Get(project, network).Do() - if err != nil { - return nil, handleNotFoundError(err, d, resource_compute_network_peering_fmt.Sprintf("Network %q", splits[1])) - } - - if err := d.Set("network", ConvertSelfLinkToV1(net.SelfLink)); err != nil { - return nil, resource_compute_network_peering_fmt.Errorf("Error setting network: %s", err) - } - if err := d.Set("name", name); err != nil { - return nil, resource_compute_network_peering_fmt.Errorf("Error setting name: %s", err) - } - - id := resource_compute_network_peering_fmt.Sprintf("%s/%s", network, name) - d.SetId(id) - - return []*resource_compute_network_peering_schema.ResourceData{d}, nil -} - -func resourceComputeNetworkPeeringRoutesConfig() *resource_compute_network_peering_routes_config_schema.Resource { - return &resource_compute_network_peering_routes_config_schema.Resource{ - Create: resourceComputeNetworkPeeringRoutesConfigCreate, - Read: resourceComputeNetworkPeeringRoutesConfigRead, - Update: resourceComputeNetworkPeeringRoutesConfigUpdate, - Delete: resourceComputeNetworkPeeringRoutesConfigDelete, - - Importer: &resource_compute_network_peering_routes_config_schema.ResourceImporter{ - State: resourceComputeNetworkPeeringRoutesConfigImport, - }, - - Timeouts: &resource_compute_network_peering_routes_config_schema.ResourceTimeout{ - Create: resource_compute_network_peering_routes_config_schema.DefaultTimeout(6 * resource_compute_network_peering_routes_config_time.Minute), - Update: resource_compute_network_peering_routes_config_schema.DefaultTimeout(6 * resource_compute_network_peering_routes_config_time.Minute), - Delete: resource_compute_network_peering_routes_config_schema.DefaultTimeout(6 * resource_compute_network_peering_routes_config_time.Minute), - }, - - Schema: map[string]*resource_compute_network_peering_routes_config_schema.Schema{ - "export_custom_routes": { - Type: resource_compute_network_peering_routes_config_schema.TypeBool, - Required: true, - 
Description: `Whether to export the custom routes to the peer network.`, - }, - "import_custom_routes": { - Type: resource_compute_network_peering_routes_config_schema.TypeBool, - Required: true, - Description: `Whether to import the custom routes to the peer network.`, - }, - "network": { - Type: resource_compute_network_peering_routes_config_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the primary network for the peering.`, - }, - "peering": { - Type: resource_compute_network_peering_routes_config_schema.TypeString, - Required: true, - Description: `Name of the peering.`, - }, - "project": { - Type: resource_compute_network_peering_routes_config_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeNetworkPeeringRoutesConfigCreate(d *resource_compute_network_peering_routes_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputeNetworkPeeringRoutesConfigPeering(d.Get("peering"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peering"); !isEmptyValue(resource_compute_network_peering_routes_config_reflect.ValueOf(nameProp)) && (ok || !resource_compute_network_peering_routes_config_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - exportCustomRoutesProp, err := expandNestedComputeNetworkPeeringRoutesConfigExportCustomRoutes(d.Get("export_custom_routes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("export_custom_routes"); ok || !resource_compute_network_peering_routes_config_reflect.DeepEqual(v, exportCustomRoutesProp) { - obj["exportCustomRoutes"] = exportCustomRoutesProp - } - importCustomRoutesProp, err := 
expandNestedComputeNetworkPeeringRoutesConfigImportCustomRoutes(d.Get("import_custom_routes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("import_custom_routes"); ok || !resource_compute_network_peering_routes_config_reflect.DeepEqual(v, importCustomRoutesProp) { - obj["importCustomRoutes"] = importCustomRoutesProp - } - - obj, err = resourceComputeNetworkPeeringRoutesConfigEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/peerings") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{network}}/updatePeering") - if err != nil { - return err - } - - resource_compute_network_peering_routes_config_log.Printf("[DEBUG] Creating new NetworkPeeringRoutesConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_network_peering_routes_config_fmt.Errorf("Error fetching project for NetworkPeeringRoutesConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_network_peering_routes_config_schema.TimeoutCreate)) - if err != nil { - return resource_compute_network_peering_routes_config_fmt.Errorf("Error creating NetworkPeeringRoutesConfig: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/networkPeerings/{{peering}}") - if err != nil { - return resource_compute_network_peering_routes_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating NetworkPeeringRoutesConfig", userAgent, - 
d.Timeout(resource_compute_network_peering_routes_config_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_network_peering_routes_config_fmt.Errorf("Error waiting to create NetworkPeeringRoutesConfig: %s", err) - } - - resource_compute_network_peering_routes_config_log.Printf("[DEBUG] Finished creating NetworkPeeringRoutesConfig %q: %#v", d.Id(), res) - - return resourceComputeNetworkPeeringRoutesConfigRead(d, meta) -} - -func resourceComputeNetworkPeeringRoutesConfigRead(d *resource_compute_network_peering_routes_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{network}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_network_peering_routes_config_fmt.Errorf("Error fetching project for NetworkPeeringRoutesConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_network_peering_routes_config_fmt.Sprintf("ComputeNetworkPeeringRoutesConfig %q", d.Id())) - } - - res, err = flattenNestedComputeNetworkPeeringRoutesConfig(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_network_peering_routes_config_log.Printf("[DEBUG] Removing ComputeNetworkPeeringRoutesConfig because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_compute_network_peering_routes_config_fmt.Errorf("Error reading NetworkPeeringRoutesConfig: %s", err) - } - - if err := d.Set("peering", 
flattenNestedComputeNetworkPeeringRoutesConfigPeering(res["name"], d, config)); err != nil { - return resource_compute_network_peering_routes_config_fmt.Errorf("Error reading NetworkPeeringRoutesConfig: %s", err) - } - if err := d.Set("export_custom_routes", flattenNestedComputeNetworkPeeringRoutesConfigExportCustomRoutes(res["exportCustomRoutes"], d, config)); err != nil { - return resource_compute_network_peering_routes_config_fmt.Errorf("Error reading NetworkPeeringRoutesConfig: %s", err) - } - if err := d.Set("import_custom_routes", flattenNestedComputeNetworkPeeringRoutesConfigImportCustomRoutes(res["importCustomRoutes"], d, config)); err != nil { - return resource_compute_network_peering_routes_config_fmt.Errorf("Error reading NetworkPeeringRoutesConfig: %s", err) - } - - return nil -} - -func resourceComputeNetworkPeeringRoutesConfigUpdate(d *resource_compute_network_peering_routes_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_network_peering_routes_config_fmt.Errorf("Error fetching project for NetworkPeeringRoutesConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputeNetworkPeeringRoutesConfigPeering(d.Get("peering"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peering"); !isEmptyValue(resource_compute_network_peering_routes_config_reflect.ValueOf(v)) && (ok || !resource_compute_network_peering_routes_config_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - exportCustomRoutesProp, err := expandNestedComputeNetworkPeeringRoutesConfigExportCustomRoutes(d.Get("export_custom_routes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("export_custom_routes"); ok || 
!resource_compute_network_peering_routes_config_reflect.DeepEqual(v, exportCustomRoutesProp) { - obj["exportCustomRoutes"] = exportCustomRoutesProp - } - importCustomRoutesProp, err := expandNestedComputeNetworkPeeringRoutesConfigImportCustomRoutes(d.Get("import_custom_routes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("import_custom_routes"); ok || !resource_compute_network_peering_routes_config_reflect.DeepEqual(v, importCustomRoutesProp) { - obj["importCustomRoutes"] = importCustomRoutesProp - } - - obj, err = resourceComputeNetworkPeeringRoutesConfigEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/peerings") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/networks/{{network}}/updatePeering") - if err != nil { - return err - } - - resource_compute_network_peering_routes_config_log.Printf("[DEBUG] Updating NetworkPeeringRoutesConfig %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_network_peering_routes_config_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_network_peering_routes_config_fmt.Errorf("Error updating NetworkPeeringRoutesConfig %q: %s", d.Id(), err) - } else { - resource_compute_network_peering_routes_config_log.Printf("[DEBUG] Finished updating NetworkPeeringRoutesConfig %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating NetworkPeeringRoutesConfig", userAgent, - d.Timeout(resource_compute_network_peering_routes_config_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeNetworkPeeringRoutesConfigRead(d, meta) 
-} - -func resourceComputeNetworkPeeringRoutesConfigDelete(d *resource_compute_network_peering_routes_config_schema.ResourceData, meta interface{}) error { - resource_compute_network_peering_routes_config_log.Printf("[WARNING] Compute NetworkPeeringRoutesConfig resources"+ - " cannot be deleted from Google Cloud. The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceComputeNetworkPeeringRoutesConfigImport(d *resource_compute_network_peering_routes_config_schema.ResourceData, meta interface{}) ([]*resource_compute_network_peering_routes_config_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/networks/(?P[^/]+)/networkPeerings/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/networkPeerings/{{peering}}") - if err != nil { - return nil, resource_compute_network_peering_routes_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_network_peering_routes_config_schema.ResourceData{d}, nil -} - -func flattenNestedComputeNetworkPeeringRoutesConfigPeering(v interface{}, d *resource_compute_network_peering_routes_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeNetworkPeeringRoutesConfigExportCustomRoutes(v interface{}, d *resource_compute_network_peering_routes_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeNetworkPeeringRoutesConfigImportCustomRoutes(v interface{}, d *resource_compute_network_peering_routes_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedComputeNetworkPeeringRoutesConfigPeering(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeNetworkPeeringRoutesConfigExportCustomRoutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeNetworkPeeringRoutesConfigImportCustomRoutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeNetworkPeeringRoutesConfigEncoder(d *resource_compute_network_peering_routes_config_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - newObj := make(map[string]interface{}) - newObj["networkPeering"] = obj - return newObj, nil -} - -func flattenNestedComputeNetworkPeeringRoutesConfig(d *resource_compute_network_peering_routes_config_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["peerings"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_compute_network_peering_routes_config_fmt.Errorf("expected list or map for value peerings. 
Actual value: %v", v) - } - - _, item, err := resourceComputeNetworkPeeringRoutesConfigFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeNetworkPeeringRoutesConfigFindNestedObjectInList(d *resource_compute_network_peering_routes_config_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedPeering, err := expandNestedComputeNetworkPeeringRoutesConfigPeering(d.Get("peering"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedPeering := flattenNestedComputeNetworkPeeringRoutesConfigPeering(expectedPeering, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemPeering := flattenNestedComputeNetworkPeeringRoutesConfigPeering(item["name"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_network_peering_routes_config_reflect.ValueOf(itemPeering)) && isEmptyValue(resource_compute_network_peering_routes_config_reflect.ValueOf(expectedFlattenedPeering))) && !resource_compute_network_peering_routes_config_reflect.DeepEqual(itemPeering, expectedFlattenedPeering) { - resource_compute_network_peering_routes_config_log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemPeering, expectedFlattenedPeering) - continue - } - resource_compute_network_peering_routes_config_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func resourceComputeNodeGroup() *resource_compute_node_group_schema.Resource { - return &resource_compute_node_group_schema.Resource{ - Create: resourceComputeNodeGroupCreate, - Read: resourceComputeNodeGroupRead, - Update: resourceComputeNodeGroupUpdate, - Delete: resourceComputeNodeGroupDelete, - - Importer: &resource_compute_node_group_schema.ResourceImporter{ - State: 
resourceComputeNodeGroupImport, - }, - - Timeouts: &resource_compute_node_group_schema.ResourceTimeout{ - Create: resource_compute_node_group_schema.DefaultTimeout(4 * resource_compute_node_group_time.Minute), - Update: resource_compute_node_group_schema.DefaultTimeout(4 * resource_compute_node_group_time.Minute), - Delete: resource_compute_node_group_schema.DefaultTimeout(4 * resource_compute_node_group_time.Minute), - }, - - Schema: map[string]*resource_compute_node_group_schema.Schema{ - "node_template": { - Type: resource_compute_node_group_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of the node template to which this node group belongs.`, - }, - "autoscaling_policy": { - Type: resource_compute_node_group_schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: `If you use sole-tenant nodes for your workloads, you can use the node -group autoscaler to automatically manage the sizes of your node groups.`, - MaxItems: 1, - Elem: &resource_compute_node_group_schema.Resource{ - Schema: map[string]*resource_compute_node_group_schema.Schema{ - "max_nodes": { - Type: resource_compute_node_group_schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Maximum size of the node group. Set to a value less than or equal -to 100 and greater than or equal to min-nodes.`, - }, - "mode": { - Type: resource_compute_node_group_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_node_group_validation.StringInSlice([]string{"OFF", "ON", "ONLY_SCALE_OUT"}, false), - Description: `The autoscaling mode. Set to one of the following: - - OFF: Disables the autoscaler. - - ON: Enables scaling in and scaling out. - - ONLY_SCALE_OUT: Enables only scaling out. - You must use this mode if your node groups are configured to - restart their hosted VMs on minimal servers. 
Possible values: ["OFF", "ON", "ONLY_SCALE_OUT"]`, - }, - "min_nodes": { - Type: resource_compute_node_group_schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Minimum size of the node group. Must be less -than or equal to max-nodes. The default value is 0.`, - }, - }, - }, - }, - "description": { - Type: resource_compute_node_group_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional textual description of the resource.`, - }, - "initial_size": { - Type: resource_compute_node_group_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The initial number of nodes in the node group. One of 'initial_size' or 'size' must be specified.`, - ExactlyOneOf: []string{"size", "initial_size"}, - }, - "maintenance_policy": { - Type: resource_compute_node_group_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT.`, - Default: "DEFAULT", - }, - "maintenance_window": { - Type: resource_compute_node_group_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `contains properties for the timeframe of maintenance`, - MaxItems: 1, - Elem: &resource_compute_node_group_schema.Resource{ - Schema: map[string]*resource_compute_node_group_schema.Schema{ - "start_time": { - Type: resource_compute_node_group_schema.TypeString, - Required: true, - ForceNew: true, - Description: `instances.start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. 
For example, both 13:00-5 and 08:00 are valid.`, - }, - }, - }, - }, - "name": { - Type: resource_compute_node_group_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Name of the resource.`, - }, - "size": { - Type: resource_compute_node_group_schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The total number of nodes in the node group. One of 'initial_size' or 'size' must be specified.`, - ExactlyOneOf: []string{"size", "initial_size"}, - }, - "zone": { - Type: resource_compute_node_group_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Zone where this node group is located`, - }, - "creation_timestamp": { - Type: resource_compute_node_group_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: resource_compute_node_group_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_node_group_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeNodeGroupCreate(d *resource_compute_node_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeNodeGroupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_node_group_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_node_group_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeNodeGroupName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); 
!isEmptyValue(resource_compute_node_group_reflect.ValueOf(nameProp)) && (ok || !resource_compute_node_group_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - nodeTemplateProp, err := expandComputeNodeGroupNodeTemplate(d.Get("node_template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("node_template"); !isEmptyValue(resource_compute_node_group_reflect.ValueOf(nodeTemplateProp)) && (ok || !resource_compute_node_group_reflect.DeepEqual(v, nodeTemplateProp)) { - obj["nodeTemplate"] = nodeTemplateProp - } - sizeProp, err := expandComputeNodeGroupSize(d.Get("size"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("size"); ok || !resource_compute_node_group_reflect.DeepEqual(v, sizeProp) { - obj["size"] = sizeProp - } - maintenancePolicyProp, err := expandComputeNodeGroupMaintenancePolicy(d.Get("maintenance_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("maintenance_policy"); !isEmptyValue(resource_compute_node_group_reflect.ValueOf(maintenancePolicyProp)) && (ok || !resource_compute_node_group_reflect.DeepEqual(v, maintenancePolicyProp)) { - obj["maintenancePolicy"] = maintenancePolicyProp - } - maintenanceWindowProp, err := expandComputeNodeGroupMaintenanceWindow(d.Get("maintenance_window"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("maintenance_window"); !isEmptyValue(resource_compute_node_group_reflect.ValueOf(maintenanceWindowProp)) && (ok || !resource_compute_node_group_reflect.DeepEqual(v, maintenanceWindowProp)) { - obj["maintenanceWindow"] = maintenanceWindowProp - } - autoscalingPolicyProp, err := expandComputeNodeGroupAutoscalingPolicy(d.Get("autoscaling_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("autoscaling_policy"); !isEmptyValue(resource_compute_node_group_reflect.ValueOf(autoscalingPolicyProp)) && (ok || !resource_compute_node_group_reflect.DeepEqual(v, 
autoscalingPolicyProp)) { - obj["autoscalingPolicy"] = autoscalingPolicyProp - } - zoneProp, err := expandComputeNodeGroupZone(d.Get("zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(resource_compute_node_group_reflect.ValueOf(zoneProp)) && (ok || !resource_compute_node_group_reflect.DeepEqual(v, zoneProp)) { - obj["zone"] = zoneProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/nodeGroups?initialNodeCount=PRE_CREATE_REPLACE_ME") - if err != nil { - return err - } - - resource_compute_node_group_log.Printf("[DEBUG] Creating new NodeGroup: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_node_group_fmt.Errorf("Error fetching project for NodeGroup: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - var sizeParam string - if v, ok := d.GetOkExists("size"); ok { - sizeParam = resource_compute_node_group_fmt.Sprintf("%v", v) - } else if v, ok := d.GetOkExists("initial_size"); ok { - sizeParam = resource_compute_node_group_fmt.Sprintf("%v", v) - } - - url = resource_compute_node_group_regexp.MustCompile("PRE_CREATE_REPLACE_ME").ReplaceAllLiteralString(url, sizeParam) - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_node_group_schema.TimeoutCreate)) - if err != nil { - return resource_compute_node_group_fmt.Errorf("Error creating NodeGroup: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}") - if err != nil { - return resource_compute_node_group_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating NodeGroup", userAgent, - d.Timeout(resource_compute_node_group_schema.TimeoutCreate)) - - if err != nil { - - 
d.SetId("") - return resource_compute_node_group_fmt.Errorf("Error waiting to create NodeGroup: %s", err) - } - - resource_compute_node_group_log.Printf("[DEBUG] Finished creating NodeGroup %q: %#v", d.Id(), res) - - return resourceComputeNodeGroupRead(d, meta) -} - -func resourceComputeNodeGroupRead(d *resource_compute_node_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_node_group_fmt.Errorf("Error fetching project for NodeGroup: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_node_group_fmt.Sprintf("ComputeNodeGroup %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_node_group_fmt.Errorf("Error reading NodeGroup: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeNodeGroupCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_node_group_fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("description", flattenComputeNodeGroupDescription(res["description"], d, config)); err != nil { - return resource_compute_node_group_fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("name", flattenComputeNodeGroupName(res["name"], d, config)); err != nil { - return resource_compute_node_group_fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("node_template", 
flattenComputeNodeGroupNodeTemplate(res["nodeTemplate"], d, config)); err != nil { - return resource_compute_node_group_fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("size", flattenComputeNodeGroupSize(res["size"], d, config)); err != nil { - return resource_compute_node_group_fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("maintenance_policy", flattenComputeNodeGroupMaintenancePolicy(res["maintenancePolicy"], d, config)); err != nil { - return resource_compute_node_group_fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("maintenance_window", flattenComputeNodeGroupMaintenanceWindow(res["maintenanceWindow"], d, config)); err != nil { - return resource_compute_node_group_fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("autoscaling_policy", flattenComputeNodeGroupAutoscalingPolicy(res["autoscalingPolicy"], d, config)); err != nil { - return resource_compute_node_group_fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("zone", flattenComputeNodeGroupZone(res["zone"], d, config)); err != nil { - return resource_compute_node_group_fmt.Errorf("Error reading NodeGroup: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_node_group_fmt.Errorf("Error reading NodeGroup: %s", err) - } - - return nil -} - -func resourceComputeNodeGroupUpdate(d *resource_compute_node_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_node_group_fmt.Errorf("Error fetching project for NodeGroup: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("node_template") { - obj := make(map[string]interface{}) - - nodeTemplateProp, err := 
expandComputeNodeGroupNodeTemplate(d.Get("node_template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("node_template"); !isEmptyValue(resource_compute_node_group_reflect.ValueOf(v)) && (ok || !resource_compute_node_group_reflect.DeepEqual(v, nodeTemplateProp)) { - obj["nodeTemplate"] = nodeTemplateProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}/setNodeTemplate") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_node_group_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_node_group_fmt.Errorf("Error updating NodeGroup %q: %s", d.Id(), err) - } else { - resource_compute_node_group_log.Printf("[DEBUG] Finished updating NodeGroup %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating NodeGroup", userAgent, - d.Timeout(resource_compute_node_group_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeNodeGroupRead(d, meta) -} - -func resourceComputeNodeGroupDelete(d *resource_compute_node_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_node_group_fmt.Errorf("Error fetching project for NodeGroup: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_node_group_log.Printf("[DEBUG] Deleting NodeGroup %q", d.Id()) - - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_node_group_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "NodeGroup") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting NodeGroup", userAgent, - d.Timeout(resource_compute_node_group_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_node_group_log.Printf("[DEBUG] Finished deleting NodeGroup %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeNodeGroupImport(d *resource_compute_node_group_schema.ResourceData, meta interface{}) ([]*resource_compute_node_group_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/nodeGroups/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}") - if err != nil { - return nil, resource_compute_node_group_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_node_group_schema.ResourceData{d}, nil -} - -func flattenComputeNodeGroupCreationTimestamp(v interface{}, d *resource_compute_node_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeGroupDescription(v interface{}, d *resource_compute_node_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeGroupName(v interface{}, d *resource_compute_node_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeGroupNodeTemplate(v interface{}, d *resource_compute_node_group_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} 
- -func flattenComputeNodeGroupSize(v interface{}, d *resource_compute_node_group_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_node_group_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeNodeGroupMaintenancePolicy(v interface{}, d *resource_compute_node_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeGroupMaintenanceWindow(v interface{}, d *resource_compute_node_group_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["start_time"] = - flattenComputeNodeGroupMaintenanceWindowStartTime(original["startTime"], d, config) - return []interface{}{transformed} -} - -func flattenComputeNodeGroupMaintenanceWindowStartTime(v interface{}, d *resource_compute_node_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeGroupAutoscalingPolicy(v interface{}, d *resource_compute_node_group_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["mode"] = - flattenComputeNodeGroupAutoscalingPolicyMode(original["mode"], d, config) - transformed["min_nodes"] = - flattenComputeNodeGroupAutoscalingPolicyMinNodes(original["minNodes"], d, config) - transformed["max_nodes"] = - flattenComputeNodeGroupAutoscalingPolicyMaxNodes(original["maxNodes"], d, config) - return []interface{}{transformed} -} - -func flattenComputeNodeGroupAutoscalingPolicyMode(v interface{}, d *resource_compute_node_group_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenComputeNodeGroupAutoscalingPolicyMinNodes(v interface{}, d *resource_compute_node_group_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_node_group_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeNodeGroupAutoscalingPolicyMaxNodes(v interface{}, d *resource_compute_node_group_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_node_group_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeNodeGroupZone(v interface{}, d *resource_compute_node_group_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeNodeGroupDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupNodeTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("nodeTemplates", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_node_group_fmt.Errorf("Invalid value for node_template: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeNodeGroupSize(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupMaintenancePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} 
- -func expandComputeNodeGroupMaintenanceWindow(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedStartTime, err := expandComputeNodeGroupMaintenanceWindowStartTime(original["start_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_node_group_reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { - transformed["startTime"] = transformedStartTime - } - - return transformed, nil -} - -func expandComputeNodeGroupMaintenanceWindowStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupAutoscalingPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMode, err := expandComputeNodeGroupAutoscalingPolicyMode(original["mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_node_group_reflect.ValueOf(transformedMode); val.IsValid() && !isEmptyValue(val) { - transformed["mode"] = transformedMode - } - - transformedMinNodes, err := expandComputeNodeGroupAutoscalingPolicyMinNodes(original["min_nodes"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_node_group_reflect.ValueOf(transformedMinNodes); val.IsValid() && !isEmptyValue(val) { - transformed["minNodes"] = transformedMinNodes - } - - transformedMaxNodes, err := expandComputeNodeGroupAutoscalingPolicyMaxNodes(original["max_nodes"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_node_group_reflect.ValueOf(transformedMaxNodes); 
val.IsValid() && !isEmptyValue(val) { - transformed["maxNodes"] = transformedMaxNodes - } - - return transformed, nil -} - -func expandComputeNodeGroupAutoscalingPolicyMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupAutoscalingPolicyMinNodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupAutoscalingPolicyMaxNodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeGroupZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_node_group_fmt.Errorf("Invalid value for zone: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeNodeTemplate() *resource_compute_node_template_schema.Resource { - return &resource_compute_node_template_schema.Resource{ - Create: resourceComputeNodeTemplateCreate, - Read: resourceComputeNodeTemplateRead, - Delete: resourceComputeNodeTemplateDelete, - - Importer: &resource_compute_node_template_schema.ResourceImporter{ - State: resourceComputeNodeTemplateImport, - }, - - Timeouts: &resource_compute_node_template_schema.ResourceTimeout{ - Create: resource_compute_node_template_schema.DefaultTimeout(4 * resource_compute_node_template_time.Minute), - Delete: resource_compute_node_template_schema.DefaultTimeout(4 * resource_compute_node_template_time.Minute), - }, - - Schema: map[string]*resource_compute_node_template_schema.Schema{ - "cpu_overcommit_type": { - Type: resource_compute_node_template_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_node_template_validation.StringInSlice([]string{"ENABLED", "NONE", ""}, false), - Description: `CPU overcommit. 
Default value: "NONE" Possible values: ["ENABLED", "NONE"]`, - Default: "NONE", - }, - "description": { - Type: resource_compute_node_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional textual description of the resource.`, - }, - "name": { - Type: resource_compute_node_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Name of the resource.`, - }, - "node_affinity_labels": { - Type: resource_compute_node_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Labels to use for node affinity, which will be used in -instance scheduling.`, - Elem: &resource_compute_node_template_schema.Schema{Type: resource_compute_node_template_schema.TypeString}, - }, - "node_type": { - Type: resource_compute_node_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Node type to use for nodes group that are created from this template. -Only one of nodeTypeFlexibility and nodeType can be specified.`, - ConflictsWith: []string{"node_type_flexibility"}, - }, - "node_type_flexibility": { - Type: resource_compute_node_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Flexible properties for the desired node type. Node groups that -use this node template will create nodes of a type that matches -these properties. 
Only one of nodeTypeFlexibility and nodeType can -be specified.`, - MaxItems: 1, - Elem: &resource_compute_node_template_schema.Resource{ - Schema: map[string]*resource_compute_node_template_schema.Schema{ - "cpus": { - Type: resource_compute_node_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Number of virtual CPUs to use.`, - AtLeastOneOf: []string{"node_type_flexibility.0.cpus", "node_type_flexibility.0.memory"}, - }, - "memory": { - Type: resource_compute_node_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Physical memory available to the node, defined in MB.`, - AtLeastOneOf: []string{"node_type_flexibility.0.cpus", "node_type_flexibility.0.memory"}, - }, - "local_ssd": { - Type: resource_compute_node_template_schema.TypeString, - Computed: true, - Description: `Use local SSD`, - }, - }, - }, - ConflictsWith: []string{"node_type"}, - }, - "region": { - Type: resource_compute_node_template_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Region where nodes using the node template will be created. -If it is not provided, the provider region is used.`, - }, - "server_binding": { - Type: resource_compute_node_template_schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The server binding policy for nodes using this template. Determines -where the nodes should restart following a maintenance event.`, - MaxItems: 1, - Elem: &resource_compute_node_template_schema.Resource{ - Schema: map[string]*resource_compute_node_template_schema.Schema{ - "type": { - Type: resource_compute_node_template_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_compute_node_template_validation.StringInSlice([]string{"RESTART_NODE_ON_ANY_SERVER", "RESTART_NODE_ON_MINIMAL_SERVERS"}, false), - Description: `Type of server binding policy. 
If 'RESTART_NODE_ON_ANY_SERVER', -nodes using this template will restart on any physical server -following a maintenance event. - -If 'RESTART_NODE_ON_MINIMAL_SERVER', nodes using this template -will restart on the same physical server following a maintenance -event, instead of being live migrated to or restarted on a new -physical server. This option may be useful if you are using -software licenses tied to the underlying server characteristics -such as physical sockets or cores, to avoid the need for -additional licenses when maintenance occurs. However, VMs on such -nodes will experience outages while maintenance is applied. Possible values: ["RESTART_NODE_ON_ANY_SERVER", "RESTART_NODE_ON_MINIMAL_SERVERS"]`, - }, - }, - }, - }, - "creation_timestamp": { - Type: resource_compute_node_template_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: resource_compute_node_template_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_node_template_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeNodeTemplateCreate(d *resource_compute_node_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeNodeTemplateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_node_template_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_node_template_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeNodeTemplateName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); 
!isEmptyValue(resource_compute_node_template_reflect.ValueOf(nameProp)) && (ok || !resource_compute_node_template_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - nodeAffinityLabelsProp, err := expandComputeNodeTemplateNodeAffinityLabels(d.Get("node_affinity_labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("node_affinity_labels"); !isEmptyValue(resource_compute_node_template_reflect.ValueOf(nodeAffinityLabelsProp)) && (ok || !resource_compute_node_template_reflect.DeepEqual(v, nodeAffinityLabelsProp)) { - obj["nodeAffinityLabels"] = nodeAffinityLabelsProp - } - nodeTypeProp, err := expandComputeNodeTemplateNodeType(d.Get("node_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("node_type"); !isEmptyValue(resource_compute_node_template_reflect.ValueOf(nodeTypeProp)) && (ok || !resource_compute_node_template_reflect.DeepEqual(v, nodeTypeProp)) { - obj["nodeType"] = nodeTypeProp - } - nodeTypeFlexibilityProp, err := expandComputeNodeTemplateNodeTypeFlexibility(d.Get("node_type_flexibility"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("node_type_flexibility"); !isEmptyValue(resource_compute_node_template_reflect.ValueOf(nodeTypeFlexibilityProp)) && (ok || !resource_compute_node_template_reflect.DeepEqual(v, nodeTypeFlexibilityProp)) { - obj["nodeTypeFlexibility"] = nodeTypeFlexibilityProp - } - serverBindingProp, err := expandComputeNodeTemplateServerBinding(d.Get("server_binding"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("server_binding"); !isEmptyValue(resource_compute_node_template_reflect.ValueOf(serverBindingProp)) && (ok || !resource_compute_node_template_reflect.DeepEqual(v, serverBindingProp)) { - obj["serverBinding"] = serverBindingProp - } - cpuOvercommitTypeProp, err := expandComputeNodeTemplateCpuOvercommitType(d.Get("cpu_overcommit_type"), d, config) - if err != nil { - return err - } else if v, ok 
:= d.GetOkExists("cpu_overcommit_type"); !isEmptyValue(resource_compute_node_template_reflect.ValueOf(cpuOvercommitTypeProp)) && (ok || !resource_compute_node_template_reflect.DeepEqual(v, cpuOvercommitTypeProp)) { - obj["cpuOvercommitType"] = cpuOvercommitTypeProp - } - regionProp, err := expandComputeNodeTemplateRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_node_template_reflect.ValueOf(regionProp)) && (ok || !resource_compute_node_template_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/nodeTemplates") - if err != nil { - return err - } - - resource_compute_node_template_log.Printf("[DEBUG] Creating new NodeTemplate: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_node_template_fmt.Errorf("Error fetching project for NodeTemplate: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_node_template_schema.TimeoutCreate)) - if err != nil { - return resource_compute_node_template_fmt.Errorf("Error creating NodeTemplate: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}") - if err != nil { - return resource_compute_node_template_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating NodeTemplate", userAgent, - d.Timeout(resource_compute_node_template_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_node_template_fmt.Errorf("Error waiting to create NodeTemplate: %s", err) - } - - 
resource_compute_node_template_log.Printf("[DEBUG] Finished creating NodeTemplate %q: %#v", d.Id(), res) - - return resourceComputeNodeTemplateRead(d, meta) -} - -func resourceComputeNodeTemplateRead(d *resource_compute_node_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_node_template_fmt.Errorf("Error fetching project for NodeTemplate: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_node_template_fmt.Sprintf("ComputeNodeTemplate %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_node_template_fmt.Errorf("Error reading NodeTemplate: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeNodeTemplateCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_node_template_fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("description", flattenComputeNodeTemplateDescription(res["description"], d, config)); err != nil { - return resource_compute_node_template_fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("name", flattenComputeNodeTemplateName(res["name"], d, config)); err != nil { - return resource_compute_node_template_fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("node_affinity_labels", flattenComputeNodeTemplateNodeAffinityLabels(res["nodeAffinityLabels"], d, config)); 
err != nil { - return resource_compute_node_template_fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("node_type", flattenComputeNodeTemplateNodeType(res["nodeType"], d, config)); err != nil { - return resource_compute_node_template_fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("node_type_flexibility", flattenComputeNodeTemplateNodeTypeFlexibility(res["nodeTypeFlexibility"], d, config)); err != nil { - return resource_compute_node_template_fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("server_binding", flattenComputeNodeTemplateServerBinding(res["serverBinding"], d, config)); err != nil { - return resource_compute_node_template_fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("cpu_overcommit_type", flattenComputeNodeTemplateCpuOvercommitType(res["cpuOvercommitType"], d, config)); err != nil { - return resource_compute_node_template_fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("region", flattenComputeNodeTemplateRegion(res["region"], d, config)); err != nil { - return resource_compute_node_template_fmt.Errorf("Error reading NodeTemplate: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_node_template_fmt.Errorf("Error reading NodeTemplate: %s", err) - } - - return nil -} - -func resourceComputeNodeTemplateDelete(d *resource_compute_node_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_node_template_fmt.Errorf("Error fetching project for NodeTemplate: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}") - if err 
!= nil { - return err - } - - var obj map[string]interface{} - resource_compute_node_template_log.Printf("[DEBUG] Deleting NodeTemplate %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_node_template_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "NodeTemplate") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting NodeTemplate", userAgent, - d.Timeout(resource_compute_node_template_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_node_template_log.Printf("[DEBUG] Finished deleting NodeTemplate %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeNodeTemplateImport(d *resource_compute_node_template_schema.ResourceData, meta interface{}) ([]*resource_compute_node_template_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/nodeTemplates/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/nodeTemplates/{{name}}") - if err != nil { - return nil, resource_compute_node_template_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_node_template_schema.ResourceData{d}, nil -} - -func flattenComputeNodeTemplateCreationTimestamp(v interface{}, d *resource_compute_node_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateDescription(v interface{}, d *resource_compute_node_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateName(v interface{}, d *resource_compute_node_template_schema.ResourceData, config *Config) interface{} { 
- return v -} - -func flattenComputeNodeTemplateNodeAffinityLabels(v interface{}, d *resource_compute_node_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateNodeType(v interface{}, d *resource_compute_node_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateNodeTypeFlexibility(v interface{}, d *resource_compute_node_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cpus"] = - flattenComputeNodeTemplateNodeTypeFlexibilityCpus(original["cpus"], d, config) - transformed["memory"] = - flattenComputeNodeTemplateNodeTypeFlexibilityMemory(original["memory"], d, config) - transformed["local_ssd"] = - flattenComputeNodeTemplateNodeTypeFlexibilityLocalSsd(original["localSsd"], d, config) - return []interface{}{transformed} -} - -func flattenComputeNodeTemplateNodeTypeFlexibilityCpus(v interface{}, d *resource_compute_node_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateNodeTypeFlexibilityMemory(v interface{}, d *resource_compute_node_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateNodeTypeFlexibilityLocalSsd(v interface{}, d *resource_compute_node_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateServerBinding(v interface{}, d *resource_compute_node_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["type"] = - flattenComputeNodeTemplateServerBindingType(original["type"], d, config) - return []interface{}{transformed} -} 
- -func flattenComputeNodeTemplateServerBindingType(v interface{}, d *resource_compute_node_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateCpuOvercommitType(v interface{}, d *resource_compute_node_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeNodeTemplateRegion(v interface{}, d *resource_compute_node_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeNodeTemplateDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeTemplateName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeTemplateNodeAffinityLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandComputeNodeTemplateNodeType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeTemplateNodeTypeFlexibility(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCpus, err := expandComputeNodeTemplateNodeTypeFlexibilityCpus(original["cpus"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_node_template_reflect.ValueOf(transformedCpus); val.IsValid() && !isEmptyValue(val) { - transformed["cpus"] = transformedCpus - } - - transformedMemory, err := 
expandComputeNodeTemplateNodeTypeFlexibilityMemory(original["memory"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_node_template_reflect.ValueOf(transformedMemory); val.IsValid() && !isEmptyValue(val) { - transformed["memory"] = transformedMemory - } - - transformedLocalSsd, err := expandComputeNodeTemplateNodeTypeFlexibilityLocalSsd(original["local_ssd"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_node_template_reflect.ValueOf(transformedLocalSsd); val.IsValid() && !isEmptyValue(val) { - transformed["localSsd"] = transformedLocalSsd - } - - return transformed, nil -} - -func expandComputeNodeTemplateNodeTypeFlexibilityCpus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeTemplateNodeTypeFlexibilityMemory(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeTemplateNodeTypeFlexibilityLocalSsd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeTemplateServerBinding(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedType, err := expandComputeNodeTemplateServerBindingType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_node_template_reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - return transformed, nil -} - -func expandComputeNodeTemplateServerBindingType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeTemplateCpuOvercommitType(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeNodeTemplateRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_node_template_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputePacketMirroring() *resource_compute_packet_mirroring_schema.Resource { - return &resource_compute_packet_mirroring_schema.Resource{ - Create: resourceComputePacketMirroringCreate, - Read: resourceComputePacketMirroringRead, - Update: resourceComputePacketMirroringUpdate, - Delete: resourceComputePacketMirroringDelete, - - Importer: &resource_compute_packet_mirroring_schema.ResourceImporter{ - State: resourceComputePacketMirroringImport, - }, - - Timeouts: &resource_compute_packet_mirroring_schema.ResourceTimeout{ - Create: resource_compute_packet_mirroring_schema.DefaultTimeout(4 * resource_compute_packet_mirroring_time.Minute), - Update: resource_compute_packet_mirroring_schema.DefaultTimeout(4 * resource_compute_packet_mirroring_time.Minute), - Delete: resource_compute_packet_mirroring_schema.DefaultTimeout(4 * resource_compute_packet_mirroring_time.Minute), - }, - - Schema: map[string]*resource_compute_packet_mirroring_schema.Schema{ - "collector_ilb": { - Type: resource_compute_packet_mirroring_schema.TypeList, - Required: true, - Description: `The Forwarding Rule resource (of type load_balancing_scheme=INTERNAL) -that will be used as collector for mirrored traffic. 
The -specified forwarding rule must have is_mirroring_collector -set to true.`, - MaxItems: 1, - Elem: &resource_compute_packet_mirroring_schema.Resource{ - Schema: map[string]*resource_compute_packet_mirroring_schema.Schema{ - "url": { - Type: resource_compute_packet_mirroring_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of the forwarding rule.`, - }, - }, - }, - }, - "mirrored_resources": { - Type: resource_compute_packet_mirroring_schema.TypeList, - Required: true, - Description: `A means of specifying which resources to mirror.`, - MaxItems: 1, - Elem: &resource_compute_packet_mirroring_schema.Resource{ - Schema: map[string]*resource_compute_packet_mirroring_schema.Schema{ - "instances": { - Type: resource_compute_packet_mirroring_schema.TypeList, - Optional: true, - Description: `All the listed instances will be mirrored. Specify at most 50.`, - Elem: &resource_compute_packet_mirroring_schema.Resource{ - Schema: map[string]*resource_compute_packet_mirroring_schema.Schema{ - "url": { - Type: resource_compute_packet_mirroring_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of the instances where this rule should be active.`, - }, - }, - }, - AtLeastOneOf: []string{"mirrored_resources.0.subnetworks", "mirrored_resources.0.instances", "mirrored_resources.0.tags"}, - }, - "subnetworks": { - Type: resource_compute_packet_mirroring_schema.TypeList, - Optional: true, - Description: `All instances in one of these subnetworks will be mirrored.`, - Elem: &resource_compute_packet_mirroring_schema.Resource{ - Schema: map[string]*resource_compute_packet_mirroring_schema.Schema{ - "url": { - Type: resource_compute_packet_mirroring_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of the subnetwork where this rule should be active.`, - }, - }, - }, - AtLeastOneOf: 
[]string{"mirrored_resources.0.subnetworks", "mirrored_resources.0.instances", "mirrored_resources.0.tags"}, - }, - "tags": { - Type: resource_compute_packet_mirroring_schema.TypeList, - Optional: true, - Description: `All instances with these tags will be mirrored.`, - Elem: &resource_compute_packet_mirroring_schema.Schema{ - Type: resource_compute_packet_mirroring_schema.TypeString, - }, - AtLeastOneOf: []string{"mirrored_resources.0.subnetworks", "mirrored_resources.0.instances", "mirrored_resources.0.tags"}, - }, - }, - }, - }, - "name": { - Type: resource_compute_packet_mirroring_schema.TypeString, - Required: true, - ValidateFunc: validateGCPName, - Description: `The name of the packet mirroring rule`, - }, - "network": { - Type: resource_compute_packet_mirroring_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Specifies the mirrored VPC network. Only packets in this network -will be mirrored. All mirrored VMs should have a NIC in the given -network. All mirrored subnetworks should belong to the given network.`, - MaxItems: 1, - Elem: &resource_compute_packet_mirroring_schema.Resource{ - Schema: map[string]*resource_compute_packet_mirroring_schema.Schema{ - "url": { - Type: resource_compute_packet_mirroring_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The full self_link URL of the network where this rule is active.`, - }, - }, - }, - }, - "description": { - Type: resource_compute_packet_mirroring_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A human-readable description of the rule.`, - }, - "filter": { - Type: resource_compute_packet_mirroring_schema.TypeList, - Optional: true, - Description: `A filter for mirrored traffic. 
If unset, all traffic is mirrored.`, - MaxItems: 1, - Elem: &resource_compute_packet_mirroring_schema.Resource{ - Schema: map[string]*resource_compute_packet_mirroring_schema.Schema{ - "cidr_ranges": { - Type: resource_compute_packet_mirroring_schema.TypeList, - Optional: true, - Description: `IP CIDR ranges that apply as a filter on the source (ingress) or -destination (egress) IP in the IP header. Only IPv4 is supported.`, - Elem: &resource_compute_packet_mirroring_schema.Schema{ - Type: resource_compute_packet_mirroring_schema.TypeString, - }, - }, - "direction": { - Type: resource_compute_packet_mirroring_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_packet_mirroring_validation.StringInSlice([]string{"INGRESS", "EGRESS", "BOTH", ""}, false), - Description: `Direction of traffic to mirror. Default value: "BOTH" Possible values: ["INGRESS", "EGRESS", "BOTH"]`, - Default: "BOTH", - }, - "ip_protocols": { - Type: resource_compute_packet_mirroring_schema.TypeList, - Optional: true, - Description: `Protocols that apply as a filter on mirrored traffic. Possible values: ["tcp", "udp", "icmp"]`, - Elem: &resource_compute_packet_mirroring_schema.Schema{ - Type: resource_compute_packet_mirroring_schema.TypeString, - ValidateFunc: resource_compute_packet_mirroring_validation.StringInSlice([]string{"tcp", "udp", "icmp"}, false), - }, - }, - }, - }, - }, - "priority": { - Type: resource_compute_packet_mirroring_schema.TypeInt, - Computed: true, - Optional: true, - Description: `Since only one rule can be active at a time, priority is -used to break ties in the case of two rules that apply to -the same instances.`, - }, - "region": { - Type: resource_compute_packet_mirroring_schema.TypeString, - Computed: true, - Optional: true, - Description: `The Region in which the created address should reside. 
-If it is not provided, the provider region is used.`, - }, - "project": { - Type: resource_compute_packet_mirroring_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputePacketMirroringCreate(d *resource_compute_packet_mirroring_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputePacketMirroringName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_packet_mirroring_reflect.ValueOf(nameProp)) && (ok || !resource_compute_packet_mirroring_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputePacketMirroringDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_packet_mirroring_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_packet_mirroring_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - regionProp, err := expandComputePacketMirroringRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_packet_mirroring_reflect.ValueOf(regionProp)) && (ok || !resource_compute_packet_mirroring_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - networkProp, err := expandComputePacketMirroringNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_compute_packet_mirroring_reflect.ValueOf(networkProp)) && (ok || !resource_compute_packet_mirroring_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - priorityProp, err := 
expandComputePacketMirroringPriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); !isEmptyValue(resource_compute_packet_mirroring_reflect.ValueOf(priorityProp)) && (ok || !resource_compute_packet_mirroring_reflect.DeepEqual(v, priorityProp)) { - obj["priority"] = priorityProp - } - collectorIlbProp, err := expandComputePacketMirroringCollectorIlb(d.Get("collector_ilb"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("collector_ilb"); !isEmptyValue(resource_compute_packet_mirroring_reflect.ValueOf(collectorIlbProp)) && (ok || !resource_compute_packet_mirroring_reflect.DeepEqual(v, collectorIlbProp)) { - obj["collectorIlb"] = collectorIlbProp - } - filterProp, err := expandComputePacketMirroringFilter(d.Get("filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(resource_compute_packet_mirroring_reflect.ValueOf(filterProp)) && (ok || !resource_compute_packet_mirroring_reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - mirroredResourcesProp, err := expandComputePacketMirroringMirroredResources(d.Get("mirrored_resources"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("mirrored_resources"); !isEmptyValue(resource_compute_packet_mirroring_reflect.ValueOf(mirroredResourcesProp)) && (ok || !resource_compute_packet_mirroring_reflect.DeepEqual(v, mirroredResourcesProp)) { - obj["mirroredResources"] = mirroredResourcesProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/packetMirrorings") - if err != nil { - return err - } - - resource_compute_packet_mirroring_log.Printf("[DEBUG] Creating new PacketMirroring: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_packet_mirroring_fmt.Errorf("Error fetching project for PacketMirroring: %s", err) - } - 
billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_packet_mirroring_schema.TimeoutCreate)) - if err != nil { - return resource_compute_packet_mirroring_fmt.Errorf("Error creating PacketMirroring: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}") - if err != nil { - return resource_compute_packet_mirroring_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating PacketMirroring", userAgent, - d.Timeout(resource_compute_packet_mirroring_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_packet_mirroring_fmt.Errorf("Error waiting to create PacketMirroring: %s", err) - } - - resource_compute_packet_mirroring_log.Printf("[DEBUG] Finished creating PacketMirroring %q: %#v", d.Id(), res) - - return resourceComputePacketMirroringRead(d, meta) -} - -func resourceComputePacketMirroringRead(d *resource_compute_packet_mirroring_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_packet_mirroring_fmt.Errorf("Error fetching project for PacketMirroring: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, 
resource_compute_packet_mirroring_fmt.Sprintf("ComputePacketMirroring %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_packet_mirroring_fmt.Errorf("Error reading PacketMirroring: %s", err) - } - - if err := d.Set("name", flattenComputePacketMirroringName(res["name"], d, config)); err != nil { - return resource_compute_packet_mirroring_fmt.Errorf("Error reading PacketMirroring: %s", err) - } - if err := d.Set("description", flattenComputePacketMirroringDescription(res["description"], d, config)); err != nil { - return resource_compute_packet_mirroring_fmt.Errorf("Error reading PacketMirroring: %s", err) - } - if err := d.Set("region", flattenComputePacketMirroringRegion(res["region"], d, config)); err != nil { - return resource_compute_packet_mirroring_fmt.Errorf("Error reading PacketMirroring: %s", err) - } - if err := d.Set("network", flattenComputePacketMirroringNetwork(res["network"], d, config)); err != nil { - return resource_compute_packet_mirroring_fmt.Errorf("Error reading PacketMirroring: %s", err) - } - if err := d.Set("priority", flattenComputePacketMirroringPriority(res["priority"], d, config)); err != nil { - return resource_compute_packet_mirroring_fmt.Errorf("Error reading PacketMirroring: %s", err) - } - if err := d.Set("collector_ilb", flattenComputePacketMirroringCollectorIlb(res["collectorIlb"], d, config)); err != nil { - return resource_compute_packet_mirroring_fmt.Errorf("Error reading PacketMirroring: %s", err) - } - if err := d.Set("filter", flattenComputePacketMirroringFilter(res["filter"], d, config)); err != nil { - return resource_compute_packet_mirroring_fmt.Errorf("Error reading PacketMirroring: %s", err) - } - if err := d.Set("mirrored_resources", flattenComputePacketMirroringMirroredResources(res["mirroredResources"], d, config)); err != nil { - return resource_compute_packet_mirroring_fmt.Errorf("Error reading PacketMirroring: %s", err) - } - - return nil -} - -func 
resourceComputePacketMirroringUpdate(d *resource_compute_packet_mirroring_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_packet_mirroring_fmt.Errorf("Error fetching project for PacketMirroring: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := expandComputePacketMirroringName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_packet_mirroring_reflect.ValueOf(v)) && (ok || !resource_compute_packet_mirroring_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - regionProp, err := expandComputePacketMirroringRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_packet_mirroring_reflect.ValueOf(v)) && (ok || !resource_compute_packet_mirroring_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - priorityProp, err := expandComputePacketMirroringPriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); !isEmptyValue(resource_compute_packet_mirroring_reflect.ValueOf(v)) && (ok || !resource_compute_packet_mirroring_reflect.DeepEqual(v, priorityProp)) { - obj["priority"] = priorityProp - } - collectorIlbProp, err := expandComputePacketMirroringCollectorIlb(d.Get("collector_ilb"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("collector_ilb"); !isEmptyValue(resource_compute_packet_mirroring_reflect.ValueOf(v)) && (ok || !resource_compute_packet_mirroring_reflect.DeepEqual(v, collectorIlbProp)) { - obj["collectorIlb"] = collectorIlbProp - } - filterProp, err := 
expandComputePacketMirroringFilter(d.Get("filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(resource_compute_packet_mirroring_reflect.ValueOf(v)) && (ok || !resource_compute_packet_mirroring_reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - mirroredResourcesProp, err := expandComputePacketMirroringMirroredResources(d.Get("mirrored_resources"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("mirrored_resources"); !isEmptyValue(resource_compute_packet_mirroring_reflect.ValueOf(v)) && (ok || !resource_compute_packet_mirroring_reflect.DeepEqual(v, mirroredResourcesProp)) { - obj["mirroredResources"] = mirroredResourcesProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}") - if err != nil { - return err - } - - resource_compute_packet_mirroring_log.Printf("[DEBUG] Updating PacketMirroring %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_packet_mirroring_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_packet_mirroring_fmt.Errorf("Error updating PacketMirroring %q: %s", d.Id(), err) - } else { - resource_compute_packet_mirroring_log.Printf("[DEBUG] Finished updating PacketMirroring %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating PacketMirroring", userAgent, - d.Timeout(resource_compute_packet_mirroring_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputePacketMirroringRead(d, meta) -} - -func resourceComputePacketMirroringDelete(d *resource_compute_packet_mirroring_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) 
- if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_packet_mirroring_fmt.Errorf("Error fetching project for PacketMirroring: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_packet_mirroring_log.Printf("[DEBUG] Deleting PacketMirroring %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_packet_mirroring_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "PacketMirroring") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting PacketMirroring", userAgent, - d.Timeout(resource_compute_packet_mirroring_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_packet_mirroring_log.Printf("[DEBUG] Finished deleting PacketMirroring %q: %#v", d.Id(), res) - return nil -} - -func resourceComputePacketMirroringImport(d *resource_compute_packet_mirroring_schema.ResourceData, meta interface{}) ([]*resource_compute_packet_mirroring_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/packetMirrorings/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}") - if err != nil { - return nil, resource_compute_packet_mirroring_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_packet_mirroring_schema.ResourceData{d}, nil -} - -func 
flattenComputePacketMirroringName(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputePacketMirroringDescription(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputePacketMirroringRegion(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenComputePacketMirroringNetwork(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["url"] = - flattenComputePacketMirroringNetworkUrl(original["url"], d, config) - return []interface{}{transformed} -} - -func flattenComputePacketMirroringNetworkUrl(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputePacketMirroringPriority(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_packet_mirroring_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputePacketMirroringCollectorIlb(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["url"] = - 
flattenComputePacketMirroringCollectorIlbUrl(original["url"], d, config) - return []interface{}{transformed} -} - -func flattenComputePacketMirroringCollectorIlbUrl(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputePacketMirroringFilter(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["ip_protocols"] = - flattenComputePacketMirroringFilterIpProtocols(original["IPProtocols"], d, config) - transformed["cidr_ranges"] = - flattenComputePacketMirroringFilterCidrRanges(original["cidrRanges"], d, config) - transformed["direction"] = - flattenComputePacketMirroringFilterDirection(original["direction"], d, config) - return []interface{}{transformed} -} - -func flattenComputePacketMirroringFilterIpProtocols(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputePacketMirroringFilterCidrRanges(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputePacketMirroringFilterDirection(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputePacketMirroringMirroredResources(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["subnetworks"] = - flattenComputePacketMirroringMirroredResourcesSubnetworks(original["subnetworks"], d, config) - 
transformed["instances"] = - flattenComputePacketMirroringMirroredResourcesInstances(original["instances"], d, config) - transformed["tags"] = - flattenComputePacketMirroringMirroredResourcesTags(original["tags"], d, config) - return []interface{}{transformed} -} - -func flattenComputePacketMirroringMirroredResourcesSubnetworks(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "url": flattenComputePacketMirroringMirroredResourcesSubnetworksUrl(original["url"], d, config), - }) - } - return transformed -} - -func flattenComputePacketMirroringMirroredResourcesSubnetworksUrl(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputePacketMirroringMirroredResourcesInstances(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "url": flattenComputePacketMirroringMirroredResourcesInstancesUrl(original["url"], d, config), - }) - } - return transformed -} - -func flattenComputePacketMirroringMirroredResourcesInstancesUrl(v interface{}, d *resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputePacketMirroringMirroredResourcesTags(v interface{}, d 
*resource_compute_packet_mirroring_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputePacketMirroringName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputePacketMirroringDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputePacketMirroringRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputePacketMirroringNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrl, err := expandComputePacketMirroringNetworkUrl(original["url"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_packet_mirroring_reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { - transformed["url"] = transformedUrl - } - - return transformed, nil -} - -func expandComputePacketMirroringNetworkUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_packet_mirroring_fmt.Errorf("Invalid value for url: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputePacketMirroringPriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputePacketMirroringCollectorIlb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrl, err 
:= expandComputePacketMirroringCollectorIlbUrl(original["url"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_packet_mirroring_reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { - transformed["url"] = transformedUrl - } - - return transformed, nil -} - -func expandComputePacketMirroringCollectorIlbUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("forwardingRules", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_packet_mirroring_fmt.Errorf("Invalid value for url: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputePacketMirroringFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIpProtocols, err := expandComputePacketMirroringFilterIpProtocols(original["ip_protocols"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_packet_mirroring_reflect.ValueOf(transformedIpProtocols); val.IsValid() && !isEmptyValue(val) { - transformed["IPProtocols"] = transformedIpProtocols - } - - transformedCidrRanges, err := expandComputePacketMirroringFilterCidrRanges(original["cidr_ranges"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_packet_mirroring_reflect.ValueOf(transformedCidrRanges); val.IsValid() && !isEmptyValue(val) { - transformed["cidrRanges"] = transformedCidrRanges - } - - transformedDirection, err := expandComputePacketMirroringFilterDirection(original["direction"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_packet_mirroring_reflect.ValueOf(transformedDirection); val.IsValid() && !isEmptyValue(val) { - transformed["direction"] = 
transformedDirection - } - - return transformed, nil -} - -func expandComputePacketMirroringFilterIpProtocols(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputePacketMirroringFilterCidrRanges(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputePacketMirroringFilterDirection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputePacketMirroringMirroredResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSubnetworks, err := expandComputePacketMirroringMirroredResourcesSubnetworks(original["subnetworks"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_packet_mirroring_reflect.ValueOf(transformedSubnetworks); val.IsValid() && !isEmptyValue(val) { - transformed["subnetworks"] = transformedSubnetworks - } - - transformedInstances, err := expandComputePacketMirroringMirroredResourcesInstances(original["instances"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_packet_mirroring_reflect.ValueOf(transformedInstances); val.IsValid() && !isEmptyValue(val) { - transformed["instances"] = transformedInstances - } - - transformedTags, err := expandComputePacketMirroringMirroredResourcesTags(original["tags"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_packet_mirroring_reflect.ValueOf(transformedTags); val.IsValid() && !isEmptyValue(val) { - transformed["tags"] = transformedTags - } - - return transformed, nil -} - -func expandComputePacketMirroringMirroredResourcesSubnetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrl, err := expandComputePacketMirroringMirroredResourcesSubnetworksUrl(original["url"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_packet_mirroring_reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { - transformed["url"] = transformedUrl - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputePacketMirroringMirroredResourcesSubnetworksUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_packet_mirroring_fmt.Errorf("Invalid value for url: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputePacketMirroringMirroredResourcesInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrl, err := expandComputePacketMirroringMirroredResourcesInstancesUrl(original["url"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_packet_mirroring_reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { - transformed["url"] = transformedUrl - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputePacketMirroringMirroredResourcesInstancesUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseZonalFieldValue("instances", v.(string), "project", "zone", d, config, true) - if err != nil { - return 
nil, resource_compute_packet_mirroring_fmt.Errorf("Invalid value for url: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputePacketMirroringMirroredResourcesTags(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputePerInstanceConfig() *resource_compute_per_instance_config_schema.Resource { - return &resource_compute_per_instance_config_schema.Resource{ - Create: resourceComputePerInstanceConfigCreate, - Read: resourceComputePerInstanceConfigRead, - Update: resourceComputePerInstanceConfigUpdate, - Delete: resourceComputePerInstanceConfigDelete, - - Importer: &resource_compute_per_instance_config_schema.ResourceImporter{ - State: resourceComputePerInstanceConfigImport, - }, - - Timeouts: &resource_compute_per_instance_config_schema.ResourceTimeout{ - Create: resource_compute_per_instance_config_schema.DefaultTimeout(15 * resource_compute_per_instance_config_time.Minute), - Update: resource_compute_per_instance_config_schema.DefaultTimeout(6 * resource_compute_per_instance_config_time.Minute), - Delete: resource_compute_per_instance_config_schema.DefaultTimeout(15 * resource_compute_per_instance_config_time.Minute), - }, - - Schema: map[string]*resource_compute_per_instance_config_schema.Schema{ - "instance_group_manager": { - Type: resource_compute_per_instance_config_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The instance group manager this instance config is part of.`, - }, - "name": { - Type: resource_compute_per_instance_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name for this per-instance config and its corresponding instance.`, - }, - "preserved_state": { - Type: resource_compute_per_instance_config_schema.TypeList, - Optional: true, - Description: `The preserved state for this instance.`, - MaxItems: 1, - Elem: 
&resource_compute_per_instance_config_schema.Resource{ - Schema: map[string]*resource_compute_per_instance_config_schema.Schema{ - "disk": { - Type: resource_compute_per_instance_config_schema.TypeSet, - Optional: true, - Description: `Stateful disks for the instance.`, - Elem: computePerInstanceConfigPreservedStateDiskSchema(), - }, - "metadata": { - Type: resource_compute_per_instance_config_schema.TypeMap, - Optional: true, - Description: `Preserved metadata defined for this instance. This is a list of key->value pairs.`, - Elem: &resource_compute_per_instance_config_schema.Schema{Type: resource_compute_per_instance_config_schema.TypeString}, - }, - }, - }, - }, - "zone": { - Type: resource_compute_per_instance_config_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Zone where the containing instance group manager is located`, - }, - "minimal_action": { - Type: resource_compute_per_instance_config_schema.TypeString, - Optional: true, - Default: "NONE", - }, - "most_disruptive_allowed_action": { - Type: resource_compute_per_instance_config_schema.TypeString, - Optional: true, - Default: "REPLACE", - }, - "remove_instance_state_on_destroy": { - Type: resource_compute_per_instance_config_schema.TypeBool, - Optional: true, - Default: false, - }, - "project": { - Type: resource_compute_per_instance_config_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func computePerInstanceConfigPreservedStateDiskSchema() *resource_compute_per_instance_config_schema.Resource { - return &resource_compute_per_instance_config_schema.Resource{ - Schema: map[string]*resource_compute_per_instance_config_schema.Schema{ - "device_name": { - Type: resource_compute_per_instance_config_schema.TypeString, - Required: true, - Description: `A unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance.`, 
- }, - "source": { - Type: resource_compute_per_instance_config_schema.TypeString, - Required: true, - Description: `The URI of an existing persistent disk to attach under the specified device-name in the format -'projects/project-id/zones/zone/disks/disk-name'.`, - }, - "delete_rule": { - Type: resource_compute_per_instance_config_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_per_instance_config_validation.StringInSlice([]string{"NEVER", "ON_PERMANENT_INSTANCE_DELETION", ""}, false), - Description: `A value that prescribes what should happen to the stateful disk when the VM instance is deleted. -The available options are 'NEVER' and 'ON_PERMANENT_INSTANCE_DELETION'. -'NEVER' - detach the disk when the VM is deleted, but do not delete the disk. -'ON_PERMANENT_INSTANCE_DELETION' will delete the stateful disk when the VM is permanently -deleted from the instance group. Default value: "NEVER" Possible values: ["NEVER", "ON_PERMANENT_INSTANCE_DELETION"]`, - Default: "NEVER", - }, - "mode": { - Type: resource_compute_per_instance_config_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_per_instance_config_validation.StringInSlice([]string{"READ_ONLY", "READ_WRITE", ""}, false), - Description: `The mode of the disk. 
Default value: "READ_WRITE" Possible values: ["READ_ONLY", "READ_WRITE"]`, - Default: "READ_WRITE", - }, - }, - } -} - -func resourceComputePerInstanceConfigCreate(d *resource_compute_per_instance_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputePerInstanceConfigName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_per_instance_config_reflect.ValueOf(nameProp)) && (ok || !resource_compute_per_instance_config_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - preservedStateProp, err := expandNestedComputePerInstanceConfigPreservedState(d.Get("preserved_state"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("preserved_state"); !isEmptyValue(resource_compute_per_instance_config_reflect.ValueOf(preservedStateProp)) && (ok || !resource_compute_per_instance_config_reflect.DeepEqual(v, preservedStateProp)) { - obj["preservedState"] = preservedStateProp - } - - obj, err = resourceComputePerInstanceConfigEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "instanceGroupManager/{{project}}/{{zone}}/{{instance_group_manager}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/createInstances") - if err != nil { - return err - } - - resource_compute_per_instance_config_log.Printf("[DEBUG] Creating new PerInstanceConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_per_instance_config_fmt.Errorf("Error fetching project for 
PerInstanceConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_per_instance_config_schema.TimeoutCreate)) - if err != nil { - return resource_compute_per_instance_config_fmt.Errorf("Error creating PerInstanceConfig: %s", err) - } - - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{instance_group_manager}}/{{name}}") - if err != nil { - return resource_compute_per_instance_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating PerInstanceConfig", userAgent, - d.Timeout(resource_compute_per_instance_config_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_per_instance_config_fmt.Errorf("Error waiting to create PerInstanceConfig: %s", err) - } - - resource_compute_per_instance_config_log.Printf("[DEBUG] Finished creating PerInstanceConfig %q: %#v", d.Id(), res) - - return resourceComputePerInstanceConfigRead(d, meta) -} - -func resourceComputePerInstanceConfigRead(d *resource_compute_per_instance_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/listPerInstanceConfigs") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_per_instance_config_fmt.Errorf("Error fetching project for PerInstanceConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "POST", 
billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_per_instance_config_fmt.Sprintf("ComputePerInstanceConfig %q", d.Id())) - } - - res, err = flattenNestedComputePerInstanceConfig(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_per_instance_config_log.Printf("[DEBUG] Removing ComputePerInstanceConfig because it couldn't be matched.") - d.SetId("") - return nil - } - - if _, ok := d.GetOkExists("minimal_action"); !ok { - if err := d.Set("minimal_action", "NONE"); err != nil { - return resource_compute_per_instance_config_fmt.Errorf("Error setting minimal_action: %s", err) - } - } - if _, ok := d.GetOkExists("most_disruptive_allowed_action"); !ok { - if err := d.Set("most_disruptive_allowed_action", "REPLACE"); err != nil { - return resource_compute_per_instance_config_fmt.Errorf("Error setting most_disruptive_allowed_action: %s", err) - } - } - if _, ok := d.GetOkExists("remove_instance_state_on_destroy"); !ok { - if err := d.Set("remove_instance_state_on_destroy", false); err != nil { - return resource_compute_per_instance_config_fmt.Errorf("Error setting remove_instance_state_on_destroy: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return resource_compute_per_instance_config_fmt.Errorf("Error reading PerInstanceConfig: %s", err) - } - - if err := d.Set("name", flattenNestedComputePerInstanceConfigName(res["name"], d, config)); err != nil { - return resource_compute_per_instance_config_fmt.Errorf("Error reading PerInstanceConfig: %s", err) - } - if err := d.Set("preserved_state", flattenNestedComputePerInstanceConfigPreservedState(res["preservedState"], d, config)); err != nil { - return resource_compute_per_instance_config_fmt.Errorf("Error reading PerInstanceConfig: %s", err) - } - - return nil -} - -func resourceComputePerInstanceConfigUpdate(d *resource_compute_per_instance_config_schema.ResourceData, meta interface{}) error { - config 
:= meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_per_instance_config_fmt.Errorf("Error fetching project for PerInstanceConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputePerInstanceConfigName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_per_instance_config_reflect.ValueOf(v)) && (ok || !resource_compute_per_instance_config_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - preservedStateProp, err := expandNestedComputePerInstanceConfigPreservedState(d.Get("preserved_state"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("preserved_state"); !isEmptyValue(resource_compute_per_instance_config_reflect.ValueOf(v)) && (ok || !resource_compute_per_instance_config_reflect.DeepEqual(v, preservedStateProp)) { - obj["preservedState"] = preservedStateProp - } - - obj, err = resourceComputePerInstanceConfigUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "instanceGroupManager/{{project}}/{{zone}}/{{instance_group_manager}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/updatePerInstanceConfigs") - if err != nil { - return err - } - - resource_compute_per_instance_config_log.Printf("[DEBUG] Updating PerInstanceConfig %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, 
d.Timeout(resource_compute_per_instance_config_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_per_instance_config_fmt.Errorf("Error updating PerInstanceConfig %q: %s", d.Id(), err) - } else { - resource_compute_per_instance_config_log.Printf("[DEBUG] Finished updating PerInstanceConfig %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating PerInstanceConfig", userAgent, - d.Timeout(resource_compute_per_instance_config_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - instanceName, err := replaceVars(d, config, "zones/{{zone}}/instances/{{name}}") - if err != nil { - return err - } - - obj = make(map[string]interface{}) - obj["instances"] = []string{instanceName} - - minAction := d.Get("minimal_action") - if minAction == "" { - minAction = "NONE" - } - obj["minimalAction"] = minAction - - mostDisruptiveAction := d.Get("most_disruptive_action_allowed") - if mostDisruptiveAction != "" { - mostDisruptiveAction = "REPLACE" - } - obj["mostDisruptiveActionAllowed"] = mostDisruptiveAction - - url, err = replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/applyUpdatesToInstances") - if err != nil { - return err - } - - resource_compute_per_instance_config_log.Printf("[DEBUG] Applying updates to PerInstanceConfig %q: %#v", d.Id(), obj) - res, err = sendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(resource_compute_per_instance_config_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_per_instance_config_fmt.Errorf("Error updating PerInstanceConfig %q: %s", d.Id(), err) - } - - err = computeOperationWaitTime( - config, res, project, "Applying update to PerInstanceConfig", userAgent, - d.Timeout(resource_compute_per_instance_config_schema.TimeoutUpdate)) - - if err != nil { - return err - } - return resourceComputePerInstanceConfigRead(d, meta) -} - -func 
resourceComputePerInstanceConfigDelete(d *resource_compute_per_instance_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "instanceGroupManager/{{project}}/{{zone}}/{{instance_group_manager}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/deletePerInstanceConfigs") - if err != nil { - return err - } - - var obj map[string]interface{} - obj = map[string]interface{}{ - "names": [1]string{d.Get("name").(string)}, - } - resource_compute_per_instance_config_log.Printf("[DEBUG] Deleting PerInstanceConfig %q", d.Id()) - - res, err := sendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(resource_compute_per_instance_config_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "PerInstanceConfig") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting PerInstanceConfig", userAgent, - d.Timeout(resource_compute_per_instance_config_schema.TimeoutDelete)) - - if err != nil { - return err - } - - if d.Get("remove_instance_state_on_destroy").(bool) { - - instanceName, err := replaceVars(d, config, "zones/{{zone}}/instances/{{name}}") - if err != nil { - return err - } - - obj = make(map[string]interface{}) - obj["instances"] = []string{instanceName} - - url, err = replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/applyUpdatesToInstances") - if err != nil { - return err - } - - resource_compute_per_instance_config_log.Printf("[DEBUG] Applying updates to PerInstanceConfig %q: %#v", d.Id(), obj) - res, 
err = sendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(resource_compute_per_instance_config_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_per_instance_config_fmt.Errorf("Error deleting PerInstanceConfig %q: %s", d.Id(), err) - } - - err = computeOperationWaitTime( - config, res, project, "Applying update to PerInstanceConfig", userAgent, - d.Timeout(resource_compute_per_instance_config_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_per_instance_config_fmt.Errorf("Error deleting PerInstanceConfig %q: %s", d.Id(), err) - } - - err = PollingWaitTime(resourceComputePerInstanceConfigPollRead(d, meta), PollCheckInstanceConfigDeleted, "Deleting PerInstanceConfig", d.Timeout(resource_compute_per_instance_config_schema.TimeoutDelete), 1) - if err != nil { - return resource_compute_per_instance_config_fmt.Errorf("Error waiting for delete on PerInstanceConfig %q: %s", d.Id(), err) - } - } - - resource_compute_per_instance_config_log.Printf("[DEBUG] Finished deleting PerInstanceConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceComputePerInstanceConfigImport(d *resource_compute_per_instance_config_schema.ResourceData, meta interface{}) ([]*resource_compute_per_instance_config_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{project}}/{{zone}}/{{instance_group_manager}}/{{name}}") - if err != nil { - return nil, resource_compute_per_instance_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if err := d.Set("minimal_action", "NONE"); err != nil { - return nil, resource_compute_per_instance_config_fmt.Errorf("Error setting minimal_action: %s", err) - } - 
if err := d.Set("most_disruptive_allowed_action", "REPLACE"); err != nil { - return nil, resource_compute_per_instance_config_fmt.Errorf("Error setting most_disruptive_allowed_action: %s", err) - } - if err := d.Set("remove_instance_state_on_destroy", false); err != nil { - return nil, resource_compute_per_instance_config_fmt.Errorf("Error setting remove_instance_state_on_destroy: %s", err) - } - - return []*resource_compute_per_instance_config_schema.ResourceData{d}, nil -} - -func flattenNestedComputePerInstanceConfigName(v interface{}, d *resource_compute_per_instance_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputePerInstanceConfigPreservedState(v interface{}, d *resource_compute_per_instance_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["metadata"] = - flattenNestedComputePerInstanceConfigPreservedStateMetadata(original["metadata"], d, config) - transformed["disk"] = - flattenNestedComputePerInstanceConfigPreservedStateDisk(original["disks"], d, config) - return []interface{}{transformed} -} - -func flattenNestedComputePerInstanceConfigPreservedStateMetadata(v interface{}, d *resource_compute_per_instance_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputePerInstanceConfigPreservedStateDisk(v interface{}, d *resource_compute_per_instance_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - disks := v.(map[string]interface{}) - transformed := make([]interface{}, 0, len(disks)) - for devName, deleteRuleRaw := range disks { - diskObj := deleteRuleRaw.(map[string]interface{}) - source, err := getRelativePath(diskObj["source"].(string)) - if err != nil { - source = diskObj["source"].(string) - } - transformed = append(transformed, 
map[string]interface{}{ - "device_name": devName, - "delete_rule": diskObj["autoDelete"], - "source": source, - "mode": diskObj["mode"], - }) - } - return transformed -} - -func expandNestedComputePerInstanceConfigName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputePerInstanceConfigPreservedState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMetadata, err := expandNestedComputePerInstanceConfigPreservedStateMetadata(original["metadata"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_per_instance_config_reflect.ValueOf(transformedMetadata); val.IsValid() && !isEmptyValue(val) { - transformed["metadata"] = transformedMetadata - } - - transformedDisk, err := expandNestedComputePerInstanceConfigPreservedStateDisk(original["disk"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_per_instance_config_reflect.ValueOf(transformedDisk); val.IsValid() && !isEmptyValue(val) { - transformed["disks"] = transformedDisk - } - - return transformed, nil -} - -func expandNestedComputePerInstanceConfigPreservedStateMetadata(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandNestedComputePerInstanceConfigPreservedStateDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - l := v.(*resource_compute_per_instance_config_schema.Set).List() - req := make(map[string]interface{}) - for _, raw := range l { 
- if raw == nil { - continue - } - original := raw.(map[string]interface{}) - deviceName := original["device_name"].(string) - diskObj := make(map[string]interface{}) - deleteRule := original["delete_rule"].(string) - if deleteRule != "" { - diskObj["autoDelete"] = deleteRule - } - source := original["source"] - if source != "" { - diskObj["source"] = source - } - mode := original["mode"] - if source != "" { - diskObj["mode"] = mode - } - req[deviceName] = diskObj - } - return req, nil -} - -func resourceComputePerInstanceConfigEncoder(d *resource_compute_per_instance_config_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - wrappedReq := map[string]interface{}{ - "instances": []interface{}{obj}, - } - return wrappedReq, nil -} - -func resourceComputePerInstanceConfigUpdateEncoder(d *resource_compute_per_instance_config_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - wrappedReq := map[string]interface{}{ - "perInstanceConfigs": []interface{}{obj}, - } - return wrappedReq, nil -} - -func flattenNestedComputePerInstanceConfig(d *resource_compute_per_instance_config_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["items"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_compute_per_instance_config_fmt.Errorf("expected list or map for value items. 
Actual value: %v", v) - } - - _, item, err := resourceComputePerInstanceConfigFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputePerInstanceConfigFindNestedObjectInList(d *resource_compute_per_instance_config_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName, err := expandNestedComputePerInstanceConfigName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputePerInstanceConfigName(expectedName, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemName := flattenNestedComputePerInstanceConfigName(item["name"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_per_instance_config_reflect.ValueOf(itemName)) && isEmptyValue(resource_compute_per_instance_config_reflect.ValueOf(expectedFlattenedName))) && !resource_compute_per_instance_config_reflect.DeepEqual(itemName, expectedFlattenedName) { - resource_compute_per_instance_config_log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - resource_compute_per_instance_config_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func resourceComputeProjectDefaultNetworkTier() *resource_compute_project_default_network_tier_schema.Resource { - return &resource_compute_project_default_network_tier_schema.Resource{ - Create: resourceComputeProjectDefaultNetworkTierCreateOrUpdate, - Read: resourceComputeProjectDefaultNetworkTierRead, - Update: resourceComputeProjectDefaultNetworkTierCreateOrUpdate, - Delete: resourceComputeProjectDefaultNetworkTierDelete, - Importer: &resource_compute_project_default_network_tier_schema.ResourceImporter{ - State: 
resource_compute_project_default_network_tier_schema.ImportStatePassthrough, - }, - - Timeouts: &resource_compute_project_default_network_tier_schema.ResourceTimeout{ - Create: resource_compute_project_default_network_tier_schema.DefaultTimeout(4 * resource_compute_project_default_network_tier_time.Minute), - }, - - SchemaVersion: 0, - - Schema: map[string]*resource_compute_project_default_network_tier_schema.Schema{ - "network_tier": { - Type: resource_compute_project_default_network_tier_schema.TypeString, - Required: true, - Description: `The default network tier to be configured for the project. This field can take the following values: PREMIUM or STANDARD.`, - ValidateFunc: resource_compute_project_default_network_tier_validation.StringInSlice([]string{"PREMIUM", "STANDARD"}, false), - }, - - "project": { - Type: resource_compute_project_default_network_tier_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeProjectDefaultNetworkTierCreateOrUpdate(d *resource_compute_project_default_network_tier_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - projectID, err := getProject(d, config) - if err != nil { - return err - } - - request := &resource_compute_project_default_network_tier_compute.ProjectsSetDefaultNetworkTierRequest{ - NetworkTier: d.Get("network_tier").(string), - } - op, err := config.NewComputeClient(userAgent).Projects.SetDefaultNetworkTier(projectID, request).Do() - if err != nil { - return resource_compute_project_default_network_tier_fmt.Errorf("SetDefaultNetworkTier failed: %s", err) - } - - resource_compute_project_default_network_tier_log.Printf("[DEBUG] SetDefaultNetworkTier: %d (%s)", op.Id, op.SelfLink) - err = computeOperationWaitTime(config, op, projectID, "SetDefaultNetworkTier", userAgent, d.Timeout(resource_compute_project_default_network_tier_schema.TimeoutCreate)) - if err != nil { - return resource_compute_project_default_network_tier_fmt.Errorf("SetDefaultNetworkTier failed: %s", err) - } - - d.SetId(projectID) - - return resourceComputeProjectDefaultNetworkTierRead(d, meta) -} - -func resourceComputeProjectDefaultNetworkTierRead(d *resource_compute_project_default_network_tier_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - projectId := d.Id() - - project, err := config.NewComputeClient(userAgent).Projects.Get(projectId).Do() - if err != nil { - return handleNotFoundError(err, d, resource_compute_project_default_network_tier_fmt.Sprintf("Project data for project %q", projectId)) - } - - err = d.Set("network_tier", project.DefaultNetworkTier) - if err != nil { - return 
resource_compute_project_default_network_tier_fmt.Errorf("Error setting default network tier: %s", err) - } - - if err := d.Set("project", projectId); err != nil { - return resource_compute_project_default_network_tier_fmt.Errorf("Error setting project: %s", err) - } - - return nil -} - -func resourceComputeProjectDefaultNetworkTierDelete(d *resource_compute_project_default_network_tier_schema.ResourceData, meta interface{}) error { - - resource_compute_project_default_network_tier_log.Printf("[WARNING] Default Network Tier will be only removed from Terraform state, but will be left intact on GCP.") - - return resource_compute_project_default_network_tier_schema.RemoveFromState(d, meta) -} - -func resourceComputeProjectMetadata() *resource_compute_project_metadata_schema.Resource { - return &resource_compute_project_metadata_schema.Resource{ - Create: resourceComputeProjectMetadataCreateOrUpdate, - Read: resourceComputeProjectMetadataRead, - Update: resourceComputeProjectMetadataCreateOrUpdate, - Delete: resourceComputeProjectMetadataDelete, - Importer: &resource_compute_project_metadata_schema.ResourceImporter{ - State: resource_compute_project_metadata_schema.ImportStatePassthrough, - }, - - Timeouts: &resource_compute_project_metadata_schema.ResourceTimeout{ - Create: resource_compute_project_metadata_schema.DefaultTimeout(4 * resource_compute_project_metadata_time.Minute), - Delete: resource_compute_project_metadata_schema.DefaultTimeout(4 * resource_compute_project_metadata_time.Minute), - }, - - SchemaVersion: 0, - - Schema: map[string]*resource_compute_project_metadata_schema.Schema{ - "metadata": { - Type: resource_compute_project_metadata_schema.TypeMap, - Required: true, - Elem: &resource_compute_project_metadata_schema.Schema{Type: resource_compute_project_metadata_schema.TypeString}, - Description: `A series of key value pairs.`, - }, - - "project": { - Type: resource_compute_project_metadata_schema.TypeString, - Optional: true, - Computed: true, - 
ForceNew: true, - Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeProjectMetadataCreateOrUpdate(d *resource_compute_project_metadata_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - projectID, err := getProject(d, config) - if err != nil { - return err - } - - md := &resource_compute_project_metadata_compute.Metadata{ - Items: expandComputeMetadata(d.Get("metadata").(map[string]interface{})), - } - - err = resourceComputeProjectMetadataSet(projectID, userAgent, config, md, d.Timeout(resource_compute_project_metadata_schema.TimeoutCreate)) - if err != nil { - return resource_compute_project_metadata_fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) - } - - d.SetId(projectID) - - return resourceComputeProjectMetadataRead(d, meta) -} - -func resourceComputeProjectMetadataRead(d *resource_compute_project_metadata_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - projectId := d.Id() - - project, err := config.NewComputeClient(userAgent).Projects.Get(projectId).Do() - if err != nil { - return handleNotFoundError(err, d, resource_compute_project_metadata_fmt.Sprintf("Project metadata for project %q", projectId)) - } - - err = d.Set("metadata", flattenMetadata(project.CommonInstanceMetadata)) - if err != nil { - return resource_compute_project_metadata_fmt.Errorf("Error setting metadata: %s", err) - } - - if err := d.Set("project", projectId); err != nil { - return resource_compute_project_metadata_fmt.Errorf("Error setting project: %s", err) - } - - return nil -} - -func resourceComputeProjectMetadataDelete(d *resource_compute_project_metadata_schema.ResourceData, meta 
interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - projectID, err := getProject(d, config) - if err != nil { - return err - } - - md := &resource_compute_project_metadata_compute.Metadata{} - err = resourceComputeProjectMetadataSet(projectID, userAgent, config, md, d.Timeout(resource_compute_project_metadata_schema.TimeoutDelete)) - if err != nil { - return resource_compute_project_metadata_fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) - } - - return resourceComputeProjectMetadataRead(d, meta) -} - -func resourceComputeProjectMetadataSet(projectID, userAgent string, config *Config, md *resource_compute_project_metadata_compute.Metadata, timeout resource_compute_project_metadata_time.Duration) error { - createMD := func() error { - resource_compute_project_metadata_log.Printf("[DEBUG] Loading project service: %s", projectID) - project, err := config.NewComputeClient(userAgent).Projects.Get(projectID).Do() - if err != nil { - return resource_compute_project_metadata_fmt.Errorf("Error loading project '%s': %s", projectID, err) - } - - md.Fingerprint = project.CommonInstanceMetadata.Fingerprint - op, err := config.NewComputeClient(userAgent).Projects.SetCommonInstanceMetadata(projectID, md).Do() - if err != nil { - return resource_compute_project_metadata_fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) - } - - resource_compute_project_metadata_log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) - return computeOperationWaitTime(config, op, project.Name, "SetCommonMetadata", userAgent, timeout) - } - - err := MetadataRetryWrapper(createMD) - return err -} - -type metadataPresentBehavior bool - -const ( - failIfPresent metadataPresentBehavior = true - overwritePresent metadataPresentBehavior = false -) - -func resourceComputeProjectMetadataItem() *resource_compute_project_metadata_item_schema.Resource { - return 
&resource_compute_project_metadata_item_schema.Resource{ - Create: resourceComputeProjectMetadataItemCreate, - Read: resourceComputeProjectMetadataItemRead, - Update: resourceComputeProjectMetadataItemUpdate, - Delete: resourceComputeProjectMetadataItemDelete, - Importer: &resource_compute_project_metadata_item_schema.ResourceImporter{ - State: resource_compute_project_metadata_item_schema.ImportStatePassthrough, - }, - - Schema: map[string]*resource_compute_project_metadata_item_schema.Schema{ - "key": { - Type: resource_compute_project_metadata_item_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The metadata key to set.`, - }, - "value": { - Type: resource_compute_project_metadata_item_schema.TypeString, - Required: true, - Description: `The value to set for the given metadata key.`, - }, - "project": { - Type: resource_compute_project_metadata_item_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, - }, - }, - - Timeouts: &resource_compute_project_metadata_item_schema.ResourceTimeout{ - Create: resource_compute_project_metadata_item_schema.DefaultTimeout(7 * resource_compute_project_metadata_item_time.Minute), - Update: resource_compute_project_metadata_item_schema.DefaultTimeout(7 * resource_compute_project_metadata_item_time.Minute), - Delete: resource_compute_project_metadata_item_schema.DefaultTimeout(7 * resource_compute_project_metadata_item_time.Minute), - }, - UseJSONNumber: true, - } -} - -func resourceComputeProjectMetadataItemCreate(d *resource_compute_project_metadata_item_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - projectID, err := getProject(d, config) - if err != nil { - return err - } - - key := d.Get("key").(string) - val := d.Get("value").(string) - - err = updateComputeCommonInstanceMetadata(config, projectID, key, userAgent, &val, d.Timeout(resource_compute_project_metadata_item_schema.TimeoutCreate), failIfPresent) - if err != nil { - return err - } - - d.SetId(key) - - return nil -} - -func resourceComputeProjectMetadataItemRead(d *resource_compute_project_metadata_item_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - projectID, err := getProject(d, config) - if err != nil { - return err - } - - resource_compute_project_metadata_item_log.Printf("[DEBUG] Loading project metadata: %s", projectID) - project, err := config.NewComputeClient(userAgent).Projects.Get(projectID).Do() - if err != nil { - return resource_compute_project_metadata_item_fmt.Errorf("Error loading project '%s': %s", projectID, err) - } - - md := flattenMetadata(project.CommonInstanceMetadata) - val, ok := md[d.Id()] - if !ok { - - d.SetId("") - return nil - 
} - - if err := d.Set("project", projectID); err != nil { - return resource_compute_project_metadata_item_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("key", d.Id()); err != nil { - return resource_compute_project_metadata_item_fmt.Errorf("Error setting key: %s", err) - } - if err := d.Set("value", val); err != nil { - return resource_compute_project_metadata_item_fmt.Errorf("Error setting value: %s", err) - } - - return nil -} - -func resourceComputeProjectMetadataItemUpdate(d *resource_compute_project_metadata_item_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - projectID, err := getProject(d, config) - if err != nil { - return err - } - - if d.HasChange("value") { - key := d.Get("key").(string) - _, n := d.GetChange("value") - new := n.(string) - - err = updateComputeCommonInstanceMetadata(config, projectID, key, userAgent, &new, d.Timeout(resource_compute_project_metadata_item_schema.TimeoutUpdate), overwritePresent) - if err != nil { - return err - } - } - return nil -} - -func resourceComputeProjectMetadataItemDelete(d *resource_compute_project_metadata_item_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - projectID, err := getProject(d, config) - if err != nil { - return err - } - - key := d.Get("key").(string) - - err = updateComputeCommonInstanceMetadata(config, projectID, key, userAgent, nil, d.Timeout(resource_compute_project_metadata_item_schema.TimeoutDelete), overwritePresent) - if err != nil { - return err - } - - d.SetId("") - return nil -} - -func updateComputeCommonInstanceMetadata(config *Config, projectID, key, userAgent string, afterVal *string, timeout resource_compute_project_metadata_item_time.Duration, failIfPresent metadataPresentBehavior) error { - updateMD := 
func() error { - lockName := resource_compute_project_metadata_item_fmt.Sprintf("projects/%s/commoninstancemetadata", projectID) - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - resource_compute_project_metadata_item_log.Printf("[DEBUG] Loading project metadata: %s", projectID) - project, err := config.NewComputeClient(userAgent).Projects.Get(projectID).Do() - if err != nil { - return resource_compute_project_metadata_item_fmt.Errorf("Error loading project '%s': %s", projectID, err) - } - - md := flattenMetadata(project.CommonInstanceMetadata) - - val, ok := md[key] - - if !ok { - if afterVal == nil { - - return nil - } - } else { - if failIfPresent { - return resource_compute_project_metadata_item_fmt.Errorf("key %q already present in metadata for project %q. Use `terraform import` to manage it with Terraform", key, projectID) - } - if afterVal != nil && *afterVal == val { - - return nil - } - } - - if afterVal == nil { - delete(md, key) - } else { - md[key] = *afterVal - } - - op, err := config.NewComputeClient(userAgent).Projects.SetCommonInstanceMetadata( - projectID, - &resource_compute_project_metadata_item_compute.Metadata{ - Fingerprint: project.CommonInstanceMetadata.Fingerprint, - Items: expandComputeMetadata(md), - }, - ).Do() - - if err != nil { - return err - } - - resource_compute_project_metadata_item_log.Printf("[DEBUG] SetCommonInstanceMetadata: %d (%s)", op.Id, op.SelfLink) - - return computeOperationWaitTime(config, op, project.Name, "SetCommonInstanceMetadata", userAgent, timeout) - } - - return MetadataRetryWrapper(updateMD) -} - -func resourceComputeRegionAutoscaler() *resource_compute_region_autoscaler_schema.Resource { - return &resource_compute_region_autoscaler_schema.Resource{ - Create: resourceComputeRegionAutoscalerCreate, - Read: resourceComputeRegionAutoscalerRead, - Update: resourceComputeRegionAutoscalerUpdate, - Delete: resourceComputeRegionAutoscalerDelete, - - Importer: 
&resource_compute_region_autoscaler_schema.ResourceImporter{ - State: resourceComputeRegionAutoscalerImport, - }, - - Timeouts: &resource_compute_region_autoscaler_schema.ResourceTimeout{ - Create: resource_compute_region_autoscaler_schema.DefaultTimeout(4 * resource_compute_region_autoscaler_time.Minute), - Update: resource_compute_region_autoscaler_schema.DefaultTimeout(4 * resource_compute_region_autoscaler_time.Minute), - Delete: resource_compute_region_autoscaler_schema.DefaultTimeout(4 * resource_compute_region_autoscaler_time.Minute), - }, - - Schema: map[string]*resource_compute_region_autoscaler_schema.Schema{ - "autoscaling_policy": { - Type: resource_compute_region_autoscaler_schema.TypeList, - Required: true, - Description: `The configuration parameters for the autoscaling algorithm. You can -define one or more of the policies for an autoscaler: cpuUtilization, -customMetricUtilizations, and loadBalancingUtilization. - -If none of these are specified, the default will be to autoscale based -on cpuUtilization to 0.6 or 60%.`, - MaxItems: 1, - Elem: &resource_compute_region_autoscaler_schema.Resource{ - Schema: map[string]*resource_compute_region_autoscaler_schema.Schema{ - "max_replicas": { - Type: resource_compute_region_autoscaler_schema.TypeInt, - Required: true, - Description: `The maximum number of instances that the autoscaler can scale up -to. This is required when creating or updating an autoscaler. The -maximum number of replicas should not be lower than minimal number -of replicas.`, - }, - "min_replicas": { - Type: resource_compute_region_autoscaler_schema.TypeInt, - Required: true, - Description: `The minimum number of replicas that the autoscaler can scale down -to. This cannot be less than 0. 
If not provided, autoscaler will -choose a default value depending on maximum number of instances -allowed.`, - }, - "cooldown_period": { - Type: resource_compute_region_autoscaler_schema.TypeInt, - Optional: true, - Description: `The number of seconds that the autoscaler should wait before it -starts collecting information from a new instance. This prevents -the autoscaler from collecting information when the instance is -initializing, during which the collected usage would not be -reliable. The default time autoscaler waits is 60 seconds. - -Virtual machine initialization times might vary because of -numerous factors. We recommend that you test how long an -instance may take to initialize. To do this, create an instance -and time the startup process.`, - Default: 60, - }, - "cpu_utilization": { - Type: resource_compute_region_autoscaler_schema.TypeList, - Computed: true, - Optional: true, - Description: `Defines the CPU utilization policy that allows the autoscaler to -scale based on the average CPU utilization of a managed instance -group.`, - MaxItems: 1, - Elem: &resource_compute_region_autoscaler_schema.Resource{ - Schema: map[string]*resource_compute_region_autoscaler_schema.Schema{ - "target": { - Type: resource_compute_region_autoscaler_schema.TypeFloat, - Required: true, - Description: `The target CPU utilization that the autoscaler should maintain. -Must be a float value in the range (0, 1]. If not specified, the -default is 0.6. - -If the CPU level is below the target utilization, the autoscaler -scales down the number of instances until it reaches the minimum -number of instances you specified or until the average CPU of -your instances reaches the target utilization. 
- -If the average CPU is above the target utilization, the autoscaler -scales up until it reaches the maximum number of instances you -specified or until the average utilization reaches the target -utilization.`, - }, - "predictive_method": { - Type: resource_compute_region_autoscaler_schema.TypeString, - Optional: true, - Description: `Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are: - -- NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics. - -- OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand.`, - Default: "NONE", - }, - }, - }, - }, - "load_balancing_utilization": { - Type: resource_compute_region_autoscaler_schema.TypeList, - Optional: true, - Description: `Configuration parameters of autoscaling based on a load balancer.`, - MaxItems: 1, - Elem: &resource_compute_region_autoscaler_schema.Resource{ - Schema: map[string]*resource_compute_region_autoscaler_schema.Schema{ - "target": { - Type: resource_compute_region_autoscaler_schema.TypeFloat, - Required: true, - Description: `Fraction of backend capacity utilization (set in HTTP(s) load -balancing configuration) that autoscaler should maintain. Must -be a positive float value. If not defined, the default is 0.8.`, - }, - }, - }, - }, - "metric": { - Type: resource_compute_region_autoscaler_schema.TypeList, - Optional: true, - Description: `Configuration parameters of autoscaling based on a custom metric.`, - Elem: &resource_compute_region_autoscaler_schema.Resource{ - Schema: map[string]*resource_compute_region_autoscaler_schema.Schema{ - "name": { - Type: resource_compute_region_autoscaler_schema.TypeString, - Required: true, - Description: `The identifier (type) of the Stackdriver Monitoring metric. -The metric cannot have negative values. 
- -The metric must have a value type of INT64 or DOUBLE.`, - }, - "target": { - Type: resource_compute_region_autoscaler_schema.TypeFloat, - Optional: true, - Description: `The target value of the metric that autoscaler should -maintain. This must be a positive value. A utilization -metric scales number of virtual machines handling requests -to increase or decrease proportionally to the metric. - -For example, a good metric to use as a utilizationTarget is -www.googleapis.com/compute/instance/network/received_bytes_count. -The autoscaler will work to keep this value constant for each -of the instances.`, - }, - "type": { - Type: resource_compute_region_autoscaler_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_autoscaler_validation.StringInSlice([]string{"GAUGE", "DELTA_PER_SECOND", "DELTA_PER_MINUTE", ""}, false), - Description: `Defines how target utilization value is expressed for a -Stackdriver Monitoring metric. Possible values: ["GAUGE", "DELTA_PER_SECOND", "DELTA_PER_MINUTE"]`, - }, - }, - }, - }, - "mode": { - Type: resource_compute_region_autoscaler_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_autoscaler_validation.StringInSlice([]string{"OFF", "ONLY_UP", "ON", ""}, false), - Description: `Defines operating mode for this policy. 
Default value: "ON" Possible values: ["OFF", "ONLY_UP", "ON"]`, - Default: "ON", - }, - "scale_in_control": { - Type: resource_compute_region_autoscaler_schema.TypeList, - Optional: true, - Description: `Defines scale in controls to reduce the risk of response latency -and outages due to abrupt scale-in events`, - MaxItems: 1, - Elem: &resource_compute_region_autoscaler_schema.Resource{ - Schema: map[string]*resource_compute_region_autoscaler_schema.Schema{ - "max_scaled_in_replicas": { - Type: resource_compute_region_autoscaler_schema.TypeList, - Optional: true, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_compute_region_autoscaler_schema.Resource{ - Schema: map[string]*resource_compute_region_autoscaler_schema.Schema{ - "fixed": { - Type: resource_compute_region_autoscaler_schema.TypeInt, - Optional: true, - Description: `Specifies a fixed number of VM instances. This must be a positive -integer.`, - AtLeastOneOf: []string{"autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas.0.fixed", "autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas.0.percent"}, - }, - "percent": { - Type: resource_compute_region_autoscaler_schema.TypeInt, - Optional: true, - Description: `Specifies a percentage of instances between 0 to 100%, inclusive. 
-For example, specify 80 for 80%.`, - AtLeastOneOf: []string{"autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas.0.fixed", "autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas.0.percent"}, - }, - }, - }, - AtLeastOneOf: []string{"autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas", "autoscaling_policy.0.scale_in_control.0.time_window_sec"}, - }, - "time_window_sec": { - Type: resource_compute_region_autoscaler_schema.TypeInt, - Optional: true, - Description: `How long back autoscaling should look when computing recommendations -to include directives regarding slower scale down, as described above.`, - AtLeastOneOf: []string{"autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas", "autoscaling_policy.0.scale_in_control.0.time_window_sec"}, - }, - }, - }, - }, - "scaling_schedules": { - Type: resource_compute_region_autoscaler_schema.TypeSet, - Optional: true, - Description: `Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap.`, - Elem: &resource_compute_region_autoscaler_schema.Resource{ - Schema: map[string]*resource_compute_region_autoscaler_schema.Schema{ - "name": { - Type: resource_compute_region_autoscaler_schema.TypeString, - Required: true, - }, - "duration_sec": { - Type: resource_compute_region_autoscaler_schema.TypeInt, - Required: true, - Description: `The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300.`, - }, - "min_required_replicas": { - Type: resource_compute_region_autoscaler_schema.TypeInt, - Required: true, - Description: `Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule.`, - }, - "schedule": { - Type: resource_compute_region_autoscaler_schema.TypeString, - Required: true, - Description: `The start timestamps of time intervals when this scaling schedule should provide a scaling signal. 
This field uses the extended cron format (with an optional year field).`, - }, - "description": { - Type: resource_compute_region_autoscaler_schema.TypeString, - Optional: true, - Description: `A description of a scaling schedule.`, - }, - "disabled": { - Type: resource_compute_region_autoscaler_schema.TypeBool, - Optional: true, - Description: `A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect.`, - Default: false, - }, - "time_zone": { - Type: resource_compute_region_autoscaler_schema.TypeString, - Optional: true, - Description: `The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.`, - Default: "UTC", - }, - }, - }, - }, - }, - }, - }, - "name": { - Type: resource_compute_region_autoscaler_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCPName, - Description: `Name of the resource. The name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "target": { - Type: resource_compute_region_autoscaler_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the managed instance group that this autoscaler will scale.`, - }, - "description": { - Type: resource_compute_region_autoscaler_schema.TypeString, - Optional: true, - Description: `An optional description of this resource.`, - }, - "region": { - Type: resource_compute_region_autoscaler_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the region where the instance group resides.`, - }, - "creation_timestamp": { - Type: resource_compute_region_autoscaler_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: resource_compute_region_autoscaler_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_region_autoscaler_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRegionAutoscalerCreate(d *resource_compute_region_autoscaler_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeRegionAutoscalerName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_autoscaler_reflect.ValueOf(nameProp)) && (ok || !resource_compute_region_autoscaler_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := 
expandComputeRegionAutoscalerDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_region_autoscaler_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_region_autoscaler_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - autoscalingPolicyProp, err := expandComputeRegionAutoscalerAutoscalingPolicy(d.Get("autoscaling_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("autoscaling_policy"); !isEmptyValue(resource_compute_region_autoscaler_reflect.ValueOf(autoscalingPolicyProp)) && (ok || !resource_compute_region_autoscaler_reflect.DeepEqual(v, autoscalingPolicyProp)) { - obj["autoscalingPolicy"] = autoscalingPolicyProp - } - targetProp, err := expandComputeRegionAutoscalerTarget(d.Get("target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target"); !isEmptyValue(resource_compute_region_autoscaler_reflect.ValueOf(targetProp)) && (ok || !resource_compute_region_autoscaler_reflect.DeepEqual(v, targetProp)) { - obj["target"] = targetProp - } - regionProp, err := expandComputeRegionAutoscalerRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_region_autoscaler_reflect.ValueOf(regionProp)) && (ok || !resource_compute_region_autoscaler_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/autoscalers") - if err != nil { - return err - } - - resource_compute_region_autoscaler_log.Printf("[DEBUG] Creating new RegionAutoscaler: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_autoscaler_fmt.Errorf("Error fetching project for RegionAutoscaler: %s", err) - } - billingProject = project - 
- if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_autoscaler_schema.TimeoutCreate)) - if err != nil { - return resource_compute_region_autoscaler_fmt.Errorf("Error creating RegionAutoscaler: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/autoscalers/{{name}}") - if err != nil { - return resource_compute_region_autoscaler_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating RegionAutoscaler", userAgent, - d.Timeout(resource_compute_region_autoscaler_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_region_autoscaler_fmt.Errorf("Error waiting to create RegionAutoscaler: %s", err) - } - - resource_compute_region_autoscaler_log.Printf("[DEBUG] Finished creating RegionAutoscaler %q: %#v", d.Id(), res) - - return resourceComputeRegionAutoscalerRead(d, meta) -} - -func resourceComputeRegionAutoscalerRead(d *resource_compute_region_autoscaler_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/autoscalers/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_autoscaler_fmt.Errorf("Error fetching project for RegionAutoscaler: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, 
resource_compute_region_autoscaler_fmt.Sprintf("ComputeRegionAutoscaler %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_region_autoscaler_fmt.Errorf("Error reading RegionAutoscaler: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeRegionAutoscalerCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_region_autoscaler_fmt.Errorf("Error reading RegionAutoscaler: %s", err) - } - if err := d.Set("name", flattenComputeRegionAutoscalerName(res["name"], d, config)); err != nil { - return resource_compute_region_autoscaler_fmt.Errorf("Error reading RegionAutoscaler: %s", err) - } - if err := d.Set("description", flattenComputeRegionAutoscalerDescription(res["description"], d, config)); err != nil { - return resource_compute_region_autoscaler_fmt.Errorf("Error reading RegionAutoscaler: %s", err) - } - if err := d.Set("autoscaling_policy", flattenComputeRegionAutoscalerAutoscalingPolicy(res["autoscalingPolicy"], d, config)); err != nil { - return resource_compute_region_autoscaler_fmt.Errorf("Error reading RegionAutoscaler: %s", err) - } - if err := d.Set("target", flattenComputeRegionAutoscalerTarget(res["target"], d, config)); err != nil { - return resource_compute_region_autoscaler_fmt.Errorf("Error reading RegionAutoscaler: %s", err) - } - if err := d.Set("region", flattenComputeRegionAutoscalerRegion(res["region"], d, config)); err != nil { - return resource_compute_region_autoscaler_fmt.Errorf("Error reading RegionAutoscaler: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_region_autoscaler_fmt.Errorf("Error reading RegionAutoscaler: %s", err) - } - - return nil -} - -func resourceComputeRegionAutoscalerUpdate(d *resource_compute_region_autoscaler_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_autoscaler_fmt.Errorf("Error fetching project for RegionAutoscaler: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := expandComputeRegionAutoscalerName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_autoscaler_reflect.ValueOf(v)) && (ok || !resource_compute_region_autoscaler_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeRegionAutoscalerDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_region_autoscaler_reflect.ValueOf(v)) && (ok || !resource_compute_region_autoscaler_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - autoscalingPolicyProp, err := expandComputeRegionAutoscalerAutoscalingPolicy(d.Get("autoscaling_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("autoscaling_policy"); !isEmptyValue(resource_compute_region_autoscaler_reflect.ValueOf(v)) && (ok || !resource_compute_region_autoscaler_reflect.DeepEqual(v, autoscalingPolicyProp)) { - obj["autoscalingPolicy"] = autoscalingPolicyProp - } - targetProp, err := expandComputeRegionAutoscalerTarget(d.Get("target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target"); !isEmptyValue(resource_compute_region_autoscaler_reflect.ValueOf(v)) && (ok || !resource_compute_region_autoscaler_reflect.DeepEqual(v, targetProp)) { - obj["target"] = targetProp - } - regionProp, err := expandComputeRegionAutoscalerRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); 
!isEmptyValue(resource_compute_region_autoscaler_reflect.ValueOf(v)) && (ok || !resource_compute_region_autoscaler_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/autoscalers?autoscaler={{name}}") - if err != nil { - return err - } - - resource_compute_region_autoscaler_log.Printf("[DEBUG] Updating RegionAutoscaler %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_autoscaler_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_region_autoscaler_fmt.Errorf("Error updating RegionAutoscaler %q: %s", d.Id(), err) - } else { - resource_compute_region_autoscaler_log.Printf("[DEBUG] Finished updating RegionAutoscaler %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating RegionAutoscaler", userAgent, - d.Timeout(resource_compute_region_autoscaler_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeRegionAutoscalerRead(d, meta) -} - -func resourceComputeRegionAutoscalerDelete(d *resource_compute_region_autoscaler_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_autoscaler_fmt.Errorf("Error fetching project for RegionAutoscaler: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/autoscalers/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_region_autoscaler_log.Printf("[DEBUG] Deleting RegionAutoscaler %q", 
d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_autoscaler_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionAutoscaler") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting RegionAutoscaler", userAgent, - d.Timeout(resource_compute_region_autoscaler_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_region_autoscaler_log.Printf("[DEBUG] Finished deleting RegionAutoscaler %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionAutoscalerImport(d *resource_compute_region_autoscaler_schema.ResourceData, meta interface{}) ([]*resource_compute_region_autoscaler_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/autoscalers/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/autoscalers/{{name}}") - if err != nil { - return nil, resource_compute_region_autoscaler_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_region_autoscaler_schema.ResourceData{d}, nil -} - -func flattenComputeRegionAutoscalerCreationTimestamp(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionAutoscalerName(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionAutoscalerDescription(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicy(v 
interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["min_replicas"] = - flattenComputeRegionAutoscalerAutoscalingPolicyMinReplicas(original["minNumReplicas"], d, config) - transformed["max_replicas"] = - flattenComputeRegionAutoscalerAutoscalingPolicyMaxReplicas(original["maxNumReplicas"], d, config) - transformed["cooldown_period"] = - flattenComputeRegionAutoscalerAutoscalingPolicyCooldownPeriod(original["coolDownPeriodSec"], d, config) - transformed["mode"] = - flattenComputeRegionAutoscalerAutoscalingPolicyMode(original["mode"], d, config) - transformed["scale_in_control"] = - flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControl(original["scaleInControl"], d, config) - transformed["cpu_utilization"] = - flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilization(original["cpuUtilization"], d, config) - transformed["metric"] = - flattenComputeRegionAutoscalerAutoscalingPolicyMetric(original["customMetricUtilizations"], d, config) - transformed["load_balancing_utilization"] = - flattenComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(original["loadBalancingUtilization"], d, config) - transformed["scaling_schedules"] = - flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedules(original["scalingSchedules"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyMinReplicas(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_autoscaler_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func 
flattenComputeRegionAutoscalerAutoscalingPolicyMaxReplicas(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_autoscaler_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_autoscaler_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyMode(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControl(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["max_scaled_in_replicas"] = - flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(original["maxScaledInReplicas"], d, config) - transformed["time_window_sec"] = - flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(original["timeWindowSec"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := 
v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["fixed"] = - flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(original["fixed"], d, config) - transformed["percent"] = - flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(original["percent"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_autoscaler_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_autoscaler_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_autoscaler_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) 
interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["target"] = - flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationTarget(original["utilizationTarget"], d, config) - transformed["predictive_method"] = - flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(original["predictiveMethod"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationTarget(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(resource_compute_region_autoscaler_reflect.ValueOf(v)) { - return "NONE" - } - - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenComputeRegionAutoscalerAutoscalingPolicyMetricName(original["metric"], d, config), - "target": flattenComputeRegionAutoscalerAutoscalingPolicyMetricTarget(original["utilizationTarget"], d, config), - "type": flattenComputeRegionAutoscalerAutoscalingPolicyMetricType(original["utilizationTargetType"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyMetricName(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyMetricTarget(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyMetricType(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["target"] = - flattenComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(original["utilizationTarget"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedules(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.(map[string]interface{}) - transformed := make([]interface{}, 0, len(l)) - for k, raw := range l { - original := raw.(map[string]interface{}) - transformed = append(transformed, map[string]interface{}{ - "name": k, - "min_required_replicas": flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplicas(original["minRequiredReplicas"], d, config), - "schedule": flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesSchedule(original["schedule"], d, config), - "time_zone": flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(original["timeZone"], d, config), - "duration_sec": 
flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(original["durationSec"], d, config), - "disabled": flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDisabled(original["disabled"], d, config), - "description": flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDescription(original["description"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplicas(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_autoscaler_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesSchedule(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_autoscaler_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDisabled(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDescription(v 
interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionAutoscalerTarget(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionAutoscalerRegion(v interface{}, d *resource_compute_region_autoscaler_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeRegionAutoscalerName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMinReplicas, err := expandComputeRegionAutoscalerAutoscalingPolicyMinReplicas(original["min_replicas"], d, config) - if err != nil { - return nil, err - } else { - transformed["minNumReplicas"] = transformedMinReplicas - } - - transformedMaxReplicas, err := expandComputeRegionAutoscalerAutoscalingPolicyMaxReplicas(original["max_replicas"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedMaxReplicas); val.IsValid() && !isEmptyValue(val) { - transformed["maxNumReplicas"] = transformedMaxReplicas - } - - transformedCooldownPeriod, err := expandComputeRegionAutoscalerAutoscalingPolicyCooldownPeriod(original["cooldown_period"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedCooldownPeriod); val.IsValid() && 
!isEmptyValue(val) { - transformed["coolDownPeriodSec"] = transformedCooldownPeriod - } - - transformedMode, err := expandComputeRegionAutoscalerAutoscalingPolicyMode(original["mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedMode); val.IsValid() && !isEmptyValue(val) { - transformed["mode"] = transformedMode - } - - transformedScaleInControl, err := expandComputeRegionAutoscalerAutoscalingPolicyScaleInControl(original["scale_in_control"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedScaleInControl); val.IsValid() && !isEmptyValue(val) { - transformed["scaleInControl"] = transformedScaleInControl - } - - transformedCpuUtilization, err := expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilization(original["cpu_utilization"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedCpuUtilization); val.IsValid() && !isEmptyValue(val) { - transformed["cpuUtilization"] = transformedCpuUtilization - } - - transformedMetric, err := expandComputeRegionAutoscalerAutoscalingPolicyMetric(original["metric"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedMetric); val.IsValid() && !isEmptyValue(val) { - transformed["customMetricUtilizations"] = transformedMetric - } - - transformedLoadBalancingUtilization, err := expandComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(original["load_balancing_utilization"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedLoadBalancingUtilization); val.IsValid() && !isEmptyValue(val) { - transformed["loadBalancingUtilization"] = transformedLoadBalancingUtilization - } - - transformedScalingSchedules, err := 
expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedules(original["scaling_schedules"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedScalingSchedules); val.IsValid() && !isEmptyValue(val) { - transformed["scalingSchedules"] = transformedScalingSchedules - } - - return transformed, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyMinReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyMaxReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyCooldownPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyScaleInControl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaxScaledInReplicas, err := expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(original["max_scaled_in_replicas"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedMaxScaledInReplicas); val.IsValid() && !isEmptyValue(val) { - transformed["maxScaledInReplicas"] = transformedMaxScaledInReplicas - } - - transformedTimeWindowSec, err := expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(original["time_window_sec"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_region_autoscaler_reflect.ValueOf(transformedTimeWindowSec); val.IsValid() && !isEmptyValue(val) { - transformed["timeWindowSec"] = transformedTimeWindowSec - } - - return transformed, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFixed, err := expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(original["fixed"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedFixed); val.IsValid() && !isEmptyValue(val) { - transformed["fixed"] = transformedFixed - } - - transformedPercent, err := expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(original["percent"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedPercent); val.IsValid() && !isEmptyValue(val) { - transformed["percent"] = transformedPercent - } - - return transformed, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasFixed(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlMaxScaledInReplicasPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyScaleInControlTimeWindowSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilization(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTarget, err := expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationTarget(original["target"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedTarget); val.IsValid() && !isEmptyValue(val) { - transformed["utilizationTarget"] = transformedTarget - } - - transformedPredictiveMethod, err := expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(original["predictive_method"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedPredictiveMethod); val.IsValid() && !isEmptyValue(val) { - transformed["predictiveMethod"] = transformedPredictiveMethod - } - - return transformed, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyCpuUtilizationPredictiveMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyMetric(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandComputeRegionAutoscalerAutoscalingPolicyMetricName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["metric"] 
= transformedName - } - - transformedTarget, err := expandComputeRegionAutoscalerAutoscalingPolicyMetricTarget(original["target"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedTarget); val.IsValid() && !isEmptyValue(val) { - transformed["utilizationTarget"] = transformedTarget - } - - transformedType, err := expandComputeRegionAutoscalerAutoscalingPolicyMetricType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["utilizationTargetType"] = transformedType - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyMetricName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyMetricTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyMetricType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTarget, err := expandComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(original["target"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedTarget); val.IsValid() && !isEmptyValue(val) { - transformed["utilizationTarget"] = transformedTarget - } - - return transformed, nil -} - -func 
expandComputeRegionAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedules(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - m := make(map[string]interface{}) - for _, raw := range v.(*resource_compute_region_autoscaler_schema.Set).List() { - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMinRequiredReplicas, err := expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplicas(original["min_required_replicas"], d, config) - if err != nil { - return nil, err - } else { - transformed["minRequiredReplicas"] = transformedMinRequiredReplicas - } - - transformedSchedule, err := expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesSchedule(original["schedule"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedSchedule); val.IsValid() && !isEmptyValue(val) { - transformed["schedule"] = transformedSchedule - } - - transformedTimeZone, err := expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(original["time_zone"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedTimeZone); val.IsValid() && !isEmptyValue(val) { - transformed["timeZone"] = transformedTimeZone - } - - transformedDurationSec, err := expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(original["duration_sec"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedDurationSec); val.IsValid() && !isEmptyValue(val) { - transformed["durationSec"] = transformedDurationSec - } - - 
transformedDisabled, err := expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDisabled(original["disabled"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { - transformed["disabled"] = transformedDisabled - } - - transformedDescription, err := expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_autoscaler_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedName, err := expandString(original["name"], d, config) - if err != nil { - return nil, err - } - m[transformedName] = transformed - } - return m, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesMinRequiredReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDurationSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerAutoscalingPolicyScalingSchedulesDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeRegionAutoscalerTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionAutoscalerRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_region_autoscaler_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -var backendServiceOnlyManagedFieldNames = []string{ - "capacity_scaler", - "max_connections", - "max_connections_per_instance", - "max_connections_per_endpoint", - "max_rate", - "max_rate_per_instance", - "max_rate_per_endpoint", - "max_utilization", -} - -func validateManagedBackendServiceBackends(backends []interface{}, d *resource_compute_region_backend_service_schema.ResourceDiff) error { - sum := 0.0 - - for _, b := range backends { - if b == nil { - continue - } - backend := b.(map[string]interface{}) - if v, ok := backend["capacity_scaler"]; ok && v != nil { - sum += v.(float64) - } else { - return resource_compute_region_backend_service_fmt.Errorf("capacity_scaler is required for each backend in managed backend service") - } - } - if sum == 0.0 { - return resource_compute_region_backend_service_fmt.Errorf("managed backend service must have at least one non-zero capacity_scaler for backends") - } - return nil -} - -func validateNonManagedBackendServiceBackends(backends []interface{}, d *resource_compute_region_backend_service_schema.ResourceDiff) error { - for _, b := range backends { - if b == nil { - continue - } - backend := b.(map[string]interface{}) - for _, fn := range backendServiceOnlyManagedFieldNames { - if v, ok := backend[fn]; ok && !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) { - return resource_compute_region_backend_service_fmt.Errorf("%q cannot be set for non-managed backend service, found value %v", fn, v) - } - } - } - 
return nil -} - -func customDiffRegionBackendService(_ resource_compute_region_backend_service_context.Context, d *resource_compute_region_backend_service_schema.ResourceDiff, meta interface{}) error { - v, ok := d.GetOk("backend") - if !ok { - return nil - } - if v == nil { - return nil - } - - backends := v.(*resource_compute_region_backend_service_schema.Set).List() - if len(backends) == 0 { - return nil - } - - switch d.Get("load_balancing_scheme").(string) { - case "INTERNAL", "EXTERNAL": - return validateNonManagedBackendServiceBackends(backends, d) - default: - return validateManagedBackendServiceBackends(backends, d) - } -} - -func resourceComputeRegionBackendService() *resource_compute_region_backend_service_schema.Resource { - return &resource_compute_region_backend_service_schema.Resource{ - Create: resourceComputeRegionBackendServiceCreate, - Read: resourceComputeRegionBackendServiceRead, - Update: resourceComputeRegionBackendServiceUpdate, - Delete: resourceComputeRegionBackendServiceDelete, - - Importer: &resource_compute_region_backend_service_schema.ResourceImporter{ - State: resourceComputeRegionBackendServiceImport, - }, - - Timeouts: &resource_compute_region_backend_service_schema.ResourceTimeout{ - Create: resource_compute_region_backend_service_schema.DefaultTimeout(4 * resource_compute_region_backend_service_time.Minute), - Update: resource_compute_region_backend_service_schema.DefaultTimeout(4 * resource_compute_region_backend_service_time.Minute), - Delete: resource_compute_region_backend_service_schema.DefaultTimeout(4 * resource_compute_region_backend_service_time.Minute), - }, - - SchemaVersion: 1, - MigrateState: migrateStateNoop, - CustomizeDiff: customDiffRegionBackendService, - - Schema: map[string]*resource_compute_region_backend_service_schema.Schema{ - "name": { - Type: resource_compute_region_backend_service_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. 
Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "affinity_cookie_ttl_sec": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `Lifetime of cookies in seconds if session_affinity is -GENERATED_COOKIE. If set to 0, the cookie is non-persistent and lasts -only until the end of the browser session (or equivalent). The -maximum allowed value for TTL is one day. - -When the load balancing scheme is INTERNAL, this field is not used.`, - }, - "backend": { - Type: resource_compute_region_backend_service_schema.TypeSet, - Optional: true, - Description: `The set of backends that serve this RegionBackendService.`, - Elem: computeRegionBackendServiceBackendSchema(), - Set: resourceGoogleComputeBackendServiceBackendHash, - }, - "cdn_policy": { - Type: resource_compute_region_backend_service_schema.TypeList, - Computed: true, - Optional: true, - Description: `Cloud CDN configuration for this BackendService.`, - MaxItems: 1, - Elem: &resource_compute_region_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_region_backend_service_schema.Schema{ - "cache_key_policy": { - Type: resource_compute_region_backend_service_schema.TypeList, - Optional: true, - Description: `The CacheKeyPolicy for this CdnPolicy.`, - MaxItems: 1, - Elem: &resource_compute_region_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_region_backend_service_schema.Schema{ - "include_host": { - Type: resource_compute_region_backend_service_schema.TypeBool, - Optional: true, - Description: `If true requests to different hosts will be cached 
separately.`, - AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.include_host", "cdn_policy.0.cache_key_policy.0.include_protocol", "cdn_policy.0.cache_key_policy.0.include_query_string", "cdn_policy.0.cache_key_policy.0.query_string_blacklist", "cdn_policy.0.cache_key_policy.0.query_string_whitelist"}, - }, - "include_protocol": { - Type: resource_compute_region_backend_service_schema.TypeBool, - Optional: true, - Description: `If true, http and https requests will be cached separately.`, - AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.include_host", "cdn_policy.0.cache_key_policy.0.include_protocol", "cdn_policy.0.cache_key_policy.0.include_query_string", "cdn_policy.0.cache_key_policy.0.query_string_blacklist", "cdn_policy.0.cache_key_policy.0.query_string_whitelist"}, - }, - "include_query_string": { - Type: resource_compute_region_backend_service_schema.TypeBool, - Optional: true, - Description: `If true, include query string parameters in the cache key -according to query_string_whitelist and -query_string_blacklist. If neither is set, the entire query -string will be included. - -If false, the query string will be excluded from the cache -key entirely.`, - AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.include_host", "cdn_policy.0.cache_key_policy.0.include_protocol", "cdn_policy.0.cache_key_policy.0.include_query_string", "cdn_policy.0.cache_key_policy.0.query_string_blacklist", "cdn_policy.0.cache_key_policy.0.query_string_whitelist"}, - }, - "query_string_blacklist": { - Type: resource_compute_region_backend_service_schema.TypeSet, - Optional: true, - Description: `Names of query string parameters to exclude in cache keys. - -All other parameters will be included. Either specify -query_string_whitelist or query_string_blacklist, not both. 
-'&' and '=' will be percent encoded and not treated as -delimiters.`, - Elem: &resource_compute_region_backend_service_schema.Schema{ - Type: resource_compute_region_backend_service_schema.TypeString, - }, - Set: resource_compute_region_backend_service_schema.HashString, - AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.include_host", "cdn_policy.0.cache_key_policy.0.include_protocol", "cdn_policy.0.cache_key_policy.0.include_query_string", "cdn_policy.0.cache_key_policy.0.query_string_blacklist", "cdn_policy.0.cache_key_policy.0.query_string_whitelist"}, - }, - "query_string_whitelist": { - Type: resource_compute_region_backend_service_schema.TypeSet, - Optional: true, - Description: `Names of query string parameters to include in cache keys. - -All other parameters will be excluded. Either specify -query_string_whitelist or query_string_blacklist, not both. -'&' and '=' will be percent encoded and not treated as -delimiters.`, - Elem: &resource_compute_region_backend_service_schema.Schema{ - Type: resource_compute_region_backend_service_schema.TypeString, - }, - Set: resource_compute_region_backend_service_schema.HashString, - AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy.0.include_host", "cdn_policy.0.cache_key_policy.0.include_protocol", "cdn_policy.0.cache_key_policy.0.include_query_string", "cdn_policy.0.cache_key_policy.0.query_string_blacklist", "cdn_policy.0.cache_key_policy.0.query_string_whitelist"}, - }, - }, - }, - AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy", "cdn_policy.0.signed_url_cache_max_age_sec"}, - }, - "cache_mode": { - Type: resource_compute_region_backend_service_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_compute_region_backend_service_validation.StringInSlice([]string{"USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC", ""}, false), - Description: `Specifies the cache setting for all responses from this backend. 
-The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC Possible values: ["USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "CACHE_ALL_STATIC"]`, - }, - "client_ttl": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Computed: true, - Optional: true, - Description: `Specifies the maximum allowed TTL for cached content served by this origin.`, - }, - "default_ttl": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Computed: true, - Optional: true, - Description: `Specifies the default TTL for cached content served by this origin for responses -that do not have an existing valid TTL (max-age or s-max-age).`, - }, - "max_ttl": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Computed: true, - Optional: true, - Description: `Specifies the maximum allowed TTL for cached content served by this origin.`, - }, - "negative_caching": { - Type: resource_compute_region_backend_service_schema.TypeBool, - Computed: true, - Optional: true, - Description: `Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects.`, - }, - "negative_caching_policy": { - Type: resource_compute_region_backend_service_schema.TypeList, - Optional: true, - Description: `Sets a cache TTL for the specified HTTP status code. negativeCaching must be enabled to configure negativeCachingPolicy. -Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's default cache TTLs.`, - Elem: &resource_compute_region_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_region_backend_service_schema.Schema{ - "code": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `The HTTP status code to define a TTL against. 
Only HTTP status codes 300, 301, 308, 404, 405, 410, 421, 451 and 501 -can be specified as values, and you cannot specify a status code more than once.`, - }, - }, - }, - }, - "serve_while_stale": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Computed: true, - Optional: true, - Description: `Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache.`, - }, - "signed_url_cache_max_age_sec": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `Maximum number of seconds the response to a signed URL request -will be considered fresh, defaults to 1hr (3600s). After this -time period, the response will be revalidated before -being served. - -When serving responses to signed URL requests, Cloud CDN will -internally behave as though all responses from this backend had a -"Cache-Control: public, max-age=[TTL]" header, regardless of any -existing Cache-Control header. The actual headers served in -responses will not be altered.`, - Default: 3600, - AtLeastOneOf: []string{"cdn_policy.0.cache_key_policy", "cdn_policy.0.signed_url_cache_max_age_sec"}, - }, - }, - }, - }, - "circuit_breakers": { - Type: resource_compute_region_backend_service_schema.TypeList, - Optional: true, - Description: `Settings controlling the volume of connections to a backend service. This field -is applicable only when the 'load_balancing_scheme' is set to INTERNAL_MANAGED -and the 'protocol' is set to HTTP, HTTPS, or HTTP2.`, - MaxItems: 1, - Elem: &resource_compute_region_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_region_backend_service_schema.Schema{ - "max_connections": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `The maximum number of connections to the backend cluster. 
-Defaults to 1024.`, - Default: 1024, - AtLeastOneOf: []string{"circuit_breakers.0.max_requests_per_connection", "circuit_breakers.0.max_connections", "circuit_breakers.0.max_pending_requests", "circuit_breakers.0.max_requests", "circuit_breakers.0.max_retries"}, - }, - "max_pending_requests": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `The maximum number of pending requests to the backend cluster. -Defaults to 1024.`, - Default: 1024, - AtLeastOneOf: []string{"circuit_breakers.0.max_requests_per_connection", "circuit_breakers.0.max_connections", "circuit_breakers.0.max_pending_requests", "circuit_breakers.0.max_requests", "circuit_breakers.0.max_retries"}, - }, - "max_requests": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `The maximum number of parallel requests to the backend cluster. -Defaults to 1024.`, - Default: 1024, - AtLeastOneOf: []string{"circuit_breakers.0.max_requests_per_connection", "circuit_breakers.0.max_connections", "circuit_breakers.0.max_pending_requests", "circuit_breakers.0.max_requests", "circuit_breakers.0.max_retries"}, - }, - "max_requests_per_connection": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `Maximum requests for a single backend connection. This parameter -is respected by both the HTTP/1.1 and HTTP/2 implementations. If -not specified, there is no limit. Setting this parameter to 1 -will effectively disable keep alive.`, - AtLeastOneOf: []string{"circuit_breakers.0.max_requests_per_connection", "circuit_breakers.0.max_connections", "circuit_breakers.0.max_pending_requests", "circuit_breakers.0.max_requests", "circuit_breakers.0.max_retries"}, - }, - "max_retries": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `The maximum number of parallel retries to the backend cluster. 
-Defaults to 3.`, - Default: 3, - AtLeastOneOf: []string{"circuit_breakers.0.max_requests_per_connection", "circuit_breakers.0.max_connections", "circuit_breakers.0.max_pending_requests", "circuit_breakers.0.max_requests", "circuit_breakers.0.max_retries"}, - }, - }, - }, - }, - "connection_draining_timeout_sec": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `Time for which instance will be drained (not accept new -connections, but still work to finish started).`, - Default: 0, - }, - - "consistent_hash": { - Type: resource_compute_region_backend_service_schema.TypeList, - Optional: true, - Description: `Consistent Hash-based load balancing can be used to provide soft session -affinity based on HTTP headers, cookies or other properties. This load balancing -policy is applicable only for HTTP connections. The affinity to a particular -destination host will be lost when one or more hosts are added/removed from the -destination service. This field specifies parameters that control consistent -hashing. -This field only applies when all of the following are true - - * 'load_balancing_scheme' is set to INTERNAL_MANAGED - * 'protocol' is set to HTTP, HTTPS, or HTTP2 - * 'locality_lb_policy' is set to MAGLEV or RING_HASH`, - MaxItems: 1, - Elem: &resource_compute_region_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_region_backend_service_schema.Schema{ - "http_cookie": { - Type: resource_compute_region_backend_service_schema.TypeList, - Optional: true, - Description: `Hash is based on HTTP Cookie. This field describes a HTTP cookie -that will be used as the hash key for the consistent hash load -balancer. If the cookie is not present, it will be generated. 
-This field is applicable if the sessionAffinity is set to HTTP_COOKIE.`, - MaxItems: 1, - Elem: &resource_compute_region_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_region_backend_service_schema.Schema{ - "name": { - Type: resource_compute_region_backend_service_schema.TypeString, - Optional: true, - Description: `Name of the cookie.`, - AtLeastOneOf: []string{"consistent_hash.0.http_cookie.0.ttl", "consistent_hash.0.http_cookie.0.name", "consistent_hash.0.http_cookie.0.path"}, - }, - "path": { - Type: resource_compute_region_backend_service_schema.TypeString, - Optional: true, - Description: `Path to set for the cookie.`, - AtLeastOneOf: []string{"consistent_hash.0.http_cookie.0.ttl", "consistent_hash.0.http_cookie.0.name", "consistent_hash.0.http_cookie.0.path"}, - }, - "ttl": { - Type: resource_compute_region_backend_service_schema.TypeList, - Optional: true, - Description: `Lifetime of the cookie.`, - MaxItems: 1, - Elem: &resource_compute_region_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_region_backend_service_schema.Schema{ - "seconds": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Required: true, - Description: `Span of time at a resolution of a second. -Must be from 0 to 315,576,000,000 inclusive.`, - }, - "nanos": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond -resolution. Durations less than one second are represented -with a 0 seconds field and a positive nanos field. 
Must -be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - AtLeastOneOf: []string{"consistent_hash.0.http_cookie.0.ttl", "consistent_hash.0.http_cookie.0.name", "consistent_hash.0.http_cookie.0.path"}, - }, - }, - }, - AtLeastOneOf: []string{"consistent_hash.0.http_cookie", "consistent_hash.0.http_header_name", "consistent_hash.0.minimum_ring_size"}, - }, - "http_header_name": { - Type: resource_compute_region_backend_service_schema.TypeString, - Optional: true, - Description: `The hash based on the value of the specified header field. -This field is applicable if the sessionAffinity is set to HEADER_FIELD.`, - AtLeastOneOf: []string{"consistent_hash.0.http_cookie", "consistent_hash.0.http_header_name", "consistent_hash.0.minimum_ring_size"}, - }, - "minimum_ring_size": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `The minimum number of virtual nodes to use for the hash ring. -Larger ring sizes result in more granular load -distributions. If the number of hosts in the load balancing pool -is larger than the ring size, each host will be assigned a single -virtual node. 
-Defaults to 1024.`, - Default: 1024, - AtLeastOneOf: []string{"consistent_hash.0.http_cookie", "consistent_hash.0.http_header_name", "consistent_hash.0.minimum_ring_size"}, - }, - }, - }, - }, - "description": { - Type: resource_compute_region_backend_service_schema.TypeString, - Optional: true, - Description: `An optional description of this resource.`, - }, - "enable_cdn": { - Type: resource_compute_region_backend_service_schema.TypeBool, - Optional: true, - Description: `If true, enable Cloud CDN for this RegionBackendService.`, - }, - "failover_policy": { - Type: resource_compute_region_backend_service_schema.TypeList, - Optional: true, - Description: `Policy for failovers.`, - MaxItems: 1, - Elem: &resource_compute_region_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_region_backend_service_schema.Schema{ - "disable_connection_drain_on_failover": { - Type: resource_compute_region_backend_service_schema.TypeBool, - Optional: true, - Description: `On failover or failback, this field indicates whether connection drain -will be honored. Setting this to true has the following effect: connections -to the old active pool are not drained. Connections to the new active pool -use the timeout of 10 min (currently fixed). Setting to false has the -following effect: both old and new connections will have a drain timeout -of 10 min. -This can be set to true only if the protocol is TCP. -The default is false.`, - AtLeastOneOf: []string{"failover_policy.0.disable_connection_drain_on_failover", "failover_policy.0.drop_traffic_if_unhealthy", "failover_policy.0.failover_ratio"}, - }, - "drop_traffic_if_unhealthy": { - Type: resource_compute_region_backend_service_schema.TypeBool, - Optional: true, - Description: `This option is used only when no healthy VMs are detected in the primary -and backup instance groups. When set to true, traffic is dropped. When -set to false, new connections are sent across all VMs in the primary group. 
-The default is false.`, - AtLeastOneOf: []string{"failover_policy.0.disable_connection_drain_on_failover", "failover_policy.0.drop_traffic_if_unhealthy", "failover_policy.0.failover_ratio"}, - }, - "failover_ratio": { - Type: resource_compute_region_backend_service_schema.TypeFloat, - Optional: true, - Description: `The value of the field must be in [0, 1]. If the ratio of the healthy -VMs in the primary backend is at or below this number, traffic arriving -at the load-balanced IP will be directed to the failover backend. -In case where 'failoverRatio' is not set or all the VMs in the backup -backend are unhealthy, the traffic will be directed back to the primary -backend in the "force" mode, where traffic will be spread to the healthy -VMs with the best effort, or to all VMs when no VM is healthy. -This field is only used with l4 load balancing.`, - AtLeastOneOf: []string{"failover_policy.0.disable_connection_drain_on_failover", "failover_policy.0.drop_traffic_if_unhealthy", "failover_policy.0.failover_ratio"}, - }, - }, - }, - }, - "health_checks": { - Type: resource_compute_region_backend_service_schema.TypeSet, - Optional: true, - Description: `The set of URLs to HealthCheck resources for health checking -this RegionBackendService. Currently at most one health -check can be specified. 
- -A health check must be specified unless the backend service uses an internet -or serverless NEG as a backend.`, - MinItems: 1, - MaxItems: 1, - Elem: &resource_compute_region_backend_service_schema.Schema{ - Type: resource_compute_region_backend_service_schema.TypeString, - }, - Set: selfLinkRelativePathHash, - }, - "iap": { - Type: resource_compute_region_backend_service_schema.TypeList, - Optional: true, - Description: `Settings for enabling Cloud Identity Aware Proxy`, - MaxItems: 1, - Elem: &resource_compute_region_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_region_backend_service_schema.Schema{ - "oauth2_client_id": { - Type: resource_compute_region_backend_service_schema.TypeString, - Required: true, - Description: `OAuth2 Client ID for IAP`, - }, - "oauth2_client_secret": { - Type: resource_compute_region_backend_service_schema.TypeString, - Required: true, - Description: `OAuth2 Client Secret for IAP`, - Sensitive: true, - }, - "oauth2_client_secret_sha256": { - Type: resource_compute_region_backend_service_schema.TypeString, - Computed: true, - Description: `OAuth2 Client Secret SHA-256 for IAP`, - Sensitive: true, - }, - }, - }, - }, - "load_balancing_scheme": { - Type: resource_compute_region_backend_service_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_region_backend_service_validation.StringInSlice([]string{"EXTERNAL", "INTERNAL", "INTERNAL_MANAGED", ""}, false), - Description: `Indicates what kind of load balancing this regional backend service -will be used for. A backend service created for one type of load -balancing cannot be used with the other(s). 
Default value: "INTERNAL" Possible values: ["EXTERNAL", "INTERNAL", "INTERNAL_MANAGED"]`, - Default: "INTERNAL", - }, - "locality_lb_policy": { - Type: resource_compute_region_backend_service_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_backend_service_validation.StringInSlice([]string{"ROUND_ROBIN", "LEAST_REQUEST", "RING_HASH", "RANDOM", "ORIGINAL_DESTINATION", "MAGLEV", ""}, false), - Description: `The load balancing algorithm used within the scope of the locality. -The possible values are - - -* ROUND_ROBIN - This is a simple policy in which each healthy backend - is selected in round robin order. - -* LEAST_REQUEST - An O(1) algorithm which selects two random healthy - hosts and picks the host which has fewer active requests. - -* RING_HASH - The ring/modulo hash load balancer implements consistent - hashing to backends. The algorithm has the property that the - addition/removal of a host from a set of N hosts only affects - 1/N of the requests. - -* RANDOM - The load balancer selects a random healthy host. - -* ORIGINAL_DESTINATION - Backend host is selected based on the client - connection metadata, i.e., connections are opened - to the same address as the destination address of - the incoming connection before the connection - was redirected to the load balancer. - -* MAGLEV - used as a drop in replacement for the ring hash load balancer. - Maglev is not as stable as ring hash but has faster table lookup - build times and host selection times. For more information about - Maglev, refer to https://ai.google/research/pubs/pub44824 - -This field is applicable only when the 'load_balancing_scheme' is set to -INTERNAL_MANAGED and the 'protocol' is set to HTTP, HTTPS, or HTTP2. 
Possible values: ["ROUND_ROBIN", "LEAST_REQUEST", "RING_HASH", "RANDOM", "ORIGINAL_DESTINATION", "MAGLEV"]`, - }, - "log_config": { - Type: resource_compute_region_backend_service_schema.TypeList, - Computed: true, - Optional: true, - Description: `This field denotes the logging options for the load balancer traffic served by this backend service. -If logging is enabled, logs will be exported to Stackdriver.`, - MaxItems: 1, - Elem: &resource_compute_region_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_region_backend_service_schema.Schema{ - "enable": { - Type: resource_compute_region_backend_service_schema.TypeBool, - Optional: true, - Description: `Whether to enable logging for the load balancer traffic served by this backend service.`, - AtLeastOneOf: []string{"log_config.0.enable", "log_config.0.sample_rate"}, - }, - "sample_rate": { - Type: resource_compute_region_backend_service_schema.TypeFloat, - Optional: true, - DiffSuppressFunc: suppressWhenDisabled, - Description: `This field can only be specified if logging is enabled for this backend service. The value of -the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer -where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. -The default value is 1.0.`, - AtLeastOneOf: []string{"log_config.0.enable", "log_config.0.sample_rate"}, - }, - }, - }, - }, - "network": { - Type: resource_compute_region_backend_service_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of the network to which this backend service belongs. -This field can only be specified when the load balancing scheme is set to INTERNAL.`, - }, - "outlier_detection": { - Type: resource_compute_region_backend_service_schema.TypeList, - Optional: true, - Description: `Settings controlling eviction of unhealthy hosts from the load balancing pool. 
-This field is applicable only when the 'load_balancing_scheme' is set -to INTERNAL_MANAGED and the 'protocol' is set to HTTP, HTTPS, or HTTP2.`, - MaxItems: 1, - Elem: &resource_compute_region_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_region_backend_service_schema.Schema{ - "base_ejection_time": { - Type: resource_compute_region_backend_service_schema.TypeList, - Optional: true, - Description: `The base time that a host is ejected for. The real time is equal to the base -time multiplied by the number of times the host has been ejected. Defaults to -30000ms or 30s.`, - MaxItems: 1, - Elem: &resource_compute_region_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_region_backend_service_schema.Schema{ - "seconds": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Required: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 -inclusive.`, - }, - "nanos": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations -less than one second are represented with a 0 'seconds' field and a positive -'nanos' field. 
Must be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "consecutive_errors": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `Number of errors before a host is ejected from the connection pool. When the -backend host is accessed over HTTP, a 5xx return code qualifies as an error. -Defaults to 5.`, - Default: 5, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "consecutive_gateway_failure": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `The number of consecutive gateway failures (502, 503, 504 status or connection -errors that are mapped to one of those status codes) before a consecutive -gateway failure ejection occurs. 
Defaults to 5.`, - Default: 5, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "enforcing_consecutive_errors": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `The percentage chance that a host will be actually ejected when an outlier -status is detected through consecutive 5xx. This setting can be used to disable -ejection or to ramp it up slowly. Defaults to 100.`, - Default: 100, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "enforcing_consecutive_gateway_failure": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `The percentage chance that a host will be actually ejected when an outlier -status is detected through consecutive gateway failures. This setting can be -used to disable ejection or to ramp it up slowly. 
Defaults to 0.`, - Default: 0, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "enforcing_success_rate": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `The percentage chance that a host will be actually ejected when an outlier -status is detected through success rate statistics. This setting can be used to -disable ejection or to ramp it up slowly. Defaults to 100.`, - Default: 100, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "interval": { - Type: resource_compute_region_backend_service_schema.TypeList, - Optional: true, - Description: `Time interval between ejection sweep analysis. This can result in both new -ejections as well as hosts being returned to service. 
Defaults to 10 seconds.`, - MaxItems: 1, - Elem: &resource_compute_region_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_region_backend_service_schema.Schema{ - "seconds": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Required: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 -inclusive.`, - }, - "nanos": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations -less than one second are represented with a 0 'seconds' field and a positive -'nanos' field. Must be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "max_ejection_percent": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `Maximum percentage of hosts in the load balancing pool for the backend service -that can be ejected. 
Defaults to 10%.`, - Default: 10, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "success_rate_minimum_hosts": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `The number of hosts in a cluster that must have enough request volume to detect -success rate outliers. If the number of hosts is less than this setting, outlier -detection via success rate statistics is not performed for any host in the -cluster. Defaults to 5.`, - Default: 5, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "success_rate_request_volume": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `The minimum number of total requests that must be collected in one interval (as -defined by the interval duration above) to include this host in success rate -based outlier detection. If the volume is lower than this setting, outlier -detection via success rate statistics is not performed for that host. 
Defaults -to 100.`, - Default: 100, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - "success_rate_stdev_factor": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `This factor is used to determine the ejection threshold for success rate outlier -ejection. The ejection threshold is the difference between the mean success -rate, and the product of this factor and the standard deviation of the mean -success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided -by a thousand to get a double. That is, if the desired factor is 1.9, the -runtime value should be 1900. Defaults to 1900.`, - Default: 1900, - AtLeastOneOf: []string{"outlier_detection.0.base_ejection_time", "outlier_detection.0.consecutive_errors", "outlier_detection.0.consecutive_gateway_failure", "outlier_detection.0.enforcing_consecutive_errors", "outlier_detection.0.enforcing_consecutive_gateway_failure", "outlier_detection.0.enforcing_success_rate", "outlier_detection.0.interval", "outlier_detection.0.max_ejection_percent", "outlier_detection.0.success_rate_minimum_hosts", "outlier_detection.0.success_rate_request_volume", "outlier_detection.0.success_rate_stdev_factor"}, - }, - }, - }, - }, - "port_name": { - Type: resource_compute_region_backend_service_schema.TypeString, - Computed: true, - Optional: true, - Description: `A named port on a backend instance group representing the port for -communication to the backend VMs in that group. 
Required when the -loadBalancingScheme is EXTERNAL, INTERNAL_MANAGED, or INTERNAL_SELF_MANAGED -and the backends are instance groups. The named port must be defined on each -backend instance group. This parameter has no meaning if the backends are NEGs. API sets a -default of "http" if not given. -Must be omitted when the loadBalancingScheme is INTERNAL (Internal TCP/UDP Load Balancing).`, - }, - "protocol": { - Type: resource_compute_region_backend_service_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_compute_region_backend_service_validation.StringInSlice([]string{"HTTP", "HTTPS", "HTTP2", "SSL", "TCP", "UDP", "GRPC", "UNSPECIFIED", ""}, false), - Description: `The protocol this RegionBackendService uses to communicate with backends. -The default is HTTP. **NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer -types and may result in errors if used with the GA API. Possible values: ["HTTP", "HTTPS", "HTTP2", "SSL", "TCP", "UDP", "GRPC", "UNSPECIFIED"]`, - }, - "region": { - Type: resource_compute_region_backend_service_schema.TypeString, - Computed: true, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Region in which the created backend service should reside. -If it is not provided, the provider region is used.`, - }, - "session_affinity": { - Type: resource_compute_region_backend_service_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_compute_region_backend_service_validation.StringInSlice([]string{"NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE", "CLIENT_IP_NO_DESTINATION", ""}, false), - Description: `Type of session affinity to use. The default is NONE. Session affinity is -not applicable if the protocol is UDP. 
Possible values: ["NONE", "CLIENT_IP", "CLIENT_IP_PORT_PROTO", "CLIENT_IP_PROTO", "GENERATED_COOKIE", "HEADER_FIELD", "HTTP_COOKIE", "CLIENT_IP_NO_DESTINATION"]`, - }, - "timeout_sec": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Computed: true, - Optional: true, - Description: `How many seconds to wait for the backend before considering it a -failed request. Default is 30 seconds. Valid range is [1, 86400].`, - }, - "creation_timestamp": { - Type: resource_compute_region_backend_service_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "fingerprint": { - Type: resource_compute_region_backend_service_schema.TypeString, - Computed: true, - Description: `Fingerprint of this resource. A hash of the contents stored in this -object. This field is used in optimistic locking.`, - }, - "project": { - Type: resource_compute_region_backend_service_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_region_backend_service_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func computeRegionBackendServiceBackendSchema() *resource_compute_region_backend_service_schema.Resource { - return &resource_compute_region_backend_service_schema.Resource{ - Schema: map[string]*resource_compute_region_backend_service_schema.Schema{ - "group": { - Type: resource_compute_region_backend_service_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The fully-qualified URL of an Instance Group or Network Endpoint -Group resource. In case of instance group this defines the list -of instances that serve traffic. Member virtual machine -instances from each instance group must live in the same zone as -the instance group itself. No two backends in a backend service -are allowed to use same Instance Group resource. - -For Network Endpoint Groups this defines list of endpoints. 
All -endpoints of Network Endpoint Group must be hosted on instances -located in the same zone as the Network Endpoint Group. - -Backend services cannot mix Instance Group and -Network Endpoint Group backends. - -When the 'load_balancing_scheme' is INTERNAL, only instance groups -are supported. - -Note that you must specify an Instance Group or Network Endpoint -Group resource using the fully-qualified URL, rather than a -partial URL.`, - }, - "balancing_mode": { - Type: resource_compute_region_backend_service_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_backend_service_validation.StringInSlice([]string{"UTILIZATION", "RATE", "CONNECTION", ""}, false), - Description: `Specifies the balancing mode for this backend. Default value: "CONNECTION" Possible values: ["UTILIZATION", "RATE", "CONNECTION"]`, - Default: "CONNECTION", - }, - "capacity_scaler": { - Type: resource_compute_region_backend_service_schema.TypeFloat, - Optional: true, - Description: `A multiplier applied to the group's maximum servicing capacity -(based on UTILIZATION, RATE or CONNECTION). - -~>**NOTE**: This field cannot be set for -INTERNAL region backend services (default loadBalancingScheme), -but is required for non-INTERNAL backend service. The total -capacity_scaler for all backends must be non-zero. - -A setting of 0 means the group is completely drained, offering -0% of its available Capacity. Valid range is [0.0,1.0].`, - }, - "description": { - Type: resource_compute_region_backend_service_schema.TypeString, - Optional: true, - Description: `An optional description of this resource. -Provide this property when you create the resource.`, - }, - "failover": { - Type: resource_compute_region_backend_service_schema.TypeBool, - Computed: true, - Optional: true, - Description: `This field designates whether this is a failover backend. 
More -than one failover backend can be configured for a given RegionBackendService.`, - }, - "max_connections": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `The max number of simultaneous connections for the group. Can -be used with either CONNECTION or UTILIZATION balancing modes. -Cannot be set for INTERNAL backend services. - -For CONNECTION mode, either maxConnections or one -of maxConnectionsPerInstance or maxConnectionsPerEndpoint, -as appropriate for group type, must be set.`, - }, - "max_connections_per_endpoint": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `The max number of simultaneous connections that a single backend -network endpoint can handle. Cannot be set -for INTERNAL backend services. - -This is used to calculate the capacity of the group. Can be -used in either CONNECTION or UTILIZATION balancing modes. For -CONNECTION mode, either maxConnections or -maxConnectionsPerEndpoint must be set.`, - }, - "max_connections_per_instance": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `The max number of simultaneous connections that a single -backend instance can handle. Cannot be set for INTERNAL backend -services. - -This is used to calculate the capacity of the group. -Can be used in either CONNECTION or UTILIZATION balancing modes. -For CONNECTION mode, either maxConnections or -maxConnectionsPerInstance must be set.`, - }, - "max_rate": { - Type: resource_compute_region_backend_service_schema.TypeInt, - Optional: true, - Description: `The max requests per second (RPS) of the group. Cannot be set -for INTERNAL backend services. - -Can be used with either RATE or UTILIZATION balancing modes, -but required if RATE mode. 
Either maxRate or one -of maxRatePerInstance or maxRatePerEndpoint, as appropriate for -group type, must be set.`, - }, - "max_rate_per_endpoint": { - Type: resource_compute_region_backend_service_schema.TypeFloat, - Optional: true, - Description: `The max requests per second (RPS) that a single backend network -endpoint can handle. This is used to calculate the capacity of -the group. Can be used in either balancing mode. For RATE mode, -either maxRate or maxRatePerEndpoint must be set. Cannot be set -for INTERNAL backend services.`, - }, - "max_rate_per_instance": { - Type: resource_compute_region_backend_service_schema.TypeFloat, - Optional: true, - Description: `The max requests per second (RPS) that a single backend -instance can handle. This is used to calculate the capacity of -the group. Can be used in either balancing mode. For RATE mode, -either maxRate or maxRatePerInstance must be set. Cannot be set -for INTERNAL backend services.`, - }, - "max_utilization": { - Type: resource_compute_region_backend_service_schema.TypeFloat, - Optional: true, - Description: `Used when balancingMode is UTILIZATION. This ratio defines the -CPU utilization target for the group. Valid range is [0.0, 1.0]. 
-Cannot be set for INTERNAL backend services.`, - }, - }, - } -} - -func resourceComputeRegionBackendServiceCreate(d *resource_compute_region_backend_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - affinityCookieTtlSecProp, err := expandComputeRegionBackendServiceAffinityCookieTtlSec(d.Get("affinity_cookie_ttl_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("affinity_cookie_ttl_sec"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(affinityCookieTtlSecProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, affinityCookieTtlSecProp)) { - obj["affinityCookieTtlSec"] = affinityCookieTtlSecProp - } - backendsProp, err := expandComputeRegionBackendServiceBackend(d.Get("backend"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(backendsProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, backendsProp)) { - obj["backends"] = backendsProp - } - circuitBreakersProp, err := expandComputeRegionBackendServiceCircuitBreakers(d.Get("circuit_breakers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("circuit_breakers"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(circuitBreakersProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, circuitBreakersProp)) { - obj["circuitBreakers"] = circuitBreakersProp - } - consistentHashProp, err := expandComputeRegionBackendServiceConsistentHash(d.Get("consistent_hash"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("consistent_hash"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(consistentHashProp)) && (ok || 
!resource_compute_region_backend_service_reflect.DeepEqual(v, consistentHashProp)) { - obj["consistentHash"] = consistentHashProp - } - cdnPolicyProp, err := expandComputeRegionBackendServiceCdnPolicy(d.Get("cdn_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cdn_policy"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(cdnPolicyProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, cdnPolicyProp)) { - obj["cdnPolicy"] = cdnPolicyProp - } - connectionDrainingProp, err := expandComputeRegionBackendServiceConnectionDraining(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(connectionDrainingProp)) { - obj["connectionDraining"] = connectionDrainingProp - } - descriptionProp, err := expandComputeRegionBackendServiceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - failoverPolicyProp, err := expandComputeRegionBackendServiceFailoverPolicy(d.Get("failover_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("failover_policy"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(failoverPolicyProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, failoverPolicyProp)) { - obj["failoverPolicy"] = failoverPolicyProp - } - enableCDNProp, err := expandComputeRegionBackendServiceEnableCDN(d.Get("enable_cdn"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_cdn"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(enableCDNProp)) && (ok || 
!resource_compute_region_backend_service_reflect.DeepEqual(v, enableCDNProp)) { - obj["enableCDN"] = enableCDNProp - } - fingerprintProp, err := expandComputeRegionBackendServiceFingerprint(d.Get("fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(fingerprintProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, fingerprintProp)) { - obj["fingerprint"] = fingerprintProp - } - healthChecksProp, err := expandComputeRegionBackendServiceHealthChecks(d.Get("health_checks"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("health_checks"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(healthChecksProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, healthChecksProp)) { - obj["healthChecks"] = healthChecksProp - } - iapProp, err := expandComputeRegionBackendServiceIap(d.Get("iap"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("iap"); ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, iapProp) { - obj["iap"] = iapProp - } - loadBalancingSchemeProp, err := expandComputeRegionBackendServiceLoadBalancingScheme(d.Get("load_balancing_scheme"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("load_balancing_scheme"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(loadBalancingSchemeProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, loadBalancingSchemeProp)) { - obj["loadBalancingScheme"] = loadBalancingSchemeProp - } - localityLbPolicyProp, err := expandComputeRegionBackendServiceLocalityLbPolicy(d.Get("locality_lb_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("locality_lb_policy"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(localityLbPolicyProp)) && (ok || 
!resource_compute_region_backend_service_reflect.DeepEqual(v, localityLbPolicyProp)) { - obj["localityLbPolicy"] = localityLbPolicyProp - } - nameProp, err := expandComputeRegionBackendServiceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(nameProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - outlierDetectionProp, err := expandComputeRegionBackendServiceOutlierDetection(d.Get("outlier_detection"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("outlier_detection"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(outlierDetectionProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, outlierDetectionProp)) { - obj["outlierDetection"] = outlierDetectionProp - } - portNameProp, err := expandComputeRegionBackendServicePortName(d.Get("port_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port_name"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(portNameProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, portNameProp)) { - obj["portName"] = portNameProp - } - protocolProp, err := expandComputeRegionBackendServiceProtocol(d.Get("protocol"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("protocol"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(protocolProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, protocolProp)) { - obj["protocol"] = protocolProp - } - sessionAffinityProp, err := expandComputeRegionBackendServiceSessionAffinity(d.Get("session_affinity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("session_affinity"); 
!isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(sessionAffinityProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, sessionAffinityProp)) { - obj["sessionAffinity"] = sessionAffinityProp - } - timeoutSecProp, err := expandComputeRegionBackendServiceTimeoutSec(d.Get("timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(timeoutSecProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, timeoutSecProp)) { - obj["timeoutSec"] = timeoutSecProp - } - logConfigProp, err := expandComputeRegionBackendServiceLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(logConfigProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, logConfigProp)) { - obj["logConfig"] = logConfigProp - } - networkProp, err := expandComputeRegionBackendServiceNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(networkProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - regionProp, err := expandComputeRegionBackendServiceRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(regionProp)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - obj, err = resourceComputeRegionBackendServiceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/backendServices") - if err != nil { - return err - } - - resource_compute_region_backend_service_log.Printf("[DEBUG] Creating new RegionBackendService: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error fetching project for RegionBackendService: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_backend_service_schema.TimeoutCreate)) - if err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error creating RegionBackendService: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/backendServices/{{name}}") - if err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating RegionBackendService", userAgent, - d.Timeout(resource_compute_region_backend_service_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_region_backend_service_fmt.Errorf("Error waiting to create RegionBackendService: %s", err) - } - - resource_compute_region_backend_service_log.Printf("[DEBUG] Finished creating RegionBackendService %q: %#v", d.Id(), res) - - return resourceComputeRegionBackendServiceRead(d, meta) -} - -func resourceComputeRegionBackendServiceRead(d *resource_compute_region_backend_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/backendServices/{{name}}") - if err 
!= nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error fetching project for RegionBackendService: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_region_backend_service_fmt.Sprintf("ComputeRegionBackendService %q", d.Id())) - } - - res, err = resourceComputeRegionBackendServiceDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_region_backend_service_log.Printf("[DEBUG] Removing ComputeRegionBackendService because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - - if err := d.Set("affinity_cookie_ttl_sec", flattenComputeRegionBackendServiceAffinityCookieTtlSec(res["affinityCookieTtlSec"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("backend", flattenComputeRegionBackendServiceBackend(res["backends"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("circuit_breakers", flattenComputeRegionBackendServiceCircuitBreakers(res["circuitBreakers"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("consistent_hash", flattenComputeRegionBackendServiceConsistentHash(res["consistentHash"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading 
RegionBackendService: %s", err) - } - if err := d.Set("cdn_policy", flattenComputeRegionBackendServiceCdnPolicy(res["cdnPolicy"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - - if flattenedProp := flattenComputeRegionBackendServiceConnectionDraining(res["connectionDraining"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*resource_compute_region_backend_service_googleapi.Error); ok { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error setting %s: %s", k, err) - } - } - } - } - if err := d.Set("creation_timestamp", flattenComputeRegionBackendServiceCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("description", flattenComputeRegionBackendServiceDescription(res["description"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("failover_policy", flattenComputeRegionBackendServiceFailoverPolicy(res["failoverPolicy"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("enable_cdn", flattenComputeRegionBackendServiceEnableCDN(res["enableCDN"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("fingerprint", flattenComputeRegionBackendServiceFingerprint(res["fingerprint"], d, config)); err != nil { - return 
resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("health_checks", flattenComputeRegionBackendServiceHealthChecks(res["healthChecks"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("iap", flattenComputeRegionBackendServiceIap(res["iap"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("load_balancing_scheme", flattenComputeRegionBackendServiceLoadBalancingScheme(res["loadBalancingScheme"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("locality_lb_policy", flattenComputeRegionBackendServiceLocalityLbPolicy(res["localityLbPolicy"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("name", flattenComputeRegionBackendServiceName(res["name"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("outlier_detection", flattenComputeRegionBackendServiceOutlierDetection(res["outlierDetection"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("port_name", flattenComputeRegionBackendServicePortName(res["portName"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("protocol", flattenComputeRegionBackendServiceProtocol(res["protocol"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err 
:= d.Set("session_affinity", flattenComputeRegionBackendServiceSessionAffinity(res["sessionAffinity"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("timeout_sec", flattenComputeRegionBackendServiceTimeoutSec(res["timeoutSec"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("log_config", flattenComputeRegionBackendServiceLogConfig(res["logConfig"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("network", flattenComputeRegionBackendServiceNetwork(res["network"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("region", flattenComputeRegionBackendServiceRegion(res["region"], d, config)); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error reading RegionBackendService: %s", err) - } - - return nil -} - -func resourceComputeRegionBackendServiceUpdate(d *resource_compute_region_backend_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error fetching project for RegionBackendService: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - affinityCookieTtlSecProp, err := 
expandComputeRegionBackendServiceAffinityCookieTtlSec(d.Get("affinity_cookie_ttl_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("affinity_cookie_ttl_sec"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, affinityCookieTtlSecProp)) { - obj["affinityCookieTtlSec"] = affinityCookieTtlSecProp - } - backendsProp, err := expandComputeRegionBackendServiceBackend(d.Get("backend"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, backendsProp)) { - obj["backends"] = backendsProp - } - circuitBreakersProp, err := expandComputeRegionBackendServiceCircuitBreakers(d.Get("circuit_breakers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("circuit_breakers"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, circuitBreakersProp)) { - obj["circuitBreakers"] = circuitBreakersProp - } - consistentHashProp, err := expandComputeRegionBackendServiceConsistentHash(d.Get("consistent_hash"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("consistent_hash"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, consistentHashProp)) { - obj["consistentHash"] = consistentHashProp - } - cdnPolicyProp, err := expandComputeRegionBackendServiceCdnPolicy(d.Get("cdn_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cdn_policy"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, cdnPolicyProp)) { - 
obj["cdnPolicy"] = cdnPolicyProp - } - connectionDrainingProp, err := expandComputeRegionBackendServiceConnectionDraining(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(connectionDrainingProp)) { - obj["connectionDraining"] = connectionDrainingProp - } - descriptionProp, err := expandComputeRegionBackendServiceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - failoverPolicyProp, err := expandComputeRegionBackendServiceFailoverPolicy(d.Get("failover_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("failover_policy"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, failoverPolicyProp)) { - obj["failoverPolicy"] = failoverPolicyProp - } - enableCDNProp, err := expandComputeRegionBackendServiceEnableCDN(d.Get("enable_cdn"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_cdn"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, enableCDNProp)) { - obj["enableCDN"] = enableCDNProp - } - fingerprintProp, err := expandComputeRegionBackendServiceFingerprint(d.Get("fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, fingerprintProp)) { - obj["fingerprint"] = fingerprintProp - } - healthChecksProp, err := 
expandComputeRegionBackendServiceHealthChecks(d.Get("health_checks"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("health_checks"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, healthChecksProp)) { - obj["healthChecks"] = healthChecksProp - } - iapProp, err := expandComputeRegionBackendServiceIap(d.Get("iap"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("iap"); ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, iapProp) { - obj["iap"] = iapProp - } - loadBalancingSchemeProp, err := expandComputeRegionBackendServiceLoadBalancingScheme(d.Get("load_balancing_scheme"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("load_balancing_scheme"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, loadBalancingSchemeProp)) { - obj["loadBalancingScheme"] = loadBalancingSchemeProp - } - localityLbPolicyProp, err := expandComputeRegionBackendServiceLocalityLbPolicy(d.Get("locality_lb_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("locality_lb_policy"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, localityLbPolicyProp)) { - obj["localityLbPolicy"] = localityLbPolicyProp - } - nameProp, err := expandComputeRegionBackendServiceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - outlierDetectionProp, err := expandComputeRegionBackendServiceOutlierDetection(d.Get("outlier_detection"), d, config) - if 
err != nil { - return err - } else if v, ok := d.GetOkExists("outlier_detection"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, outlierDetectionProp)) { - obj["outlierDetection"] = outlierDetectionProp - } - portNameProp, err := expandComputeRegionBackendServicePortName(d.Get("port_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port_name"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, portNameProp)) { - obj["portName"] = portNameProp - } - protocolProp, err := expandComputeRegionBackendServiceProtocol(d.Get("protocol"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("protocol"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, protocolProp)) { - obj["protocol"] = protocolProp - } - sessionAffinityProp, err := expandComputeRegionBackendServiceSessionAffinity(d.Get("session_affinity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("session_affinity"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, sessionAffinityProp)) { - obj["sessionAffinity"] = sessionAffinityProp - } - timeoutSecProp, err := expandComputeRegionBackendServiceTimeoutSec(d.Get("timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, timeoutSecProp)) { - obj["timeoutSec"] = timeoutSecProp - } - logConfigProp, err := expandComputeRegionBackendServiceLogConfig(d.Get("log_config"), d, config) - if err != nil { - return 
err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, logConfigProp)) { - obj["logConfig"] = logConfigProp - } - networkProp, err := expandComputeRegionBackendServiceNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - regionProp, err := expandComputeRegionBackendServiceRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_region_backend_service_reflect.ValueOf(v)) && (ok || !resource_compute_region_backend_service_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - obj, err = resourceComputeRegionBackendServiceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/backendServices/{{name}}") - if err != nil { - return err - } - - resource_compute_region_backend_service_log.Printf("[DEBUG] Updating RegionBackendService %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_backend_service_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error updating RegionBackendService %q: %s", d.Id(), err) - } else { - resource_compute_region_backend_service_log.Printf("[DEBUG] Finished updating RegionBackendService %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating RegionBackendService", 
userAgent, - d.Timeout(resource_compute_region_backend_service_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeRegionBackendServiceRead(d, meta) -} - -func resourceComputeRegionBackendServiceDelete(d *resource_compute_region_backend_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_backend_service_fmt.Errorf("Error fetching project for RegionBackendService: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/backendServices/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_region_backend_service_log.Printf("[DEBUG] Deleting RegionBackendService %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_backend_service_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionBackendService") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting RegionBackendService", userAgent, - d.Timeout(resource_compute_region_backend_service_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_region_backend_service_log.Printf("[DEBUG] Finished deleting RegionBackendService %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionBackendServiceImport(d *resource_compute_region_backend_service_schema.ResourceData, meta interface{}) ([]*resource_compute_region_backend_service_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - 
"projects/(?P[^/]+)/regions/(?P[^/]+)/backendServices/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/backendServices/{{name}}") - if err != nil { - return nil, resource_compute_region_backend_service_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_region_backend_service_schema.ResourceData{d}, nil -} - -func flattenComputeRegionBackendServiceAffinityCookieTtlSec(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceBackend(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_compute_region_backend_service_schema.NewSet(resourceGoogleComputeBackendServiceBackendHash, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "balancing_mode": flattenComputeRegionBackendServiceBackendBalancingMode(original["balancingMode"], d, config), - "capacity_scaler": flattenComputeRegionBackendServiceBackendCapacityScaler(original["capacityScaler"], d, config), - "description": flattenComputeRegionBackendServiceBackendDescription(original["description"], d, config), - "failover": flattenComputeRegionBackendServiceBackendFailover(original["failover"], d, config), - "group": flattenComputeRegionBackendServiceBackendGroup(original["group"], d, config), - 
"max_connections": flattenComputeRegionBackendServiceBackendMaxConnections(original["maxConnections"], d, config), - "max_connections_per_instance": flattenComputeRegionBackendServiceBackendMaxConnectionsPerInstance(original["maxConnectionsPerInstance"], d, config), - "max_connections_per_endpoint": flattenComputeRegionBackendServiceBackendMaxConnectionsPerEndpoint(original["maxConnectionsPerEndpoint"], d, config), - "max_rate": flattenComputeRegionBackendServiceBackendMaxRate(original["maxRate"], d, config), - "max_rate_per_instance": flattenComputeRegionBackendServiceBackendMaxRatePerInstance(original["maxRatePerInstance"], d, config), - "max_rate_per_endpoint": flattenComputeRegionBackendServiceBackendMaxRatePerEndpoint(original["maxRatePerEndpoint"], d, config), - "max_utilization": flattenComputeRegionBackendServiceBackendMaxUtilization(original["maxUtilization"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionBackendServiceBackendBalancingMode(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceBackendCapacityScaler(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceBackendDescription(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceBackendFailover(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceBackendGroup(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionBackendServiceBackendMaxConnections(v interface{}, d 
*resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceBackendMaxConnectionsPerInstance(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceBackendMaxConnectionsPerEndpoint(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceBackendMaxRate(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceBackendMaxRatePerInstance(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceBackendMaxRatePerEndpoint(v interface{}, d 
*resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceBackendMaxUtilization(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceCircuitBreakers(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["max_requests_per_connection"] = - flattenComputeRegionBackendServiceCircuitBreakersMaxRequestsPerConnection(original["maxRequestsPerConnection"], d, config) - transformed["max_connections"] = - flattenComputeRegionBackendServiceCircuitBreakersMaxConnections(original["maxConnections"], d, config) - transformed["max_pending_requests"] = - flattenComputeRegionBackendServiceCircuitBreakersMaxPendingRequests(original["maxPendingRequests"], d, config) - transformed["max_requests"] = - flattenComputeRegionBackendServiceCircuitBreakersMaxRequests(original["maxRequests"], d, config) - transformed["max_retries"] = - flattenComputeRegionBackendServiceCircuitBreakersMaxRetries(original["maxRetries"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionBackendServiceCircuitBreakersMaxRequestsPerConnection(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceCircuitBreakersMaxConnections(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, 
config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceCircuitBreakersMaxPendingRequests(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceCircuitBreakersMaxRequests(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceCircuitBreakersMaxRetries(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceConsistentHash(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["http_cookie"] = - flattenComputeRegionBackendServiceConsistentHashHttpCookie(original["httpCookie"], d, config) - transformed["http_header_name"] = - flattenComputeRegionBackendServiceConsistentHashHttpHeaderName(original["httpHeaderName"], d, config) - transformed["minimum_ring_size"] = - flattenComputeRegionBackendServiceConsistentHashMinimumRingSize(original["minimumRingSize"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionBackendServiceConsistentHashHttpCookie(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["ttl"] = - flattenComputeRegionBackendServiceConsistentHashHttpCookieTtl(original["ttl"], d, config) - transformed["name"] = - flattenComputeRegionBackendServiceConsistentHashHttpCookieName(original["name"], d, config) - transformed["path"] = - flattenComputeRegionBackendServiceConsistentHashHttpCookiePath(original["path"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionBackendServiceConsistentHashHttpCookieTtl(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["seconds"] = - flattenComputeRegionBackendServiceConsistentHashHttpCookieTtlSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenComputeRegionBackendServiceConsistentHashHttpCookieTtlNanos(original["nanos"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionBackendServiceConsistentHashHttpCookieTtlSeconds(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - 
- if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceConsistentHashHttpCookieTtlNanos(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceConsistentHashHttpCookieName(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceConsistentHashHttpCookiePath(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceConsistentHashHttpHeaderName(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceConsistentHashMinimumRingSize(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceCdnPolicy(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - 
if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cache_key_policy"] = - flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicy(original["cacheKeyPolicy"], d, config) - transformed["signed_url_cache_max_age_sec"] = - flattenComputeRegionBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(original["signedUrlCacheMaxAgeSec"], d, config) - transformed["default_ttl"] = - flattenComputeRegionBackendServiceCdnPolicyDefaultTtl(original["defaultTtl"], d, config) - transformed["max_ttl"] = - flattenComputeRegionBackendServiceCdnPolicyMaxTtl(original["maxTtl"], d, config) - transformed["client_ttl"] = - flattenComputeRegionBackendServiceCdnPolicyClientTtl(original["clientTtl"], d, config) - transformed["negative_caching"] = - flattenComputeRegionBackendServiceCdnPolicyNegativeCaching(original["negativeCaching"], d, config) - transformed["negative_caching_policy"] = - flattenComputeRegionBackendServiceCdnPolicyNegativeCachingPolicy(original["negativeCachingPolicy"], d, config) - transformed["cache_mode"] = - flattenComputeRegionBackendServiceCdnPolicyCacheMode(original["cacheMode"], d, config) - transformed["serve_while_stale"] = - flattenComputeRegionBackendServiceCdnPolicyServeWhileStale(original["serveWhileStale"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["include_host"] = - flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(original["includeHost"], d, config) - transformed["include_protocol"] = - flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(original["includeProtocol"], d, config) - transformed["include_query_string"] = - 
flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(original["includeQueryString"], d, config) - transformed["query_string_blacklist"] = - flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(original["queryStringBlacklist"], d, config) - transformed["query_string_whitelist"] = - flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(original["queryStringWhitelist"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_region_backend_service_schema.NewSet(resource_compute_region_backend_service_schema.HashString, v.([]interface{})) -} - -func flattenComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_region_backend_service_schema.NewSet(resource_compute_region_backend_service_schema.HashString, v.([]interface{})) -} - -func flattenComputeRegionBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, 
config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceCdnPolicyDefaultTtl(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceCdnPolicyMaxTtl(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceCdnPolicyClientTtl(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceCdnPolicyNegativeCaching(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceCdnPolicyNegativeCachingPolicy(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - 
if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "code": flattenComputeRegionBackendServiceCdnPolicyNegativeCachingPolicyCode(original["code"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionBackendServiceCdnPolicyNegativeCachingPolicyCode(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceCdnPolicyCacheMode(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceCdnPolicyServeWhileStale(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceConnectionDraining(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["connection_draining_timeout_sec"] = - 
flattenComputeRegionBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(original["drainingTimeoutSec"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceCreationTimestamp(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceDescription(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceFailoverPolicy(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["disable_connection_drain_on_failover"] = - flattenComputeRegionBackendServiceFailoverPolicyDisableConnectionDrainOnFailover(original["disableConnectionDrainOnFailover"], d, config) - transformed["drop_traffic_if_unhealthy"] = - flattenComputeRegionBackendServiceFailoverPolicyDropTrafficIfUnhealthy(original["dropTrafficIfUnhealthy"], d, config) - transformed["failover_ratio"] = - flattenComputeRegionBackendServiceFailoverPolicyFailoverRatio(original["failoverRatio"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionBackendServiceFailoverPolicyDisableConnectionDrainOnFailover(v interface{}, d 
*resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceFailoverPolicyDropTrafficIfUnhealthy(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceFailoverPolicyFailoverRatio(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceEnableCDN(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceFingerprint(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceHealthChecks(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeRegionBackendServiceIap(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["oauth2_client_id"] = - flattenComputeRegionBackendServiceIapOauth2ClientId(original["oauth2ClientId"], d, config) - transformed["oauth2_client_secret"] = - flattenComputeRegionBackendServiceIapOauth2ClientSecret(original["oauth2ClientSecret"], d, config) - transformed["oauth2_client_secret_sha256"] = - flattenComputeRegionBackendServiceIapOauth2ClientSecretSha256(original["oauth2ClientSecretSha256"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionBackendServiceIapOauth2ClientId(v interface{}, d 
*resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceIapOauth2ClientSecret(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return d.Get("iap.0.oauth2_client_secret") -} - -func flattenComputeRegionBackendServiceIapOauth2ClientSecretSha256(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceLoadBalancingScheme(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceLocalityLbPolicy(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceName(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceOutlierDetection(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["base_ejection_time"] = - flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTime(original["baseEjectionTime"], d, config) - transformed["consecutive_errors"] = - flattenComputeRegionBackendServiceOutlierDetectionConsecutiveErrors(original["consecutiveErrors"], d, config) - transformed["consecutive_gateway_failure"] = - flattenComputeRegionBackendServiceOutlierDetectionConsecutiveGatewayFailure(original["consecutiveGatewayFailure"], d, config) - transformed["enforcing_consecutive_errors"] = - 
flattenComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveErrors(original["enforcingConsecutiveErrors"], d, config) - transformed["enforcing_consecutive_gateway_failure"] = - flattenComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(original["enforcingConsecutiveGatewayFailure"], d, config) - transformed["enforcing_success_rate"] = - flattenComputeRegionBackendServiceOutlierDetectionEnforcingSuccessRate(original["enforcingSuccessRate"], d, config) - transformed["interval"] = - flattenComputeRegionBackendServiceOutlierDetectionInterval(original["interval"], d, config) - transformed["max_ejection_percent"] = - flattenComputeRegionBackendServiceOutlierDetectionMaxEjectionPercent(original["maxEjectionPercent"], d, config) - transformed["success_rate_minimum_hosts"] = - flattenComputeRegionBackendServiceOutlierDetectionSuccessRateMinimumHosts(original["successRateMinimumHosts"], d, config) - transformed["success_rate_request_volume"] = - flattenComputeRegionBackendServiceOutlierDetectionSuccessRateRequestVolume(original["successRateRequestVolume"], d, config) - transformed["success_rate_stdev_factor"] = - flattenComputeRegionBackendServiceOutlierDetectionSuccessRateStdevFactor(original["successRateStdevFactor"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTime(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["seconds"] = - flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeNanos(original["nanos"], d, config) - return []interface{}{transformed} -} - -func 
flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeSeconds(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeNanos(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceOutlierDetectionConsecutiveErrors(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceOutlierDetectionConsecutiveGatewayFailure(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveErrors(v interface{}, d 
*resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceOutlierDetectionEnforcingSuccessRate(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceOutlierDetectionInterval(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["seconds"] = - flattenComputeRegionBackendServiceOutlierDetectionIntervalSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenComputeRegionBackendServiceOutlierDetectionIntervalNanos(original["nanos"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionBackendServiceOutlierDetectionIntervalSeconds(v 
interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceOutlierDetectionIntervalNanos(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceOutlierDetectionMaxEjectionPercent(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceOutlierDetectionSuccessRateMinimumHosts(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceOutlierDetectionSuccessRateRequestVolume(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err 
:= resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceOutlierDetectionSuccessRateStdevFactor(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServicePortName(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceProtocol(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceSessionAffinity(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceTimeoutSec(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_backend_service_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionBackendServiceLogConfig(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enable"] = - 
flattenComputeRegionBackendServiceLogConfigEnable(original["enable"], d, config) - transformed["sample_rate"] = - flattenComputeRegionBackendServiceLogConfigSampleRate(original["sampleRate"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionBackendServiceLogConfigEnable(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceLogConfigSampleRate(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionBackendServiceNetwork(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionBackendServiceRegion(v interface{}, d *resource_compute_region_backend_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeRegionBackendServiceAffinityCookieTtlSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceBackend(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_region_backend_service_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBalancingMode, err := expandComputeRegionBackendServiceBackendBalancingMode(original["balancing_mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedBalancingMode); val.IsValid() && !isEmptyValue(val) { - transformed["balancingMode"] = 
transformedBalancingMode - } - - transformedCapacityScaler, err := expandComputeRegionBackendServiceBackendCapacityScaler(original["capacity_scaler"], d, config) - if err != nil { - return nil, err - } else { - transformed["capacityScaler"] = transformedCapacityScaler - } - - transformedDescription, err := expandComputeRegionBackendServiceBackendDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedFailover, err := expandComputeRegionBackendServiceBackendFailover(original["failover"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedFailover); val.IsValid() && !isEmptyValue(val) { - transformed["failover"] = transformedFailover - } - - transformedGroup, err := expandComputeRegionBackendServiceBackendGroup(original["group"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedGroup); val.IsValid() && !isEmptyValue(val) { - transformed["group"] = transformedGroup - } - - transformedMaxConnections, err := expandComputeRegionBackendServiceBackendMaxConnections(original["max_connections"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedMaxConnections); val.IsValid() && !isEmptyValue(val) { - transformed["maxConnections"] = transformedMaxConnections - } - - transformedMaxConnectionsPerInstance, err := expandComputeRegionBackendServiceBackendMaxConnectionsPerInstance(original["max_connections_per_instance"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedMaxConnectionsPerInstance); val.IsValid() && 
!isEmptyValue(val) { - transformed["maxConnectionsPerInstance"] = transformedMaxConnectionsPerInstance - } - - transformedMaxConnectionsPerEndpoint, err := expandComputeRegionBackendServiceBackendMaxConnectionsPerEndpoint(original["max_connections_per_endpoint"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedMaxConnectionsPerEndpoint); val.IsValid() && !isEmptyValue(val) { - transformed["maxConnectionsPerEndpoint"] = transformedMaxConnectionsPerEndpoint - } - - transformedMaxRate, err := expandComputeRegionBackendServiceBackendMaxRate(original["max_rate"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedMaxRate); val.IsValid() && !isEmptyValue(val) { - transformed["maxRate"] = transformedMaxRate - } - - transformedMaxRatePerInstance, err := expandComputeRegionBackendServiceBackendMaxRatePerInstance(original["max_rate_per_instance"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedMaxRatePerInstance); val.IsValid() && !isEmptyValue(val) { - transformed["maxRatePerInstance"] = transformedMaxRatePerInstance - } - - transformedMaxRatePerEndpoint, err := expandComputeRegionBackendServiceBackendMaxRatePerEndpoint(original["max_rate_per_endpoint"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedMaxRatePerEndpoint); val.IsValid() && !isEmptyValue(val) { - transformed["maxRatePerEndpoint"] = transformedMaxRatePerEndpoint - } - - transformedMaxUtilization, err := expandComputeRegionBackendServiceBackendMaxUtilization(original["max_utilization"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedMaxUtilization); val.IsValid() && !isEmptyValue(val) { - 
transformed["maxUtilization"] = transformedMaxUtilization - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionBackendServiceBackendBalancingMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceBackendCapacityScaler(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceBackendDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceBackendFailover(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceBackendGroup(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceBackendMaxConnections(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceBackendMaxConnectionsPerInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceBackendMaxConnectionsPerEndpoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceBackendMaxRate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceBackendMaxRatePerInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceBackendMaxRatePerEndpoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceBackendMaxUtilization(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceCircuitBreakers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaxRequestsPerConnection, err := expandComputeRegionBackendServiceCircuitBreakersMaxRequestsPerConnection(original["max_requests_per_connection"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedMaxRequestsPerConnection); val.IsValid() && !isEmptyValue(val) { - transformed["maxRequestsPerConnection"] = transformedMaxRequestsPerConnection - } - - transformedMaxConnections, err := expandComputeRegionBackendServiceCircuitBreakersMaxConnections(original["max_connections"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedMaxConnections); val.IsValid() && !isEmptyValue(val) { - transformed["maxConnections"] = transformedMaxConnections - } - - transformedMaxPendingRequests, err := expandComputeRegionBackendServiceCircuitBreakersMaxPendingRequests(original["max_pending_requests"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedMaxPendingRequests); val.IsValid() && !isEmptyValue(val) { - transformed["maxPendingRequests"] = transformedMaxPendingRequests - } - - transformedMaxRequests, err := expandComputeRegionBackendServiceCircuitBreakersMaxRequests(original["max_requests"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedMaxRequests); val.IsValid() && !isEmptyValue(val) { - transformed["maxRequests"] = transformedMaxRequests - } - - 
transformedMaxRetries, err := expandComputeRegionBackendServiceCircuitBreakersMaxRetries(original["max_retries"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedMaxRetries); val.IsValid() && !isEmptyValue(val) { - transformed["maxRetries"] = transformedMaxRetries - } - - return transformed, nil -} - -func expandComputeRegionBackendServiceCircuitBreakersMaxRequestsPerConnection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceCircuitBreakersMaxConnections(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceCircuitBreakersMaxPendingRequests(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceCircuitBreakersMaxRequests(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceCircuitBreakersMaxRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceConsistentHash(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHttpCookie, err := expandComputeRegionBackendServiceConsistentHashHttpCookie(original["http_cookie"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedHttpCookie); val.IsValid() && !isEmptyValue(val) { - transformed["httpCookie"] = transformedHttpCookie - } - - transformedHttpHeaderName, err := 
expandComputeRegionBackendServiceConsistentHashHttpHeaderName(original["http_header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedHttpHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["httpHeaderName"] = transformedHttpHeaderName - } - - transformedMinimumRingSize, err := expandComputeRegionBackendServiceConsistentHashMinimumRingSize(original["minimum_ring_size"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedMinimumRingSize); val.IsValid() && !isEmptyValue(val) { - transformed["minimumRingSize"] = transformedMinimumRingSize - } - - return transformed, nil -} - -func expandComputeRegionBackendServiceConsistentHashHttpCookie(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTtl, err := expandComputeRegionBackendServiceConsistentHashHttpCookieTtl(original["ttl"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedTtl); val.IsValid() && !isEmptyValue(val) { - transformed["ttl"] = transformedTtl - } - - transformedName, err := expandComputeRegionBackendServiceConsistentHashHttpCookieName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedPath, err := expandComputeRegionBackendServiceConsistentHashHttpCookiePath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedPath); 
val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - return transformed, nil -} - -func expandComputeRegionBackendServiceConsistentHashHttpCookieTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSeconds, err := expandComputeRegionBackendServiceConsistentHashHttpCookieTtlSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandComputeRegionBackendServiceConsistentHashHttpCookieTtlNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandComputeRegionBackendServiceConsistentHashHttpCookieTtlSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceConsistentHashHttpCookieTtlNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceConsistentHashHttpCookieName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceConsistentHashHttpCookiePath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceConsistentHashHttpHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
return v, nil -} - -func expandComputeRegionBackendServiceConsistentHashMinimumRingSize(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceCdnPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCacheKeyPolicy, err := expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicy(original["cache_key_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedCacheKeyPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["cacheKeyPolicy"] = transformedCacheKeyPolicy - } - - transformedSignedUrlCacheMaxAgeSec, err := expandComputeRegionBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(original["signed_url_cache_max_age_sec"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedSignedUrlCacheMaxAgeSec); val.IsValid() && !isEmptyValue(val) { - transformed["signedUrlCacheMaxAgeSec"] = transformedSignedUrlCacheMaxAgeSec - } - - transformedDefaultTtl, err := expandComputeRegionBackendServiceCdnPolicyDefaultTtl(original["default_ttl"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedDefaultTtl); val.IsValid() && !isEmptyValue(val) { - transformed["defaultTtl"] = transformedDefaultTtl - } - - transformedMaxTtl, err := expandComputeRegionBackendServiceCdnPolicyMaxTtl(original["max_ttl"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedMaxTtl); val.IsValid() && !isEmptyValue(val) { - transformed["maxTtl"] = transformedMaxTtl - } - - 
transformedClientTtl, err := expandComputeRegionBackendServiceCdnPolicyClientTtl(original["client_ttl"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedClientTtl); val.IsValid() && !isEmptyValue(val) { - transformed["clientTtl"] = transformedClientTtl - } - - transformedNegativeCaching, err := expandComputeRegionBackendServiceCdnPolicyNegativeCaching(original["negative_caching"], d, config) - if err != nil { - return nil, err - } else { - transformed["negativeCaching"] = transformedNegativeCaching - } - - transformedNegativeCachingPolicy, err := expandComputeRegionBackendServiceCdnPolicyNegativeCachingPolicy(original["negative_caching_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedNegativeCachingPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["negativeCachingPolicy"] = transformedNegativeCachingPolicy - } - - transformedCacheMode, err := expandComputeRegionBackendServiceCdnPolicyCacheMode(original["cache_mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedCacheMode); val.IsValid() && !isEmptyValue(val) { - transformed["cacheMode"] = transformedCacheMode - } - - transformedServeWhileStale, err := expandComputeRegionBackendServiceCdnPolicyServeWhileStale(original["serve_while_stale"], d, config) - if err != nil { - return nil, err - } else { - transformed["serveWhileStale"] = transformedServeWhileStale - } - - return transformed, nil -} - -func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIncludeHost, 
err := expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(original["include_host"], d, config) - if err != nil { - return nil, err - } else { - transformed["includeHost"] = transformedIncludeHost - } - - transformedIncludeProtocol, err := expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(original["include_protocol"], d, config) - if err != nil { - return nil, err - } else { - transformed["includeProtocol"] = transformedIncludeProtocol - } - - transformedIncludeQueryString, err := expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(original["include_query_string"], d, config) - if err != nil { - return nil, err - } else { - transformed["includeQueryString"] = transformedIncludeQueryString - } - - transformedQueryStringBlacklist, err := expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(original["query_string_blacklist"], d, config) - if err != nil { - return nil, err - } else { - transformed["queryStringBlacklist"] = transformedQueryStringBlacklist - } - - transformedQueryStringWhitelist, err := expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(original["query_string_whitelist"], d, config) - if err != nil { - return nil, err - } else { - transformed["queryStringWhitelist"] = transformedQueryStringWhitelist - } - - return transformed, nil -} - -func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyIncludeQueryString(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringBlacklist(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_region_backend_service_schema.Set).List() - return v, nil -} - -func expandComputeRegionBackendServiceCdnPolicyCacheKeyPolicyQueryStringWhitelist(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_region_backend_service_schema.Set).List() - return v, nil -} - -func expandComputeRegionBackendServiceCdnPolicySignedUrlCacheMaxAgeSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceCdnPolicyDefaultTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceCdnPolicyMaxTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceCdnPolicyClientTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceCdnPolicyNegativeCaching(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceCdnPolicyNegativeCachingPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCode, err := expandComputeRegionBackendServiceCdnPolicyNegativeCachingPolicyCode(original["code"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedCode); val.IsValid() && !isEmptyValue(val) { - transformed["code"] = transformedCode - } - - req = append(req, transformed) - } - return req, nil -} - -func 
expandComputeRegionBackendServiceCdnPolicyNegativeCachingPolicyCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceCdnPolicyCacheMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceCdnPolicyServeWhileStale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceConnectionDraining(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - transformed := make(map[string]interface{}) - transformedConnectionDrainingTimeoutSec, err := expandComputeRegionBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(d.Get("connection_draining_timeout_sec"), d, config) - if err != nil { - return nil, err - } else { - transformed["drainingTimeoutSec"] = transformedConnectionDrainingTimeoutSec - } - - return transformed, nil -} - -func expandComputeRegionBackendServiceConnectionDrainingConnectionDrainingTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceFailoverPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDisableConnectionDrainOnFailover, err := expandComputeRegionBackendServiceFailoverPolicyDisableConnectionDrainOnFailover(original["disable_connection_drain_on_failover"], d, config) - if err != nil { - return nil, err - } else { - transformed["disableConnectionDrainOnFailover"] = 
transformedDisableConnectionDrainOnFailover - } - - transformedDropTrafficIfUnhealthy, err := expandComputeRegionBackendServiceFailoverPolicyDropTrafficIfUnhealthy(original["drop_traffic_if_unhealthy"], d, config) - if err != nil { - return nil, err - } else { - transformed["dropTrafficIfUnhealthy"] = transformedDropTrafficIfUnhealthy - } - - transformedFailoverRatio, err := expandComputeRegionBackendServiceFailoverPolicyFailoverRatio(original["failover_ratio"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedFailoverRatio); val.IsValid() && !isEmptyValue(val) { - transformed["failoverRatio"] = transformedFailoverRatio - } - - return transformed, nil -} - -func expandComputeRegionBackendServiceFailoverPolicyDisableConnectionDrainOnFailover(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceFailoverPolicyDropTrafficIfUnhealthy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceFailoverPolicyFailoverRatio(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceEnableCDN(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceHealthChecks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_region_backend_service_schema.Set).List() - return v, nil -} - -func expandComputeRegionBackendServiceIap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - 
} - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedOauth2ClientId, err := expandComputeRegionBackendServiceIapOauth2ClientId(original["oauth2_client_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedOauth2ClientId); val.IsValid() && !isEmptyValue(val) { - transformed["oauth2ClientId"] = transformedOauth2ClientId - } - - transformedOauth2ClientSecret, err := expandComputeRegionBackendServiceIapOauth2ClientSecret(original["oauth2_client_secret"], d, config) - if err != nil { - return nil, err - } else { - transformed["oauth2ClientSecret"] = transformedOauth2ClientSecret - } - - transformedOauth2ClientSecretSha256, err := expandComputeRegionBackendServiceIapOauth2ClientSecretSha256(original["oauth2_client_secret_sha256"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedOauth2ClientSecretSha256); val.IsValid() && !isEmptyValue(val) { - transformed["oauth2ClientSecretSha256"] = transformedOauth2ClientSecretSha256 - } - - return transformed, nil -} - -func expandComputeRegionBackendServiceIapOauth2ClientId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceIapOauth2ClientSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceIapOauth2ClientSecretSha256(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceLoadBalancingScheme(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceLocalityLbPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} 
- -func expandComputeRegionBackendServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceOutlierDetection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBaseEjectionTime, err := expandComputeRegionBackendServiceOutlierDetectionBaseEjectionTime(original["base_ejection_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedBaseEjectionTime); val.IsValid() && !isEmptyValue(val) { - transformed["baseEjectionTime"] = transformedBaseEjectionTime - } - - transformedConsecutiveErrors, err := expandComputeRegionBackendServiceOutlierDetectionConsecutiveErrors(original["consecutive_errors"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedConsecutiveErrors); val.IsValid() && !isEmptyValue(val) { - transformed["consecutiveErrors"] = transformedConsecutiveErrors - } - - transformedConsecutiveGatewayFailure, err := expandComputeRegionBackendServiceOutlierDetectionConsecutiveGatewayFailure(original["consecutive_gateway_failure"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedConsecutiveGatewayFailure); val.IsValid() && !isEmptyValue(val) { - transformed["consecutiveGatewayFailure"] = transformedConsecutiveGatewayFailure - } - - transformedEnforcingConsecutiveErrors, err := expandComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveErrors(original["enforcing_consecutive_errors"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_region_backend_service_reflect.ValueOf(transformedEnforcingConsecutiveErrors); val.IsValid() && !isEmptyValue(val) { - transformed["enforcingConsecutiveErrors"] = transformedEnforcingConsecutiveErrors - } - - transformedEnforcingConsecutiveGatewayFailure, err := expandComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(original["enforcing_consecutive_gateway_failure"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedEnforcingConsecutiveGatewayFailure); val.IsValid() && !isEmptyValue(val) { - transformed["enforcingConsecutiveGatewayFailure"] = transformedEnforcingConsecutiveGatewayFailure - } - - transformedEnforcingSuccessRate, err := expandComputeRegionBackendServiceOutlierDetectionEnforcingSuccessRate(original["enforcing_success_rate"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedEnforcingSuccessRate); val.IsValid() && !isEmptyValue(val) { - transformed["enforcingSuccessRate"] = transformedEnforcingSuccessRate - } - - transformedInterval, err := expandComputeRegionBackendServiceOutlierDetectionInterval(original["interval"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedInterval); val.IsValid() && !isEmptyValue(val) { - transformed["interval"] = transformedInterval - } - - transformedMaxEjectionPercent, err := expandComputeRegionBackendServiceOutlierDetectionMaxEjectionPercent(original["max_ejection_percent"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedMaxEjectionPercent); val.IsValid() && !isEmptyValue(val) { - transformed["maxEjectionPercent"] = transformedMaxEjectionPercent - } - - transformedSuccessRateMinimumHosts, err := 
expandComputeRegionBackendServiceOutlierDetectionSuccessRateMinimumHosts(original["success_rate_minimum_hosts"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedSuccessRateMinimumHosts); val.IsValid() && !isEmptyValue(val) { - transformed["successRateMinimumHosts"] = transformedSuccessRateMinimumHosts - } - - transformedSuccessRateRequestVolume, err := expandComputeRegionBackendServiceOutlierDetectionSuccessRateRequestVolume(original["success_rate_request_volume"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedSuccessRateRequestVolume); val.IsValid() && !isEmptyValue(val) { - transformed["successRateRequestVolume"] = transformedSuccessRateRequestVolume - } - - transformedSuccessRateStdevFactor, err := expandComputeRegionBackendServiceOutlierDetectionSuccessRateStdevFactor(original["success_rate_stdev_factor"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedSuccessRateStdevFactor); val.IsValid() && !isEmptyValue(val) { - transformed["successRateStdevFactor"] = transformedSuccessRateStdevFactor - } - - return transformed, nil -} - -func expandComputeRegionBackendServiceOutlierDetectionBaseEjectionTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSeconds, err := expandComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - 
transformedNanos, err := expandComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceOutlierDetectionBaseEjectionTimeNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceOutlierDetectionConsecutiveErrors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceOutlierDetectionConsecutiveGatewayFailure(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveErrors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceOutlierDetectionEnforcingConsecutiveGatewayFailure(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceOutlierDetectionEnforcingSuccessRate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceOutlierDetectionInterval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSeconds, err := 
expandComputeRegionBackendServiceOutlierDetectionIntervalSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandComputeRegionBackendServiceOutlierDetectionIntervalNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandComputeRegionBackendServiceOutlierDetectionIntervalSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceOutlierDetectionIntervalNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceOutlierDetectionMaxEjectionPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceOutlierDetectionSuccessRateMinimumHosts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceOutlierDetectionSuccessRateRequestVolume(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceOutlierDetectionSuccessRateStdevFactor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServicePortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceProtocol(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceSessionAffinity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnable, err := expandComputeRegionBackendServiceLogConfigEnable(original["enable"], d, config) - if err != nil { - return nil, err - } else { - transformed["enable"] = transformedEnable - } - - transformedSampleRate, err := expandComputeRegionBackendServiceLogConfigSampleRate(original["sample_rate"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_backend_service_reflect.ValueOf(transformedSampleRate); val.IsValid() && !isEmptyValue(val) { - transformed["sampleRate"] = transformedSampleRate - } - - return transformed, nil -} - -func expandComputeRegionBackendServiceLogConfigEnable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceLogConfigSampleRate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionBackendServiceNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_region_backend_service_fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionBackendServiceRegion(v interface{}, 
d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_region_backend_service_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeRegionBackendServiceEncoder(d *resource_compute_region_backend_service_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - iapVal := obj["iap"] - if iapVal == nil { - data := map[string]interface{}{} - data["enabled"] = false - data["oauth2ClientId"] = "" - data["oauth2ClientSecret"] = "" - obj["iap"] = data - } else { - iap := iapVal.(map[string]interface{}) - iap["enabled"] = true - obj["iap"] = iap - } - - if d.Get("load_balancing_scheme").(string) == "INTERNAL_MANAGED" { - return obj, nil - } - - backendServiceOnlyManagedApiFieldNames := []string{ - "capacityScaler", - "maxConnections", - "maxConnectionsPerInstance", - "maxConnectionsPerEndpoint", - "maxRate", - "maxRatePerInstance", - "maxRatePerEndpoint", - "maxUtilization", - } - - var backends []interface{} - if lsV := obj["backends"]; lsV != nil { - backends = lsV.([]interface{}) - } - for idx, v := range backends { - if v == nil { - continue - } - backend := v.(map[string]interface{}) - - for _, k := range backendServiceOnlyManagedApiFieldNames { - resource_compute_region_backend_service_log.Printf("[DEBUG] Removing field %q for request for non-managed backend service %s", k, d.Get("name")) - delete(backend, k) - } - backends[idx] = backend - } - - obj["backends"] = backends - return obj, nil -} - -func resourceComputeRegionBackendServiceDecoder(d *resource_compute_region_backend_service_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - - v, ok := res["iap"] - if !ok || v == nil { - delete(res, "iap") - return res, nil - } - m := v.(map[string]interface{}) - if ok && 
m["enabled"] == false { - delete(res, "iap") - } - - if v, ok := res["localityLbPolicy"]; ok { - lbPolicy := v.(string) - if lbPolicy != "MAGLEV" && lbPolicy != "RING_HASH" { - delete(res, "consistentHash") - } - } - - return res, nil -} - -func resourceComputeRegionDisk() *resource_compute_region_disk_schema.Resource { - return &resource_compute_region_disk_schema.Resource{ - Create: resourceComputeRegionDiskCreate, - Read: resourceComputeRegionDiskRead, - Update: resourceComputeRegionDiskUpdate, - Delete: resourceComputeRegionDiskDelete, - - Importer: &resource_compute_region_disk_schema.ResourceImporter{ - State: resourceComputeRegionDiskImport, - }, - - Timeouts: &resource_compute_region_disk_schema.ResourceTimeout{ - Create: resource_compute_region_disk_schema.DefaultTimeout(5 * resource_compute_region_disk_time.Minute), - Update: resource_compute_region_disk_schema.DefaultTimeout(4 * resource_compute_region_disk_time.Minute), - Delete: resource_compute_region_disk_schema.DefaultTimeout(4 * resource_compute_region_disk_time.Minute), - }, - - CustomizeDiff: resource_compute_region_disk_customdiff.All( - resource_compute_region_disk_customdiff.ForceNewIfChange("size", isDiskShrinkage)), - - Schema: map[string]*resource_compute_region_disk_schema.Schema{ - "name": { - Type: resource_compute_region_disk_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "replica_zones": { - Type: resource_compute_region_disk_schema.TypeList, - Required: true, - ForceNew: true, - Description: `URLs of the zones where the disk should be replicated to.`, - MinItems: 2, - MaxItems: 2, - Elem: &resource_compute_region_disk_schema.Schema{ - Type: resource_compute_region_disk_schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "description": { - Type: resource_compute_region_disk_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "disk_encryption_key": { - Type: resource_compute_region_disk_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Encrypts the disk using a customer-supplied encryption key. - -After you encrypt a disk with a customer-supplied key, you must -provide the same key if you use the disk later (e.g. to create a disk -snapshot or an image, or to attach the disk to a virtual machine). - -Customer-supplied encryption keys do not protect access to metadata of -the disk. 
- -If you do not provide an encryption key when creating the disk, then -the disk will be encrypted using an automatically generated key and -you do not need to provide a key to use the disk later.`, - MaxItems: 1, - Elem: &resource_compute_region_disk_schema.Resource{ - Schema: map[string]*resource_compute_region_disk_schema.Schema{ - "raw_key": { - Type: resource_compute_region_disk_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a 256-bit customer-supplied encryption key, encoded in -RFC 4648 base64 to either encrypt or decrypt this resource.`, - Sensitive: true, - }, - "sha256": { - Type: resource_compute_region_disk_schema.TypeString, - Computed: true, - Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied -encryption key that protects this resource.`, - }, - }, - }, - }, - "labels": { - Type: resource_compute_region_disk_schema.TypeMap, - Optional: true, - Description: `Labels to apply to this disk. A list of key->value pairs.`, - Elem: &resource_compute_region_disk_schema.Schema{Type: resource_compute_region_disk_schema.TypeString}, - }, - "physical_block_size_bytes": { - Type: resource_compute_region_disk_schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Physical block size of the persistent disk, in bytes. If not present -in a request, a default value is used. Currently supported sizes -are 4096 and 16384, other sizes may be added in the future. 
-If an unsupported value is requested, the error message will list -the supported values for the caller's project.`, - }, - "region": { - Type: resource_compute_region_disk_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the region where the disk resides.`, - }, - "size": { - Type: resource_compute_region_disk_schema.TypeInt, - Computed: true, - Optional: true, - Description: `Size of the persistent disk, specified in GB. You can specify this -field when creating a persistent disk using the sourceImage or -sourceSnapshot parameter, or specify it alone to create an empty -persistent disk. - -If you specify this field along with sourceImage or sourceSnapshot, -the value of sizeGb must not be less than the size of the sourceImage -or the size of the snapshot.`, - }, - "snapshot": { - Type: resource_compute_region_disk_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The source snapshot used to create this disk. You can provide this as -a partial or full URL to the resource. For example, the following are -valid values: - -* 'https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot' -* 'projects/project/global/snapshots/snapshot' -* 'global/snapshots/snapshot' -* 'snapshot'`, - }, - "source_snapshot_encryption_key": { - Type: resource_compute_region_disk_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The customer-supplied encryption key of the source snapshot. 
Required -if the source snapshot is protected by a customer-supplied encryption -key.`, - MaxItems: 1, - Elem: &resource_compute_region_disk_schema.Resource{ - Schema: map[string]*resource_compute_region_disk_schema.Schema{ - "raw_key": { - Type: resource_compute_region_disk_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a 256-bit customer-supplied encryption key, encoded in -RFC 4648 base64 to either encrypt or decrypt this resource.`, - }, - "sha256": { - Type: resource_compute_region_disk_schema.TypeString, - Computed: true, - Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied -encryption key that protects this resource.`, - }, - }, - }, - }, - "type": { - Type: resource_compute_region_disk_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the disk type resource describing which disk type to use to -create the disk. Provide this when creating the disk.`, - Default: "pd-standard", - }, - "creation_timestamp": { - Type: resource_compute_region_disk_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "label_fingerprint": { - Type: resource_compute_region_disk_schema.TypeString, - Computed: true, - Description: `The fingerprint used for optimistic locking of this resource. Used -internally during updates.`, - }, - "last_attach_timestamp": { - Type: resource_compute_region_disk_schema.TypeString, - Computed: true, - Description: `Last attach timestamp in RFC3339 text format.`, - }, - "last_detach_timestamp": { - Type: resource_compute_region_disk_schema.TypeString, - Computed: true, - Description: `Last detach timestamp in RFC3339 text format.`, - }, - "source_snapshot_id": { - Type: resource_compute_region_disk_schema.TypeString, - Computed: true, - Description: `The unique ID of the snapshot used to create this disk. 
This value -identifies the exact snapshot that was used to create this persistent -disk. For example, if you created the persistent disk from a snapshot -that was later deleted and recreated under the same name, the source -snapshot ID would identify the exact version of the snapshot that was -used.`, - }, - "users": { - Type: resource_compute_region_disk_schema.TypeList, - Computed: true, - Description: `Links to the users of the disk (attached instances) in form: -project/zones/zone/instances/instance`, - Elem: &resource_compute_region_disk_schema.Schema{ - Type: resource_compute_region_disk_schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "project": { - Type: resource_compute_region_disk_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_region_disk_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRegionDiskCreate(d *resource_compute_region_disk_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelFingerprintProp, err := expandComputeRegionDiskLabelFingerprint(d.Get("label_fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(resource_compute_region_disk_reflect.ValueOf(labelFingerprintProp)) && (ok || !resource_compute_region_disk_reflect.DeepEqual(v, labelFingerprintProp)) { - obj["labelFingerprint"] = labelFingerprintProp - } - descriptionProp, err := expandComputeRegionDiskDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_region_disk_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_region_disk_reflect.DeepEqual(v, descriptionProp)) { - 
obj["description"] = descriptionProp - } - labelsProp, err := expandComputeRegionDiskLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_compute_region_disk_reflect.ValueOf(labelsProp)) && (ok || !resource_compute_region_disk_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - nameProp, err := expandComputeRegionDiskName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_disk_reflect.ValueOf(nameProp)) && (ok || !resource_compute_region_disk_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - sizeGbProp, err := expandComputeRegionDiskSize(d.Get("size"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("size"); !isEmptyValue(resource_compute_region_disk_reflect.ValueOf(sizeGbProp)) && (ok || !resource_compute_region_disk_reflect.DeepEqual(v, sizeGbProp)) { - obj["sizeGb"] = sizeGbProp - } - physicalBlockSizeBytesProp, err := expandComputeRegionDiskPhysicalBlockSizeBytes(d.Get("physical_block_size_bytes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("physical_block_size_bytes"); !isEmptyValue(resource_compute_region_disk_reflect.ValueOf(physicalBlockSizeBytesProp)) && (ok || !resource_compute_region_disk_reflect.DeepEqual(v, physicalBlockSizeBytesProp)) { - obj["physicalBlockSizeBytes"] = physicalBlockSizeBytesProp - } - replicaZonesProp, err := expandComputeRegionDiskReplicaZones(d.Get("replica_zones"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("replica_zones"); !isEmptyValue(resource_compute_region_disk_reflect.ValueOf(replicaZonesProp)) && (ok || !resource_compute_region_disk_reflect.DeepEqual(v, replicaZonesProp)) { - obj["replicaZones"] = replicaZonesProp - } - typeProp, err := expandComputeRegionDiskType(d.Get("type"), d, config) - if err != nil { - return err - } 
else if v, ok := d.GetOkExists("type"); !isEmptyValue(resource_compute_region_disk_reflect.ValueOf(typeProp)) && (ok || !resource_compute_region_disk_reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - regionProp, err := expandComputeRegionDiskRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_region_disk_reflect.ValueOf(regionProp)) && (ok || !resource_compute_region_disk_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - diskEncryptionKeyProp, err := expandComputeRegionDiskDiskEncryptionKey(d.Get("disk_encryption_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disk_encryption_key"); !isEmptyValue(resource_compute_region_disk_reflect.ValueOf(diskEncryptionKeyProp)) && (ok || !resource_compute_region_disk_reflect.DeepEqual(v, diskEncryptionKeyProp)) { - obj["diskEncryptionKey"] = diskEncryptionKeyProp - } - sourceSnapshotProp, err := expandComputeRegionDiskSnapshot(d.Get("snapshot"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("snapshot"); !isEmptyValue(resource_compute_region_disk_reflect.ValueOf(sourceSnapshotProp)) && (ok || !resource_compute_region_disk_reflect.DeepEqual(v, sourceSnapshotProp)) { - obj["sourceSnapshot"] = sourceSnapshotProp - } - sourceSnapshotEncryptionKeyProp, err := expandComputeRegionDiskSourceSnapshotEncryptionKey(d.Get("source_snapshot_encryption_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_snapshot_encryption_key"); !isEmptyValue(resource_compute_region_disk_reflect.ValueOf(sourceSnapshotEncryptionKeyProp)) && (ok || !resource_compute_region_disk_reflect.DeepEqual(v, sourceSnapshotEncryptionKeyProp)) { - obj["sourceSnapshotEncryptionKey"] = sourceSnapshotEncryptionKeyProp - } - - obj, err = resourceComputeRegionDiskEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := 
replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks") - if err != nil { - return err - } - - resource_compute_region_disk_log.Printf("[DEBUG] Creating new RegionDisk: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_disk_fmt.Errorf("Error fetching project for RegionDisk: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_disk_schema.TimeoutCreate)) - if err != nil { - return resource_compute_region_disk_fmt.Errorf("Error creating RegionDisk: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/disks/{{name}}") - if err != nil { - return resource_compute_region_disk_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating RegionDisk", userAgent, - d.Timeout(resource_compute_region_disk_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_region_disk_fmt.Errorf("Error waiting to create RegionDisk: %s", err) - } - - resource_compute_region_disk_log.Printf("[DEBUG] Finished creating RegionDisk %q: %#v", d.Id(), res) - - return resourceComputeRegionDiskRead(d, meta) -} - -func resourceComputeRegionDiskRead(d *resource_compute_region_disk_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_disk_fmt.Errorf("Error fetching project 
for RegionDisk: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_region_disk_fmt.Sprintf("ComputeRegionDisk %q", d.Id())) - } - - res, err = resourceComputeRegionDiskDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_region_disk_log.Printf("[DEBUG] Removing ComputeRegionDisk because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - - if err := d.Set("label_fingerprint", flattenComputeRegionDiskLabelFingerprint(res["labelFingerprint"], d, config)); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeRegionDiskCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("description", flattenComputeRegionDiskDescription(res["description"], d, config)); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("last_attach_timestamp", flattenComputeRegionDiskLastAttachTimestamp(res["lastAttachTimestamp"], d, config)); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("last_detach_timestamp", flattenComputeRegionDiskLastDetachTimestamp(res["lastDetachTimestamp"], d, config)); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("labels", flattenComputeRegionDiskLabels(res["labels"], d, config)); err != nil { - return 
resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("name", flattenComputeRegionDiskName(res["name"], d, config)); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("size", flattenComputeRegionDiskSize(res["sizeGb"], d, config)); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("users", flattenComputeRegionDiskUsers(res["users"], d, config)); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("physical_block_size_bytes", flattenComputeRegionDiskPhysicalBlockSizeBytes(res["physicalBlockSizeBytes"], d, config)); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("replica_zones", flattenComputeRegionDiskReplicaZones(res["replicaZones"], d, config)); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("type", flattenComputeRegionDiskType(res["type"], d, config)); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("region", flattenComputeRegionDiskRegion(res["region"], d, config)); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("disk_encryption_key", flattenComputeRegionDiskDiskEncryptionKey(res["diskEncryptionKey"], d, config)); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("snapshot", flattenComputeRegionDiskSnapshot(res["sourceSnapshot"], d, config)); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("source_snapshot_encryption_key", 
flattenComputeRegionDiskSourceSnapshotEncryptionKey(res["sourceSnapshotEncryptionKey"], d, config)); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("source_snapshot_id", flattenComputeRegionDiskSourceSnapshotId(res["sourceSnapshotId"], d, config)); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_region_disk_fmt.Errorf("Error reading RegionDisk: %s", err) - } - - return nil -} - -func resourceComputeRegionDiskUpdate(d *resource_compute_region_disk_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_disk_fmt.Errorf("Error fetching project for RegionDisk: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("label_fingerprint") || d.HasChange("labels") { - obj := make(map[string]interface{}) - - labelFingerprintProp, err := expandComputeRegionDiskLabelFingerprint(d.Get("label_fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(resource_compute_region_disk_reflect.ValueOf(v)) && (ok || !resource_compute_region_disk_reflect.DeepEqual(v, labelFingerprintProp)) { - obj["labelFingerprint"] = labelFingerprintProp - } - labelsProp, err := expandComputeRegionDiskLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_compute_region_disk_reflect.ValueOf(v)) && (ok || !resource_compute_region_disk_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{name}}/setLabels") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_disk_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_region_disk_fmt.Errorf("Error updating RegionDisk %q: %s", d.Id(), err) - } else { - resource_compute_region_disk_log.Printf("[DEBUG] Finished updating RegionDisk %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating RegionDisk", userAgent, - d.Timeout(resource_compute_region_disk_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("size") { - obj := make(map[string]interface{}) - - sizeGbProp, err := expandComputeRegionDiskSize(d.Get("size"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("size"); !isEmptyValue(resource_compute_region_disk_reflect.ValueOf(v)) && (ok || !resource_compute_region_disk_reflect.DeepEqual(v, sizeGbProp)) { - obj["sizeGb"] = sizeGbProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{name}}/resize") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_disk_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_region_disk_fmt.Errorf("Error updating RegionDisk %q: %s", d.Id(), err) - } else { - resource_compute_region_disk_log.Printf("[DEBUG] Finished updating RegionDisk %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating RegionDisk", userAgent, - d.Timeout(resource_compute_region_disk_schema.TimeoutUpdate)) - if err != nil { - 
return err - } - } - - d.Partial(false) - - return resourceComputeRegionDiskRead(d, meta) -} - -func resourceComputeRegionDiskDelete(d *resource_compute_region_disk_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_disk_fmt.Errorf("Error fetching project for RegionDisk: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - readRes, err := sendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_region_disk_fmt.Sprintf("ComputeDisk %q", d.Id())) - } - - if v, ok := readRes["users"].([]interface{}); ok { - type detachArgs struct{ project, zone, instance, deviceName string } - var detachCalls []detachArgs - - for _, instance := range convertStringArr(v) { - self := d.Get("self_link").(string) - instanceProject, instanceZone, instanceName, err := GetLocationalResourcePropertiesFromSelfLinkString(instance) - if err != nil { - return err - } - - i, err := config.NewComputeClient(userAgent).Instances.Get(instanceProject, instanceZone, instanceName).Do() - if err != nil { - if gerr, ok := err.(*resource_compute_region_disk_googleapi.Error); ok && gerr.Code == 404 { - resource_compute_region_disk_log.Printf("[WARN] instance %q not found, not bothering to detach disks", instance) - continue - } - return resource_compute_region_disk_fmt.Errorf("Error retrieving instance %s: %s", instance, err.Error()) - } - for _, disk := range i.Disks { - if compareSelfLinkOrResourceName("", disk.Source, self, nil) { - detachCalls = append(detachCalls, detachArgs{ - project: instanceProject, - zone: 
GetResourceNameFromSelfLink(i.Zone), - instance: i.Name, - deviceName: disk.DeviceName, - }) - } - } - } - - for _, call := range detachCalls { - op, err := config.NewComputeClient(userAgent).Instances.DetachDisk(call.project, call.zone, call.instance, call.deviceName).Do() - if err != nil { - return resource_compute_region_disk_fmt.Errorf("Error detaching disk %s from instance %s/%s/%s: %s", call.deviceName, call.project, - call.zone, call.instance, err.Error()) - } - err = computeOperationWaitTime(config, op, call.project, - resource_compute_region_disk_fmt.Sprintf("Detaching disk from %s/%s/%s", call.project, call.zone, call.instance), userAgent, d.Timeout(resource_compute_region_disk_schema.TimeoutDelete)) - if err != nil { - if opErr, ok := err.(ComputeOperationError); ok && len(opErr.Errors) == 1 && opErr.Errors[0].Code == "RESOURCE_NOT_FOUND" { - resource_compute_region_disk_log.Printf("[WARN] instance %q was deleted while awaiting detach", call.instance) - continue - } - return err - } - } - } - resource_compute_region_disk_log.Printf("[DEBUG] Deleting RegionDisk %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_disk_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionDisk") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting RegionDisk", userAgent, - d.Timeout(resource_compute_region_disk_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_region_disk_log.Printf("[DEBUG] Finished deleting RegionDisk %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionDiskImport(d *resource_compute_region_disk_schema.ResourceData, meta interface{}) ([]*resource_compute_region_disk_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - 
"projects/(?P[^/]+)/regions/(?P[^/]+)/disks/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/disks/{{name}}") - if err != nil { - return nil, resource_compute_region_disk_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_region_disk_schema.ResourceData{d}, nil -} - -func flattenComputeRegionDiskLabelFingerprint(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskCreationTimestamp(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskDescription(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskLastAttachTimestamp(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskLastDetachTimestamp(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskLabels(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskName(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskSize(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_disk_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func 
flattenComputeRegionDiskUsers(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeRegionDiskPhysicalBlockSizeBytes(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_disk_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionDiskReplicaZones(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeRegionDiskType(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenComputeRegionDiskRegion(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenComputeRegionDiskDiskEncryptionKey(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["raw_key"] = - flattenComputeRegionDiskDiskEncryptionKeyRawKey(original["rawKey"], d, config) - transformed["sha256"] = - flattenComputeRegionDiskDiskEncryptionKeySha256(original["sha256"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionDiskDiskEncryptionKeyRawKey(v interface{}, d 
*resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskDiskEncryptionKeySha256(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskSnapshot(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionDiskSourceSnapshotEncryptionKey(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["raw_key"] = - flattenComputeRegionDiskSourceSnapshotEncryptionKeyRawKey(original["rawKey"], d, config) - transformed["sha256"] = - flattenComputeRegionDiskSourceSnapshotEncryptionKeySha256(original["sha256"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionDiskSourceSnapshotEncryptionKeyRawKey(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskSourceSnapshotEncryptionKeySha256(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionDiskSourceSnapshotId(v interface{}, d *resource_compute_region_disk_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeRegionDiskLabelFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { 
- if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandComputeRegionDiskName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskSize(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskPhysicalBlockSizeBytes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskReplicaZones(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - return nil, resource_compute_region_disk_fmt.Errorf("Invalid value for replica_zones: nil") - } - f, err := parseGlobalFieldValue("zones", raw.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_region_disk_fmt.Errorf("Invalid value for replica_zones: %s", err) - } - req = append(req, f.RelativeLink()) - } - return req, nil -} - -func expandComputeRegionDiskType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("diskTypes", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_region_disk_fmt.Errorf("Invalid value for type: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionDiskRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_region_disk_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionDiskDiskEncryptionKey(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRawKey, err := expandComputeRegionDiskDiskEncryptionKeyRawKey(original["raw_key"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_disk_reflect.ValueOf(transformedRawKey); val.IsValid() && !isEmptyValue(val) { - transformed["rawKey"] = transformedRawKey - } - - transformedSha256, err := expandComputeRegionDiskDiskEncryptionKeySha256(original["sha256"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_disk_reflect.ValueOf(transformedSha256); val.IsValid() && !isEmptyValue(val) { - transformed["sha256"] = transformedSha256 - } - - return transformed, nil -} - -func expandComputeRegionDiskDiskEncryptionKeyRawKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskDiskEncryptionKeySha256(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskSnapshot(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("snapshots", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_region_disk_fmt.Errorf("Invalid value for snapshot: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionDiskSourceSnapshotEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRawKey, err := expandComputeRegionDiskSourceSnapshotEncryptionKeyRawKey(original["raw_key"], d, 
config) - if err != nil { - return nil, err - } else if val := resource_compute_region_disk_reflect.ValueOf(transformedRawKey); val.IsValid() && !isEmptyValue(val) { - transformed["rawKey"] = transformedRawKey - } - - transformedSha256, err := expandComputeRegionDiskSourceSnapshotEncryptionKeySha256(original["sha256"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_disk_reflect.ValueOf(transformedSha256); val.IsValid() && !isEmptyValue(val) { - transformed["sha256"] = transformedSha256 - } - - return transformed, nil -} - -func expandComputeRegionDiskSourceSnapshotEncryptionKeyRawKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionDiskSourceSnapshotEncryptionKeySha256(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeRegionDiskEncoder(d *resource_compute_region_disk_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - if v, ok := d.GetOk("type"); ok { - resource_compute_region_disk_log.Printf("[DEBUG] Loading disk type: %s", v.(string)) - diskType, err := readRegionDiskType(config, d, v.(string)) - if err != nil { - return nil, resource_compute_region_disk_fmt.Errorf( - "Error loading disk type '%s': %s", - v.(string), err) - } - - obj["type"] = diskType.RelativeLink() - } - - if v, ok := d.GetOk("image"); ok { - resource_compute_region_disk_log.Printf("[DEBUG] Resolving image name: %s", v.(string)) - imageUrl, err := resolveImage(config, project, v.(string), userAgent) - if err != nil { - return nil, resource_compute_region_disk_fmt.Errorf( - "Error resolving image name '%s': %s", - v.(string), err) - } - - 
obj["sourceImage"] = imageUrl - resource_compute_region_disk_log.Printf("[DEBUG] Image name resolved to: %s", imageUrl) - } - - return obj, nil -} - -func resourceComputeRegionDiskDecoder(d *resource_compute_region_disk_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if v, ok := res["diskEncryptionKey"]; ok { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformed["rawKey"] = d.Get("disk_encryption_key.0.raw_key") - transformed["sha256"] = original["sha256"] - - if kmsKeyName, ok := original["kmsKeyName"]; ok { - - transformed["kmsKeyName"] = resource_compute_region_disk_strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] - } - - if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { - transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount - } - - res["diskEncryptionKey"] = transformed - } - - if v, ok := res["sourceImageEncryptionKey"]; ok { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformed["rawKey"] = d.Get("source_image_encryption_key.0.raw_key") - transformed["sha256"] = original["sha256"] - - if kmsKeyName, ok := original["kmsKeyName"]; ok { - - transformed["kmsKeyName"] = resource_compute_region_disk_strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] - } - - if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { - transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount - } - - res["sourceImageEncryptionKey"] = transformed - } - - if v, ok := res["sourceSnapshotEncryptionKey"]; ok { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformed["rawKey"] = d.Get("source_snapshot_encryption_key.0.raw_key") - transformed["sha256"] = original["sha256"] - - if kmsKeyName, ok := original["kmsKeyName"]; ok { - - transformed["kmsKeyName"] = resource_compute_region_disk_strings.Split(kmsKeyName.(string), 
"/cryptoKeyVersions")[0] - } - - if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { - transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount - } - - res["sourceSnapshotEncryptionKey"] = transformed - } - - return res, nil -} - -func resourceComputeRegionDiskResourcePolicyAttachment() *resource_compute_region_disk_resource_policy_attachment_schema.Resource { - return &resource_compute_region_disk_resource_policy_attachment_schema.Resource{ - Create: resourceComputeRegionDiskResourcePolicyAttachmentCreate, - Read: resourceComputeRegionDiskResourcePolicyAttachmentRead, - Delete: resourceComputeRegionDiskResourcePolicyAttachmentDelete, - - Importer: &resource_compute_region_disk_resource_policy_attachment_schema.ResourceImporter{ - State: resourceComputeRegionDiskResourcePolicyAttachmentImport, - }, - - Timeouts: &resource_compute_region_disk_resource_policy_attachment_schema.ResourceTimeout{ - Create: resource_compute_region_disk_resource_policy_attachment_schema.DefaultTimeout(4 * resource_compute_region_disk_resource_policy_attachment_time.Minute), - Delete: resource_compute_region_disk_resource_policy_attachment_schema.DefaultTimeout(4 * resource_compute_region_disk_resource_policy_attachment_time.Minute), - }, - - Schema: map[string]*resource_compute_region_disk_resource_policy_attachment_schema.Schema{ - "disk": { - Type: resource_compute_region_disk_resource_policy_attachment_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the regional disk in which the resource policies are attached to.`, - }, - "name": { - Type: resource_compute_region_disk_resource_policy_attachment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource policy to be attached to the disk for scheduling snapshot -creation. 
Do not specify the self link.`, - }, - "region": { - Type: resource_compute_region_disk_resource_policy_attachment_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the region where the disk resides.`, - }, - "project": { - Type: resource_compute_region_disk_resource_policy_attachment_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRegionDiskResourcePolicyAttachmentCreate(d *resource_compute_region_disk_resource_policy_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputeRegionDiskResourcePolicyAttachmentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_disk_resource_policy_attachment_reflect.ValueOf(nameProp)) && (ok || !resource_compute_region_disk_resource_policy_attachment_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - obj, err = resourceComputeRegionDiskResourcePolicyAttachmentEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{disk}}/addResourcePolicies") - if err != nil { - return err - } - - resource_compute_region_disk_resource_policy_attachment_log.Printf("[DEBUG] Creating new RegionDiskResourcePolicyAttachment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_disk_resource_policy_attachment_fmt.Errorf("Error fetching project for RegionDiskResourcePolicyAttachment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == 
nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_disk_resource_policy_attachment_schema.TimeoutCreate)) - if err != nil { - return resource_compute_region_disk_resource_policy_attachment_fmt.Errorf("Error creating RegionDiskResourcePolicyAttachment: %s", err) - } - - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{disk}}/{{name}}") - if err != nil { - return resource_compute_region_disk_resource_policy_attachment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating RegionDiskResourcePolicyAttachment", userAgent, - d.Timeout(resource_compute_region_disk_resource_policy_attachment_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_region_disk_resource_policy_attachment_fmt.Errorf("Error waiting to create RegionDiskResourcePolicyAttachment: %s", err) - } - - resource_compute_region_disk_resource_policy_attachment_log.Printf("[DEBUG] Finished creating RegionDiskResourcePolicyAttachment %q: %#v", d.Id(), res) - - return resourceComputeRegionDiskResourcePolicyAttachmentRead(d, meta) -} - -func resourceComputeRegionDiskResourcePolicyAttachmentRead(d *resource_compute_region_disk_resource_policy_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{disk}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_disk_resource_policy_attachment_fmt.Errorf("Error fetching project for RegionDiskResourcePolicyAttachment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == 
nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_region_disk_resource_policy_attachment_fmt.Sprintf("ComputeRegionDiskResourcePolicyAttachment %q", d.Id())) - } - - res, err = flattenNestedComputeRegionDiskResourcePolicyAttachment(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_region_disk_resource_policy_attachment_log.Printf("[DEBUG] Removing ComputeRegionDiskResourcePolicyAttachment because it couldn't be matched.") - d.SetId("") - return nil - } - - res, err = resourceComputeRegionDiskResourcePolicyAttachmentDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_region_disk_resource_policy_attachment_log.Printf("[DEBUG] Removing ComputeRegionDiskResourcePolicyAttachment because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_compute_region_disk_resource_policy_attachment_fmt.Errorf("Error reading RegionDiskResourcePolicyAttachment: %s", err) - } - - if err := d.Set("name", flattenNestedComputeRegionDiskResourcePolicyAttachmentName(res["name"], d, config)); err != nil { - return resource_compute_region_disk_resource_policy_attachment_fmt.Errorf("Error reading RegionDiskResourcePolicyAttachment: %s", err) - } - - return nil -} - -func resourceComputeRegionDiskResourcePolicyAttachmentDelete(d *resource_compute_region_disk_resource_policy_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_disk_resource_policy_attachment_fmt.Errorf("Error fetching project for RegionDiskResourcePolicyAttachment: %s", err) - } - billingProject = 
project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/disks/{{disk}}/removeResourcePolicies") - if err != nil { - return err - } - - var obj map[string]interface{} - obj = make(map[string]interface{}) - - region, err := getRegion(d, config) - if err != nil { - return err - } - if region == "" { - return resource_compute_region_disk_resource_policy_attachment_fmt.Errorf("region must be non-empty - set in resource or at provider-level") - } - - name, err := expandNestedComputeDiskResourcePolicyAttachmentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_disk_resource_policy_attachment_reflect.ValueOf(name)) && (ok || !resource_compute_region_disk_resource_policy_attachment_reflect.DeepEqual(v, name)) { - obj["resourcePolicies"] = []interface{}{resource_compute_region_disk_resource_policy_attachment_fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, name)} - } - resource_compute_region_disk_resource_policy_attachment_log.Printf("[DEBUG] Deleting RegionDiskResourcePolicyAttachment %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_disk_resource_policy_attachment_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionDiskResourcePolicyAttachment") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting RegionDiskResourcePolicyAttachment", userAgent, - d.Timeout(resource_compute_region_disk_resource_policy_attachment_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_region_disk_resource_policy_attachment_log.Printf("[DEBUG] Finished deleting RegionDiskResourcePolicyAttachment %q: %#v", d.Id(), res) - return nil -} - -func 
resourceComputeRegionDiskResourcePolicyAttachmentImport(d *resource_compute_region_disk_resource_policy_attachment_schema.ResourceData, meta interface{}) ([]*resource_compute_region_disk_resource_policy_attachment_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/disks/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{disk}}/{{name}}") - if err != nil { - return nil, resource_compute_region_disk_resource_policy_attachment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_region_disk_resource_policy_attachment_schema.ResourceData{d}, nil -} - -func flattenNestedComputeRegionDiskResourcePolicyAttachmentName(v interface{}, d *resource_compute_region_disk_resource_policy_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedComputeRegionDiskResourcePolicyAttachmentName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeRegionDiskResourcePolicyAttachmentEncoder(d *resource_compute_region_disk_resource_policy_attachment_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - region, err := getRegion(d, config) - if err != nil { - return nil, err - } - if region == "" { - return nil, resource_compute_region_disk_resource_policy_attachment_fmt.Errorf("region must be non-empty - set in resource or at provider-level") - } - - obj["resourcePolicies"] = []interface{}{resource_compute_region_disk_resource_policy_attachment_fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, 
obj["name"])} - delete(obj, "name") - return obj, nil -} - -func flattenNestedComputeRegionDiskResourcePolicyAttachment(d *resource_compute_region_disk_resource_policy_attachment_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["resourcePolicies"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_compute_region_disk_resource_policy_attachment_fmt.Errorf("expected list or map for value resourcePolicies. Actual value: %v", v) - } - - _, item, err := resourceComputeRegionDiskResourcePolicyAttachmentFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeRegionDiskResourcePolicyAttachmentFindNestedObjectInList(d *resource_compute_region_disk_resource_policy_attachment_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName, err := expandNestedComputeRegionDiskResourcePolicyAttachmentName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputeRegionDiskResourcePolicyAttachmentName(expectedName, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - - item := map[string]interface{}{ - "name": itemRaw, - } - - item, err := resourceComputeRegionDiskResourcePolicyAttachmentDecoder(d, meta, item) - if err != nil { - return -1, nil, err - } - - itemName := flattenNestedComputeRegionDiskResourcePolicyAttachmentName(item["name"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_region_disk_resource_policy_attachment_reflect.ValueOf(itemName)) && isEmptyValue(resource_compute_region_disk_resource_policy_attachment_reflect.ValueOf(expectedFlattenedName))) && 
!resource_compute_region_disk_resource_policy_attachment_reflect.DeepEqual(itemName, expectedFlattenedName) { - resource_compute_region_disk_resource_policy_attachment_log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - resource_compute_region_disk_resource_policy_attachment_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func resourceComputeRegionDiskResourcePolicyAttachmentDecoder(d *resource_compute_region_disk_resource_policy_attachment_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - res["name"] = GetResourceNameFromSelfLink(res["name"].(string)) - return res, nil -} - -func resourceComputeRegionHealthCheck() *resource_compute_region_health_check_schema.Resource { - return &resource_compute_region_health_check_schema.Resource{ - Create: resourceComputeRegionHealthCheckCreate, - Read: resourceComputeRegionHealthCheckRead, - Update: resourceComputeRegionHealthCheckUpdate, - Delete: resourceComputeRegionHealthCheckDelete, - - Importer: &resource_compute_region_health_check_schema.ResourceImporter{ - State: resourceComputeRegionHealthCheckImport, - }, - - Timeouts: &resource_compute_region_health_check_schema.ResourceTimeout{ - Create: resource_compute_region_health_check_schema.DefaultTimeout(4 * resource_compute_region_health_check_time.Minute), - Update: resource_compute_region_health_check_schema.DefaultTimeout(4 * resource_compute_region_health_check_time.Minute), - Delete: resource_compute_region_health_check_schema.DefaultTimeout(4 * resource_compute_region_health_check_time.Minute), - }, - - CustomizeDiff: healthCheckCustomizeDiff, - - Schema: map[string]*resource_compute_region_health_check_schema.Schema{ - "name": { - Type: resource_compute_region_health_check_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. 
Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the -last character, which cannot be a dash.`, - }, - "check_interval_sec": { - Type: resource_compute_region_health_check_schema.TypeInt, - Optional: true, - Description: `How often (in seconds) to send a health check. The default value is 5 -seconds.`, - Default: 5, - }, - "description": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "grpc_health_check": { - Type: resource_compute_region_health_check_schema.TypeList, - Optional: true, - DiffSuppressFunc: portDiffSuppress, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_compute_region_health_check_schema.Resource{ - Schema: map[string]*resource_compute_region_health_check_schema.Schema{ - "grpc_service_name": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `The gRPC service name for the health check. -The value of grpcServiceName has the following meanings by convention: - -* Empty serviceName means the overall status of all services at the backend. -* Non-empty serviceName means the health of that gRPC service, as defined by the owner of the service. 
- -The grpcServiceName can only be ASCII.`, - AtLeastOneOf: []string{"grpc_health_check.0.port", "grpc_health_check.0.port_name", "grpc_health_check.0.port_specification", "grpc_health_check.0.grpc_service_name"}, - }, - "port": { - Type: resource_compute_region_health_check_schema.TypeInt, - Optional: true, - Description: `The port number for the health check request. -Must be specified if portName and portSpecification are not set -or if port_specification is USE_FIXED_PORT. Valid values are 1 through 65535.`, - AtLeastOneOf: []string{"grpc_health_check.0.port", "grpc_health_check.0.port_name", "grpc_health_check.0.port_specification", "grpc_health_check.0.grpc_service_name"}, - }, - "port_name": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `Port name as defined in InstanceGroup#NamedPort#name. If both port and -port_name are defined, port takes precedence.`, - AtLeastOneOf: []string{"grpc_health_check.0.port", "grpc_health_check.0.port_name", "grpc_health_check.0.port_specification", "grpc_health_check.0.grpc_service_name"}, - }, - "port_specification": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_health_check_validation.StringInSlice([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}, false), - Description: `Specifies how port is selected for health checking, can be one of the -following values: - - * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. - - * 'USE_NAMED_PORT': The 'portName' is used for health checking. - - * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each - network endpoint is used for health checking. For other backends, the - port or named port specified in the Backend Service is used for health - checking. - -If not specified, gRPC health check follows behavior specified in 'port' and -'portName' fields. 
Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"]`, - AtLeastOneOf: []string{"grpc_health_check.0.port", "grpc_health_check.0.port_name", "grpc_health_check.0.port_specification", "grpc_health_check.0.grpc_service_name"}, - }, - }, - }, - ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check", "grpc_health_check"}, - }, - "healthy_threshold": { - Type: resource_compute_region_health_check_schema.TypeInt, - Optional: true, - Description: `A so-far unhealthy instance will be marked healthy after this many -consecutive successes. The default value is 2.`, - Default: 2, - }, - "http2_health_check": { - Type: resource_compute_region_health_check_schema.TypeList, - Optional: true, - DiffSuppressFunc: portDiffSuppress, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_compute_region_health_check_schema.Resource{ - Schema: map[string]*resource_compute_region_health_check_schema.Schema{ - "host": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `The value of the host header in the HTTP2 health check request. -If left empty (default value), the public IP on behalf of which this health -check is performed will be used.`, - AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, - }, - "port": { - Type: resource_compute_region_health_check_schema.TypeInt, - Optional: true, - Description: `The TCP port number for the HTTP2 health check request. 
-The default value is 443.`, - AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, - }, - "port_name": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `Port name as defined in InstanceGroup#NamedPort#name. If both port and -port_name are defined, port takes precedence.`, - AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, - }, - "port_specification": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_health_check_validation.StringInSlice([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}, false), - Description: `Specifies how port is selected for health checking, can be one of the -following values: - - * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. - - * 'USE_NAMED_PORT': The 'portName' is used for health checking. - - * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each - network endpoint is used for health checking. For other backends, the - port or named port specified in the Backend Service is used for health - checking. - -If not specified, HTTP2 health check follows behavior specified in 'port' and -'portName' fields. 
Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"]`, - AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, - }, - "proxy_header": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_health_check_validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), - Description: `Specifies the type of proxy header to append before sending data to the -backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, - Default: "NONE", - AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, - }, - "request_path": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `The request path of the HTTP2 health check request. -The default value is /.`, - Default: "/", - AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, - }, - "response": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `The bytes to match against the beginning of the response data. If left empty -(the default value), any response will indicate health. 
The response data -can only be ASCII.`, - AtLeastOneOf: []string{"http2_health_check.0.host", "http2_health_check.0.request_path", "http2_health_check.0.response", "http2_health_check.0.port", "http2_health_check.0.port_name", "http2_health_check.0.proxy_header", "http2_health_check.0.port_specification"}, - }, - }, - }, - ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check", "grpc_health_check"}, - }, - "http_health_check": { - Type: resource_compute_region_health_check_schema.TypeList, - Optional: true, - DiffSuppressFunc: portDiffSuppress, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_compute_region_health_check_schema.Resource{ - Schema: map[string]*resource_compute_region_health_check_schema.Schema{ - "host": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `The value of the host header in the HTTP health check request. -If left empty (default value), the public IP on behalf of which this health -check is performed will be used.`, - AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, - }, - "port": { - Type: resource_compute_region_health_check_schema.TypeInt, - Optional: true, - Description: `The TCP port number for the HTTP health check request. -The default value is 80.`, - AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, - }, - "port_name": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `Port name as defined in InstanceGroup#NamedPort#name. 
If both port and -port_name are defined, port takes precedence.`, - AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, - }, - "port_specification": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_health_check_validation.StringInSlice([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}, false), - Description: `Specifies how port is selected for health checking, can be one of the -following values: - - * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. - - * 'USE_NAMED_PORT': The 'portName' is used for health checking. - - * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each - network endpoint is used for health checking. For other backends, the - port or named port specified in the Backend Service is used for health - checking. - -If not specified, HTTP health check follows behavior specified in 'port' and -'portName' fields. Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"]`, - AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, - }, - "proxy_header": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_health_check_validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), - Description: `Specifies the type of proxy header to append before sending data to the -backend. 
Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, - Default: "NONE", - AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, - }, - "request_path": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `The request path of the HTTP health check request. -The default value is /.`, - Default: "/", - AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, - }, - "response": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `The bytes to match against the beginning of the response data. If left empty -(the default value), any response will indicate health. 
The response data -can only be ASCII.`, - AtLeastOneOf: []string{"http_health_check.0.host", "http_health_check.0.request_path", "http_health_check.0.response", "http_health_check.0.port", "http_health_check.0.port_name", "http_health_check.0.proxy_header", "http_health_check.0.port_specification"}, - }, - }, - }, - ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check", "grpc_health_check"}, - }, - "https_health_check": { - Type: resource_compute_region_health_check_schema.TypeList, - Optional: true, - DiffSuppressFunc: portDiffSuppress, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_compute_region_health_check_schema.Resource{ - Schema: map[string]*resource_compute_region_health_check_schema.Schema{ - "host": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `The value of the host header in the HTTPS health check request. -If left empty (default value), the public IP on behalf of which this health -check is performed will be used.`, - AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, - }, - "port": { - Type: resource_compute_region_health_check_schema.TypeInt, - Optional: true, - Description: `The TCP port number for the HTTPS health check request. 
-The default value is 443.`, - AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, - }, - "port_name": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `Port name as defined in InstanceGroup#NamedPort#name. If both port and -port_name are defined, port takes precedence.`, - AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, - }, - "port_specification": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_health_check_validation.StringInSlice([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}, false), - Description: `Specifies how port is selected for health checking, can be one of the -following values: - - * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. - - * 'USE_NAMED_PORT': The 'portName' is used for health checking. - - * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each - network endpoint is used for health checking. For other backends, the - port or named port specified in the Backend Service is used for health - checking. - -If not specified, HTTPS health check follows behavior specified in 'port' and -'portName' fields. 
Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"]`, - AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, - }, - "proxy_header": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_health_check_validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), - Description: `Specifies the type of proxy header to append before sending data to the -backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, - Default: "NONE", - AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, - }, - "request_path": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `The request path of the HTTPS health check request. -The default value is /.`, - Default: "/", - AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, - }, - "response": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `The bytes to match against the beginning of the response data. If left empty -(the default value), any response will indicate health. 
The response data -can only be ASCII.`, - AtLeastOneOf: []string{"https_health_check.0.host", "https_health_check.0.request_path", "https_health_check.0.response", "https_health_check.0.port", "https_health_check.0.port_name", "https_health_check.0.proxy_header", "https_health_check.0.port_specification"}, - }, - }, - }, - ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check", "grpc_health_check"}, - }, - "log_config": { - Type: resource_compute_region_health_check_schema.TypeList, - Computed: true, - Optional: true, - Description: `Configure logging on this health check.`, - MaxItems: 1, - Elem: &resource_compute_region_health_check_schema.Resource{ - Schema: map[string]*resource_compute_region_health_check_schema.Schema{ - "enable": { - Type: resource_compute_region_health_check_schema.TypeBool, - Optional: true, - Description: `Indicates whether or not to export logs. This is false by default, -which means no health check logging will be done.`, - Default: false, - }, - }, - }, - }, - "region": { - Type: resource_compute_region_health_check_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Region in which the created health check should reside. -If it is not provided, the provider region is used.`, - }, - "ssl_health_check": { - Type: resource_compute_region_health_check_schema.TypeList, - Optional: true, - DiffSuppressFunc: portDiffSuppress, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_compute_region_health_check_schema.Resource{ - Schema: map[string]*resource_compute_region_health_check_schema.Schema{ - "port": { - Type: resource_compute_region_health_check_schema.TypeInt, - Optional: true, - Description: `The TCP port number for the SSL health check request. 
-The default value is 443.`, - AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, - }, - "port_name": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `Port name as defined in InstanceGroup#NamedPort#name. If both port and -port_name are defined, port takes precedence.`, - AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, - }, - "port_specification": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_health_check_validation.StringInSlice([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}, false), - Description: `Specifies how port is selected for health checking, can be one of the -following values: - - * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. - - * 'USE_NAMED_PORT': The 'portName' is used for health checking. - - * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each - network endpoint is used for health checking. For other backends, the - port or named port specified in the Backend Service is used for health - checking. - -If not specified, SSL health check follows behavior specified in 'port' and -'portName' fields. 
Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"]`, - AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, - }, - "proxy_header": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_health_check_validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), - Description: `Specifies the type of proxy header to append before sending data to the -backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, - Default: "NONE", - AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, - }, - "request": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `The application data to send once the SSL connection has been -established (default value is empty). If both request and response are -empty, the connection establishment alone will indicate health. The request -data can only be ASCII.`, - AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, - }, - "response": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `The bytes to match against the beginning of the response data. If left empty -(the default value), any response will indicate health. 
The response data -can only be ASCII.`, - AtLeastOneOf: []string{"ssl_health_check.0.request", "ssl_health_check.0.response", "ssl_health_check.0.port", "ssl_health_check.0.port_name", "ssl_health_check.0.proxy_header", "ssl_health_check.0.port_specification"}, - }, - }, - }, - ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check", "grpc_health_check"}, - }, - "tcp_health_check": { - Type: resource_compute_region_health_check_schema.TypeList, - Optional: true, - DiffSuppressFunc: portDiffSuppress, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_compute_region_health_check_schema.Resource{ - Schema: map[string]*resource_compute_region_health_check_schema.Schema{ - "port": { - Type: resource_compute_region_health_check_schema.TypeInt, - Optional: true, - Description: `The TCP port number for the TCP health check request. -The default value is 80.`, - AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, - }, - "port_name": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `Port name as defined in InstanceGroup#NamedPort#name. 
If both port and -port_name are defined, port takes precedence.`, - AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, - }, - "port_specification": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_health_check_validation.StringInSlice([]string{"USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT", ""}, false), - Description: `Specifies how port is selected for health checking, can be one of the -following values: - - * 'USE_FIXED_PORT': The port number in 'port' is used for health checking. - - * 'USE_NAMED_PORT': The 'portName' is used for health checking. - - * 'USE_SERVING_PORT': For NetworkEndpointGroup, the port specified for each - network endpoint is used for health checking. For other backends, the - port or named port specified in the Backend Service is used for health - checking. - -If not specified, TCP health check follows behavior specified in 'port' and -'portName' fields. Possible values: ["USE_FIXED_PORT", "USE_NAMED_PORT", "USE_SERVING_PORT"]`, - AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, - }, - "proxy_header": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_health_check_validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), - Description: `Specifies the type of proxy header to append before sending data to the -backend. 
Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, - Default: "NONE", - AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, - }, - "request": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `The application data to send once the TCP connection has been -established (default value is empty). If both request and response are -empty, the connection establishment alone will indicate health. The request -data can only be ASCII.`, - AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, - }, - "response": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Description: `The bytes to match against the beginning of the response data. If left empty -(the default value), any response will indicate health. The response data -can only be ASCII.`, - AtLeastOneOf: []string{"tcp_health_check.0.request", "tcp_health_check.0.response", "tcp_health_check.0.port", "tcp_health_check.0.port_name", "tcp_health_check.0.proxy_header", "tcp_health_check.0.port_specification"}, - }, - }, - }, - ExactlyOneOf: []string{"http_health_check", "https_health_check", "http2_health_check", "tcp_health_check", "ssl_health_check", "grpc_health_check"}, - }, - "timeout_sec": { - Type: resource_compute_region_health_check_schema.TypeInt, - Optional: true, - Description: `How long (in seconds) to wait before claiming failure. -The default value is 5 seconds. 
It is invalid for timeoutSec to have -greater value than checkIntervalSec.`, - Default: 5, - }, - "unhealthy_threshold": { - Type: resource_compute_region_health_check_schema.TypeInt, - Optional: true, - Description: `A so-far healthy instance will be marked unhealthy after this many -consecutive failures. The default value is 2.`, - Default: 2, - }, - "creation_timestamp": { - Type: resource_compute_region_health_check_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "type": { - Type: resource_compute_region_health_check_schema.TypeString, - Computed: true, - Description: `The type of the health check. One of HTTP, HTTP2, HTTPS, TCP, or SSL.`, - }, - "project": { - Type: resource_compute_region_health_check_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_region_health_check_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRegionHealthCheckCreate(d *resource_compute_region_health_check_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - checkIntervalSecProp, err := expandComputeRegionHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("check_interval_sec"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(checkIntervalSecProp)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, checkIntervalSecProp)) { - obj["checkIntervalSec"] = checkIntervalSecProp - } - descriptionProp, err := expandComputeRegionHealthCheckDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); ok || !resource_compute_region_health_check_reflect.DeepEqual(v, 
descriptionProp) { - obj["description"] = descriptionProp - } - healthyThresholdProp, err := expandComputeRegionHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("healthy_threshold"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(healthyThresholdProp)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, healthyThresholdProp)) { - obj["healthyThreshold"] = healthyThresholdProp - } - nameProp, err := expandComputeRegionHealthCheckName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(nameProp)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - unhealthyThresholdProp, err := expandComputeRegionHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("unhealthy_threshold"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(unhealthyThresholdProp)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, unhealthyThresholdProp)) { - obj["unhealthyThreshold"] = unhealthyThresholdProp - } - timeoutSecProp, err := expandComputeRegionHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(timeoutSecProp)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, timeoutSecProp)) { - obj["timeoutSec"] = timeoutSecProp - } - httpHealthCheckProp, err := expandComputeRegionHealthCheckHttpHealthCheck(d.Get("http_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("http_health_check"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(httpHealthCheckProp)) && (ok 
|| !resource_compute_region_health_check_reflect.DeepEqual(v, httpHealthCheckProp)) { - obj["httpHealthCheck"] = httpHealthCheckProp - } - httpsHealthCheckProp, err := expandComputeRegionHealthCheckHttpsHealthCheck(d.Get("https_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("https_health_check"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(httpsHealthCheckProp)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, httpsHealthCheckProp)) { - obj["httpsHealthCheck"] = httpsHealthCheckProp - } - tcpHealthCheckProp, err := expandComputeRegionHealthCheckTcpHealthCheck(d.Get("tcp_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tcp_health_check"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(tcpHealthCheckProp)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, tcpHealthCheckProp)) { - obj["tcpHealthCheck"] = tcpHealthCheckProp - } - sslHealthCheckProp, err := expandComputeRegionHealthCheckSslHealthCheck(d.Get("ssl_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_health_check"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(sslHealthCheckProp)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, sslHealthCheckProp)) { - obj["sslHealthCheck"] = sslHealthCheckProp - } - http2HealthCheckProp, err := expandComputeRegionHealthCheckHttp2HealthCheck(d.Get("http2_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("http2_health_check"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(http2HealthCheckProp)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, http2HealthCheckProp)) { - obj["http2HealthCheck"] = http2HealthCheckProp - } - grpcHealthCheckProp, err := expandComputeRegionHealthCheckGrpcHealthCheck(d.Get("grpc_health_check"), d, config) - 
if err != nil { - return err - } else if v, ok := d.GetOkExists("grpc_health_check"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(grpcHealthCheckProp)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, grpcHealthCheckProp)) { - obj["grpcHealthCheck"] = grpcHealthCheckProp - } - logConfigProp, err := expandComputeRegionHealthCheckLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(logConfigProp)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, logConfigProp)) { - obj["logConfig"] = logConfigProp - } - regionProp, err := expandComputeRegionHealthCheckRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(regionProp)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - obj, err = resourceComputeRegionHealthCheckEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/healthChecks") - if err != nil { - return err - } - - resource_compute_region_health_check_log.Printf("[DEBUG] Creating new RegionHealthCheck: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error fetching project for RegionHealthCheck: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_health_check_schema.TimeoutCreate)) - if err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error creating 
RegionHealthCheck: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/healthChecks/{{name}}") - if err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating RegionHealthCheck", userAgent, - d.Timeout(resource_compute_region_health_check_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_region_health_check_fmt.Errorf("Error waiting to create RegionHealthCheck: %s", err) - } - - resource_compute_region_health_check_log.Printf("[DEBUG] Finished creating RegionHealthCheck %q: %#v", d.Id(), res) - - return resourceComputeRegionHealthCheckRead(d, meta) -} - -func resourceComputeRegionHealthCheckRead(d *resource_compute_region_health_check_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/healthChecks/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error fetching project for RegionHealthCheck: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_region_health_check_fmt.Sprintf("ComputeRegionHealthCheck %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - - if err := d.Set("check_interval_sec", 
flattenComputeRegionHealthCheckCheckIntervalSec(res["checkIntervalSec"], d, config)); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeRegionHealthCheckCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - if err := d.Set("description", flattenComputeRegionHealthCheckDescription(res["description"], d, config)); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - if err := d.Set("healthy_threshold", flattenComputeRegionHealthCheckHealthyThreshold(res["healthyThreshold"], d, config)); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - if err := d.Set("name", flattenComputeRegionHealthCheckName(res["name"], d, config)); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - if err := d.Set("unhealthy_threshold", flattenComputeRegionHealthCheckUnhealthyThreshold(res["unhealthyThreshold"], d, config)); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - if err := d.Set("timeout_sec", flattenComputeRegionHealthCheckTimeoutSec(res["timeoutSec"], d, config)); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - if err := d.Set("type", flattenComputeRegionHealthCheckType(res["type"], d, config)); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - if err := d.Set("http_health_check", flattenComputeRegionHealthCheckHttpHealthCheck(res["httpHealthCheck"], d, config)); err != nil { - return 
resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - if err := d.Set("https_health_check", flattenComputeRegionHealthCheckHttpsHealthCheck(res["httpsHealthCheck"], d, config)); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - if err := d.Set("tcp_health_check", flattenComputeRegionHealthCheckTcpHealthCheck(res["tcpHealthCheck"], d, config)); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - if err := d.Set("ssl_health_check", flattenComputeRegionHealthCheckSslHealthCheck(res["sslHealthCheck"], d, config)); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - if err := d.Set("http2_health_check", flattenComputeRegionHealthCheckHttp2HealthCheck(res["http2HealthCheck"], d, config)); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - if err := d.Set("grpc_health_check", flattenComputeRegionHealthCheckGrpcHealthCheck(res["grpcHealthCheck"], d, config)); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - if err := d.Set("log_config", flattenComputeRegionHealthCheckLogConfig(res["logConfig"], d, config)); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - if err := d.Set("region", flattenComputeRegionHealthCheckRegion(res["region"], d, config)); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error reading RegionHealthCheck: %s", err) - } - - return nil -} - -func resourceComputeRegionHealthCheckUpdate(d 
*resource_compute_region_health_check_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error fetching project for RegionHealthCheck: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - checkIntervalSecProp, err := expandComputeRegionHealthCheckCheckIntervalSec(d.Get("check_interval_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("check_interval_sec"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, checkIntervalSecProp)) { - obj["checkIntervalSec"] = checkIntervalSecProp - } - descriptionProp, err := expandComputeRegionHealthCheckDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); ok || !resource_compute_region_health_check_reflect.DeepEqual(v, descriptionProp) { - obj["description"] = descriptionProp - } - healthyThresholdProp, err := expandComputeRegionHealthCheckHealthyThreshold(d.Get("healthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("healthy_threshold"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, healthyThresholdProp)) { - obj["healthyThreshold"] = healthyThresholdProp - } - nameProp, err := expandComputeRegionHealthCheckName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - 
unhealthyThresholdProp, err := expandComputeRegionHealthCheckUnhealthyThreshold(d.Get("unhealthy_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("unhealthy_threshold"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, unhealthyThresholdProp)) { - obj["unhealthyThreshold"] = unhealthyThresholdProp - } - timeoutSecProp, err := expandComputeRegionHealthCheckTimeoutSec(d.Get("timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout_sec"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, timeoutSecProp)) { - obj["timeoutSec"] = timeoutSecProp - } - httpHealthCheckProp, err := expandComputeRegionHealthCheckHttpHealthCheck(d.Get("http_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("http_health_check"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, httpHealthCheckProp)) { - obj["httpHealthCheck"] = httpHealthCheckProp - } - httpsHealthCheckProp, err := expandComputeRegionHealthCheckHttpsHealthCheck(d.Get("https_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("https_health_check"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, httpsHealthCheckProp)) { - obj["httpsHealthCheck"] = httpsHealthCheckProp - } - tcpHealthCheckProp, err := expandComputeRegionHealthCheckTcpHealthCheck(d.Get("tcp_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tcp_health_check"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, 
tcpHealthCheckProp)) { - obj["tcpHealthCheck"] = tcpHealthCheckProp - } - sslHealthCheckProp, err := expandComputeRegionHealthCheckSslHealthCheck(d.Get("ssl_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_health_check"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, sslHealthCheckProp)) { - obj["sslHealthCheck"] = sslHealthCheckProp - } - http2HealthCheckProp, err := expandComputeRegionHealthCheckHttp2HealthCheck(d.Get("http2_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("http2_health_check"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, http2HealthCheckProp)) { - obj["http2HealthCheck"] = http2HealthCheckProp - } - grpcHealthCheckProp, err := expandComputeRegionHealthCheckGrpcHealthCheck(d.Get("grpc_health_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("grpc_health_check"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, grpcHealthCheckProp)) { - obj["grpcHealthCheck"] = grpcHealthCheckProp - } - logConfigProp, err := expandComputeRegionHealthCheckLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(v)) && (ok || !resource_compute_region_health_check_reflect.DeepEqual(v, logConfigProp)) { - obj["logConfig"] = logConfigProp - } - regionProp, err := expandComputeRegionHealthCheckRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_region_health_check_reflect.ValueOf(v)) && (ok || 
!resource_compute_region_health_check_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - obj, err = resourceComputeRegionHealthCheckEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/healthChecks/{{name}}") - if err != nil { - return err - } - - resource_compute_region_health_check_log.Printf("[DEBUG] Updating RegionHealthCheck %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_health_check_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error updating RegionHealthCheck %q: %s", d.Id(), err) - } else { - resource_compute_region_health_check_log.Printf("[DEBUG] Finished updating RegionHealthCheck %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating RegionHealthCheck", userAgent, - d.Timeout(resource_compute_region_health_check_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeRegionHealthCheckRead(d, meta) -} - -func resourceComputeRegionHealthCheckDelete(d *resource_compute_region_health_check_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_health_check_fmt.Errorf("Error fetching project for RegionHealthCheck: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/healthChecks/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - 
resource_compute_region_health_check_log.Printf("[DEBUG] Deleting RegionHealthCheck %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_health_check_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionHealthCheck") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting RegionHealthCheck", userAgent, - d.Timeout(resource_compute_region_health_check_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_region_health_check_log.Printf("[DEBUG] Finished deleting RegionHealthCheck %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionHealthCheckImport(d *resource_compute_region_health_check_schema.ResourceData, meta interface{}) ([]*resource_compute_region_health_check_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/healthChecks/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/healthChecks/{{name}}") - if err != nil { - return nil, resource_compute_region_health_check_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_region_health_check_schema.ResourceData{d}, nil -} - -func flattenComputeRegionHealthCheckCheckIntervalSec(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func 
flattenComputeRegionHealthCheckCreationTimestamp(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckDescription(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHealthyThreshold(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionHealthCheckName(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckUnhealthyThreshold(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionHealthCheckTimeoutSec(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionHealthCheckType(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeRegionHealthCheckHttpHealthCheck(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host"] = - flattenComputeRegionHealthCheckHttpHealthCheckHost(original["host"], d, config) - transformed["request_path"] = - flattenComputeRegionHealthCheckHttpHealthCheckRequestPath(original["requestPath"], d, config) - transformed["response"] = - flattenComputeRegionHealthCheckHttpHealthCheckResponse(original["response"], d, config) - transformed["port"] = - flattenComputeRegionHealthCheckHttpHealthCheckPort(original["port"], d, config) - transformed["port_name"] = - flattenComputeRegionHealthCheckHttpHealthCheckPortName(original["portName"], d, config) - transformed["proxy_header"] = - flattenComputeRegionHealthCheckHttpHealthCheckProxyHeader(original["proxyHeader"], d, config) - transformed["port_specification"] = - flattenComputeRegionHealthCheckHttpHealthCheckPortSpecification(original["portSpecification"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionHealthCheckHttpHealthCheckHost(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttpHealthCheckRequestPath(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttpHealthCheckResponse(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttpHealthCheckPort(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := 
resource_compute_region_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionHealthCheckHttpHealthCheckPortName(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttpHealthCheckProxyHeader(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttpHealthCheckPortSpecification(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttpsHealthCheck(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host"] = - flattenComputeRegionHealthCheckHttpsHealthCheckHost(original["host"], d, config) - transformed["request_path"] = - flattenComputeRegionHealthCheckHttpsHealthCheckRequestPath(original["requestPath"], d, config) - transformed["response"] = - flattenComputeRegionHealthCheckHttpsHealthCheckResponse(original["response"], d, config) - transformed["port"] = - flattenComputeRegionHealthCheckHttpsHealthCheckPort(original["port"], d, config) - transformed["port_name"] = - flattenComputeRegionHealthCheckHttpsHealthCheckPortName(original["portName"], d, config) - transformed["proxy_header"] = - flattenComputeRegionHealthCheckHttpsHealthCheckProxyHeader(original["proxyHeader"], d, config) - transformed["port_specification"] = - flattenComputeRegionHealthCheckHttpsHealthCheckPortSpecification(original["portSpecification"], d, config) - return []interface{}{transformed} -} - 
-func flattenComputeRegionHealthCheckHttpsHealthCheckHost(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttpsHealthCheckRequestPath(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttpsHealthCheckResponse(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttpsHealthCheckPort(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionHealthCheckHttpsHealthCheckPortName(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttpsHealthCheckProxyHeader(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttpsHealthCheckPortSpecification(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckTcpHealthCheck(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["request"] = - flattenComputeRegionHealthCheckTcpHealthCheckRequest(original["request"], d, config) - 
transformed["response"] = - flattenComputeRegionHealthCheckTcpHealthCheckResponse(original["response"], d, config) - transformed["port"] = - flattenComputeRegionHealthCheckTcpHealthCheckPort(original["port"], d, config) - transformed["port_name"] = - flattenComputeRegionHealthCheckTcpHealthCheckPortName(original["portName"], d, config) - transformed["proxy_header"] = - flattenComputeRegionHealthCheckTcpHealthCheckProxyHeader(original["proxyHeader"], d, config) - transformed["port_specification"] = - flattenComputeRegionHealthCheckTcpHealthCheckPortSpecification(original["portSpecification"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionHealthCheckTcpHealthCheckRequest(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckTcpHealthCheckResponse(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckTcpHealthCheckPort(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionHealthCheckTcpHealthCheckPortName(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckTcpHealthCheckProxyHeader(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckTcpHealthCheckPortSpecification(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v 
-} - -func flattenComputeRegionHealthCheckSslHealthCheck(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["request"] = - flattenComputeRegionHealthCheckSslHealthCheckRequest(original["request"], d, config) - transformed["response"] = - flattenComputeRegionHealthCheckSslHealthCheckResponse(original["response"], d, config) - transformed["port"] = - flattenComputeRegionHealthCheckSslHealthCheckPort(original["port"], d, config) - transformed["port_name"] = - flattenComputeRegionHealthCheckSslHealthCheckPortName(original["portName"], d, config) - transformed["proxy_header"] = - flattenComputeRegionHealthCheckSslHealthCheckProxyHeader(original["proxyHeader"], d, config) - transformed["port_specification"] = - flattenComputeRegionHealthCheckSslHealthCheckPortSpecification(original["portSpecification"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionHealthCheckSslHealthCheckRequest(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckSslHealthCheckResponse(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckSslHealthCheckPort(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionHealthCheckSslHealthCheckPortName(v interface{}, d 
*resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckSslHealthCheckProxyHeader(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckSslHealthCheckPortSpecification(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttp2HealthCheck(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host"] = - flattenComputeRegionHealthCheckHttp2HealthCheckHost(original["host"], d, config) - transformed["request_path"] = - flattenComputeRegionHealthCheckHttp2HealthCheckRequestPath(original["requestPath"], d, config) - transformed["response"] = - flattenComputeRegionHealthCheckHttp2HealthCheckResponse(original["response"], d, config) - transformed["port"] = - flattenComputeRegionHealthCheckHttp2HealthCheckPort(original["port"], d, config) - transformed["port_name"] = - flattenComputeRegionHealthCheckHttp2HealthCheckPortName(original["portName"], d, config) - transformed["proxy_header"] = - flattenComputeRegionHealthCheckHttp2HealthCheckProxyHeader(original["proxyHeader"], d, config) - transformed["port_specification"] = - flattenComputeRegionHealthCheckHttp2HealthCheckPortSpecification(original["portSpecification"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionHealthCheckHttp2HealthCheckHost(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttp2HealthCheckRequestPath(v interface{}, d 
*resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttp2HealthCheckResponse(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttp2HealthCheckPort(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionHealthCheckHttp2HealthCheckPortName(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttp2HealthCheckProxyHeader(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckHttp2HealthCheckPortSpecification(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckGrpcHealthCheck(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["port"] = - flattenComputeRegionHealthCheckGrpcHealthCheckPort(original["port"], d, config) - transformed["port_name"] = - flattenComputeRegionHealthCheckGrpcHealthCheckPortName(original["portName"], d, config) - transformed["port_specification"] = - flattenComputeRegionHealthCheckGrpcHealthCheckPortSpecification(original["portSpecification"], d, config) - 
transformed["grpc_service_name"] = - flattenComputeRegionHealthCheckGrpcHealthCheckGrpcServiceName(original["grpcServiceName"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionHealthCheckGrpcHealthCheckPort(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_health_check_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionHealthCheckGrpcHealthCheckPortName(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckGrpcHealthCheckPortSpecification(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckGrpcHealthCheckGrpcServiceName(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionHealthCheckLogConfig(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - transformed := make(map[string]interface{}) - if v == nil { - - transformed["enable"] = false - return []interface{}{transformed} - } - - original := v.(map[string]interface{}) - transformed["enable"] = original["enable"] - return []interface{}{transformed} -} - -func flattenComputeRegionHealthCheckRegion(v interface{}, d *resource_compute_region_health_check_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeRegionHealthCheckCheckIntervalSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeRegionHealthCheckDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHealthyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckUnhealthyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttpHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHost, err := expandComputeRegionHealthCheckHttpHealthCheckHost(original["host"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { - transformed["host"] = transformedHost - } - - transformedRequestPath, err := expandComputeRegionHealthCheckHttpHealthCheckRequestPath(original["request_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedRequestPath); val.IsValid() && !isEmptyValue(val) { - transformed["requestPath"] = transformedRequestPath - } - - transformedResponse, err := expandComputeRegionHealthCheckHttpHealthCheckResponse(original["response"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedResponse); val.IsValid() && 
!isEmptyValue(val) { - transformed["response"] = transformedResponse - } - - transformedPort, err := expandComputeRegionHealthCheckHttpHealthCheckPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedPortName, err := expandComputeRegionHealthCheckHttpHealthCheckPortName(original["port_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { - transformed["portName"] = transformedPortName - } - - transformedProxyHeader, err := expandComputeRegionHealthCheckHttpHealthCheckProxyHeader(original["proxy_header"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { - transformed["proxyHeader"] = transformedProxyHeader - } - - transformedPortSpecification, err := expandComputeRegionHealthCheckHttpHealthCheckPortSpecification(original["port_specification"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { - transformed["portSpecification"] = transformedPortSpecification - } - - return transformed, nil -} - -func expandComputeRegionHealthCheckHttpHealthCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttpHealthCheckRequestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttpHealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeRegionHealthCheckHttpHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttpHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttpHealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttpHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttpsHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHost, err := expandComputeRegionHealthCheckHttpsHealthCheckHost(original["host"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { - transformed["host"] = transformedHost - } - - transformedRequestPath, err := expandComputeRegionHealthCheckHttpsHealthCheckRequestPath(original["request_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedRequestPath); val.IsValid() && !isEmptyValue(val) { - transformed["requestPath"] = transformedRequestPath - } - - transformedResponse, err := expandComputeRegionHealthCheckHttpsHealthCheckResponse(original["response"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { - transformed["response"] = transformedResponse - } - - transformedPort, err 
:= expandComputeRegionHealthCheckHttpsHealthCheckPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedPortName, err := expandComputeRegionHealthCheckHttpsHealthCheckPortName(original["port_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { - transformed["portName"] = transformedPortName - } - - transformedProxyHeader, err := expandComputeRegionHealthCheckHttpsHealthCheckProxyHeader(original["proxy_header"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { - transformed["proxyHeader"] = transformedProxyHeader - } - - transformedPortSpecification, err := expandComputeRegionHealthCheckHttpsHealthCheckPortSpecification(original["port_specification"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { - transformed["portSpecification"] = transformedPortSpecification - } - - return transformed, nil -} - -func expandComputeRegionHealthCheckHttpsHealthCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttpsHealthCheckRequestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttpsHealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttpsHealthCheckPort(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttpsHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttpsHealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttpsHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckTcpHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequest, err := expandComputeRegionHealthCheckTcpHealthCheckRequest(original["request"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedRequest); val.IsValid() && !isEmptyValue(val) { - transformed["request"] = transformedRequest - } - - transformedResponse, err := expandComputeRegionHealthCheckTcpHealthCheckResponse(original["response"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { - transformed["response"] = transformedResponse - } - - transformedPort, err := expandComputeRegionHealthCheckTcpHealthCheckPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedPortName, err := expandComputeRegionHealthCheckTcpHealthCheckPortName(original["port_name"], d, config) - if err != nil { - return 
nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { - transformed["portName"] = transformedPortName - } - - transformedProxyHeader, err := expandComputeRegionHealthCheckTcpHealthCheckProxyHeader(original["proxy_header"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { - transformed["proxyHeader"] = transformedProxyHeader - } - - transformedPortSpecification, err := expandComputeRegionHealthCheckTcpHealthCheckPortSpecification(original["port_specification"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { - transformed["portSpecification"] = transformedPortSpecification - } - - return transformed, nil -} - -func expandComputeRegionHealthCheckTcpHealthCheckRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckTcpHealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckTcpHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckTcpHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckTcpHealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckTcpHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckSslHealthCheck(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequest, err := expandComputeRegionHealthCheckSslHealthCheckRequest(original["request"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedRequest); val.IsValid() && !isEmptyValue(val) { - transformed["request"] = transformedRequest - } - - transformedResponse, err := expandComputeRegionHealthCheckSslHealthCheckResponse(original["response"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { - transformed["response"] = transformedResponse - } - - transformedPort, err := expandComputeRegionHealthCheckSslHealthCheckPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedPortName, err := expandComputeRegionHealthCheckSslHealthCheckPortName(original["port_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { - transformed["portName"] = transformedPortName - } - - transformedProxyHeader, err := expandComputeRegionHealthCheckSslHealthCheckProxyHeader(original["proxy_header"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { - transformed["proxyHeader"] = transformedProxyHeader - } - - transformedPortSpecification, err := 
expandComputeRegionHealthCheckSslHealthCheckPortSpecification(original["port_specification"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { - transformed["portSpecification"] = transformedPortSpecification - } - - return transformed, nil -} - -func expandComputeRegionHealthCheckSslHealthCheckRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckSslHealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckSslHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckSslHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckSslHealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckSslHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttp2HealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHost, err := expandComputeRegionHealthCheckHttp2HealthCheckHost(original["host"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { - transformed["host"] = transformedHost - } - - 
transformedRequestPath, err := expandComputeRegionHealthCheckHttp2HealthCheckRequestPath(original["request_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedRequestPath); val.IsValid() && !isEmptyValue(val) { - transformed["requestPath"] = transformedRequestPath - } - - transformedResponse, err := expandComputeRegionHealthCheckHttp2HealthCheckResponse(original["response"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) { - transformed["response"] = transformedResponse - } - - transformedPort, err := expandComputeRegionHealthCheckHttp2HealthCheckPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedPortName, err := expandComputeRegionHealthCheckHttp2HealthCheckPortName(original["port_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { - transformed["portName"] = transformedPortName - } - - transformedProxyHeader, err := expandComputeRegionHealthCheckHttp2HealthCheckProxyHeader(original["proxy_header"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedProxyHeader); val.IsValid() && !isEmptyValue(val) { - transformed["proxyHeader"] = transformedProxyHeader - } - - transformedPortSpecification, err := expandComputeRegionHealthCheckHttp2HealthCheckPortSpecification(original["port_specification"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPortSpecification); 
val.IsValid() && !isEmptyValue(val) { - transformed["portSpecification"] = transformedPortSpecification - } - - return transformed, nil -} - -func expandComputeRegionHealthCheckHttp2HealthCheckHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttp2HealthCheckRequestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttp2HealthCheckResponse(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttp2HealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttp2HealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttp2HealthCheckProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckHttp2HealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckGrpcHealthCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPort, err := expandComputeRegionHealthCheckGrpcHealthCheckPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedPortName, err := 
expandComputeRegionHealthCheckGrpcHealthCheckPortName(original["port_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPortName); val.IsValid() && !isEmptyValue(val) { - transformed["portName"] = transformedPortName - } - - transformedPortSpecification, err := expandComputeRegionHealthCheckGrpcHealthCheckPortSpecification(original["port_specification"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedPortSpecification); val.IsValid() && !isEmptyValue(val) { - transformed["portSpecification"] = transformedPortSpecification - } - - transformedGrpcServiceName, err := expandComputeRegionHealthCheckGrpcHealthCheckGrpcServiceName(original["grpc_service_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedGrpcServiceName); val.IsValid() && !isEmptyValue(val) { - transformed["grpcServiceName"] = transformedGrpcServiceName - } - - return transformed, nil -} - -func expandComputeRegionHealthCheckGrpcHealthCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckGrpcHealthCheckPortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckGrpcHealthCheckPortSpecification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckGrpcHealthCheckGrpcServiceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - 
original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnable, err := expandComputeRegionHealthCheckLogConfigEnable(original["enable"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_health_check_reflect.ValueOf(transformedEnable); val.IsValid() && !isEmptyValue(val) { - transformed["enable"] = transformedEnable - } - - return transformed, nil -} - -func expandComputeRegionHealthCheckLogConfigEnable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionHealthCheckRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_region_health_check_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeRegionHealthCheckEncoder(d *resource_compute_region_health_check_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - if _, ok := d.GetOk("http_health_check"); ok { - hc := d.Get("http_health_check").([]interface{})[0] - ps := hc.(map[string]interface{})["port_specification"] - pn := hc.(map[string]interface{})["port_name"] - - if ps == "USE_FIXED_PORT" || (ps == "" && pn == "") { - m := obj["httpHealthCheck"].(map[string]interface{}) - if m["port"] == nil { - m["port"] = 80 - } - } - obj["type"] = "HTTP" - return obj, nil - } - if _, ok := d.GetOk("https_health_check"); ok { - hc := d.Get("https_health_check").([]interface{})[0] - ps := hc.(map[string]interface{})["port_specification"] - pn := hc.(map[string]interface{})["port_name"] - - if ps == "USE_FIXED_PORT" || (ps == "" && pn == "") { - m := obj["httpsHealthCheck"].(map[string]interface{}) - if m["port"] == nil { - m["port"] = 443 - } - } - obj["type"] = "HTTPS" - return obj, nil - } - if _, ok := 
d.GetOk("http2_health_check"); ok { - hc := d.Get("http2_health_check").([]interface{})[0] - ps := hc.(map[string]interface{})["port_specification"] - pn := hc.(map[string]interface{})["port_name"] - - if ps == "USE_FIXED_PORT" || (ps == "" && pn == "") { - m := obj["http2HealthCheck"].(map[string]interface{}) - if m["port"] == nil { - m["port"] = 443 - } - } - obj["type"] = "HTTP2" - return obj, nil - } - if _, ok := d.GetOk("tcp_health_check"); ok { - hc := d.Get("tcp_health_check").([]interface{})[0] - ps := hc.(map[string]interface{})["port_specification"] - pn := hc.(map[string]interface{})["port_name"] - - if ps == "USE_FIXED_PORT" || (ps == "" && pn == "") { - m := obj["tcpHealthCheck"].(map[string]interface{}) - if m["port"] == nil { - m["port"] = 80 - } - } - obj["type"] = "TCP" - return obj, nil - } - if _, ok := d.GetOk("ssl_health_check"); ok { - hc := d.Get("ssl_health_check").([]interface{})[0] - ps := hc.(map[string]interface{})["port_specification"] - pn := hc.(map[string]interface{})["port_name"] - - if ps == "USE_FIXED_PORT" || (ps == "" && pn == "") { - m := obj["sslHealthCheck"].(map[string]interface{}) - if m["port"] == nil { - m["port"] = 443 - } - } - obj["type"] = "SSL" - return obj, nil - } - - if _, ok := d.GetOk("grpc_health_check"); ok { - hc := d.Get("grpc_health_check").([]interface{})[0] - ps := hc.(map[string]interface{})["port_specification"] - pn := hc.(map[string]interface{})["port_name"] - - if ps == "USE_FIXED_PORT" || (ps == "" && pn == "") { - m := obj["grpcHealthCheck"].(map[string]interface{}) - if m["port"] == nil { - return nil, resource_compute_region_health_check_fmt.Errorf("error in HealthCheck %s: `port` must be set for GRPC health checks`.", d.Get("name").(string)) - } - } - obj["type"] = "GRPC" - return obj, nil - } - - return nil, resource_compute_region_health_check_fmt.Errorf("error in HealthCheck %s: No health check block specified.", d.Get("name").(string)) -} - -func resourceComputeRegionInstanceGroupManager() 
*resource_compute_region_instance_group_manager_schema.Resource { - return &resource_compute_region_instance_group_manager_schema.Resource{ - Create: resourceComputeRegionInstanceGroupManagerCreate, - Read: resourceComputeRegionInstanceGroupManagerRead, - Update: resourceComputeRegionInstanceGroupManagerUpdate, - Delete: resourceComputeRegionInstanceGroupManagerDelete, - Importer: &resource_compute_region_instance_group_manager_schema.ResourceImporter{ - State: resourceRegionInstanceGroupManagerStateImporter, - }, - Timeouts: &resource_compute_region_instance_group_manager_schema.ResourceTimeout{ - Create: resource_compute_region_instance_group_manager_schema.DefaultTimeout(15 * resource_compute_region_instance_group_manager_time.Minute), - Update: resource_compute_region_instance_group_manager_schema.DefaultTimeout(15 * resource_compute_region_instance_group_manager_time.Minute), - Delete: resource_compute_region_instance_group_manager_schema.DefaultTimeout(15 * resource_compute_region_instance_group_manager_time.Minute), - }, - - Schema: map[string]*resource_compute_region_instance_group_manager_schema.Schema{ - "base_instance_name": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The base instance name to use for instances in this group. The value must be a valid RFC1035 name. Supported characters are lowercase letters, numbers, and hyphens (-). Instances are named by appending a hyphen and a random four-character string to the base instance name.`, - }, - - "version": { - Type: resource_compute_region_instance_group_manager_schema.TypeList, - Required: true, - Description: `Application versions managed by this instance group. 
Each version deals with a specific instance template, allowing canary release scenarios.`, - Elem: &resource_compute_region_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_region_instance_group_manager_schema.Schema{ - "name": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Optional: true, - Description: `Version name.`, - }, - - "instance_template": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The full URL to an instance template from which all new instances of this version will be created.`, - }, - - "target_size": { - Type: resource_compute_region_instance_group_manager_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `The number of instances calculated as a fixed number or a percentage depending on the settings.`, - Elem: &resource_compute_region_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_region_instance_group_manager_schema.Schema{ - "fixed": { - Type: resource_compute_region_instance_group_manager_schema.TypeInt, - Optional: true, - Description: `The number of instances which are managed for this version. Conflicts with percent.`, - }, - - "percent": { - Type: resource_compute_region_instance_group_manager_schema.TypeInt, - Optional: true, - ValidateFunc: resource_compute_region_instance_group_manager_validation.IntBetween(0, 100), - Description: `The number of instances (calculated as percentage) which are managed for this version. Conflicts with fixed. 
Note that when using percent, rounding will be in favor of explicitly set target_size values; a managed instance group with 2 instances and 2 versions, one of which has a target_size.percent of 60 will create 2 instances of that version.`, - }, - }, - }, - }, - }, - }, - }, - - "name": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the instance group manager. Must be 1-63 characters long and comply with RFC1035. Supported characters include lowercase letters, numbers, and hyphens.`, - }, - - "region": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The region where the managed instance group resides.`, - }, - - "description": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional textual description of the instance group manager.`, - }, - - "fingerprint": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Computed: true, - Description: `The fingerprint of the instance group manager.`, - }, - - "instance_group": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Computed: true, - Description: `The full URL of the instance group created by the manager.`, - }, - - "named_port": { - Type: resource_compute_region_instance_group_manager_schema.TypeSet, - Optional: true, - Description: `The named port configuration.`, - Elem: &resource_compute_region_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_region_instance_group_manager_schema.Schema{ - "name": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Required: true, - Description: `The name of the port.`, - }, - - "port": { - Type: resource_compute_region_instance_group_manager_schema.TypeInt, - Required: true, - Description: `The port 
number.`, - }, - }, - }, - }, - - "project": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, - }, - - "self_link": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Computed: true, - Description: `The URL of the created resource.`, - }, - - "target_pools": { - Type: resource_compute_region_instance_group_manager_schema.TypeSet, - Optional: true, - Elem: &resource_compute_region_instance_group_manager_schema.Schema{ - Type: resource_compute_region_instance_group_manager_schema.TypeString, - }, - Set: selfLinkRelativePathHash, - Description: `The full URL of all target pools to which new instances in the group are added. Updating the target pools attribute does not affect existing instances.`, - }, - "target_size": { - Type: resource_compute_region_instance_group_manager_schema.TypeInt, - Computed: true, - Optional: true, - Description: `The target number of running instances for this managed instance group. This value should always be explicitly set unless this resource is attached to an autoscaler, in which case it should never be set. Defaults to 0.`, - }, - - "wait_for_instances": { - Type: resource_compute_region_instance_group_manager_schema.TypeBool, - Optional: true, - Default: false, - Description: `Whether to wait for all instances to be created/updated before returning. 
Note that if this is set to true and the operation does not succeed, Terraform will continue trying until it times out.`, - }, - "wait_for_instances_status": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Optional: true, - Default: "STABLE", - ValidateFunc: resource_compute_region_instance_group_manager_validation.StringInSlice([]string{"STABLE", "UPDATED"}, false), - Description: `When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective as well as all instances to be stable before returning.`, - }, - - "auto_healing_policies": { - Type: resource_compute_region_instance_group_manager_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `The autohealing policies for this managed instance group. You can specify only one value.`, - Elem: &resource_compute_region_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_region_instance_group_manager_schema.Schema{ - "health_check": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The health check resource that signals autohealing.`, - }, - - "initial_delay_sec": { - Type: resource_compute_region_instance_group_manager_schema.TypeInt, - Required: true, - ValidateFunc: resource_compute_region_instance_group_manager_validation.IntBetween(0, 3600), - Description: `The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. 
Between 0 and 3600.`, - }, - }, - }, - }, - - "distribution_policy_zones": { - Type: resource_compute_region_instance_group_manager_schema.TypeSet, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The distribution policy for this managed instance group. You can specify one or more values.`, - Set: hashZoneFromSelfLinkOrResourceName, - Elem: &resource_compute_region_instance_group_manager_schema.Schema{ - Type: resource_compute_region_instance_group_manager_schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - - "distribution_policy_target_shape": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The shape to which the group converges either proactively or on resize events (depending on the value set in updatePolicy.instanceRedistributionType).`, - }, - - "update_policy": { - Type: resource_compute_region_instance_group_manager_schema.TypeList, - Computed: true, - Optional: true, - MaxItems: 1, - Description: `The update policy for this managed instance group.`, - Elem: &resource_compute_region_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_region_instance_group_manager_schema.Schema{ - "minimal_action": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Required: true, - ValidateFunc: resource_compute_region_instance_group_manager_validation.StringInSlice([]string{"RESTART", "REPLACE"}, false), - Description: `Minimal action to be taken on an instance. You can specify either RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a RESTART, the Updater will attempt to perform that action only. 
However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action.`, - }, - - "type": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Required: true, - ValidateFunc: resource_compute_region_instance_group_manager_validation.StringInSlice([]string{"OPPORTUNISTIC", "PROACTIVE"}, false), - Description: `The type of update process. You can specify either PROACTIVE so that the instance group manager proactively executes actions in order to bring instances to their target versions or OPPORTUNISTIC so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls).`, - }, - - "max_surge_fixed": { - Type: resource_compute_region_instance_group_manager_schema.TypeInt, - Optional: true, - Computed: true, - ConflictsWith: []string{"update_policy.0.max_surge_percent"}, - Description: `The maximum number of instances that can be created above the specified targetSize during the update process. Conflicts with max_surge_percent. It has to be either 0 or at least equal to the number of zones. If fixed values are used, at least one of max_unavailable_fixed or max_surge_fixed must be greater than 0.`, - }, - - "max_surge_percent": { - Type: resource_compute_region_instance_group_manager_schema.TypeInt, - Optional: true, - ConflictsWith: []string{"update_policy.0.max_surge_fixed"}, - Description: `The maximum number of instances(calculated as percentage) that can be created above the specified targetSize during the update process. Conflicts with max_surge_fixed. 
Percent value is only allowed for regional managed instance groups with size at least 10.`, - ValidateFunc: resource_compute_region_instance_group_manager_validation.IntBetween(0, 100), - }, - - "max_unavailable_fixed": { - Type: resource_compute_region_instance_group_manager_schema.TypeInt, - Optional: true, - Computed: true, - Description: `The maximum number of instances that can be unavailable during the update process. Conflicts with max_unavailable_percent. It has to be either 0 or at least equal to the number of zones. If fixed values are used, at least one of max_unavailable_fixed or max_surge_fixed must be greater than 0.`, - ConflictsWith: []string{"update_policy.0.max_unavailable_percent"}, - }, - - "max_unavailable_percent": { - Type: resource_compute_region_instance_group_manager_schema.TypeInt, - Optional: true, - ConflictsWith: []string{"update_policy.0.max_unavailable_fixed"}, - ValidateFunc: resource_compute_region_instance_group_manager_validation.IntBetween(0, 100), - Description: `The maximum number of instances(calculated as percentage) that can be unavailable during the update process. Conflicts with max_unavailable_fixed. Percent value is only allowed for regional managed instance groups with size at least 10.`, - }, - - "instance_redistribution_type": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_instance_group_manager_validation.StringInSlice([]string{"PROACTIVE", "NONE", ""}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("PROACTIVE"), - Description: `The instance redistribution policy for regional managed instance groups. Valid values are: "PROACTIVE", "NONE". If PROACTIVE (default), the group attempts to maintain an even distribution of VM instances across zones in the region. 
If NONE, proactive redistribution is disabled.`, - }, - "replacement_method": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_instance_group_manager_validation.StringInSlice([]string{"RECREATE", "SUBSTITUTE", ""}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("SUBSTITUTE"), - Description: `The instance replacement method for regional managed instance groups. Valid values are: "RECREATE", "SUBSTITUTE". If SUBSTITUTE (default), the group replaces VM instances with new instances that have randomly generated names. If RECREATE, instance names are preserved. You must also set max_unavailable_fixed or max_unavailable_percent to be greater than 0.`, - }, - }, - }, - }, - - "stateful_disk": { - Type: resource_compute_region_instance_group_manager_schema.TypeSet, - Optional: true, - Description: `Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the official documentation. Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the update_policy.`, - Elem: &resource_compute_region_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_region_instance_group_manager_schema.Schema{ - "device_name": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Required: true, - Description: `The device name of the disk to be attached.`, - }, - - "delete_rule": { - Type: resource_compute_region_instance_group_manager_schema.TypeString, - Default: "NEVER", - Optional: true, - Description: `A value that prescribes what should happen to the stateful disk when the VM instance is deleted. The available options are NEVER and ON_PERMANENT_INSTANCE_DELETION. NEVER - detach the disk when the VM is deleted, but do not delete the disk. 
ON_PERMANENT_INSTANCE_DELETION will delete the stateful disk when the VM is permanently deleted from the instance group. The default is NEVER.`, - ValidateFunc: resource_compute_region_instance_group_manager_validation.StringInSlice([]string{"NEVER", "ON_PERMANENT_INSTANCE_DELETION"}, true), - }, - }, - }, - }, - "status": { - Type: resource_compute_region_instance_group_manager_schema.TypeList, - Computed: true, - Description: `The status of this managed instance group.`, - Elem: &resource_compute_region_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_region_instance_group_manager_schema.Schema{ - "is_stable": { - Type: resource_compute_region_instance_group_manager_schema.TypeBool, - Computed: true, - Description: `A bit indicating whether the managed instance group is in a stable state. A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified.`, - }, - - "version_target": { - Type: resource_compute_region_instance_group_manager_schema.TypeList, - Computed: true, - Description: `A status of consistency of Instances' versions with their target version specified by version field on Instance Group Manager.`, - Elem: &resource_compute_region_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_region_instance_group_manager_schema.Schema{ - "is_reached": { - Type: resource_compute_region_instance_group_manager_schema.TypeBool, - Computed: true, - Description: `A bit indicating whether version target has been reached in this managed instance group, i.e. all instances are in their target version. 
Instances' target version are specified by version field on Instance Group Manager.`, - }, - }, - }, - }, - "stateful": { - Type: resource_compute_region_instance_group_manager_schema.TypeList, - Computed: true, - Description: `Stateful status of the given Instance Group Manager.`, - Elem: &resource_compute_region_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_region_instance_group_manager_schema.Schema{ - "has_stateful_config": { - Type: resource_compute_region_instance_group_manager_schema.TypeBool, - Computed: true, - Description: `A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. The group might report that it has no stateful config even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions.`, - }, - "per_instance_configs": { - Type: resource_compute_region_instance_group_manager_schema.TypeList, - Computed: true, - Description: `Status of per-instance configs on the instance.`, - Elem: &resource_compute_region_instance_group_manager_schema.Resource{ - Schema: map[string]*resource_compute_region_instance_group_manager_schema.Schema{ - "all_effective": { - Type: resource_compute_region_instance_group_manager_schema.TypeBool, - Computed: true, - Description: `A bit indicating if all of the group's per-instance configs (listed in the output of a listPerInstanceConfigs API call) have status EFFECTIVE or there are no per-instance-configs.`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRegionInstanceGroupManagerCreate(d *resource_compute_region_instance_group_manager_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := 
getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - manager := &resource_compute_region_instance_group_manager_compute.InstanceGroupManager{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - BaseInstanceName: d.Get("base_instance_name").(string), - TargetSize: int64(d.Get("target_size").(int)), - NamedPorts: getNamedPortsBeta(d.Get("named_port").(*resource_compute_region_instance_group_manager_schema.Set).List()), - TargetPools: convertStringSet(d.Get("target_pools").(*resource_compute_region_instance_group_manager_schema.Set)), - AutoHealingPolicies: expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})), - Versions: expandVersions(d.Get("version").([]interface{})), - UpdatePolicy: expandRegionUpdatePolicy(d.Get("update_policy").([]interface{})), - DistributionPolicy: expandDistributionPolicy(d), - StatefulPolicy: expandStatefulPolicy(d.Get("stateful_disk").(*resource_compute_region_instance_group_manager_schema.Set).List()), - - ForceSendFields: []string{"TargetSize"}, - } - - op, err := config.NewComputeClient(userAgent).RegionInstanceGroupManagers.Insert(project, region, manager).Do() - - if err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error creating RegionInstanceGroupManager: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{name}}") - if err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime(config, op, project, "Creating InstanceGroupManager", userAgent, d.Timeout(resource_compute_region_instance_group_manager_schema.TimeoutCreate)) - if err != nil { - return err - } - - if d.Get("wait_for_instances").(bool) { - err := computeRIGMWaitForInstanceStatus(d, meta) - if err != nil { - return err - } - } - - return 
resourceComputeRegionInstanceGroupManagerRead(d, config) -} - -func computeRIGMWaitForInstanceStatus(d *resource_compute_region_instance_group_manager_schema.ResourceData, meta interface{}) error { - waitForUpdates := d.Get("wait_for_instances_status").(string) == "UPDATED" - conf := resource_compute_region_instance_group_manager_resource.StateChangeConf{ - Pending: []string{"creating", "error", "updating per instance configs", "reaching version target"}, - Target: []string{"created"}, - Refresh: waitForInstancesRefreshFunc(getRegionalManager, waitForUpdates, d, meta), - Timeout: d.Timeout(resource_compute_region_instance_group_manager_schema.TimeoutCreate), - } - _, err := conf.WaitForState() - if err != nil { - return err - } - return nil -} - -type getInstanceManagerFunc func(*resource_compute_region_instance_group_manager_schema.ResourceData, interface{}) (*resource_compute_region_instance_group_manager_compute.InstanceGroupManager, error) - -func getRegionalManager(d *resource_compute_region_instance_group_manager_schema.ResourceData, meta interface{}) (*resource_compute_region_instance_group_manager_compute.InstanceGroupManager, error) { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - region, err := getRegion(d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - name := d.Get("name").(string) - manager, err := config.NewComputeClient(userAgent).RegionInstanceGroupManagers.Get(project, region, name).Do() - if err != nil { - return nil, handleNotFoundError(err, d, resource_compute_region_instance_group_manager_fmt.Sprintf("Region Instance Manager %q", name)) - } - - return manager, nil -} - -func waitForInstancesRefreshFunc(f getInstanceManagerFunc, waitForUpdates bool, d *resource_compute_region_instance_group_manager_schema.ResourceData, meta interface{}) 
resource_compute_region_instance_group_manager_resource.StateRefreshFunc { - return func() (interface{}, string, error) { - m, err := f(d, meta) - if err != nil { - resource_compute_region_instance_group_manager_log.Printf("[WARNING] Error in fetching manager while waiting for instances to come up: %s\n", err) - return nil, "error", err - } - if m.Status.IsStable { - if waitForUpdates { - - if m.Status.Stateful.HasStatefulConfig { - if !m.Status.Stateful.PerInstanceConfigs.AllEffective { - return false, "updating per instance configs", nil - } - } - if !m.Status.VersionTarget.IsReached { - return false, "reaching version target", nil - } - } - return true, "created", nil - } else { - return false, "creating", nil - } - } -} - -func resourceComputeRegionInstanceGroupManagerRead(d *resource_compute_region_instance_group_manager_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - manager, err := getRegionalManager(d, meta) - if err != nil { - return err - } - if manager == nil { - resource_compute_region_instance_group_manager_log.Printf("[WARN] Region Instance Group Manager %q not found, removing from state.", d.Id()) - d.SetId("") - return nil - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - if err := d.Set("base_instance_name", manager.BaseInstanceName); err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error setting base_instance_name: %s", err) - } - if err := d.Set("name", manager.Name); err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("region", GetResourceNameFromSelfLink(manager.Region)); err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("description", manager.Description); err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error setting description: %s", err) - } - if err 
:= d.Set("project", project); err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("target_size", manager.TargetSize); err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error setting target_size: %s", err) - } - if err := d.Set("target_pools", mapStringArr(manager.TargetPools, ConvertSelfLinkToV1)); err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error setting target_pools in state: %s", err.Error()) - } - if err := d.Set("named_port", flattenNamedPortsBeta(manager.NamedPorts)); err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error setting named_port in state: %s", err.Error()) - } - if err := d.Set("fingerprint", manager.Fingerprint); err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error setting fingerprint: %s", err) - } - if err := d.Set("instance_group", ConvertSelfLinkToV1(manager.InstanceGroup)); err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error setting instance_group: %s", err) - } - if err := d.Set("distribution_policy_zones", flattenDistributionPolicy(manager.DistributionPolicy)); err != nil { - return err - } - if err := d.Set("distribution_policy_target_shape", manager.DistributionPolicy.TargetShape); err != nil { - return err - } - if err := d.Set("self_link", ConvertSelfLinkToV1(manager.SelfLink)); err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error setting self_link: %s", err) - } - - if err := d.Set("auto_healing_policies", flattenAutoHealingPolicies(manager.AutoHealingPolicies)); err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error setting auto_healing_policies in state: %s", err.Error()) - } - if err := d.Set("version", flattenVersions(manager.Versions)); err != nil { - return err - } - if err := d.Set("update_policy", 
flattenRegionUpdatePolicy(manager.UpdatePolicy)); err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error setting update_policy in state: %s", err.Error()) - } - if err = d.Set("stateful_disk", flattenStatefulPolicy(manager.StatefulPolicy)); err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error setting stateful_disk in state: %s", err.Error()) - } - if err = d.Set("status", flattenStatus(manager.Status)); err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error setting status in state: %s", err.Error()) - } - - if d.Get("wait_for_instances_status").(string) == "" { - if err = d.Set("wait_for_instances_status", "STABLE"); err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error setting wait_for_instances_status in state: %s", err.Error()) - } - } - - return nil -} - -func resourceComputeRegionInstanceGroupManagerUpdate(d *resource_compute_region_instance_group_manager_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - if d.Get("wait_for_instances").(bool) { - err := computeRIGMWaitForInstanceStatus(d, meta) - if err != nil { - return err - } - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - updatedManager := &resource_compute_region_instance_group_manager_compute.InstanceGroupManager{ - Fingerprint: d.Get("fingerprint").(string), - } - var change bool - - if d.HasChange("target_pools") { - updatedManager.TargetPools = convertStringSet(d.Get("target_pools").(*resource_compute_region_instance_group_manager_schema.Set)) - updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "TargetPools") - change = true - } - - if d.HasChange("auto_healing_policies") { - updatedManager.AutoHealingPolicies = 
expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})) - updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "AutoHealingPolicies") - change = true - } - - if d.HasChange("version") { - updatedManager.Versions = expandVersions(d.Get("version").([]interface{})) - change = true - } - - if d.HasChange("update_policy") { - updatedManager.UpdatePolicy = expandRegionUpdatePolicy(d.Get("update_policy").([]interface{})) - change = true - } - - if d.HasChange("stateful_disk") { - updatedManager.StatefulPolicy = expandStatefulPolicy(d.Get("stateful_disk").(*resource_compute_region_instance_group_manager_schema.Set).List()) - change = true - } - - if change { - op, err := config.NewComputeClient(userAgent).RegionInstanceGroupManagers.Patch(project, region, d.Get("name").(string), updatedManager).Do() - if err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error updating region managed group instances: %s", err) - } - - err = computeOperationWaitTime(config, op, project, "Updating region managed group instances", userAgent, d.Timeout(resource_compute_region_instance_group_manager_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - if d.HasChange("named_port") { - d.Partial(true) - namedPorts := getNamedPortsBeta(d.Get("named_port").(*resource_compute_region_instance_group_manager_schema.Set).List()) - setNamedPorts := &resource_compute_region_instance_group_manager_compute.RegionInstanceGroupsSetNamedPortsRequest{ - NamedPorts: namedPorts, - } - - op, err := config.NewComputeClient(userAgent).RegionInstanceGroups.SetNamedPorts( - project, region, d.Get("name").(string), setNamedPorts).Do() - - if err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error updating RegionInstanceGroupManager: %s", err) - } - - err = computeOperationWaitTime(config, op, project, "Updating RegionInstanceGroupManager", userAgent, 
d.Timeout(resource_compute_region_instance_group_manager_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - if d.HasChange("target_size") { - d.Partial(true) - targetSize := int64(d.Get("target_size").(int)) - op, err := config.NewComputeClient(userAgent).RegionInstanceGroupManagers.Resize( - project, region, d.Get("name").(string), targetSize).Do() - - if err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error resizing RegionInstanceGroupManager: %s", err) - } - - err = computeOperationWaitTime(config, op, project, "Resizing RegionInstanceGroupManager", userAgent, d.Timeout(resource_compute_region_instance_group_manager_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - if d.Get("wait_for_instances").(bool) { - err := computeRIGMWaitForInstanceStatus(d, meta) - if err != nil { - return err - } - } - - return resourceComputeRegionInstanceGroupManagerRead(d, meta) -} - -func resourceComputeRegionInstanceGroupManagerDelete(d *resource_compute_region_instance_group_manager_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - if d.Get("wait_for_instances").(bool) { - err := computeRIGMWaitForInstanceStatus(d, meta) - if err != nil { - return err - } - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - - op, err := config.NewComputeClient(userAgent).RegionInstanceGroupManagers.Delete(project, region, name).Do() - - if err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error deleting region instance group manager: %s", err) - } - - err = computeOperationWaitTime(config, op, project, "Deleting RegionInstanceGroupManager", userAgent, 
d.Timeout(resource_compute_region_instance_group_manager_schema.TimeoutDelete)) - if err != nil { - return resource_compute_region_instance_group_manager_fmt.Errorf("Error waiting for delete to complete: %s", err) - } - - d.SetId("") - return nil -} - -func expandRegionUpdatePolicy(configured []interface{}) *resource_compute_region_instance_group_manager_compute.InstanceGroupManagerUpdatePolicy { - updatePolicy := &resource_compute_region_instance_group_manager_compute.InstanceGroupManagerUpdatePolicy{} - - for _, raw := range configured { - data := raw.(map[string]interface{}) - - updatePolicy.MinimalAction = data["minimal_action"].(string) - updatePolicy.Type = data["type"].(string) - updatePolicy.InstanceRedistributionType = data["instance_redistribution_type"].(string) - updatePolicy.ReplacementMethod = data["replacement_method"].(string) - - if v := data["max_surge_percent"]; v.(int) > 0 { - updatePolicy.MaxSurge = &resource_compute_region_instance_group_manager_compute.FixedOrPercent{ - Percent: int64(v.(int)), - NullFields: []string{"Fixed"}, - } - } else { - updatePolicy.MaxSurge = &resource_compute_region_instance_group_manager_compute.FixedOrPercent{ - Fixed: int64(data["max_surge_fixed"].(int)), - - ForceSendFields: []string{"Fixed"}, - NullFields: []string{"Percent"}, - } - } - - if v := data["max_unavailable_percent"]; v.(int) > 0 { - updatePolicy.MaxUnavailable = &resource_compute_region_instance_group_manager_compute.FixedOrPercent{ - Percent: int64(v.(int)), - NullFields: []string{"Fixed"}, - } - } else { - updatePolicy.MaxUnavailable = &resource_compute_region_instance_group_manager_compute.FixedOrPercent{ - Fixed: int64(data["max_unavailable_fixed"].(int)), - - ForceSendFields: []string{"Fixed"}, - NullFields: []string{"Percent"}, - } - } - } - return updatePolicy -} - -func flattenRegionUpdatePolicy(updatePolicy *resource_compute_region_instance_group_manager_compute.InstanceGroupManagerUpdatePolicy) []map[string]interface{} { - results := 
[]map[string]interface{}{} - if updatePolicy != nil { - up := map[string]interface{}{} - if updatePolicy.MaxSurge != nil { - up["max_surge_fixed"] = updatePolicy.MaxSurge.Fixed - up["max_surge_percent"] = updatePolicy.MaxSurge.Percent - } else { - up["max_surge_fixed"] = 0 - up["max_surge_percent"] = 0 - } - if updatePolicy.MaxUnavailable != nil { - up["max_unavailable_fixed"] = updatePolicy.MaxUnavailable.Fixed - up["max_unavailable_percent"] = updatePolicy.MaxUnavailable.Percent - } else { - up["max_unavailable_fixed"] = 0 - up["max_unavailable_percent"] = 0 - } - up["minimal_action"] = updatePolicy.MinimalAction - up["type"] = updatePolicy.Type - up["instance_redistribution_type"] = updatePolicy.InstanceRedistributionType - up["replacement_method"] = updatePolicy.ReplacementMethod - - results = append(results, up) - } - return results -} - -func expandDistributionPolicy(d *resource_compute_region_instance_group_manager_schema.ResourceData) *resource_compute_region_instance_group_manager_compute.DistributionPolicy { - dpz := d.Get("distribution_policy_zones").(*resource_compute_region_instance_group_manager_schema.Set) - dpts := d.Get("distribution_policy_target_shape").(string) - if dpz.Len() == 0 && dpts == "" { - return nil - } - - distributionPolicyZoneConfigs := make([]*resource_compute_region_instance_group_manager_compute.DistributionPolicyZoneConfiguration, 0, dpz.Len()) - for _, raw := range dpz.List() { - data := raw.(string) - distributionPolicyZoneConfig := resource_compute_region_instance_group_manager_compute.DistributionPolicyZoneConfiguration{ - Zone: "zones/" + data, - } - - distributionPolicyZoneConfigs = append(distributionPolicyZoneConfigs, &distributionPolicyZoneConfig) - } - - return &resource_compute_region_instance_group_manager_compute.DistributionPolicy{Zones: distributionPolicyZoneConfigs, TargetShape: dpts} -} - -func flattenDistributionPolicy(distributionPolicy 
*resource_compute_region_instance_group_manager_compute.DistributionPolicy) []string { - zones := make([]string, 0) - - if distributionPolicy != nil { - for _, zone := range distributionPolicy.Zones { - zones = append(zones, GetResourceNameFromSelfLink(zone.Zone)) - } - } - - return zones -} - -func hashZoneFromSelfLinkOrResourceName(value interface{}) int { - parts := resource_compute_region_instance_group_manager_strings.Split(value.(string), "/") - resource_compute_region_instance_group_manager_resource := parts[len(parts)-1] - - return hashcode(resource_compute_region_instance_group_manager_resource) -} - -func resourceRegionInstanceGroupManagerStateImporter(d *resource_compute_region_instance_group_manager_schema.ResourceData, meta interface{}) ([]*resource_compute_region_instance_group_manager_schema.ResourceData, error) { - if err := d.Set("wait_for_instances", false); err != nil { - return nil, resource_compute_region_instance_group_manager_fmt.Errorf("Error setting wait_for_instances: %s", err) - } - if err := d.Set("wait_for_instances_status", "STABLE"); err != nil { - return nil, resource_compute_region_instance_group_manager_fmt.Errorf("Error setting wait_for_instances_status: %s", err) - } - config := meta.(*Config) - if err := parseImportId([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{name}}") - if err != nil { - return nil, resource_compute_region_instance_group_manager_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_region_instance_group_manager_schema.ResourceData{d}, nil -} - -func resourceComputeRegionNetworkEndpointGroup() *resource_compute_region_network_endpoint_group_schema.Resource { - return 
&resource_compute_region_network_endpoint_group_schema.Resource{ - Create: resourceComputeRegionNetworkEndpointGroupCreate, - Read: resourceComputeRegionNetworkEndpointGroupRead, - Delete: resourceComputeRegionNetworkEndpointGroupDelete, - - Importer: &resource_compute_region_network_endpoint_group_schema.ResourceImporter{ - State: resourceComputeRegionNetworkEndpointGroupImport, - }, - - Timeouts: &resource_compute_region_network_endpoint_group_schema.ResourceTimeout{ - Create: resource_compute_region_network_endpoint_group_schema.DefaultTimeout(4 * resource_compute_region_network_endpoint_group_time.Minute), - Delete: resource_compute_region_network_endpoint_group_schema.DefaultTimeout(4 * resource_compute_region_network_endpoint_group_time.Minute), - }, - - Schema: map[string]*resource_compute_region_network_endpoint_group_schema.Schema{ - "name": { - Type: resource_compute_region_network_endpoint_group_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCPName, - Description: `Name of the resource; provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "region": { - Type: resource_compute_region_network_endpoint_group_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the region where the Serverless NEGs Reside.`, - }, - "app_engine": { - Type: resource_compute_region_network_endpoint_group_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Only valid when networkEndpointType is "SERVERLESS". 
-Only one of cloud_run, app_engine or cloud_function may be set.`, - MaxItems: 1, - Elem: &resource_compute_region_network_endpoint_group_schema.Resource{ - Schema: map[string]*resource_compute_region_network_endpoint_group_schema.Schema{ - "service": { - Type: resource_compute_region_network_endpoint_group_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Optional serving service. -The service name must be 1-63 characters long, and comply with RFC1035. -Example value: "default", "my-service".`, - }, - "url_mask": { - Type: resource_compute_region_network_endpoint_group_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A template to parse service and version fields from a request URL. -URL mask allows for routing to multiple App Engine services without -having to create multiple Network Endpoint Groups and backend services. - -For example, the request URLs "foo1-dot-appname.appspot.com/v1" and -"foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with -URL mask "-dot-appname.appspot.com/". The URL mask will parse -them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively.`, - }, - "version": { - Type: resource_compute_region_network_endpoint_group_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Optional serving version. -The version must be 1-63 characters long, and comply with RFC1035. -Example value: "v1", "v2".`, - }, - }, - }, - ExactlyOneOf: []string{"app_engine", "cloud_function", "cloud_run"}, - }, - "cloud_function": { - Type: resource_compute_region_network_endpoint_group_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Only valid when networkEndpointType is "SERVERLESS". 
-Only one of cloud_run, app_engine or cloud_function may be set.`, - MaxItems: 1, - Elem: &resource_compute_region_network_endpoint_group_schema.Resource{ - Schema: map[string]*resource_compute_region_network_endpoint_group_schema.Schema{ - "function": { - Type: resource_compute_region_network_endpoint_group_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A user-defined name of the Cloud Function. -The function name is case-sensitive and must be 1-63 characters long. -Example value: "func1".`, - AtLeastOneOf: []string{"cloud_function.0.function", "cloud_function.0.url_mask"}, - }, - "url_mask": { - Type: resource_compute_region_network_endpoint_group_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A template to parse function field from a request URL. URL mask allows -for routing to multiple Cloud Functions without having to create -multiple Network Endpoint Groups and backend services. - -For example, request URLs "mydomain.com/function1" and "mydomain.com/function2" -can be backed by the same Serverless NEG with URL mask "/". The URL mask -will parse them to { function = "function1" } and { function = "function2" } respectively.`, - AtLeastOneOf: []string{"cloud_function.0.function", "cloud_function.0.url_mask"}, - }, - }, - }, - ExactlyOneOf: []string{"app_engine", "cloud_function", "cloud_run"}, - }, - "cloud_run": { - Type: resource_compute_region_network_endpoint_group_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Only valid when networkEndpointType is "SERVERLESS". -Only one of cloud_run, app_engine or cloud_function may be set.`, - MaxItems: 1, - Elem: &resource_compute_region_network_endpoint_group_schema.Resource{ - Schema: map[string]*resource_compute_region_network_endpoint_group_schema.Schema{ - "service": { - Type: resource_compute_region_network_endpoint_group_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Cloud Run service is the main resource of Cloud Run. 
-The service must be 1-63 characters long, and comply with RFC1035. -Example value: "run-service".`, - AtLeastOneOf: []string{"cloud_run.0.service", "cloud_run.0.url_mask"}, - }, - "tag": { - Type: resource_compute_region_network_endpoint_group_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Cloud Run tag represents the "named-revision" to provide -additional fine-grained traffic routing information. -The tag must be 1-63 characters long, and comply with RFC1035. -Example value: "revision-0010".`, - }, - "url_mask": { - Type: resource_compute_region_network_endpoint_group_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A template to parse service and tag fields from a request URL. -URL mask allows for routing to multiple Run services without having -to create multiple network endpoint groups and backend services. - -For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" -an be backed by the same Serverless Network Endpoint Group (NEG) with -URL mask ".domain.com/". The URL mask will parse them to { service="bar1", tag="foo1" } -and { service="bar2", tag="foo2" } respectively.`, - AtLeastOneOf: []string{"cloud_run.0.service", "cloud_run.0.url_mask"}, - }, - }, - }, - ExactlyOneOf: []string{"cloud_run", "cloud_function", "app_engine"}, - }, - "description": { - Type: resource_compute_region_network_endpoint_group_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. Provide this property when -you create the resource.`, - }, - "network_endpoint_type": { - Type: resource_compute_region_network_endpoint_group_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_region_network_endpoint_group_validation.StringInSlice([]string{"SERVERLESS", ""}, false), - Description: `Type of network endpoints in this network endpoint group. 
Defaults to SERVERLESS Default value: "SERVERLESS" Possible values: ["SERVERLESS"]`, - Default: "SERVERLESS", - }, - "project": { - Type: resource_compute_region_network_endpoint_group_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_region_network_endpoint_group_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRegionNetworkEndpointGroupCreate(d *resource_compute_region_network_endpoint_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeRegionNetworkEndpointGroupName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_network_endpoint_group_reflect.ValueOf(nameProp)) && (ok || !resource_compute_region_network_endpoint_group_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeRegionNetworkEndpointGroupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_region_network_endpoint_group_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_region_network_endpoint_group_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - networkEndpointTypeProp, err := expandComputeRegionNetworkEndpointGroupNetworkEndpointType(d.Get("network_endpoint_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network_endpoint_type"); !isEmptyValue(resource_compute_region_network_endpoint_group_reflect.ValueOf(networkEndpointTypeProp)) && (ok || !resource_compute_region_network_endpoint_group_reflect.DeepEqual(v, networkEndpointTypeProp)) { - 
obj["networkEndpointType"] = networkEndpointTypeProp - } - cloudRunProp, err := expandComputeRegionNetworkEndpointGroupCloudRun(d.Get("cloud_run"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cloud_run"); !isEmptyValue(resource_compute_region_network_endpoint_group_reflect.ValueOf(cloudRunProp)) && (ok || !resource_compute_region_network_endpoint_group_reflect.DeepEqual(v, cloudRunProp)) { - obj["cloudRun"] = cloudRunProp - } - appEngineProp, err := expandComputeRegionNetworkEndpointGroupAppEngine(d.Get("app_engine"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("app_engine"); !isEmptyValue(resource_compute_region_network_endpoint_group_reflect.ValueOf(appEngineProp)) && (ok || !resource_compute_region_network_endpoint_group_reflect.DeepEqual(v, appEngineProp)) { - obj["appEngine"] = appEngineProp - } - cloudFunctionProp, err := expandComputeRegionNetworkEndpointGroupCloudFunction(d.Get("cloud_function"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cloud_function"); !isEmptyValue(resource_compute_region_network_endpoint_group_reflect.ValueOf(cloudFunctionProp)) && (ok || !resource_compute_region_network_endpoint_group_reflect.DeepEqual(v, cloudFunctionProp)) { - obj["cloudFunction"] = cloudFunctionProp - } - regionProp, err := expandComputeRegionNetworkEndpointGroupRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_region_network_endpoint_group_reflect.ValueOf(regionProp)) && (ok || !resource_compute_region_network_endpoint_group_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkEndpointGroups") - if err != nil { - return err - } - - resource_compute_region_network_endpoint_group_log.Printf("[DEBUG] Creating new RegionNetworkEndpointGroup: %#v", obj) - 
billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_network_endpoint_group_fmt.Errorf("Error fetching project for RegionNetworkEndpointGroup: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_network_endpoint_group_schema.TimeoutCreate)) - if err != nil { - return resource_compute_region_network_endpoint_group_fmt.Errorf("Error creating RegionNetworkEndpointGroup: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{name}}") - if err != nil { - return resource_compute_region_network_endpoint_group_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating RegionNetworkEndpointGroup", userAgent, - d.Timeout(resource_compute_region_network_endpoint_group_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_region_network_endpoint_group_fmt.Errorf("Error waiting to create RegionNetworkEndpointGroup: %s", err) - } - - resource_compute_region_network_endpoint_group_log.Printf("[DEBUG] Finished creating RegionNetworkEndpointGroup %q: %#v", d.Id(), res) - - return resourceComputeRegionNetworkEndpointGroupRead(d, meta) -} - -func resourceComputeRegionNetworkEndpointGroupRead(d *resource_compute_region_network_endpoint_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - 
return resource_compute_region_network_endpoint_group_fmt.Errorf("Error fetching project for RegionNetworkEndpointGroup: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_region_network_endpoint_group_fmt.Sprintf("ComputeRegionNetworkEndpointGroup %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_region_network_endpoint_group_fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - - if err := d.Set("name", flattenComputeRegionNetworkEndpointGroupName(res["name"], d, config)); err != nil { - return resource_compute_region_network_endpoint_group_fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - if err := d.Set("description", flattenComputeRegionNetworkEndpointGroupDescription(res["description"], d, config)); err != nil { - return resource_compute_region_network_endpoint_group_fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - if err := d.Set("network_endpoint_type", flattenComputeRegionNetworkEndpointGroupNetworkEndpointType(res["networkEndpointType"], d, config)); err != nil { - return resource_compute_region_network_endpoint_group_fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - if err := d.Set("cloud_run", flattenComputeRegionNetworkEndpointGroupCloudRun(res["cloudRun"], d, config)); err != nil { - return resource_compute_region_network_endpoint_group_fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - if err := d.Set("app_engine", flattenComputeRegionNetworkEndpointGroupAppEngine(res["appEngine"], d, config)); err != nil { - return resource_compute_region_network_endpoint_group_fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - if err := d.Set("cloud_function", 
flattenComputeRegionNetworkEndpointGroupCloudFunction(res["cloudFunction"], d, config)); err != nil { - return resource_compute_region_network_endpoint_group_fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - if err := d.Set("region", flattenComputeRegionNetworkEndpointGroupRegion(res["region"], d, config)); err != nil { - return resource_compute_region_network_endpoint_group_fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_region_network_endpoint_group_fmt.Errorf("Error reading RegionNetworkEndpointGroup: %s", err) - } - - return nil -} - -func resourceComputeRegionNetworkEndpointGroupDelete(d *resource_compute_region_network_endpoint_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_network_endpoint_group_fmt.Errorf("Error fetching project for RegionNetworkEndpointGroup: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_region_network_endpoint_group_log.Printf("[DEBUG] Deleting RegionNetworkEndpointGroup %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_network_endpoint_group_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionNetworkEndpointGroup") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting RegionNetworkEndpointGroup", 
userAgent, - d.Timeout(resource_compute_region_network_endpoint_group_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_region_network_endpoint_group_log.Printf("[DEBUG] Finished deleting RegionNetworkEndpointGroup %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionNetworkEndpointGroupImport(d *resource_compute_region_network_endpoint_group_schema.ResourceData, meta interface{}) ([]*resource_compute_region_network_endpoint_group_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/networkEndpointGroups/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{name}}") - if err != nil { - return nil, resource_compute_region_network_endpoint_group_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_region_network_endpoint_group_schema.ResourceData{d}, nil -} - -func flattenComputeRegionNetworkEndpointGroupName(v interface{}, d *resource_compute_region_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupDescription(v interface{}, d *resource_compute_region_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupNetworkEndpointType(v interface{}, d *resource_compute_region_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupCloudRun(v interface{}, d *resource_compute_region_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["service"] = - flattenComputeRegionNetworkEndpointGroupCloudRunService(original["service"], d, config) - transformed["tag"] = - flattenComputeRegionNetworkEndpointGroupCloudRunTag(original["tag"], d, config) - transformed["url_mask"] = - flattenComputeRegionNetworkEndpointGroupCloudRunUrlMask(original["urlMask"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionNetworkEndpointGroupCloudRunService(v interface{}, d *resource_compute_region_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupCloudRunTag(v interface{}, d *resource_compute_region_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupCloudRunUrlMask(v interface{}, d *resource_compute_region_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupAppEngine(v interface{}, d *resource_compute_region_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["service"] = - flattenComputeRegionNetworkEndpointGroupAppEngineService(original["service"], d, config) - transformed["version"] = - flattenComputeRegionNetworkEndpointGroupAppEngineVersion(original["version"], d, config) - transformed["url_mask"] = - flattenComputeRegionNetworkEndpointGroupAppEngineUrlMask(original["urlMask"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionNetworkEndpointGroupAppEngineService(v interface{}, d *resource_compute_region_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupAppEngineVersion(v interface{}, d 
*resource_compute_region_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupAppEngineUrlMask(v interface{}, d *resource_compute_region_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupCloudFunction(v interface{}, d *resource_compute_region_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["function"] = - flattenComputeRegionNetworkEndpointGroupCloudFunctionFunction(original["function"], d, config) - transformed["url_mask"] = - flattenComputeRegionNetworkEndpointGroupCloudFunctionUrlMask(original["urlMask"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionNetworkEndpointGroupCloudFunctionFunction(v interface{}, d *resource_compute_region_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupCloudFunctionUrlMask(v interface{}, d *resource_compute_region_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionNetworkEndpointGroupRegion(v interface{}, d *resource_compute_region_network_endpoint_group_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeRegionNetworkEndpointGroupName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupNetworkEndpointType(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupCloudRun(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedService, err := expandComputeRegionNetworkEndpointGroupCloudRunService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_network_endpoint_group_reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - transformedTag, err := expandComputeRegionNetworkEndpointGroupCloudRunTag(original["tag"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_network_endpoint_group_reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - transformedUrlMask, err := expandComputeRegionNetworkEndpointGroupCloudRunUrlMask(original["url_mask"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_network_endpoint_group_reflect.ValueOf(transformedUrlMask); val.IsValid() && !isEmptyValue(val) { - transformed["urlMask"] = transformedUrlMask - } - - return transformed, nil -} - -func expandComputeRegionNetworkEndpointGroupCloudRunService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupCloudRunTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupCloudRunUrlMask(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupAppEngine(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedService, err := expandComputeRegionNetworkEndpointGroupAppEngineService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_network_endpoint_group_reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - transformedVersion, err := expandComputeRegionNetworkEndpointGroupAppEngineVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_network_endpoint_group_reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - transformedUrlMask, err := expandComputeRegionNetworkEndpointGroupAppEngineUrlMask(original["url_mask"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_network_endpoint_group_reflect.ValueOf(transformedUrlMask); val.IsValid() && !isEmptyValue(val) { - transformed["urlMask"] = transformedUrlMask - } - - return transformed, nil -} - -func expandComputeRegionNetworkEndpointGroupAppEngineService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupAppEngineVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupAppEngineUrlMask(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupCloudFunction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) 
{ - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFunction, err := expandComputeRegionNetworkEndpointGroupCloudFunctionFunction(original["function"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_network_endpoint_group_reflect.ValueOf(transformedFunction); val.IsValid() && !isEmptyValue(val) { - transformed["function"] = transformedFunction - } - - transformedUrlMask, err := expandComputeRegionNetworkEndpointGroupCloudFunctionUrlMask(original["url_mask"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_network_endpoint_group_reflect.ValueOf(transformedUrlMask); val.IsValid() && !isEmptyValue(val) { - transformed["urlMask"] = transformedUrlMask - } - - return transformed, nil -} - -func expandComputeRegionNetworkEndpointGroupCloudFunctionFunction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupCloudFunctionUrlMask(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionNetworkEndpointGroupRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_region_network_endpoint_group_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeRegionPerInstanceConfig() *resource_compute_region_per_instance_config_schema.Resource { - return &resource_compute_region_per_instance_config_schema.Resource{ - Create: resourceComputeRegionPerInstanceConfigCreate, - Read: resourceComputeRegionPerInstanceConfigRead, - Update: resourceComputeRegionPerInstanceConfigUpdate, - Delete: 
resourceComputeRegionPerInstanceConfigDelete, - - Importer: &resource_compute_region_per_instance_config_schema.ResourceImporter{ - State: resourceComputeRegionPerInstanceConfigImport, - }, - - Timeouts: &resource_compute_region_per_instance_config_schema.ResourceTimeout{ - Create: resource_compute_region_per_instance_config_schema.DefaultTimeout(15 * resource_compute_region_per_instance_config_time.Minute), - Update: resource_compute_region_per_instance_config_schema.DefaultTimeout(6 * resource_compute_region_per_instance_config_time.Minute), - Delete: resource_compute_region_per_instance_config_schema.DefaultTimeout(15 * resource_compute_region_per_instance_config_time.Minute), - }, - - Schema: map[string]*resource_compute_region_per_instance_config_schema.Schema{ - "name": { - Type: resource_compute_region_per_instance_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name for this per-instance config and its corresponding instance.`, - }, - "region_instance_group_manager": { - Type: resource_compute_region_per_instance_config_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The region instance group manager this instance config is part of.`, - }, - "preserved_state": { - Type: resource_compute_region_per_instance_config_schema.TypeList, - Optional: true, - Description: `The preserved state for this instance.`, - MaxItems: 1, - Elem: &resource_compute_region_per_instance_config_schema.Resource{ - Schema: map[string]*resource_compute_region_per_instance_config_schema.Schema{ - "disk": { - Type: resource_compute_region_per_instance_config_schema.TypeSet, - Optional: true, - Description: `Stateful disks for the instance.`, - Elem: computeRegionPerInstanceConfigPreservedStateDiskSchema(), - }, - "metadata": { - Type: resource_compute_region_per_instance_config_schema.TypeMap, - Optional: true, - Description: `Preserved metadata defined for this instance. 
This is a list of key->value pairs.`, - Elem: &resource_compute_region_per_instance_config_schema.Schema{Type: resource_compute_region_per_instance_config_schema.TypeString}, - }, - }, - }, - }, - "region": { - Type: resource_compute_region_per_instance_config_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Region where the containing instance group manager is located`, - }, - "minimal_action": { - Type: resource_compute_region_per_instance_config_schema.TypeString, - Optional: true, - Default: "NONE", - }, - "most_disruptive_allowed_action": { - Type: resource_compute_region_per_instance_config_schema.TypeString, - Optional: true, - Default: "REPLACE", - }, - "remove_instance_state_on_destroy": { - Type: resource_compute_region_per_instance_config_schema.TypeBool, - Optional: true, - Default: false, - }, - "project": { - Type: resource_compute_region_per_instance_config_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func computeRegionPerInstanceConfigPreservedStateDiskSchema() *resource_compute_region_per_instance_config_schema.Resource { - return &resource_compute_region_per_instance_config_schema.Resource{ - Schema: map[string]*resource_compute_region_per_instance_config_schema.Schema{ - "device_name": { - Type: resource_compute_region_per_instance_config_schema.TypeString, - Required: true, - Description: `A unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance.`, - }, - "source": { - Type: resource_compute_region_per_instance_config_schema.TypeString, - Required: true, - Description: `The URI of an existing persistent disk to attach under the specified device-name in the format -'projects/project-id/zones/zone/disks/disk-name'.`, - }, - "delete_rule": { - Type: resource_compute_region_per_instance_config_schema.TypeString, - Optional: true, - 
ValidateFunc: resource_compute_region_per_instance_config_validation.StringInSlice([]string{"NEVER", "ON_PERMANENT_INSTANCE_DELETION", ""}, false), - Description: `A value that prescribes what should happen to the stateful disk when the VM instance is deleted. -The available options are 'NEVER' and 'ON_PERMANENT_INSTANCE_DELETION'. -'NEVER' - detach the disk when the VM is deleted, but do not delete the disk. -'ON_PERMANENT_INSTANCE_DELETION' will delete the stateful disk when the VM is permanently -deleted from the instance group. Default value: "NEVER" Possible values: ["NEVER", "ON_PERMANENT_INSTANCE_DELETION"]`, - Default: "NEVER", - }, - "mode": { - Type: resource_compute_region_per_instance_config_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_per_instance_config_validation.StringInSlice([]string{"READ_ONLY", "READ_WRITE", ""}, false), - Description: `The mode of the disk. Default value: "READ_WRITE" Possible values: ["READ_ONLY", "READ_WRITE"]`, - Default: "READ_WRITE", - }, - }, - } -} - -func resourceComputeRegionPerInstanceConfigCreate(d *resource_compute_region_per_instance_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputeRegionPerInstanceConfigName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_per_instance_config_reflect.ValueOf(nameProp)) && (ok || !resource_compute_region_per_instance_config_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - preservedStateProp, err := expandNestedComputeRegionPerInstanceConfigPreservedState(d.Get("preserved_state"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("preserved_state"); 
!isEmptyValue(resource_compute_region_per_instance_config_reflect.ValueOf(preservedStateProp)) && (ok || !resource_compute_region_per_instance_config_reflect.DeepEqual(v, preservedStateProp)) { - obj["preservedState"] = preservedStateProp - } - - obj, err = resourceComputeRegionPerInstanceConfigEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "instanceGroupManager/{{project}}/{{region}}/{{region_instance_group_manager}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/createInstances") - if err != nil { - return err - } - - resource_compute_region_per_instance_config_log.Printf("[DEBUG] Creating new RegionPerInstanceConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error fetching project for RegionPerInstanceConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_per_instance_config_schema.TimeoutCreate)) - if err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error creating RegionPerInstanceConfig: %s", err) - } - - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{region_instance_group_manager}}/{{name}}") - if err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating RegionPerInstanceConfig", userAgent, - d.Timeout(resource_compute_region_per_instance_config_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - 
return resource_compute_region_per_instance_config_fmt.Errorf("Error waiting to create RegionPerInstanceConfig: %s", err) - } - - resource_compute_region_per_instance_config_log.Printf("[DEBUG] Finished creating RegionPerInstanceConfig %q: %#v", d.Id(), res) - - return resourceComputeRegionPerInstanceConfigRead(d, meta) -} - -func resourceComputeRegionPerInstanceConfigRead(d *resource_compute_region_per_instance_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/listPerInstanceConfigs") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error fetching project for RegionPerInstanceConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "POST", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_region_per_instance_config_fmt.Sprintf("ComputeRegionPerInstanceConfig %q", d.Id())) - } - - res, err = flattenNestedComputeRegionPerInstanceConfig(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_region_per_instance_config_log.Printf("[DEBUG] Removing ComputeRegionPerInstanceConfig because it couldn't be matched.") - d.SetId("") - return nil - } - - if _, ok := d.GetOkExists("minimal_action"); !ok { - if err := d.Set("minimal_action", "NONE"); err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error setting minimal_action: %s", err) - } - } - if _, ok := d.GetOkExists("most_disruptive_allowed_action"); !ok { - if err := 
d.Set("most_disruptive_allowed_action", "REPLACE"); err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error setting most_disruptive_allowed_action: %s", err) - } - } - if _, ok := d.GetOkExists("remove_instance_state_on_destroy"); !ok { - if err := d.Set("remove_instance_state_on_destroy", false); err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error setting remove_instance_state_on_destroy: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error reading RegionPerInstanceConfig: %s", err) - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - if err := d.Set("region", region); err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error reading RegionPerInstanceConfig: %s", err) - } - - if err := d.Set("name", flattenNestedComputeRegionPerInstanceConfigName(res["name"], d, config)); err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error reading RegionPerInstanceConfig: %s", err) - } - if err := d.Set("preserved_state", flattenNestedComputeRegionPerInstanceConfigPreservedState(res["preservedState"], d, config)); err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error reading RegionPerInstanceConfig: %s", err) - } - - return nil -} - -func resourceComputeRegionPerInstanceConfigUpdate(d *resource_compute_region_per_instance_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error fetching project for RegionPerInstanceConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := 
expandNestedComputeRegionPerInstanceConfigName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_per_instance_config_reflect.ValueOf(v)) && (ok || !resource_compute_region_per_instance_config_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - preservedStateProp, err := expandNestedComputeRegionPerInstanceConfigPreservedState(d.Get("preserved_state"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("preserved_state"); !isEmptyValue(resource_compute_region_per_instance_config_reflect.ValueOf(v)) && (ok || !resource_compute_region_per_instance_config_reflect.DeepEqual(v, preservedStateProp)) { - obj["preservedState"] = preservedStateProp - } - - obj, err = resourceComputeRegionPerInstanceConfigUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "instanceGroupManager/{{project}}/{{region}}/{{region_instance_group_manager}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/updatePerInstanceConfigs") - if err != nil { - return err - } - - resource_compute_region_per_instance_config_log.Printf("[DEBUG] Updating RegionPerInstanceConfig %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_per_instance_config_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error updating RegionPerInstanceConfig %q: %s", d.Id(), err) - } else { - resource_compute_region_per_instance_config_log.Printf("[DEBUG] Finished updating RegionPerInstanceConfig %q: %#v", d.Id(), res) - 
} - - err = computeOperationWaitTime( - config, res, project, "Updating RegionPerInstanceConfig", userAgent, - d.Timeout(resource_compute_region_per_instance_config_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - instanceName, err := findInstanceName(d, config) - if err != nil { - return err - } - - obj = make(map[string]interface{}) - obj["instances"] = []string{instanceName} - - minAction := d.Get("minimal_action") - if minAction == "" { - minAction = "NONE" - } - obj["minimalAction"] = minAction - - mostDisruptiveAction := d.Get("most_disruptive_allowed_action") - if mostDisruptiveAction == "" { - mostDisruptiveAction = "REPLACE" - } - obj["mostDisruptiveActionAllowed"] = mostDisruptiveAction - - url, err = replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/applyUpdatesToInstances") - if err != nil { - return err - } - - resource_compute_region_per_instance_config_log.Printf("[DEBUG] Applying updates to PerInstanceConfig %q: %#v", d.Id(), obj) - res, err = sendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(resource_compute_region_per_instance_config_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error updating PerInstanceConfig %q: %s", d.Id(), err) - } - - err = computeOperationWaitTime( - config, res, project, "Applying update to PerInstanceConfig", userAgent, - d.Timeout(resource_compute_region_per_instance_config_schema.TimeoutUpdate)) - - if err != nil { - return err - } - return resourceComputeRegionPerInstanceConfigRead(d, meta) - } - - func resourceComputeRegionPerInstanceConfigDelete(d *resource_compute_region_per_instance_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - 
return err - } - - lockName, err := replaceVars(d, config, "instanceGroupManager/{{project}}/{{region}}/{{region_instance_group_manager}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/deletePerInstanceConfigs") - if err != nil { - return err - } - - var obj map[string]interface{} - obj = map[string]interface{}{ - "names": [1]string{d.Get("name").(string)}, - } - resource_compute_region_per_instance_config_log.Printf("[DEBUG] Deleting RegionPerInstanceConfig %q", d.Id()) - - res, err := sendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(resource_compute_region_per_instance_config_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionPerInstanceConfig") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting RegionPerInstanceConfig", userAgent, - d.Timeout(resource_compute_region_per_instance_config_schema.TimeoutDelete)) - - if err != nil { - return err - } - - if d.Get("remove_instance_state_on_destroy").(bool) { - - instanceName, err := findInstanceName(d, config) - if err != nil { - return err - } - - obj = make(map[string]interface{}) - obj["instances"] = []string{instanceName} - - url, err = replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/applyUpdatesToInstances") - if err != nil { - return err - } - - resource_compute_region_per_instance_config_log.Printf("[DEBUG] Applying updates to PerInstanceConfig %q: %#v", d.Id(), obj) - res, err = sendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(resource_compute_region_per_instance_config_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error updating PerInstanceConfig 
%q: %s", d.Id(), err) - } - - err = computeOperationWaitTime( - config, res, project, "Applying update to PerInstanceConfig", userAgent, - d.Timeout(resource_compute_region_per_instance_config_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error deleting PerInstanceConfig %q: %s", d.Id(), err) - } - - err = PollingWaitTime(resourceComputeRegionPerInstanceConfigPollRead(d, meta), PollCheckInstanceConfigDeleted, "Deleting RegionPerInstanceConfig", d.Timeout(resource_compute_region_per_instance_config_schema.TimeoutDelete), 1) - if err != nil { - return resource_compute_region_per_instance_config_fmt.Errorf("Error waiting for delete on RegionPerInstanceConfig %q: %s", d.Id(), err) - } - } - - resource_compute_region_per_instance_config_log.Printf("[DEBUG] Finished deleting RegionPerInstanceConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionPerInstanceConfigImport(d *resource_compute_region_per_instance_config_schema.ResourceData, meta interface{}) ([]*resource_compute_region_per_instance_config_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{region_instance_group_manager}}/{{name}}") - if err != nil { - return nil, resource_compute_region_per_instance_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if err := d.Set("minimal_action", "NONE"); err != nil { - return nil, resource_compute_region_per_instance_config_fmt.Errorf("Error setting minimal_action: %s", err) - } - if err := d.Set("most_disruptive_allowed_action", "REPLACE"); err != nil { - return nil, resource_compute_region_per_instance_config_fmt.Errorf("Error 
setting most_disruptive_allowed_action: %s", err) - } - if err := d.Set("remove_instance_state_on_destroy", false); err != nil { - return nil, resource_compute_region_per_instance_config_fmt.Errorf("Error setting remove_instance_state_on_destroy: %s", err) - } - - return []*resource_compute_region_per_instance_config_schema.ResourceData{d}, nil -} - -func flattenNestedComputeRegionPerInstanceConfigName(v interface{}, d *resource_compute_region_per_instance_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRegionPerInstanceConfigPreservedState(v interface{}, d *resource_compute_region_per_instance_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["metadata"] = - flattenNestedComputeRegionPerInstanceConfigPreservedStateMetadata(original["metadata"], d, config) - transformed["disk"] = - flattenNestedComputeRegionPerInstanceConfigPreservedStateDisk(original["disks"], d, config) - return []interface{}{transformed} -} - -func flattenNestedComputeRegionPerInstanceConfigPreservedStateMetadata(v interface{}, d *resource_compute_region_per_instance_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRegionPerInstanceConfigPreservedStateDisk(v interface{}, d *resource_compute_region_per_instance_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - disks := v.(map[string]interface{}) - transformed := make([]interface{}, 0, len(disks)) - for devName, deleteRuleRaw := range disks { - diskObj := deleteRuleRaw.(map[string]interface{}) - source, err := getRelativePath(diskObj["source"].(string)) - if err != nil { - source = diskObj["source"].(string) - } - transformed = append(transformed, map[string]interface{}{ - "device_name": devName, - "delete_rule": 
diskObj["autoDelete"], - "source": source, - "mode": diskObj["mode"], - }) - } - return transformed -} - -func expandNestedComputeRegionPerInstanceConfigName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRegionPerInstanceConfigPreservedState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMetadata, err := expandNestedComputeRegionPerInstanceConfigPreservedStateMetadata(original["metadata"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_per_instance_config_reflect.ValueOf(transformedMetadata); val.IsValid() && !isEmptyValue(val) { - transformed["metadata"] = transformedMetadata - } - - transformedDisk, err := expandNestedComputeRegionPerInstanceConfigPreservedStateDisk(original["disk"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_per_instance_config_reflect.ValueOf(transformedDisk); val.IsValid() && !isEmptyValue(val) { - transformed["disks"] = transformedDisk - } - - return transformed, nil -} - -func expandNestedComputeRegionPerInstanceConfigPreservedStateMetadata(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandNestedComputeRegionPerInstanceConfigPreservedStateDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - l := v.(*resource_compute_region_per_instance_config_schema.Set).List() - req := make(map[string]interface{}) - for _, raw := range l { - if raw 
== nil { - continue - } - original := raw.(map[string]interface{}) - deviceName := original["device_name"].(string) - diskObj := make(map[string]interface{}) - deleteRule := original["delete_rule"].(string) - if deleteRule != "" { - diskObj["autoDelete"] = deleteRule - } - source := original["source"] - if source != "" { - diskObj["source"] = source - } - mode := original["mode"] - if mode != "" { - diskObj["mode"] = mode - } - req[deviceName] = diskObj - } - return req, nil - } - - func resourceComputeRegionPerInstanceConfigEncoder(d *resource_compute_region_per_instance_config_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - wrappedReq := map[string]interface{}{ - "instances": []interface{}{obj}, - } - return wrappedReq, nil - } - - func resourceComputeRegionPerInstanceConfigUpdateEncoder(d *resource_compute_region_per_instance_config_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - wrappedReq := map[string]interface{}{ - "perInstanceConfigs": []interface{}{obj}, - } - return wrappedReq, nil - } - - func flattenNestedComputeRegionPerInstanceConfig(d *resource_compute_region_per_instance_config_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["items"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_compute_region_per_instance_config_fmt.Errorf("expected list or map for value items. 
Actual value: %v", v) - } - - _, item, err := resourceComputeRegionPerInstanceConfigFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeRegionPerInstanceConfigFindNestedObjectInList(d *resource_compute_region_per_instance_config_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName, err := expandNestedComputeRegionPerInstanceConfigName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputeRegionPerInstanceConfigName(expectedName, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemName := flattenNestedComputeRegionPerInstanceConfigName(item["name"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_region_per_instance_config_reflect.ValueOf(itemName)) && isEmptyValue(resource_compute_region_per_instance_config_reflect.ValueOf(expectedFlattenedName))) && !resource_compute_region_per_instance_config_reflect.DeepEqual(itemName, expectedFlattenedName) { - resource_compute_region_per_instance_config_log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - resource_compute_region_per_instance_config_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func resourceComputeRegionSslCertificate() *resource_compute_region_ssl_certificate_schema.Resource { - return &resource_compute_region_ssl_certificate_schema.Resource{ - Create: resourceComputeRegionSslCertificateCreate, - Read: resourceComputeRegionSslCertificateRead, - Delete: resourceComputeRegionSslCertificateDelete, - - Importer: &resource_compute_region_ssl_certificate_schema.ResourceImporter{ - State: resourceComputeRegionSslCertificateImport, - }, - - 
Timeouts: &resource_compute_region_ssl_certificate_schema.ResourceTimeout{ - Create: resource_compute_region_ssl_certificate_schema.DefaultTimeout(4 * resource_compute_region_ssl_certificate_time.Minute), - Delete: resource_compute_region_ssl_certificate_schema.DefaultTimeout(4 * resource_compute_region_ssl_certificate_time.Minute), - }, - - Schema: map[string]*resource_compute_region_ssl_certificate_schema.Schema{ - "certificate": { - Type: resource_compute_region_ssl_certificate_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The certificate in PEM format. -The certificate chain must be no greater than 5 certs long. -The chain must include at least one intermediate cert.`, - Sensitive: true, - }, - "private_key": { - Type: resource_compute_region_ssl_certificate_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: sha256DiffSuppress, - Description: `The write-only private key in PEM format.`, - Sensitive: true, - }, - "description": { - Type: resource_compute_region_ssl_certificate_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "name": { - Type: resource_compute_region_ssl_certificate_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateGCPName, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash. 
- - -These are in the same namespace as the managed SSL certificates.`, - }, - "region": { - Type: resource_compute_region_ssl_certificate_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Region in which the created regional ssl certificate should reside. -If it is not provided, the provider region is used.`, - }, - "certificate_id": { - Type: resource_compute_region_ssl_certificate_schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "creation_timestamp": { - Type: resource_compute_region_ssl_certificate_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "name_prefix": { - Type: resource_compute_region_ssl_certificate_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name"}, - Description: "Creates a unique name beginning with the specified prefix. 
Conflicts with name.", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - - value := v.(string) - if len(value) > 37 { - errors = append(errors, resource_compute_region_ssl_certificate_fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) - } - return - }, - }, - "project": { - Type: resource_compute_region_ssl_certificate_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_region_ssl_certificate_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRegionSslCertificateCreate(d *resource_compute_region_ssl_certificate_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - certificateProp, err := expandComputeRegionSslCertificateCertificate(d.Get("certificate"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("certificate"); !isEmptyValue(resource_compute_region_ssl_certificate_reflect.ValueOf(certificateProp)) && (ok || !resource_compute_region_ssl_certificate_reflect.DeepEqual(v, certificateProp)) { - obj["certificate"] = certificateProp - } - descriptionProp, err := expandComputeRegionSslCertificateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_region_ssl_certificate_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_region_ssl_certificate_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeRegionSslCertificateName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_ssl_certificate_reflect.ValueOf(nameProp)) && (ok || 
!resource_compute_region_ssl_certificate_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - privateKeyProp, err := expandComputeRegionSslCertificatePrivateKey(d.Get("private_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("private_key"); !isEmptyValue(resource_compute_region_ssl_certificate_reflect.ValueOf(privateKeyProp)) && (ok || !resource_compute_region_ssl_certificate_reflect.DeepEqual(v, privateKeyProp)) { - obj["privateKey"] = privateKeyProp - } - regionProp, err := expandComputeRegionSslCertificateRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_region_ssl_certificate_reflect.ValueOf(regionProp)) && (ok || !resource_compute_region_ssl_certificate_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/sslCertificates") - if err != nil { - return err - } - - resource_compute_region_ssl_certificate_log.Printf("[DEBUG] Creating new RegionSslCertificate: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_ssl_certificate_fmt.Errorf("Error fetching project for RegionSslCertificate: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_ssl_certificate_schema.TimeoutCreate)) - if err != nil { - return resource_compute_region_ssl_certificate_fmt.Errorf("Error creating RegionSslCertificate: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/sslCertificates/{{name}}") - if err != nil { - return resource_compute_region_ssl_certificate_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = 
computeOperationWaitTime( - config, res, project, "Creating RegionSslCertificate", userAgent, - d.Timeout(resource_compute_region_ssl_certificate_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_region_ssl_certificate_fmt.Errorf("Error waiting to create RegionSslCertificate: %s", err) - } - - resource_compute_region_ssl_certificate_log.Printf("[DEBUG] Finished creating RegionSslCertificate %q: %#v", d.Id(), res) - - return resourceComputeRegionSslCertificateRead(d, meta) -} - -func resourceComputeRegionSslCertificateRead(d *resource_compute_region_ssl_certificate_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/sslCertificates/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_ssl_certificate_fmt.Errorf("Error fetching project for RegionSslCertificate: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_region_ssl_certificate_fmt.Sprintf("ComputeRegionSslCertificate %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_region_ssl_certificate_fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - - if err := d.Set("certificate", flattenComputeRegionSslCertificateCertificate(res["certificate"], d, config)); err != nil { - return resource_compute_region_ssl_certificate_fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - if err := d.Set("creation_timestamp", 
flattenComputeRegionSslCertificateCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_region_ssl_certificate_fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - if err := d.Set("description", flattenComputeRegionSslCertificateDescription(res["description"], d, config)); err != nil { - return resource_compute_region_ssl_certificate_fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - if err := d.Set("certificate_id", flattenComputeRegionSslCertificateCertificateId(res["id"], d, config)); err != nil { - return resource_compute_region_ssl_certificate_fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - if err := d.Set("name", flattenComputeRegionSslCertificateName(res["name"], d, config)); err != nil { - return resource_compute_region_ssl_certificate_fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - if err := d.Set("region", flattenComputeRegionSslCertificateRegion(res["region"], d, config)); err != nil { - return resource_compute_region_ssl_certificate_fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_region_ssl_certificate_fmt.Errorf("Error reading RegionSslCertificate: %s", err) - } - - return nil -} - -func resourceComputeRegionSslCertificateDelete(d *resource_compute_region_ssl_certificate_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_ssl_certificate_fmt.Errorf("Error fetching project for RegionSslCertificate: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/sslCertificates/{{name}}") - if err != nil { - return err 
- } - - var obj map[string]interface{} - resource_compute_region_ssl_certificate_log.Printf("[DEBUG] Deleting RegionSslCertificate %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_ssl_certificate_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionSslCertificate") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting RegionSslCertificate", userAgent, - d.Timeout(resource_compute_region_ssl_certificate_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_region_ssl_certificate_log.Printf("[DEBUG] Finished deleting RegionSslCertificate %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionSslCertificateImport(d *resource_compute_region_ssl_certificate_schema.ResourceData, meta interface{}) ([]*resource_compute_region_ssl_certificate_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/sslCertificates/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/sslCertificates/{{name}}") - if err != nil { - return nil, resource_compute_region_ssl_certificate_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_region_ssl_certificate_schema.ResourceData{d}, nil -} - -func flattenComputeRegionSslCertificateCertificate(v interface{}, d *resource_compute_region_ssl_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionSslCertificateCreationTimestamp(v interface{}, d *resource_compute_region_ssl_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeRegionSslCertificateDescription(v interface{}, d *resource_compute_region_ssl_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionSslCertificateCertificateId(v interface{}, d *resource_compute_region_ssl_certificate_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_ssl_certificate_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionSslCertificateName(v interface{}, d *resource_compute_region_ssl_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionSslCertificateRegion(v interface{}, d *resource_compute_region_ssl_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeRegionSslCertificateCertificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionSslCertificateDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionSslCertificateName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - var certName string - if v, ok := d.GetOk("name"); ok { - certName = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - certName = resource_compute_region_ssl_certificate_resource.PrefixedUniqueId(v.(string)) - } else { - certName = resource_compute_region_ssl_certificate_resource.UniqueId() - } - - if err := d.Set("name", certName); err != nil { - return nil, resource_compute_region_ssl_certificate_fmt.Errorf("Error setting name: %s", err) - } - - return certName, nil -} - -func expandComputeRegionSslCertificatePrivateKey(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionSslCertificateRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_region_ssl_certificate_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeRegionTargetHttpProxy() *resource_compute_region_target_http_proxy_schema.Resource { - return &resource_compute_region_target_http_proxy_schema.Resource{ - Create: resourceComputeRegionTargetHttpProxyCreate, - Read: resourceComputeRegionTargetHttpProxyRead, - Update: resourceComputeRegionTargetHttpProxyUpdate, - Delete: resourceComputeRegionTargetHttpProxyDelete, - - Importer: &resource_compute_region_target_http_proxy_schema.ResourceImporter{ - State: resourceComputeRegionTargetHttpProxyImport, - }, - - Timeouts: &resource_compute_region_target_http_proxy_schema.ResourceTimeout{ - Create: resource_compute_region_target_http_proxy_schema.DefaultTimeout(4 * resource_compute_region_target_http_proxy_time.Minute), - Update: resource_compute_region_target_http_proxy_schema.DefaultTimeout(4 * resource_compute_region_target_http_proxy_time.Minute), - Delete: resource_compute_region_target_http_proxy_schema.DefaultTimeout(4 * resource_compute_region_target_http_proxy_time.Minute), - }, - - Schema: map[string]*resource_compute_region_target_http_proxy_schema.Schema{ - "name": { - Type: resource_compute_region_target_http_proxy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "url_map": { - Type: resource_compute_region_target_http_proxy_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the RegionUrlMap resource that defines the mapping from URL -to the BackendService.`, - }, - "description": { - Type: resource_compute_region_target_http_proxy_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "region": { - Type: resource_compute_region_target_http_proxy_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Region in which the created target https proxy should reside. -If it is not provided, the provider region is used.`, - }, - "creation_timestamp": { - Type: resource_compute_region_target_http_proxy_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "proxy_id": { - Type: resource_compute_region_target_http_proxy_schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: resource_compute_region_target_http_proxy_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_region_target_http_proxy_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRegionTargetHttpProxyCreate(d *resource_compute_region_target_http_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := 
expandComputeRegionTargetHttpProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_region_target_http_proxy_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_region_target_http_proxy_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeRegionTargetHttpProxyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_target_http_proxy_reflect.ValueOf(nameProp)) && (ok || !resource_compute_region_target_http_proxy_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - urlMapProp, err := expandComputeRegionTargetHttpProxyUrlMap(d.Get("url_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(resource_compute_region_target_http_proxy_reflect.ValueOf(urlMapProp)) && (ok || !resource_compute_region_target_http_proxy_reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - regionProp, err := expandComputeRegionTargetHttpProxyRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_region_target_http_proxy_reflect.ValueOf(regionProp)) && (ok || !resource_compute_region_target_http_proxy_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpProxies") - if err != nil { - return err - } - - resource_compute_region_target_http_proxy_log.Printf("[DEBUG] Creating new RegionTargetHttpProxy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_target_http_proxy_fmt.Errorf("Error fetching project for RegionTargetHttpProxy: %s", err) - } - billingProject = project 
- - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_target_http_proxy_schema.TimeoutCreate)) - if err != nil { - return resource_compute_region_target_http_proxy_fmt.Errorf("Error creating RegionTargetHttpProxy: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}") - if err != nil { - return resource_compute_region_target_http_proxy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating RegionTargetHttpProxy", userAgent, - d.Timeout(resource_compute_region_target_http_proxy_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_region_target_http_proxy_fmt.Errorf("Error waiting to create RegionTargetHttpProxy: %s", err) - } - - resource_compute_region_target_http_proxy_log.Printf("[DEBUG] Finished creating RegionTargetHttpProxy %q: %#v", d.Id(), res) - - return resourceComputeRegionTargetHttpProxyRead(d, meta) -} - -func resourceComputeRegionTargetHttpProxyRead(d *resource_compute_region_target_http_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_target_http_proxy_fmt.Errorf("Error fetching project for RegionTargetHttpProxy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) 
- if err != nil { - return handleNotFoundError(err, d, resource_compute_region_target_http_proxy_fmt.Sprintf("ComputeRegionTargetHttpProxy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_region_target_http_proxy_fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeRegionTargetHttpProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_region_target_http_proxy_fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) - } - if err := d.Set("description", flattenComputeRegionTargetHttpProxyDescription(res["description"], d, config)); err != nil { - return resource_compute_region_target_http_proxy_fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) - } - if err := d.Set("proxy_id", flattenComputeRegionTargetHttpProxyProxyId(res["id"], d, config)); err != nil { - return resource_compute_region_target_http_proxy_fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) - } - if err := d.Set("name", flattenComputeRegionTargetHttpProxyName(res["name"], d, config)); err != nil { - return resource_compute_region_target_http_proxy_fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) - } - if err := d.Set("url_map", flattenComputeRegionTargetHttpProxyUrlMap(res["urlMap"], d, config)); err != nil { - return resource_compute_region_target_http_proxy_fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) - } - if err := d.Set("region", flattenComputeRegionTargetHttpProxyRegion(res["region"], d, config)); err != nil { - return resource_compute_region_target_http_proxy_fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_region_target_http_proxy_fmt.Errorf("Error reading RegionTargetHttpProxy: %s", err) - } - - return nil -} - -func resourceComputeRegionTargetHttpProxyUpdate(d 
*resource_compute_region_target_http_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_target_http_proxy_fmt.Errorf("Error fetching project for RegionTargetHttpProxy: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("url_map") { - obj := make(map[string]interface{}) - - urlMapProp, err := expandComputeRegionTargetHttpProxyUrlMap(d.Get("url_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(resource_compute_region_target_http_proxy_reflect.ValueOf(v)) && (ok || !resource_compute_region_target_http_proxy_reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}/setUrlMap") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_target_http_proxy_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_region_target_http_proxy_fmt.Errorf("Error updating RegionTargetHttpProxy %q: %s", d.Id(), err) - } else { - resource_compute_region_target_http_proxy_log.Printf("[DEBUG] Finished updating RegionTargetHttpProxy %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating RegionTargetHttpProxy", userAgent, - d.Timeout(resource_compute_region_target_http_proxy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeRegionTargetHttpProxyRead(d, meta) -} - -func resourceComputeRegionTargetHttpProxyDelete(d 
*resource_compute_region_target_http_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_target_http_proxy_fmt.Errorf("Error fetching project for RegionTargetHttpProxy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_region_target_http_proxy_log.Printf("[DEBUG] Deleting RegionTargetHttpProxy %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_target_http_proxy_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionTargetHttpProxy") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting RegionTargetHttpProxy", userAgent, - d.Timeout(resource_compute_region_target_http_proxy_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_region_target_http_proxy_log.Printf("[DEBUG] Finished deleting RegionTargetHttpProxy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionTargetHttpProxyImport(d *resource_compute_region_target_http_proxy_schema.ResourceData, meta interface{}) ([]*resource_compute_region_target_http_proxy_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/targetHttpProxies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, 
"projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}") - if err != nil { - return nil, resource_compute_region_target_http_proxy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_region_target_http_proxy_schema.ResourceData{d}, nil -} - -func flattenComputeRegionTargetHttpProxyCreationTimestamp(v interface{}, d *resource_compute_region_target_http_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionTargetHttpProxyDescription(v interface{}, d *resource_compute_region_target_http_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionTargetHttpProxyProxyId(v interface{}, d *resource_compute_region_target_http_proxy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_target_http_proxy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionTargetHttpProxyName(v interface{}, d *resource_compute_region_target_http_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionTargetHttpProxyUrlMap(v interface{}, d *resource_compute_region_target_http_proxy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionTargetHttpProxyRegion(v interface{}, d *resource_compute_region_target_http_proxy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeRegionTargetHttpProxyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionTargetHttpProxyName(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandComputeRegionTargetHttpProxyUrlMap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("urlMaps", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_region_target_http_proxy_fmt.Errorf("Invalid value for url_map: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionTargetHttpProxyRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_region_target_http_proxy_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeRegionTargetHttpsProxy() *resource_compute_region_target_https_proxy_schema.Resource { - return &resource_compute_region_target_https_proxy_schema.Resource{ - Create: resourceComputeRegionTargetHttpsProxyCreate, - Read: resourceComputeRegionTargetHttpsProxyRead, - Update: resourceComputeRegionTargetHttpsProxyUpdate, - Delete: resourceComputeRegionTargetHttpsProxyDelete, - - Importer: &resource_compute_region_target_https_proxy_schema.ResourceImporter{ - State: resourceComputeRegionTargetHttpsProxyImport, - }, - - Timeouts: &resource_compute_region_target_https_proxy_schema.ResourceTimeout{ - Create: resource_compute_region_target_https_proxy_schema.DefaultTimeout(4 * resource_compute_region_target_https_proxy_time.Minute), - Update: resource_compute_region_target_https_proxy_schema.DefaultTimeout(4 * resource_compute_region_target_https_proxy_time.Minute), - Delete: resource_compute_region_target_https_proxy_schema.DefaultTimeout(4 * resource_compute_region_target_https_proxy_time.Minute), - }, - - Schema: map[string]*resource_compute_region_target_https_proxy_schema.Schema{ - "name": { - Type: 
resource_compute_region_target_https_proxy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "ssl_certificates": { - Type: resource_compute_region_target_https_proxy_schema.TypeList, - Required: true, - Description: `A list of RegionSslCertificate resources that are used to authenticate -connections between users and the load balancer. Currently, exactly -one SSL certificate must be specified.`, - Elem: &resource_compute_region_target_https_proxy_schema.Schema{ - Type: resource_compute_region_target_https_proxy_schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "url_map": { - Type: resource_compute_region_target_https_proxy_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the RegionUrlMap resource that defines the mapping from URL -to the RegionBackendService.`, - }, - "description": { - Type: resource_compute_region_target_https_proxy_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "region": { - Type: resource_compute_region_target_https_proxy_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Region in which the created target https proxy should reside. 
-If it is not provided, the provider region is used.`, - }, - "creation_timestamp": { - Type: resource_compute_region_target_https_proxy_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "proxy_id": { - Type: resource_compute_region_target_https_proxy_schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: resource_compute_region_target_https_proxy_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_region_target_https_proxy_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRegionTargetHttpsProxyCreate(d *resource_compute_region_target_https_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeRegionTargetHttpsProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_region_target_https_proxy_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_region_target_https_proxy_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeRegionTargetHttpsProxyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_target_https_proxy_reflect.ValueOf(nameProp)) && (ok || !resource_compute_region_target_https_proxy_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - sslCertificatesProp, err := expandComputeRegionTargetHttpsProxySslCertificates(d.Get("ssl_certificates"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_certificates"); 
!isEmptyValue(resource_compute_region_target_https_proxy_reflect.ValueOf(sslCertificatesProp)) && (ok || !resource_compute_region_target_https_proxy_reflect.DeepEqual(v, sslCertificatesProp)) { - obj["sslCertificates"] = sslCertificatesProp - } - urlMapProp, err := expandComputeRegionTargetHttpsProxyUrlMap(d.Get("url_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(resource_compute_region_target_https_proxy_reflect.ValueOf(urlMapProp)) && (ok || !resource_compute_region_target_https_proxy_reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - regionProp, err := expandComputeRegionTargetHttpsProxyRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_region_target_https_proxy_reflect.ValueOf(regionProp)) && (ok || !resource_compute_region_target_https_proxy_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies") - if err != nil { - return err - } - - resource_compute_region_target_https_proxy_log.Printf("[DEBUG] Creating new RegionTargetHttpsProxy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error fetching project for RegionTargetHttpsProxy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_target_https_proxy_schema.TimeoutCreate)) - if err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error creating RegionTargetHttpsProxy: %s", err) - } - - id, err := replaceVars(d, config, 
"projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}") - if err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating RegionTargetHttpsProxy", userAgent, - d.Timeout(resource_compute_region_target_https_proxy_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_region_target_https_proxy_fmt.Errorf("Error waiting to create RegionTargetHttpsProxy: %s", err) - } - - resource_compute_region_target_https_proxy_log.Printf("[DEBUG] Finished creating RegionTargetHttpsProxy %q: %#v", d.Id(), res) - - return resourceComputeRegionTargetHttpsProxyRead(d, meta) -} - -func resourceComputeRegionTargetHttpsProxyRead(d *resource_compute_region_target_https_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error fetching project for RegionTargetHttpsProxy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_region_target_https_proxy_fmt.Sprintf("ComputeRegionTargetHttpsProxy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - - if err := d.Set("creation_timestamp", 
flattenComputeRegionTargetHttpsProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - if err := d.Set("description", flattenComputeRegionTargetHttpsProxyDescription(res["description"], d, config)); err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - if err := d.Set("proxy_id", flattenComputeRegionTargetHttpsProxyProxyId(res["id"], d, config)); err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - if err := d.Set("name", flattenComputeRegionTargetHttpsProxyName(res["name"], d, config)); err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - if err := d.Set("ssl_certificates", flattenComputeRegionTargetHttpsProxySslCertificates(res["sslCertificates"], d, config)); err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - if err := d.Set("url_map", flattenComputeRegionTargetHttpsProxyUrlMap(res["urlMap"], d, config)); err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - if err := d.Set("region", flattenComputeRegionTargetHttpsProxyRegion(res["region"], d, config)); err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error reading RegionTargetHttpsProxy: %s", err) - } - - return nil -} - -func resourceComputeRegionTargetHttpsProxyUpdate(d *resource_compute_region_target_https_proxy_schema.ResourceData, meta interface{}) error { - 
config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error fetching project for RegionTargetHttpsProxy: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("ssl_certificates") { - obj := make(map[string]interface{}) - - sslCertificatesProp, err := expandComputeRegionTargetHttpsProxySslCertificates(d.Get("ssl_certificates"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_certificates"); !isEmptyValue(resource_compute_region_target_https_proxy_reflect.ValueOf(v)) && (ok || !resource_compute_region_target_https_proxy_reflect.DeepEqual(v, sslCertificatesProp)) { - obj["sslCertificates"] = sslCertificatesProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}/setSslCertificates") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_target_https_proxy_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error updating RegionTargetHttpsProxy %q: %s", d.Id(), err) - } else { - resource_compute_region_target_https_proxy_log.Printf("[DEBUG] Finished updating RegionTargetHttpsProxy %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating RegionTargetHttpsProxy", userAgent, - d.Timeout(resource_compute_region_target_https_proxy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("url_map") { - obj := make(map[string]interface{}) - - urlMapProp, err := expandComputeRegionTargetHttpsProxyUrlMap(d.Get("url_map"), 
d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(resource_compute_region_target_https_proxy_reflect.ValueOf(v)) && (ok || !resource_compute_region_target_https_proxy_reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}/setUrlMap") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_target_https_proxy_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error updating RegionTargetHttpsProxy %q: %s", d.Id(), err) - } else { - resource_compute_region_target_https_proxy_log.Printf("[DEBUG] Finished updating RegionTargetHttpsProxy %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating RegionTargetHttpsProxy", userAgent, - d.Timeout(resource_compute_region_target_https_proxy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeRegionTargetHttpsProxyRead(d, meta) -} - -func resourceComputeRegionTargetHttpsProxyDelete(d *resource_compute_region_target_https_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_target_https_proxy_fmt.Errorf("Error fetching project for RegionTargetHttpsProxy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}") - if err != nil { - return err - } - - var 
obj map[string]interface{} - resource_compute_region_target_https_proxy_log.Printf("[DEBUG] Deleting RegionTargetHttpsProxy %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_target_https_proxy_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionTargetHttpsProxy") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting RegionTargetHttpsProxy", userAgent, - d.Timeout(resource_compute_region_target_https_proxy_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_region_target_https_proxy_log.Printf("[DEBUG] Finished deleting RegionTargetHttpsProxy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionTargetHttpsProxyImport(d *resource_compute_region_target_https_proxy_schema.ResourceData, meta interface{}) ([]*resource_compute_region_target_https_proxy_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/targetHttpsProxies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}") - if err != nil { - return nil, resource_compute_region_target_https_proxy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_region_target_https_proxy_schema.ResourceData{d}, nil -} - -func flattenComputeRegionTargetHttpsProxyCreationTimestamp(v interface{}, d *resource_compute_region_target_https_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionTargetHttpsProxyDescription(v interface{}, d *resource_compute_region_target_https_proxy_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenComputeRegionTargetHttpsProxyProxyId(v interface{}, d *resource_compute_region_target_https_proxy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_target_https_proxy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionTargetHttpsProxyName(v interface{}, d *resource_compute_region_target_https_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionTargetHttpsProxySslCertificates(v interface{}, d *resource_compute_region_target_https_proxy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeRegionTargetHttpsProxyUrlMap(v interface{}, d *resource_compute_region_target_https_proxy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionTargetHttpsProxyRegion(v interface{}, d *resource_compute_region_target_https_proxy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeRegionTargetHttpsProxyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionTargetHttpsProxyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionTargetHttpsProxySslCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - return nil, 
resource_compute_region_target_https_proxy_fmt.Errorf("Invalid value for ssl_certificates: nil") - } - f, err := parseRegionalFieldValue("sslCertificates", raw.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_region_target_https_proxy_fmt.Errorf("Invalid value for ssl_certificates: %s", err) - } - req = append(req, f.RelativeLink()) - } - return req, nil -} - -func expandComputeRegionTargetHttpsProxyUrlMap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("urlMaps", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_region_target_https_proxy_fmt.Errorf("Invalid value for url_map: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionTargetHttpsProxyRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_region_target_https_proxy_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeRegionUrlMap() *resource_compute_region_url_map_schema.Resource { - return &resource_compute_region_url_map_schema.Resource{ - Create: resourceComputeRegionUrlMapCreate, - Read: resourceComputeRegionUrlMapRead, - Update: resourceComputeRegionUrlMapUpdate, - Delete: resourceComputeRegionUrlMapDelete, - - Importer: &resource_compute_region_url_map_schema.ResourceImporter{ - State: resourceComputeRegionUrlMapImport, - }, - - Timeouts: &resource_compute_region_url_map_schema.ResourceTimeout{ - Create: resource_compute_region_url_map_schema.DefaultTimeout(4 * resource_compute_region_url_map_time.Minute), - Update: resource_compute_region_url_map_schema.DefaultTimeout(4 * resource_compute_region_url_map_time.Minute), - Delete: resource_compute_region_url_map_schema.DefaultTimeout(4 * 
resource_compute_region_url_map_time.Minute), - }, - - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "name": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "default_service": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The full or partial URL of the defaultService resource to which traffic is directed if -none of the hostRules match. If defaultRouteAction is additionally specified, advanced -routing actions like URL Rewrites, etc. take effect prior to sending the request to the -backend. However, if defaultService is specified, defaultRouteAction cannot contain any -weightedBackendServices. Conversely, if routeAction specifies any -weightedBackendServices, service must not be specified. Only one of defaultService, -defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set.`, - ExactlyOneOf: []string{"default_service", "default_url_redirect"}, - }, - "default_url_redirect": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `When none of the specified hostRules match, the request is redirected to a URL specified -by defaultUrlRedirect. 
If defaultUrlRedirect is specified, defaultService or -defaultRouteAction must not be set.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "strip_query": { - Type: resource_compute_region_url_map_schema.TypeBool, - Required: true, - Description: `If set to true, any accompanying query portion of the original URL is removed prior -to redirecting the request. If set to false, the query portion of the original URL is -retained. - This field is required to ensure an empty block is not set. The normal default value is false.`, - }, - "host_redirect": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The host that will be used in the redirect response instead of the one that was -supplied in the request. The value must be between 1 and 255 characters.`, - }, - "https_redirect": { - Type: resource_compute_region_url_map_schema.TypeBool, - Optional: true, - Description: `If set to true, the URL scheme in the redirected request is set to https. If set to -false, the URL scheme of the redirected request will remain the same as that of the -request. This must only be set for UrlMaps used in TargetHttpProxys. Setting this -true for TargetHttpsProxy is not permitted. The default is set to false.`, - Default: false, - }, - "path_redirect": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The path that will be used in the redirect response instead of the one that was -supplied in the request. pathRedirect cannot be supplied together with -prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the -original request will be used for the redirect. 
The value must be between 1 and 1024 -characters.`, - }, - "prefix_redirect": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, -retaining the remaining portion of the URL before redirecting the request. -prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or -neither. If neither is supplied, the path of the original request will be used for -the redirect. The value must be between 1 and 1024 characters.`, - }, - "redirect_response_code": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_url_map_validation.StringInSlice([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}, false), - Description: `The HTTP Status code to use for this RedirectAction. Supported values are: - -* MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. - -* FOUND, which corresponds to 302. - -* SEE_OTHER which corresponds to 303. - -* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method -will be retained. - -* PERMANENT_REDIRECT, which corresponds to 308. In this case, -the request method will be retained. Possible values: ["FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT"]`, - }, - }, - }, - ExactlyOneOf: []string{"default_service", "default_url_redirect"}, - }, - "description": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `An optional description of this resource. 
Provide this property when -you create the resource.`, - }, - "host_rule": { - Type: resource_compute_region_url_map_schema.TypeSet, - Optional: true, - Description: `The list of HostRules to use against the URL.`, - Elem: computeRegionUrlMapHostRuleSchema(), - }, - "path_matcher": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `The list of named PathMatchers to use against the URL.`, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "name": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The name to which this PathMatcher is referred by the HostRule.`, - }, - "default_service": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to a RegionBackendService resource. This will be used if -none of the pathRules defined by this PathMatcher is matched by -the URL's path portion.`, - }, - "default_url_redirect": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `When none of the specified hostRules match, the request is redirected to a URL specified -by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService or -defaultRouteAction must not be set.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "strip_query": { - Type: resource_compute_region_url_map_schema.TypeBool, - Required: true, - Description: `If set to true, any accompanying query portion of the original URL is removed prior -to redirecting the request. If set to false, the query portion of the original URL is -retained. - This field is required to ensure an empty block is not set. 
The normal default value is false.`, - }, - "host_redirect": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The host that will be used in the redirect response instead of the one that was -supplied in the request. The value must be between 1 and 255 characters.`, - }, - "https_redirect": { - Type: resource_compute_region_url_map_schema.TypeBool, - Optional: true, - Description: `If set to true, the URL scheme in the redirected request is set to https. If set to -false, the URL scheme of the redirected request will remain the same as that of the -request. This must only be set for UrlMaps used in TargetHttpProxys. Setting this -true for TargetHttpsProxy is not permitted. The default is set to false.`, - Default: false, - }, - "path_redirect": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The path that will be used in the redirect response instead of the one that was -supplied in the request. pathRedirect cannot be supplied together with -prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the -original request will be used for the redirect. The value must be between 1 and 1024 -characters.`, - }, - "prefix_redirect": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, -retaining the remaining portion of the URL before redirecting the request. -prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or -neither. If neither is supplied, the path of the original request will be used for -the redirect. 
The value must be between 1 and 1024 characters.`, - }, - "redirect_response_code": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_url_map_validation.StringInSlice([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}, false), - Description: `The HTTP Status code to use for this RedirectAction. Supported values are: - -* MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. - -* FOUND, which corresponds to 302. - -* SEE_OTHER which corresponds to 303. - -* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method -will be retained. - -* PERMANENT_REDIRECT, which corresponds to 308. In this case, -the request method will be retained. Possible values: ["FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT"]`, - }, - }, - }, - }, - "description": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `An optional description of this resource.`, - }, - "path_rule": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `The list of path rules. Use this list instead of routeRules when routing based -on simple path matching is all that's required. The order by which path rules -are specified does not matter. Matches are always done on the longest-path-first -basis. For example: a pathRule with a path /a/b/c/* will match before /a/b/* -irrespective of the order in which those paths appear in this list. Within a -given pathMatcher, only one of pathRules or routeRules must be set.`, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "paths": { - Type: resource_compute_region_url_map_schema.TypeSet, - Required: true, - Description: `The list of path patterns to match. 
Each must start with / and the only place a -\* is allowed is at the end following a /. The string fed to the path matcher -does not include any text after the first ? or #, and those chars are not -allowed here.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - Set: resource_compute_region_url_map_schema.HashString, - }, - "route_action": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `In response to a matching path, the load balancer performs advanced routing -actions like URL rewrites, header transformations, etc. prior to forwarding the -request to the selected backend. If routeAction specifies any -weightedBackendServices, service must not be set. Conversely if service is set, -routeAction cannot contain any weightedBackendServices. Only one of routeAction -or urlRedirect must be set.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "cors_policy": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `The specification for allowing client side cross-origin requests. Please see W3C -Recommendation for Cross Origin Resource Sharing`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "disabled": { - Type: resource_compute_region_url_map_schema.TypeBool, - Required: true, - Description: `If true, specifies the CORS policy is disabled.`, - }, - "allow_credentials": { - Type: resource_compute_region_url_map_schema.TypeBool, - Optional: true, - Description: `In response to a preflight request, setting this to true indicates that the -actual request can include user credentials. This translates to the Access- -Control-Allow-Credentials header. 
Defaults to false.`, - Default: false, - }, - "allow_headers": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Allow-Headers header.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - "allow_methods": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Allow-Methods header.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - "allow_origin_regexes": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the regular expression patterns that match allowed origins. For -regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript -An origin is allowed if it matches either allow_origins or allow_origin_regex.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - "allow_origins": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the list of origins that will be allowed to do CORS requests. 
An -origin is allowed if it matches either allow_origins or allow_origin_regex.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - "expose_headers": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Expose-Headers header.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - "max_age": { - Type: resource_compute_region_url_map_schema.TypeInt, - Optional: true, - Description: `Specifies how long the results of a preflight request can be cached. This -translates to the content for the Access-Control-Max-Age header.`, - }, - }, - }, - }, - "fault_injection_policy": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `The specification for fault injection introduced into traffic to test the -resiliency of clients to backend service failure. As part of fault injection, -when clients send requests to a backend service, delays can be introduced by -Loadbalancer on a percentage of requests before sending those request to the -backend service. Similarly requests from clients can be aborted by the -Loadbalancer for a percentage of requests. 
timeout and retry_policy will be -ignored by clients that are configured with a fault_injection_policy.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "abort": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `The specification for how client requests are aborted as part of fault -injection.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "http_status": { - Type: resource_compute_region_url_map_schema.TypeInt, - Required: true, - Description: `The HTTP status code used to abort the request. The value must be between 200 -and 599 inclusive.`, - }, - "percentage": { - Type: resource_compute_region_url_map_schema.TypeFloat, - Required: true, - Description: `The percentage of traffic (connections/operations/requests) which will be -aborted as part of fault injection. The value must be between 0.0 and 100.0 -inclusive.`, - }, - }, - }, - }, - "delay": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `The specification for how client requests are delayed as part of fault -injection, before being sent to a backend service.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "fixed_delay": { - Type: resource_compute_region_url_map_schema.TypeList, - Required: true, - Description: `Specifies the value of the fixed delay interval.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "seconds": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `Span of time at a resolution of a second. 
Must be from 0 to 315,576,000,000 -inclusive.`, - }, - "nanos": { - Type: resource_compute_region_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations -less than one second are represented with a 0 'seconds' field and a positive -'nanos' field. Must be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - }, - "percentage": { - Type: resource_compute_region_url_map_schema.TypeFloat, - Required: true, - Description: `The percentage of traffic (connections/operations/requests) on which delay will -be introduced as part of fault injection. The value must be between 0.0 and -100.0 inclusive.`, - }, - }, - }, - }, - }, - }, - }, - "request_mirror_policy": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the policy on how requests intended for the route's backends are -shadowed to a separate mirrored backend service. Loadbalancer does not wait for -responses from the shadow service. Prior to sending traffic to the shadow -service, the host / authority header is suffixed with -shadow.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "backend_service": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The RegionBackendService resource being mirrored to.`, - }, - }, - }, - }, - "retry_policy": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the retry policy associated with this route.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "num_retries": { - Type: resource_compute_region_url_map_schema.TypeInt, - Optional: true, - Description: `Specifies the allowed number retries. 
This number must be > 0.`, - }, - "per_try_timeout": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies a non-zero timeout per retry attempt.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "seconds": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 -inclusive.`, - }, - "nanos": { - Type: resource_compute_region_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations -less than one second are represented with a 0 'seconds' field and a positive -'nanos' field. Must be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - }, - "retry_conditions": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies one or more conditions when this retry rule applies. Valid values are: - -- 5xx: Loadbalancer will attempt a retry if the backend service responds with -any 5xx response code, or if the backend service does not respond at all, -example: disconnects, reset, read timeout, connection failure, and refused -streams. -- gateway-error: Similar to 5xx, but only applies to response codes -502, 503 or 504. -- connect-failure: Loadbalancer will retry on failures -connecting to backend services, for example due to connection timeouts. -- retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. -Currently the only retriable error supported is 409. -- refused-stream: Loadbalancer will retry if the backend service resets the stream with a -REFUSED_STREAM error code. This reset type indicates that it is safe to retry. 
-- cancelled: Loadbalancer will retry if the gRPC status code in the response -header is set to cancelled -- deadline-exceeded: Loadbalancer will retry if the -gRPC status code in the response header is set to deadline-exceeded -- resource-exhausted: Loadbalancer will retry if the gRPC status code in the response -header is set to resource-exhausted -- unavailable: Loadbalancer will retry if -the gRPC status code in the response header is set to unavailable`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - }, - }, - }, - "timeout": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the timeout for the selected route. Timeout is computed from the time -the request is has been fully processed (i.e. end-of-stream) up until the -response has been completely processed. Timeout includes all retries. If not -specified, the default value is 15 seconds.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "seconds": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 -inclusive.`, - }, - "nanos": { - Type: resource_compute_region_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations -less than one second are represented with a 0 'seconds' field and a positive -'nanos' field. 
Must be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - }, - "url_rewrite": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `The spec to modify the URL of the request, prior to forwarding the request to -the matched service`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "host_rewrite": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `Prior to forwarding the request to the selected service, the request's host -header is replaced with contents of hostRewrite. The value must be between 1 and -255 characters.`, - }, - "path_prefix_rewrite": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `Prior to forwarding the request to the selected backend service, the matching -portion of the request's path is replaced by pathPrefixRewrite. The value must -be between 1 and 1024 characters.`, - }, - }, - }, - }, - "weighted_backend_services": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `A list of weighted backend services to send traffic to when a route match -occurs. The weights determine the fraction of traffic that flows to their -corresponding backend service. If all traffic needs to go to a single backend -service, there must be one weightedBackendService with weight set to a non 0 -number. 
Once a backendService is identified and before forwarding the request to -the backend service, advanced routing actions like Url rewrites and header -transformations are applied depending on additional settings specified in this -HttpRouteAction.`, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "backend_service": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The default RegionBackendService resource. Before -forwarding the request to backendService, the loadbalancer applies any relevant -headerActions specified as part of this backendServiceWeight.`, - }, - "weight": { - Type: resource_compute_region_url_map_schema.TypeInt, - Required: true, - Description: `Specifies the fraction of traffic sent to backendService, computed as weight / -(sum of all weightedBackendService weights in routeAction) . The selection of a -backend service is determined only for new traffic. Once a user's request has -been directed to a backendService, subsequent requests will be sent to the same -backendService as determined by the BackendService's session affinity policy. -The value must be between 0 and 1000`, - }, - "header_action": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies changes to request and response headers that need to take effect for -the selected backendService. 
headerAction specified here take effect before -headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "request_headers_to_add": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add to a matching request prior to forwarding the request to the -backendService.`, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The name of the header.`, - }, - "header_value": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_region_url_map_schema.TypeBool, - Required: true, - Description: `If false, headerValue is appended to any values that already exist for the -header. 
If true, headerValue is set for the header, discarding any values that -were set for that header.`, - }, - }, - }, - }, - "request_headers_to_remove": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the request -prior to forwarding the request to the backendService.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - "response_headers_to_add": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add the response prior to sending the response back to the client.`, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The name of the header.`, - }, - "header_value": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_region_url_map_schema.TypeBool, - Required: true, - Description: `If false, headerValue is appended to any values that already exist for the -header. 
If true, headerValue is set for the header, discarding any values that -were set for that header.`, - }, - }, - }, - }, - "response_headers_to_remove": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the response -prior to sending the response back to the client.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "service": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The region backend service resource to which traffic is -directed if this rule is matched. If routeAction is additionally specified, -advanced routing actions like URL Rewrites, etc. take effect prior to sending -the request to the backend. However, if service is specified, routeAction cannot -contain any weightedBackendService s. Conversely, if routeAction specifies any -weightedBackendServices, service must not be specified. Only one of urlRedirect, -service or routeAction.weightedBackendService must be set.`, - }, - "url_redirect": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `When a path pattern is matched, the request is redirected to a URL specified -by urlRedirect. If urlRedirect is specified, service or routeAction must not -be set.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "strip_query": { - Type: resource_compute_region_url_map_schema.TypeBool, - Required: true, - Description: `If set to true, any accompanying query portion of the original URL is removed -prior to redirecting the request. If set to false, the query portion of the -original URL is retained. - This field is required to ensure an empty block is not set. 
The normal default value is false.`, - }, - "host_redirect": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The host that will be used in the redirect response instead of the one -that was supplied in the request. The value must be between 1 and 255 -characters.`, - }, - "https_redirect": { - Type: resource_compute_region_url_map_schema.TypeBool, - Optional: true, - Description: `If set to true, the URL scheme in the redirected request is set to https. -If set to false, the URL scheme of the redirected request will remain the -same as that of the request. This must only be set for UrlMaps used in -TargetHttpProxys. Setting this true for TargetHttpsProxy is not -permitted. The default is set to false.`, - Default: false, - }, - "path_redirect": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The path that will be used in the redirect response instead of the one -that was supplied in the request. pathRedirect cannot be supplied -together with prefixRedirect. Supply one alone or neither. If neither is -supplied, the path of the original request will be used for the redirect. -The value must be between 1 and 1024 characters.`, - }, - "prefix_redirect": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The prefix that replaces the prefixMatch specified in the -HttpRouteRuleMatch, retaining the remaining portion of the URL before -redirecting the request. prefixRedirect cannot be supplied together with -pathRedirect. Supply one alone or neither. If neither is supplied, the -path of the original request will be used for the redirect. 
The value -must be between 1 and 1024 characters.`, - }, - "redirect_response_code": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_url_map_validation.StringInSlice([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}, false), - Description: `The HTTP Status code to use for this RedirectAction. Supported values are: - -* MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. - -* FOUND, which corresponds to 302. - -* SEE_OTHER which corresponds to 303. - -* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method -will be retained. - -* PERMANENT_REDIRECT, which corresponds to 308. In this case, -the request method will be retained. Possible values: ["FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT"]`, - }, - }, - }, - }, - }, - }, - }, - "route_rules": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `The list of ordered HTTP route rules. Use this list instead of pathRules when -advanced route matching and routing actions are desired. The order of specifying -routeRules matters: the first rule that matches will cause its specified routing -action to take effect. Within a given pathMatcher, only one of pathRules or -routeRules must be set. routeRules are not supported in UrlMaps intended for -External load balancers.`, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "priority": { - Type: resource_compute_region_url_map_schema.TypeInt, - Required: true, - Description: `For routeRules within a given pathMatcher, priority determines the order -in which load balancer will interpret routeRules. RouteRules are evaluated -in order of priority, from the lowest to highest number. 
The priority of -a rule decreases as its number increases (1, 2, 3, N+1). The first rule -that matches the request is applied. - -You cannot configure two or more routeRules with the same priority. -Priority for each rule must be set to a number between 0 and -2147483647 inclusive. - -Priority numbers can have gaps, which enable you to add or remove rules -in the future without affecting the rest of the rules. For example, -1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which -you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the -future without any impact on existing rules.`, - }, - "header_action": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies changes to request and response headers that need to take effect for -the selected backendService. The headerAction specified here are applied before -the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].r -outeAction.weightedBackendService.backendServiceWeightAction[].headerAction`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "request_headers_to_add": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add to a matching request prior to forwarding the request to the -backendService.`, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The name of the header.`, - }, - "header_value": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_region_url_map_schema.TypeBool, - Required: true, - Description: `If false, headerValue is appended to any values that 
already exist for the -header. If true, headerValue is set for the header, discarding any values that -were set for that header.`, - }, - }, - }, - }, - "request_headers_to_remove": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the request -prior to forwarding the request to the backendService.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - "response_headers_to_add": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add the response prior to sending the response back to the client.`, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The name of the header.`, - }, - "header_value": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_region_url_map_schema.TypeBool, - Required: true, - Description: `If false, headerValue is appended to any values that already exist for the -header. 
If true, headerValue is set for the header, discarding any values that -were set for that header.`, - }, - }, - }, - }, - "response_headers_to_remove": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the response -prior to sending the response back to the client.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - }, - }, - }, - "match_rules": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `The rules for determining a match.`, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "full_path_match": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `For satisfying the matchRule condition, the path of the request must exactly -match the value specified in fullPathMatch after removing any query parameters -and anchor that may be part of the original URL. FullPathMatch must be between 1 -and 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must -be specified.`, - }, - "header_matches": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies a list of header match criteria, all of which must match corresponding -headers in the request.`, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The name of the HTTP header to match. For matching against the HTTP request's -authority, use a headerMatch with the header name ":authority". 
For matching a -request's method, use the headerName ":method".`, - }, - "exact_match": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The value should exactly match contents of exactMatch. Only one of exactMatch, -prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.`, - }, - "invert_match": { - Type: resource_compute_region_url_map_schema.TypeBool, - Optional: true, - Description: `If set to false, the headerMatch is considered a match if the match criteria -above are met. If set to true, the headerMatch is considered a match if the -match criteria above are NOT met. Defaults to false.`, - Default: false, - }, - "prefix_match": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The value of the header must start with the contents of prefixMatch. Only one of -exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch -must be set.`, - }, - "present_match": { - Type: resource_compute_region_url_map_schema.TypeBool, - Optional: true, - Description: `A header with the contents of headerName must exist. The match takes place -whether or not the request's header has a value or not. Only one of exactMatch, -prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.`, - }, - "range_match": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `The header value must be an integer and its value must be in the range specified -in rangeMatch. If the header does not contain an integer, number or is empty, -the match fails. For example for a range [-5, 0] - -* -3 will match -* 0 will not match -* 0.25 will not match -* -3someString will not match. 
- -Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or -rangeMatch must be set.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "range_end": { - Type: resource_compute_region_url_map_schema.TypeInt, - Required: true, - Description: `The end of the range (exclusive).`, - }, - "range_start": { - Type: resource_compute_region_url_map_schema.TypeInt, - Required: true, - Description: `The start of the range (inclusive).`, - }, - }, - }, - }, - "regex_match": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The value of the header must match the regular expression specified in -regexMatch. For regular expression grammar, please see: -en.cppreference.com/w/cpp/regex/ecmascript For matching against a port -specified in the HTTP request, use a headerMatch with headerName set to PORT and -a regular expression that satisfies the RFC2616 Host header's port specifier. -Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or -rangeMatch must be set.`, - }, - "suffix_match": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The value of the header must end with the contents of suffixMatch. Only one of -exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch -must be set.`, - }, - }, - }, - }, - "ignore_case": { - Type: resource_compute_region_url_map_schema.TypeBool, - Optional: true, - Description: `Specifies that prefixMatch and fullPathMatch matches are case sensitive. -Defaults to false.`, - Default: false, - }, - "metadata_filters": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Opaque filter criteria used by Loadbalancer to restrict routing configuration to -a limited set xDS compliant clients. In their xDS requests to Loadbalancer, xDS -clients present node metadata. 
If a match takes place, the relevant routing -configuration is made available to those proxies. For each metadataFilter in -this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the -filterLabels must match the corresponding label provided in the metadata. If its -filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match -with corresponding labels in the provided metadata. metadataFilters specified -here can be overrides those specified in ForwardingRule that refers to this -UrlMap. metadataFilters only applies to Loadbalancers that have their -loadBalancingScheme set to INTERNAL_SELF_MANAGED.`, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "filter_labels": { - Type: resource_compute_region_url_map_schema.TypeList, - Required: true, - Description: `The list of label value pairs that must match labels in the provided metadata -based on filterMatchCriteria This list must not be empty and can have at the -most 64 entries.`, - MinItems: 1, - MaxItems: 64, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "name": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `Name of metadata label. The name can have a maximum length of 1024 characters -and must be at least 1 character long.`, - }, - "value": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The value of the label must match the specified value. 
value can have a maximum -length of 1024 characters.`, - }, - }, - }, - }, - "filter_match_criteria": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - ValidateFunc: resource_compute_region_url_map_validation.StringInSlice([]string{"MATCH_ALL", "MATCH_ANY"}, false), - Description: `Specifies how individual filterLabel matches within the list of filterLabels -contribute towards the overall metadataFilter match. Supported values are: - -* MATCH_ANY: At least one of the filterLabels must have a matching label in the -provided metadata. -* MATCH_ALL: All filterLabels must have matching labels in -the provided metadata. Possible values: ["MATCH_ALL", "MATCH_ANY"]`, - }, - }, - }, - }, - "prefix_match": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `For satisfying the matchRule condition, the request's path must begin with the -specified prefixMatch. prefixMatch must begin with a /. The value must be -between 1 and 1024 characters. Only one of prefixMatch, fullPathMatch or -regexMatch must be specified.`, - }, - "query_parameter_matches": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies a list of query parameter match criteria, all of which must match -corresponding query parameters in the request.`, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "name": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The name of the query parameter to match. The query parameter must exist in the -request, in the absence of which the request match fails.`, - }, - "exact_match": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The queryParameterMatch matches if the value of the parameter exactly matches -the contents of exactMatch. 
Only one of presentMatch, exactMatch and regexMatch -must be set.`, - }, - "present_match": { - Type: resource_compute_region_url_map_schema.TypeBool, - Optional: true, - Description: `Specifies that the queryParameterMatch matches if the request contains the query -parameter, irrespective of whether the parameter has a value or not. Only one of -presentMatch, exactMatch and regexMatch must be set.`, - }, - "regex_match": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The queryParameterMatch matches if the value of the parameter matches the -regular expression specified by regexMatch. For the regular expression grammar, -please see en.cppreference.com/w/cpp/regex/ecmascript Only one of presentMatch, -exactMatch and regexMatch must be set.`, - }, - }, - }, - }, - "regex_match": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `For satisfying the matchRule condition, the path of the request must satisfy the -regular expression specified in regexMatch after removing any query parameters -and anchor supplied with the original URL. For regular expression grammar please -see en.cppreference.com/w/cpp/regex/ecmascript Only one of prefixMatch, -fullPathMatch or regexMatch must be specified.`, - }, - }, - }, - }, - "route_action": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `In response to a matching matchRule, the load balancer performs advanced routing -actions like URL rewrites, header transformations, etc. prior to forwarding the -request to the selected backend. If routeAction specifies any -weightedBackendServices, service must not be set. Conversely if service is set, -routeAction cannot contain any weightedBackendServices. 
Only one of routeAction -or urlRedirect must be set.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "cors_policy": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `The specification for allowing client side cross-origin requests. Please see W3C -Recommendation for Cross Origin Resource Sharing`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "allow_credentials": { - Type: resource_compute_region_url_map_schema.TypeBool, - Optional: true, - Description: `In response to a preflight request, setting this to true indicates that the -actual request can include user credentials. This translates to the Access- -Control-Allow-Credentials header. Defaults to false.`, - Default: false, - }, - "allow_headers": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Allow-Headers header.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - "allow_methods": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Allow-Methods header.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - "allow_origin_regexes": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the regular expression patterns that match allowed origins. 
For -regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript -An origin is allowed if it matches either allow_origins or allow_origin_regex.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - "allow_origins": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the list of origins that will be allowed to do CORS requests. An -origin is allowed if it matches either allow_origins or allow_origin_regex.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - "disabled": { - Type: resource_compute_region_url_map_schema.TypeBool, - Optional: true, - Description: `If true, specifies the CORS policy is disabled. -which indicates that the CORS policy is in effect. Defaults to false.`, - Default: false, - }, - "expose_headers": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Expose-Headers header.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - "max_age": { - Type: resource_compute_region_url_map_schema.TypeInt, - Optional: true, - Description: `Specifies how long the results of a preflight request can be cached. This -translates to the content for the Access-Control-Max-Age header.`, - }, - }, - }, - }, - "fault_injection_policy": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `The specification for fault injection introduced into traffic to test the -resiliency of clients to backend service failure. As part of fault injection, -when clients send requests to a backend service, delays can be introduced by -Loadbalancer on a percentage of requests before sending those request to the -backend service. 
Similarly requests from clients can be aborted by the -Loadbalancer for a percentage of requests. timeout and retry_policy will be -ignored by clients that are configured with a fault_injection_policy.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "abort": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `The specification for how client requests are aborted as part of fault -injection.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "http_status": { - Type: resource_compute_region_url_map_schema.TypeInt, - Optional: true, - Description: `The HTTP status code used to abort the request. The value must be between 200 -and 599 inclusive.`, - }, - "percentage": { - Type: resource_compute_region_url_map_schema.TypeFloat, - Optional: true, - Description: `The percentage of traffic (connections/operations/requests) which will be -aborted as part of fault injection. 
The value must be between 0.0 and 100.0 -inclusive.`, - }, - }, - }, - }, - "delay": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `The specification for how client requests are delayed as part of fault -injection, before being sent to a backend service.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "fixed_delay": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the value of the fixed delay interval.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "seconds": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 -inclusive.`, - }, - "nanos": { - Type: resource_compute_region_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations -less than one second are represented with a 0 'seconds' field and a positive -'nanos' field. Must be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - }, - "percentage": { - Type: resource_compute_region_url_map_schema.TypeFloat, - Optional: true, - Description: `The percentage of traffic (connections/operations/requests) on which delay will -be introduced as part of fault injection. The value must be between 0.0 and -100.0 inclusive.`, - }, - }, - }, - }, - }, - }, - }, - "request_mirror_policy": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the policy on how requests intended for the route's backends are -shadowed to a separate mirrored backend service. Loadbalancer does not wait for -responses from the shadow service. 
Prior to sending traffic to the shadow -service, the host / authority header is suffixed with -shadow.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "backend_service": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The RegionBackendService resource being mirrored to.`, - }, - }, - }, - }, - "retry_policy": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the retry policy associated with this route.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "num_retries": { - Type: resource_compute_region_url_map_schema.TypeInt, - Required: true, - Description: `Specifies the allowed number retries. This number must be > 0.`, - }, - "per_try_timeout": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies a non-zero timeout per retry attempt.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "seconds": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 -inclusive.`, - }, - "nanos": { - Type: resource_compute_region_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations -less than one second are represented with a 0 'seconds' field and a positive -'nanos' field. 
Must be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - }, - "retry_conditions": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies one or more conditions when this retry rule applies. Valid values are: - -* 5xx: Loadbalancer will attempt a retry if the backend service responds with - any 5xx response code, or if the backend service does not respond at all, - example: disconnects, reset, read timeout, connection failure, and refused - streams. -* gateway-error: Similar to 5xx, but only applies to response codes - 502, 503 or 504. -* connect-failure: Loadbalancer will retry on failures - connecting to backend services, for example due to connection timeouts. -* retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. - Currently the only retriable error supported is 409. -* refused-stream: Loadbalancer will retry if the backend service resets the stream with a - REFUSED_STREAM error code. This reset type indicates that it is safe to retry. -* cancelled: Loadbalancer will retry if the gRPC status code in the response - header is set to cancelled -* deadline-exceeded: Loadbalancer will retry if the - gRPC status code in the response header is set to deadline-exceeded -* resource-exhausted: Loadbalancer will retry if the gRPC status code in the response - header is set to resource-exhausted -* unavailable: Loadbalancer will retry if the gRPC status code in - the response header is set to unavailable`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - }, - }, - }, - "timeout": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the timeout for the selected route. Timeout is computed from the time -the request is has been fully processed (i.e. end-of-stream) up until the -response has been completely processed. Timeout includes all retries. 
If not -specified, the default value is 15 seconds.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "seconds": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 -inclusive.`, - }, - "nanos": { - Type: resource_compute_region_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations -less than one second are represented with a 0 'seconds' field and a positive -'nanos' field. Must be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - }, - "url_rewrite": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `The spec to modify the URL of the request, prior to forwarding the request to -the matched service`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "host_rewrite": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `Prior to forwarding the request to the selected service, the request's host -header is replaced with contents of hostRewrite. The value must be between 1 and -255 characters.`, - }, - "path_prefix_rewrite": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `Prior to forwarding the request to the selected backend service, the matching -portion of the request's path is replaced by pathPrefixRewrite. The value must -be between 1 and 1024 characters.`, - }, - }, - }, - }, - "weighted_backend_services": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `A list of weighted backend services to send traffic to when a route match -occurs. 
The weights determine the fraction of traffic that flows to their -corresponding backend service. If all traffic needs to go to a single backend -service, there must be one weightedBackendService with weight set to a non 0 -number. Once a backendService is identified and before forwarding the request to -the backend service, advanced routing actions like Url rewrites and header -transformations are applied depending on additional settings specified in this -HttpRouteAction.`, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "backend_service": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The default RegionBackendService resource. Before -forwarding the request to backendService, the loadbalancer applies any relevant -headerActions specified as part of this backendServiceWeight.`, - }, - "weight": { - Type: resource_compute_region_url_map_schema.TypeInt, - Required: true, - Description: `Specifies the fraction of traffic sent to backendService, computed as weight / -(sum of all weightedBackendService weights in routeAction) . The selection of a -backend service is determined only for new traffic. Once a user's request has -been directed to a backendService, subsequent requests will be sent to the same -backendService as determined by the BackendService's session affinity policy. -The value must be between 0 and 1000`, - }, - "header_action": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Specifies changes to request and response headers that need to take effect for -the selected backendService. 
headerAction specified here take effect before -headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "request_headers_to_add": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add to a matching request prior to forwarding the request to the -backendService.`, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The name of the header.`, - }, - "header_value": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_region_url_map_schema.TypeBool, - Required: true, - Description: `If false, headerValue is appended to any values that already exist for the -header. 
If true, headerValue is set for the header, discarding any values that -were set for that header.`, - }, - }, - }, - }, - "request_headers_to_remove": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the request -prior to forwarding the request to the backendService.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - "response_headers_to_add": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add the response prior to sending the response back to the client.`, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The name of the header.`, - }, - "header_value": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_region_url_map_schema.TypeBool, - Required: true, - Description: `If false, headerValue is appended to any values that already exist for the -header. 
If true, headerValue is set for the header, discarding any values that -were set for that header.`, - }, - }, - }, - }, - "response_headers_to_remove": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the response -prior to sending the response back to the client.`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "service": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The region backend service resource to which traffic is -directed if this rule is matched. If routeAction is additionally specified, -advanced routing actions like URL Rewrites, etc. take effect prior to sending -the request to the backend. However, if service is specified, routeAction cannot -contain any weightedBackendService s. Conversely, if routeAction specifies any -weightedBackendServices, service must not be specified. Only one of urlRedirect, -service or routeAction.weightedBackendService must be set.`, - }, - "url_redirect": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `When this rule is matched, the request is redirected to a URL specified by -urlRedirect. If urlRedirect is specified, service or routeAction must not be -set.`, - MaxItems: 1, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "host_redirect": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The host that will be used in the redirect response instead of the one -that was supplied in the request. 
The value must be between 1 and 255 -characters.`, - }, - "https_redirect": { - Type: resource_compute_region_url_map_schema.TypeBool, - Optional: true, - Description: `If set to true, the URL scheme in the redirected request is set to https. -If set to false, the URL scheme of the redirected request will remain the -same as that of the request. This must only be set for UrlMaps used in -TargetHttpProxys. Setting this true for TargetHttpsProxy is not -permitted. The default is set to false.`, - Default: false, - }, - "path_redirect": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The path that will be used in the redirect response instead of the one -that was supplied in the request. pathRedirect cannot be supplied -together with prefixRedirect. Supply one alone or neither. If neither is -supplied, the path of the original request will be used for the redirect. -The value must be between 1 and 1024 characters.`, - }, - "prefix_redirect": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `The prefix that replaces the prefixMatch specified in the -HttpRouteRuleMatch, retaining the remaining portion of the URL before -redirecting the request. prefixRedirect cannot be supplied together with -pathRedirect. Supply one alone or neither. If neither is supplied, the -path of the original request will be used for the redirect. The value -must be between 1 and 1024 characters.`, - }, - "redirect_response_code": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_region_url_map_validation.StringInSlice([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}, false), - Description: `The HTTP Status code to use for this RedirectAction. Supported values are: - -* MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. - -* FOUND, which corresponds to 302. 
- -* SEE_OTHER which corresponds to 303. - -* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method -will be retained. - -* PERMANENT_REDIRECT, which corresponds to 308. In this case, -the request method will be retained. Possible values: ["FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT"]`, - }, - "strip_query": { - Type: resource_compute_region_url_map_schema.TypeBool, - Optional: true, - Description: `If set to true, any accompanying query portion of the original URL is -removed prior to redirecting the request. If set to false, the query -portion of the original URL is retained. The default value is false.`, - Default: false, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "region": { - Type: resource_compute_region_url_map_schema.TypeString, - Computed: true, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Region in which the url map should reside. -If it is not provided, the provider region is used.`, - }, - "test": { - Type: resource_compute_region_url_map_schema.TypeList, - Optional: true, - Description: `The list of expected URL mappings. 
Requests to update this UrlMap will -succeed only if all of the test cases pass.`, - Elem: &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "host": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `Host portion of the URL.`, - }, - "path": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `Path portion of the URL.`, - }, - "service": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to expected RegionBackendService resource the given URL should be mapped to.`, - }, - "description": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `Description of this test case.`, - }, - }, - }, - }, - "creation_timestamp": { - Type: resource_compute_region_url_map_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "fingerprint": { - Type: resource_compute_region_url_map_schema.TypeString, - Computed: true, - Description: `Fingerprint of this resource. 
This field is used internally during -updates of this resource.`, - }, - "map_id": { - Type: resource_compute_region_url_map_schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_region_url_map_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func computeRegionUrlMapHostRuleSchema() *resource_compute_region_url_map_schema.Resource { - return &resource_compute_region_url_map_schema.Resource{ - Schema: map[string]*resource_compute_region_url_map_schema.Schema{ - "hosts": { - Type: resource_compute_region_url_map_schema.TypeSet, - Required: true, - Description: `The list of host patterns to match. They must be valid -hostnames, except * will match any string of ([a-z0-9-.]*). In -that case, * must be the first character and must be followed in -the pattern by either - or ..`, - Elem: &resource_compute_region_url_map_schema.Schema{ - Type: resource_compute_region_url_map_schema.TypeString, - }, - Set: resource_compute_region_url_map_schema.HashString, - }, - "path_matcher": { - Type: resource_compute_region_url_map_schema.TypeString, - Required: true, - Description: `The name of the PathMatcher to use to match the path portion of -the URL if the hostRule matches the URL's host portion.`, - }, - "description": { - Type: resource_compute_region_url_map_schema.TypeString, - Optional: true, - Description: `An optional description of this HostRule. 
Provide this property -when you create the resource.`, - }, - }, - } -} - -func resourceComputeRegionUrlMapCreate(d *resource_compute_region_url_map_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - defaultServiceProp, err := expandComputeRegionUrlMapDefaultService(d.Get("default_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_service"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(defaultServiceProp)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, defaultServiceProp)) { - obj["defaultService"] = defaultServiceProp - } - descriptionProp, err := expandComputeRegionUrlMapDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - hostRulesProp, err := expandComputeRegionUrlMapHostRule(d.Get("host_rule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("host_rule"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(hostRulesProp)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, hostRulesProp)) { - obj["hostRules"] = hostRulesProp - } - fingerprintProp, err := expandComputeRegionUrlMapFingerprint(d.Get("fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(fingerprintProp)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, fingerprintProp)) { - obj["fingerprint"] = fingerprintProp - } - nameProp, err := expandComputeRegionUrlMapName(d.Get("name"), d, config) - if err != 
nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(nameProp)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - pathMatchersProp, err := expandComputeRegionUrlMapPathMatcher(d.Get("path_matcher"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("path_matcher"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(pathMatchersProp)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, pathMatchersProp)) { - obj["pathMatchers"] = pathMatchersProp - } - testsProp, err := expandComputeRegionUrlMapTest(d.Get("test"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("test"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(testsProp)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, testsProp)) { - obj["tests"] = testsProp - } - defaultUrlRedirectProp, err := expandComputeRegionUrlMapDefaultUrlRedirect(d.Get("default_url_redirect"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_url_redirect"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(defaultUrlRedirectProp)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, defaultUrlRedirectProp)) { - obj["defaultUrlRedirect"] = defaultUrlRedirectProp - } - regionProp, err := expandComputeRegionUrlMapRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(regionProp)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/urlMaps") - if err != nil { - return err - } - - resource_compute_region_url_map_log.Printf("[DEBUG] Creating new RegionUrlMap: %#v", obj) - 
billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error fetching project for RegionUrlMap: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_url_map_schema.TimeoutCreate)) - if err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error creating RegionUrlMap: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/urlMaps/{{name}}") - if err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating RegionUrlMap", userAgent, - d.Timeout(resource_compute_region_url_map_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_region_url_map_fmt.Errorf("Error waiting to create RegionUrlMap: %s", err) - } - - resource_compute_region_url_map_log.Printf("[DEBUG] Finished creating RegionUrlMap %q: %#v", d.Id(), res) - - return resourceComputeRegionUrlMapRead(d, meta) -} - -func resourceComputeRegionUrlMapRead(d *resource_compute_region_url_map_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/urlMaps/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error fetching project for RegionUrlMap: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_region_url_map_fmt.Sprintf("ComputeRegionUrlMap %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error reading RegionUrlMap: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeRegionUrlMapCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error reading RegionUrlMap: %s", err) - } - if err := d.Set("default_service", flattenComputeRegionUrlMapDefaultService(res["defaultService"], d, config)); err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error reading RegionUrlMap: %s", err) - } - if err := d.Set("description", flattenComputeRegionUrlMapDescription(res["description"], d, config)); err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error reading RegionUrlMap: %s", err) - } - if err := d.Set("host_rule", flattenComputeRegionUrlMapHostRule(res["hostRules"], d, config)); err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error reading RegionUrlMap: %s", err) - } - if err := d.Set("map_id", flattenComputeRegionUrlMapMapId(res["id"], d, config)); err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error reading RegionUrlMap: %s", err) - } - if err := d.Set("fingerprint", flattenComputeRegionUrlMapFingerprint(res["fingerprint"], d, config)); err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error reading RegionUrlMap: %s", err) - } - if err := d.Set("name", flattenComputeRegionUrlMapName(res["name"], d, config)); err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error reading RegionUrlMap: %s", err) - } - if err := d.Set("path_matcher", flattenComputeRegionUrlMapPathMatcher(res["pathMatchers"], d, config)); err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error 
reading RegionUrlMap: %s", err) - } - if err := d.Set("test", flattenComputeRegionUrlMapTest(res["tests"], d, config)); err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error reading RegionUrlMap: %s", err) - } - if err := d.Set("default_url_redirect", flattenComputeRegionUrlMapDefaultUrlRedirect(res["defaultUrlRedirect"], d, config)); err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error reading RegionUrlMap: %s", err) - } - if err := d.Set("region", flattenComputeRegionUrlMapRegion(res["region"], d, config)); err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error reading RegionUrlMap: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error reading RegionUrlMap: %s", err) - } - - return nil -} - -func resourceComputeRegionUrlMapUpdate(d *resource_compute_region_url_map_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error fetching project for RegionUrlMap: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - defaultServiceProp, err := expandComputeRegionUrlMapDefaultService(d.Get("default_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_service"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, defaultServiceProp)) { - obj["defaultService"] = defaultServiceProp - } - descriptionProp, err := expandComputeRegionUrlMapDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); 
!isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - hostRulesProp, err := expandComputeRegionUrlMapHostRule(d.Get("host_rule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("host_rule"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, hostRulesProp)) { - obj["hostRules"] = hostRulesProp - } - fingerprintProp, err := expandComputeRegionUrlMapFingerprint(d.Get("fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, fingerprintProp)) { - obj["fingerprint"] = fingerprintProp - } - nameProp, err := expandComputeRegionUrlMapName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - pathMatchersProp, err := expandComputeRegionUrlMapPathMatcher(d.Get("path_matcher"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("path_matcher"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, pathMatchersProp)) { - obj["pathMatchers"] = pathMatchersProp - } - testsProp, err := expandComputeRegionUrlMapTest(d.Get("test"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("test"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, testsProp)) { - obj["tests"] = testsProp - } - defaultUrlRedirectProp, err := 
expandComputeRegionUrlMapDefaultUrlRedirect(d.Get("default_url_redirect"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_url_redirect"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, defaultUrlRedirectProp)) { - obj["defaultUrlRedirect"] = defaultUrlRedirectProp - } - regionProp, err := expandComputeRegionUrlMapRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_region_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_region_url_map_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/urlMaps/{{name}}") - if err != nil { - return err - } - - resource_compute_region_url_map_log.Printf("[DEBUG] Updating RegionUrlMap %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_url_map_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error updating RegionUrlMap %q: %s", d.Id(), err) - } else { - resource_compute_region_url_map_log.Printf("[DEBUG] Finished updating RegionUrlMap %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating RegionUrlMap", userAgent, - d.Timeout(resource_compute_region_url_map_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeRegionUrlMapRead(d, meta) -} - -func resourceComputeRegionUrlMapDelete(d *resource_compute_region_url_map_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := 
"" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_region_url_map_fmt.Errorf("Error fetching project for RegionUrlMap: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/urlMaps/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_region_url_map_log.Printf("[DEBUG] Deleting RegionUrlMap %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_region_url_map_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RegionUrlMap") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting RegionUrlMap", userAgent, - d.Timeout(resource_compute_region_url_map_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_region_url_map_log.Printf("[DEBUG] Finished deleting RegionUrlMap %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRegionUrlMapImport(d *resource_compute_region_url_map_schema.ResourceData, meta interface{}) ([]*resource_compute_region_url_map_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/urlMaps/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/urlMaps/{{name}}") - if err != nil { - return nil, resource_compute_region_url_map_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_region_url_map_schema.ResourceData{d}, nil -} - -func flattenComputeRegionUrlMapCreationTimestamp(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenComputeRegionUrlMapDefaultService(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionUrlMapDescription(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapHostRule(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_compute_region_url_map_schema.NewSet(resource_compute_region_url_map_schema.HashResource(computeRegionUrlMapHostRuleSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "description": flattenComputeRegionUrlMapHostRuleDescription(original["description"], d, config), - "hosts": flattenComputeRegionUrlMapHostRuleHosts(original["hosts"], d, config), - "path_matcher": flattenComputeRegionUrlMapHostRulePathMatcher(original["pathMatcher"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapHostRuleDescription(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapHostRuleHosts(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_region_url_map_schema.NewSet(resource_compute_region_url_map_schema.HashString, v.([]interface{})) -} - -func flattenComputeRegionUrlMapHostRulePathMatcher(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapMapId(v interface{}, d 
*resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapFingerprint(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapName(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcher(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "default_service": flattenComputeRegionUrlMapPathMatcherDefaultService(original["defaultService"], d, config), - "description": flattenComputeRegionUrlMapPathMatcherDescription(original["description"], d, config), - "name": flattenComputeRegionUrlMapPathMatcherName(original["name"], d, config), - "route_rules": flattenComputeRegionUrlMapPathMatcherRouteRules(original["routeRules"], d, config), - "path_rule": flattenComputeRegionUrlMapPathMatcherPathRule(original["pathRules"], d, config), - "default_url_redirect": flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirect(original["defaultUrlRedirect"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapPathMatcherDefaultService(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func 
flattenComputeRegionUrlMapPathMatcherDescription(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherName(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRules(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "priority": flattenComputeRegionUrlMapPathMatcherRouteRulesPriority(original["priority"], d, config), - "service": flattenComputeRegionUrlMapPathMatcherRouteRulesService(original["service"], d, config), - "header_action": flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderAction(original["headerAction"], d, config), - "match_rules": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRules(original["matchRules"], d, config), - "route_action": flattenComputeRegionUrlMapPathMatcherRouteRulesRouteAction(original["routeAction"], d, config), - "url_redirect": flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirect(original["urlRedirect"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesPriority(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesService(v interface{}, d 
*resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderAction(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["request_headers_to_add"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(original["requestHeadersToAdd"], d, config) - transformed["request_headers_to_remove"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(original["requestHeadersToRemove"], d, config) - transformed["response_headers_to_add"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(original["responseHeadersToAdd"], d, config) - transformed["response_headers_to_remove"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": 
flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func 
flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRules(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "full_path_match": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(original["fullPathMatch"], d, config), - "header_matches": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(original["headerMatches"], d, config), - "ignore_case": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(original["ignoreCase"], d, config), - "metadata_filters": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(original["metadataFilters"], d, config), - "prefix_match": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(original["prefixMatch"], d, config), - "query_parameter_matches": 
flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(original["queryParameterMatches"], d, config), - "regex_match": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(original["regexMatch"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "exact_match": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(original["exactMatch"], d, config), - "header_name": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(original["headerName"], d, config), - "invert_match": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(original["invertMatch"], d, config), - "prefix_match": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(original["prefixMatch"], d, config), - "present_match": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(original["presentMatch"], d, config), - "range_match": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(original["rangeMatch"], d, config), - "regex_match": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(original["regexMatch"], d, config), - "suffix_match": 
flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(original["suffixMatch"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["range_end"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(original["rangeEnd"], d, config) - transformed["range_start"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(original["rangeStart"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(v interface{}, d 
*resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "filter_labels": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(original["filterLabels"], 
d, config), - "filter_match_criteria": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(original["filterMatchCriteria"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(original["name"], d, config), - "value": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(original["value"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v 
== nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "exact_match": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(original["exactMatch"], d, config), - "name": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(original["name"], d, config), - "present_match": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(original["presentMatch"], d, config), - "regex_match": flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(original["regexMatch"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteAction(v interface{}, d 
*resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cors_policy"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(original["corsPolicy"], d, config) - transformed["fault_injection_policy"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(original["faultInjectionPolicy"], d, config) - transformed["request_mirror_policy"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(original["requestMirrorPolicy"], d, config) - transformed["retry_policy"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(original["retryPolicy"], d, config) - transformed["timeout"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeout(original["timeout"], d, config) - transformed["url_rewrite"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(original["urlRewrite"], d, config) - transformed["weighted_backend_services"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(original["weightedBackendServices"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allow_credentials"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(original["allowCredentials"], d, config) - transformed["allow_headers"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(original["allowHeaders"], d, config) - 
transformed["allow_methods"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(original["allowMethods"], d, config) - transformed["allow_origin_regexes"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(original["allowOriginRegexes"], d, config) - transformed["allow_origins"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(original["allowOrigins"], d, config) - transformed["disabled"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(original["disabled"], d, config) - transformed["expose_headers"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(original["exposeHeaders"], d, config) - transformed["max_age"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(original["maxAge"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["abort"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) - transformed["delay"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["http_status"] = - 
flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(original["httpStatus"], d, config) - transformed["percentage"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["fixed_delay"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixedDelay"], d, config) - transformed["percentage"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := 
v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["nanos"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) - transformed["seconds"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["backend_service"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(original["backendService"], d, config) - return []interface{}{transformed} -} - -func 
flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["num_retries"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(original["numRetries"], d, config) - transformed["per_try_timeout"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(original["perTryTimeout"], d, config) - transformed["retry_conditions"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(original["retryConditions"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["nanos"] = - 
flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) - transformed["seconds"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeout(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["nanos"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(original["nanos"], d, config) - transformed["seconds"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(original["seconds"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) 
interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host_rewrite"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(original["hostRewrite"], d, config) - transformed["path_prefix_rewrite"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(original["pathPrefixRewrite"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - 
transformed = append(transformed, map[string]interface{}{ - "backend_service": flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(original["backendService"], d, config), - "header_action": flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(original["headerAction"], d, config), - "weight": flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(original["weight"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["request_headers_to_add"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["requestHeadersToAdd"], d, config) - transformed["request_headers_to_remove"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["requestHeadersToRemove"], d, config) - transformed["response_headers_to_add"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["responseHeadersToAdd"], d, config) - transformed["response_headers_to_remove"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, 
config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d 
*resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host_redirect"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(original["hostRedirect"], d, config) - transformed["https_redirect"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(original["httpsRedirect"], d, config) - transformed["path_redirect"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(original["pathRedirect"], d, config) - transformed["prefix_redirect"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(original["prefixRedirect"], d, config) - transformed["redirect_response_code"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(original["redirectResponseCode"], d, config) - transformed["strip_query"] = - flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(original["stripQuery"], d, config) - return []interface{}{transformed} -} - -func 
flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRule(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "service": flattenComputeRegionUrlMapPathMatcherPathRuleService(original["service"], d, config), - "paths": flattenComputeRegionUrlMapPathMatcherPathRulePaths(original["paths"], d, config), - "route_action": flattenComputeRegionUrlMapPathMatcherPathRuleRouteAction(original["routeAction"], d, config), - "url_redirect": flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirect(original["urlRedirect"], 
d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleService(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionUrlMapPathMatcherPathRulePaths(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_region_url_map_schema.NewSet(resource_compute_region_url_map_schema.HashString, v.([]interface{})) -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteAction(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cors_policy"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicy(original["corsPolicy"], d, config) - transformed["fault_injection_policy"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(original["faultInjectionPolicy"], d, config) - transformed["request_mirror_policy"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(original["requestMirrorPolicy"], d, config) - transformed["retry_policy"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicy(original["retryPolicy"], d, config) - transformed["timeout"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeout(original["timeout"], d, config) - transformed["url_rewrite"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewrite(original["urlRewrite"], d, config) - transformed["weighted_backend_services"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(original["weightedBackendServices"], d, config) - return 
[]interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicy(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allow_credentials"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(original["allowCredentials"], d, config) - transformed["allow_headers"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(original["allowHeaders"], d, config) - transformed["allow_methods"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(original["allowMethods"], d, config) - transformed["allow_origin_regexes"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(original["allowOriginRegexes"], d, config) - transformed["allow_origins"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(original["allowOrigins"], d, config) - transformed["disabled"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(original["disabled"], d, config) - transformed["expose_headers"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(original["exposeHeaders"], d, config) - transformed["max_age"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(original["maxAge"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["abort"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) - transformed["delay"] = 
- flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["http_status"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(original["httpStatus"], d, config) - transformed["percentage"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["fixed_delay"] = - 
flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixedDelay"], d, config) - transformed["percentage"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["nanos"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) - transformed["seconds"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["backend_service"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(original["backendService"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicy(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["num_retries"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(original["numRetries"], d, config) - transformed["per_try_timeout"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(original["perTryTimeout"], d, config) - transformed["retry_conditions"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(original["retryConditions"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return 
intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["nanos"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) - transformed["seconds"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeout(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["nanos"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(original["nanos"], d, config) - transformed["seconds"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(original["seconds"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewrite(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host_rewrite"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(original["hostRewrite"], d, config) - transformed["path_prefix_rewrite"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(original["pathPrefixRewrite"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d 
*resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "backend_service": flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(original["backendService"], d, config), - "header_action": flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(original["headerAction"], d, config), - "weight": flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(original["weight"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["request_headers_to_add"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["requestHeadersToAdd"], d, config) - transformed["request_headers_to_remove"] = - 
flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["requestHeadersToRemove"], d, config) - transformed["response_headers_to_add"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["responseHeadersToAdd"], d, config) - transformed["response_headers_to_remove"] = - flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_region_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host_redirect"] = - flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(original["hostRedirect"], d, config) - transformed["https_redirect"] = - flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(original["httpsRedirect"], d, config) - transformed["path_redirect"] = - flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(original["pathRedirect"], d, config) - transformed["prefix_redirect"] = - 
flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(original["prefixRedirect"], d, config) - transformed["redirect_response_code"] = - flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(original["redirectResponseCode"], d, config) - transformed["strip_query"] = - flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectStripQuery(original["stripQuery"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherPathRuleUrlRedirectStripQuery(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host_redirect"] = - 
flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectHostRedirect(original["hostRedirect"], d, config) - transformed["https_redirect"] = - flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(original["httpsRedirect"], d, config) - transformed["path_redirect"] = - flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectPathRedirect(original["pathRedirect"], d, config) - transformed["prefix_redirect"] = - flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(original["prefixRedirect"], d, config) - transformed["redirect_response_code"] = - flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(original["redirectResponseCode"], d, config) - transformed["strip_query"] = - flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectStripQuery(original["stripQuery"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectHostRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectPathRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapPathMatcherDefaultUrlRedirectStripQuery(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenComputeRegionUrlMapTest(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "description": flattenComputeRegionUrlMapTestDescription(original["description"], d, config), - "host": flattenComputeRegionUrlMapTestHost(original["host"], d, config), - "path": flattenComputeRegionUrlMapTestPath(original["path"], d, config), - "service": flattenComputeRegionUrlMapTestService(original["service"], d, config), - }) - } - return transformed -} - -func flattenComputeRegionUrlMapTestDescription(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapTestHost(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapTestPath(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapTestService(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRegionUrlMapDefaultUrlRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host_redirect"] = - flattenComputeRegionUrlMapDefaultUrlRedirectHostRedirect(original["hostRedirect"], d, config) - transformed["https_redirect"] = - 
flattenComputeRegionUrlMapDefaultUrlRedirectHttpsRedirect(original["httpsRedirect"], d, config) - transformed["path_redirect"] = - flattenComputeRegionUrlMapDefaultUrlRedirectPathRedirect(original["pathRedirect"], d, config) - transformed["prefix_redirect"] = - flattenComputeRegionUrlMapDefaultUrlRedirectPrefixRedirect(original["prefixRedirect"], d, config) - transformed["redirect_response_code"] = - flattenComputeRegionUrlMapDefaultUrlRedirectRedirectResponseCode(original["redirectResponseCode"], d, config) - transformed["strip_query"] = - flattenComputeRegionUrlMapDefaultUrlRedirectStripQuery(original["stripQuery"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRegionUrlMapDefaultUrlRedirectHostRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapDefaultUrlRedirectHttpsRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapDefaultUrlRedirectPathRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapDefaultUrlRedirectPrefixRedirect(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapDefaultUrlRedirectRedirectResponseCode(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapDefaultUrlRedirectStripQuery(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRegionUrlMapRegion(v interface{}, d *resource_compute_region_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func 
expandComputeRegionUrlMapDefaultService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_region_url_map_fmt.Errorf("Invalid value for default_service: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionUrlMapDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapHostRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_region_url_map_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDescription, err := expandComputeRegionUrlMapHostRuleDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedHosts, err := expandComputeRegionUrlMapHostRuleHosts(original["hosts"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHosts); val.IsValid() && !isEmptyValue(val) { - transformed["hosts"] = transformedHosts - } - - transformedPathMatcher, err := expandComputeRegionUrlMapHostRulePathMatcher(original["path_matcher"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPathMatcher); val.IsValid() && !isEmptyValue(val) { - transformed["pathMatcher"] = transformedPathMatcher - } - - req = append(req, transformed) - } - return req, nil -} - -func 
expandComputeRegionUrlMapHostRuleDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapHostRuleHosts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_region_url_map_schema.Set).List() - return v, nil -} - -func expandComputeRegionUrlMapHostRulePathMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDefaultService, err := expandComputeRegionUrlMapPathMatcherDefaultService(original["default_service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedDefaultService); val.IsValid() && !isEmptyValue(val) { - transformed["defaultService"] = transformedDefaultService - } - - transformedDescription, err := expandComputeRegionUrlMapPathMatcherDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedName, err := expandComputeRegionUrlMapPathMatcherName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_region_url_map_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedRouteRules, err := expandComputeRegionUrlMapPathMatcherRouteRules(original["route_rules"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRouteRules); val.IsValid() && !isEmptyValue(val) { - transformed["routeRules"] = transformedRouteRules - } - - transformedPathRule, err := expandComputeRegionUrlMapPathMatcherPathRule(original["path_rule"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPathRule); val.IsValid() && !isEmptyValue(val) { - transformed["pathRules"] = transformedPathRule - } - - transformedDefaultUrlRedirect, err := expandComputeRegionUrlMapPathMatcherDefaultUrlRedirect(original["default_url_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedDefaultUrlRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["defaultUrlRedirect"] = transformedDefaultUrlRedirect - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapPathMatcherDefaultService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_region_url_map_fmt.Errorf("Invalid value for default_service: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionUrlMapPathMatcherDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeRegionUrlMapPathMatcherRouteRules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPriority, err := expandComputeRegionUrlMapPathMatcherRouteRulesPriority(original["priority"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPriority); val.IsValid() && !isEmptyValue(val) { - transformed["priority"] = transformedPriority - } - - transformedService, err := expandComputeRegionUrlMapPathMatcherRouteRulesService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - transformedHeaderAction, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderAction(original["header_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { - transformed["headerAction"] = transformedHeaderAction - } - - transformedMatchRules, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRules(original["match_rules"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedMatchRules); val.IsValid() && !isEmptyValue(val) { - transformed["matchRules"] = transformedMatchRules - } - - transformedRouteAction, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteAction(original["route_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRouteAction); val.IsValid() && 
!isEmptyValue(val) { - transformed["routeAction"] = transformedRouteAction - } - - transformedUrlRedirect, err := expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirect(original["url_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedUrlRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["urlRedirect"] = transformedUrlRedirect - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesPriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_region_url_map_fmt.Errorf("Invalid value for service: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequestHeadersToAdd, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd - } - - transformedRequestHeadersToRemove, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) - if err != nil { 
- return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove - } - - transformedResponseHeadersToAdd, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd - } - - transformedResponseHeadersToRemove, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRules(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFullPathMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(original["full_path_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedFullPathMatch); val.IsValid() && !isEmptyValue(val) { - transformed["fullPathMatch"] = transformedFullPathMatch - } - - transformedHeaderMatches, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(original["header_matches"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderMatches); val.IsValid() && !isEmptyValue(val) { - transformed["headerMatches"] = transformedHeaderMatches - } - - transformedIgnoreCase, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(original["ignore_case"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedIgnoreCase); val.IsValid() && !isEmptyValue(val) { - transformed["ignoreCase"] = transformedIgnoreCase - } - - transformedMetadataFilters, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(original["metadata_filters"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedMetadataFilters); val.IsValid() && !isEmptyValue(val) { - transformed["metadataFilters"] = transformedMetadataFilters - } - - transformedPrefixMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(original["prefix_match"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_region_url_map_reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !isEmptyValue(val) { - transformed["prefixMatch"] = transformedPrefixMatch - } - - transformedQueryParameterMatches, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(original["query_parameter_matches"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedQueryParameterMatches); val.IsValid() && !isEmptyValue(val) { - transformed["queryParameterMatches"] = transformedQueryParameterMatches - } - - transformedRegexMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(original["regex_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRegexMatch); val.IsValid() && !isEmptyValue(val) { - transformed["regexMatch"] = transformedRegexMatch - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExactMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(original["exact_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedExactMatch); val.IsValid() && !isEmptyValue(val) { - transformed["exactMatch"] = transformedExactMatch - } - - transformedHeaderName, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedInvertMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(original["invert_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedInvertMatch); val.IsValid() && !isEmptyValue(val) { - transformed["invertMatch"] = transformedInvertMatch - } - - transformedPrefixMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(original["prefix_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !isEmptyValue(val) { - transformed["prefixMatch"] = transformedPrefixMatch - } - - transformedPresentMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(original["present_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPresentMatch); val.IsValid() && !isEmptyValue(val) { - transformed["presentMatch"] = transformedPresentMatch - } - - transformedRangeMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(original["range_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRangeMatch); val.IsValid() && !isEmptyValue(val) { - transformed["rangeMatch"] = transformedRangeMatch - } - - transformedRegexMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(original["regex_match"], d, config) - if 
err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRegexMatch); val.IsValid() && !isEmptyValue(val) { - transformed["regexMatch"] = transformedRegexMatch - } - - transformedSuffixMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(original["suffix_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedSuffixMatch); val.IsValid() && !isEmptyValue(val) { - transformed["suffixMatch"] = transformedSuffixMatch - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRangeEnd, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(original["range_end"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRangeEnd); val.IsValid() && !isEmptyValue(val) { - transformed["rangeEnd"] = transformedRangeEnd - } - - transformedRangeStart, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(original["range_start"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRangeStart); val.IsValid() && !isEmptyValue(val) { - transformed["rangeStart"] = transformedRangeStart - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFilterLabels, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(original["filter_labels"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedFilterLabels); val.IsValid() && !isEmptyValue(val) { - transformed["filterLabels"] = transformedFilterLabels - } - - transformedFilterMatchCriteria, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(original["filter_match_criteria"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedFilterMatchCriteria); val.IsValid() && !isEmptyValue(val) { - transformed["filterMatchCriteria"] = transformedFilterMatchCriteria - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedValue, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - 
transformed["value"] = transformedValue - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExactMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(original["exact_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedExactMatch); val.IsValid() && !isEmptyValue(val) { - transformed["exactMatch"] = transformedExactMatch - } - - transformedName, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedPresentMatch, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(original["present_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPresentMatch); val.IsValid() && !isEmptyValue(val) { - transformed["presentMatch"] = transformedPresentMatch - } - - transformedRegexMatch, err := expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(original["regex_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRegexMatch); val.IsValid() && !isEmptyValue(val) { - transformed["regexMatch"] = transformedRegexMatch - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCorsPolicy, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(original["cors_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["corsPolicy"] = transformedCorsPolicy - } - - transformedFaultInjectionPolicy, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(original["fault_injection_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["faultInjectionPolicy"] = transformedFaultInjectionPolicy - } - - transformedRequestMirrorPolicy, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(original["request_mirror_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["requestMirrorPolicy"] = transformedRequestMirrorPolicy - } - - transformedRetryPolicy, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(original["retry_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["retryPolicy"] = transformedRetryPolicy - } - - transformedTimeout, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeout(original["timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["timeout"] = transformedTimeout - } - - transformedUrlRewrite, 
err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(original["url_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["urlRewrite"] = transformedUrlRewrite - } - - transformedWeightedBackendServices, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(original["weighted_backend_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !isEmptyValue(val) { - transformed["weightedBackendServices"] = transformedWeightedBackendServices - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowCredentials, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(original["allow_credentials"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !isEmptyValue(val) { - transformed["allowCredentials"] = transformedAllowCredentials - } - - transformedAllowHeaders, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(original["allow_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["allowHeaders"] = transformedAllowHeaders - } - - transformedAllowMethods, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(original["allow_methods"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedAllowMethods); val.IsValid() && !isEmptyValue(val) { - transformed["allowMethods"] = transformedAllowMethods - } - - transformedAllowOriginRegexes, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(original["allow_origin_regexes"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !isEmptyValue(val) { - transformed["allowOriginRegexes"] = transformedAllowOriginRegexes - } - - transformedAllowOrigins, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(original["allow_origins"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !isEmptyValue(val) { - transformed["allowOrigins"] = transformedAllowOrigins - } - - transformedDisabled, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(original["disabled"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { - transformed["disabled"] = transformedDisabled - } - - transformedExposeHeaders, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(original["expose_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["exposeHeaders"] = transformedExposeHeaders - } - - transformedMaxAge, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(original["max_age"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedMaxAge); val.IsValid() && !isEmptyValue(val) { - transformed["maxAge"] = transformedMaxAge - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { 
- l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAbort, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedAbort); val.IsValid() && !isEmptyValue(val) { - transformed["abort"] = transformedAbort - } - - transformedDelay, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedDelay); val.IsValid() && !isEmptyValue(val) { - transformed["delay"] = transformedDelay - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHttpStatus, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(original["http_status"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHttpStatus); val.IsValid() && !isEmptyValue(val) { - transformed["httpStatus"] = transformedHttpStatus - } - - transformedPercentage, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { - 
transformed["percentage"] = transformedPercentage - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFixedDelay, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixed_delay"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedFixedDelay); val.IsValid() && !isEmptyValue(val) { - transformed["fixedDelay"] = transformedFixedDelay - } - - transformedPercentage, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { - transformed["percentage"] = transformedPercentage - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) 
- - transformedNanos, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - transformedSeconds, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBackendService, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(original["backend_service"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_region_url_map_reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { - transformed["backendService"] = transformedBackendService - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_region_url_map_fmt.Errorf("Invalid value for backend_service: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNumRetries, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(original["num_retries"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedNumRetries); val.IsValid() && !isEmptyValue(val) { - transformed["numRetries"] = transformedNumRetries - } - - transformedPerTryTimeout, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(original["per_try_timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["perTryTimeout"] = transformedPerTryTimeout - } - - transformedRetryConditions, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(original["retry_conditions"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_region_url_map_reflect.ValueOf(transformedRetryConditions); val.IsValid() && !isEmptyValue(val) { - transformed["retryConditions"] = transformedRetryConditions - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNanos, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - transformedSeconds, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNanos, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - transformedSeconds, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHostRewrite, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(original["host_rewrite"], d, config) - if err != nil { - 
return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHostRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["hostRewrite"] = transformedHostRewrite - } - - transformedPathPrefixRewrite, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(original["path_prefix_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["pathPrefixRewrite"] = transformedPathPrefixRewrite - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBackendService, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(original["backend_service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { - transformed["backendService"] = transformedBackendService - } - - transformedHeaderAction, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(original["header_action"], d, config) - if err != nil { - 
return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { - transformed["headerAction"] = transformedHeaderAction - } - - transformedWeight, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(original["weight"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { - transformed["weight"] = transformedWeight - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_region_url_map_fmt.Errorf("Invalid value for backend_service: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequestHeadersToAdd, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd - } - - transformedRequestHeadersToRemove, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove - } - - transformedResponseHeadersToAdd, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd - } - - transformedResponseHeadersToRemove, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) - if err != 
nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHostRedirect, err := expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(original["host_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["hostRedirect"] = transformedHostRedirect - } - - transformedHttpsRedirect, err := expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(original["https_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["httpsRedirect"] = transformedHttpsRedirect - } - - transformedPathRedirect, err := 
expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(original["path_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["pathRedirect"] = transformedPathRedirect - } - - transformedPrefixRedirect, err := expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["prefixRedirect"] = transformedPrefixRedirect - } - - transformedRedirectResponseCode, err := expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { - transformed["redirectResponseCode"] = transformedRedirectResponseCode - } - - transformedStripQuery, err := expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(original["strip_query"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { - transformed["stripQuery"] = transformedStripQuery - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedService, err := expandComputeRegionUrlMapPathMatcherPathRuleService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - transformedPaths, err := expandComputeRegionUrlMapPathMatcherPathRulePaths(original["paths"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPaths); val.IsValid() && !isEmptyValue(val) { - transformed["paths"] = transformedPaths - } - - transformedRouteAction, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteAction(original["route_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRouteAction); val.IsValid() && !isEmptyValue(val) { - transformed["routeAction"] = transformedRouteAction - } - - transformedUrlRedirect, err := 
expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirect(original["url_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedUrlRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["urlRedirect"] = transformedUrlRedirect - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_region_url_map_fmt.Errorf("Invalid value for service: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionUrlMapPathMatcherPathRulePaths(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_region_url_map_schema.Set).List() - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCorsPolicy, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicy(original["cors_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["corsPolicy"] = transformedCorsPolicy - } - - transformedFaultInjectionPolicy, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(original["fault_injection_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() 
&& !isEmptyValue(val) { - transformed["faultInjectionPolicy"] = transformedFaultInjectionPolicy - } - - transformedRequestMirrorPolicy, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(original["request_mirror_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["requestMirrorPolicy"] = transformedRequestMirrorPolicy - } - - transformedRetryPolicy, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicy(original["retry_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["retryPolicy"] = transformedRetryPolicy - } - - transformedTimeout, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeout(original["timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["timeout"] = transformedTimeout - } - - transformedUrlRewrite, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewrite(original["url_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["urlRewrite"] = transformedUrlRewrite - } - - transformedWeightedBackendServices, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(original["weighted_backend_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !isEmptyValue(val) { - transformed["weightedBackendServices"] = 
transformedWeightedBackendServices - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowCredentials, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(original["allow_credentials"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !isEmptyValue(val) { - transformed["allowCredentials"] = transformedAllowCredentials - } - - transformedAllowHeaders, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(original["allow_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["allowHeaders"] = transformedAllowHeaders - } - - transformedAllowMethods, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(original["allow_methods"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedAllowMethods); val.IsValid() && !isEmptyValue(val) { - transformed["allowMethods"] = transformedAllowMethods - } - - transformedAllowOriginRegexes, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(original["allow_origin_regexes"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !isEmptyValue(val) { - transformed["allowOriginRegexes"] = transformedAllowOriginRegexes - } - - 
transformedAllowOrigins, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(original["allow_origins"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !isEmptyValue(val) { - transformed["allowOrigins"] = transformedAllowOrigins - } - - transformedDisabled, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(original["disabled"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { - transformed["disabled"] = transformedDisabled - } - - transformedExposeHeaders, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(original["expose_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["exposeHeaders"] = transformedExposeHeaders - } - - transformedMaxAge, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(original["max_age"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedMaxAge); val.IsValid() && !isEmptyValue(val) { - transformed["maxAge"] = transformedMaxAge - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAbort, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedAbort); val.IsValid() && !isEmptyValue(val) { - transformed["abort"] = transformedAbort - } - - transformedDelay, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedDelay); val.IsValid() && !isEmptyValue(val) { - transformed["delay"] = 
transformedDelay - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHttpStatus, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(original["http_status"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHttpStatus); val.IsValid() && !isEmptyValue(val) { - transformed["httpStatus"] = transformedHttpStatus - } - - transformedPercentage, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { - transformed["percentage"] = transformedPercentage - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFixedDelay, err := 
expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixed_delay"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedFixedDelay); val.IsValid() && !isEmptyValue(val) { - transformed["fixedDelay"] = transformedFixedDelay - } - - transformedPercentage, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { - transformed["percentage"] = transformedPercentage - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNanos, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - transformedSeconds, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBackendService, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(original["backend_service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { - transformed["backendService"] = transformedBackendService - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_region_url_map_fmt.Errorf("Invalid value for backend_service: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNumRetries, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(original["num_retries"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedNumRetries); val.IsValid() && !isEmptyValue(val) { - transformed["numRetries"] = transformedNumRetries - } - - transformedPerTryTimeout, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(original["per_try_timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["perTryTimeout"] = transformedPerTryTimeout - } - - transformedRetryConditions, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(original["retry_conditions"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRetryConditions); val.IsValid() && !isEmptyValue(val) { - transformed["retryConditions"] = transformedRetryConditions - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNanos, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_region_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - transformedSeconds, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNanos, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - transformedSeconds, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_region_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHostRewrite, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(original["host_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHostRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["hostRewrite"] = transformedHostRewrite - } - - transformedPathPrefixRewrite, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(original["path_prefix_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["pathPrefixRewrite"] = transformedPathPrefixRewrite - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBackendService, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(original["backend_service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { - transformed["backendService"] = transformedBackendService - } - - transformedHeaderAction, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(original["header_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { - transformed["headerAction"] = transformedHeaderAction - } - - transformedWeight, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(original["weight"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { - transformed["weight"] = transformedWeight - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, 
resource_compute_region_url_map_fmt.Errorf("Invalid value for backend_service: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequestHeadersToAdd, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd - } - - transformedRequestHeadersToRemove, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove - } - - transformedResponseHeadersToAdd, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd - } - - transformedResponseHeadersToRemove, err := 
expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedReplace); 
val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := 
expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirect(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHostRedirect, err := expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(original["host_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["hostRedirect"] = transformedHostRedirect - } - - transformedHttpsRedirect, err := expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(original["https_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["httpsRedirect"] = transformedHttpsRedirect - } - - transformedPathRedirect, err := expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(original["path_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["pathRedirect"] = transformedPathRedirect - } - - transformedPrefixRedirect, err := expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["prefixRedirect"] = transformedPrefixRedirect - } - - transformedRedirectResponseCode, err := expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_region_url_map_reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { - transformed["redirectResponseCode"] = transformedRedirectResponseCode - } - - transformedStripQuery, err := expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectStripQuery(original["strip_query"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { - transformed["stripQuery"] = transformedStripQuery - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherPathRuleUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHostRedirect, err := 
expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectHostRedirect(original["host_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["hostRedirect"] = transformedHostRedirect - } - - transformedHttpsRedirect, err := expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(original["https_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["httpsRedirect"] = transformedHttpsRedirect - } - - transformedPathRedirect, err := expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectPathRedirect(original["path_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["pathRedirect"] = transformedPathRedirect - } - - transformedPrefixRedirect, err := expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["prefixRedirect"] = transformedPrefixRedirect - } - - transformedRedirectResponseCode, err := expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { - transformed["redirectResponseCode"] = transformedRedirectResponseCode - } - - transformedStripQuery, err := expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectStripQuery(original["strip_query"], d, 
config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { - transformed["stripQuery"] = transformedStripQuery - } - - return transformed, nil -} - -func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapPathMatcherDefaultUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapTest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDescription, err := expandComputeRegionUrlMapTestDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedHost, err 
:= expandComputeRegionUrlMapTestHost(original["host"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { - transformed["host"] = transformedHost - } - - transformedPath, err := expandComputeRegionUrlMapTestPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedService, err := expandComputeRegionUrlMapTestService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRegionUrlMapTestDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapTestHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapTestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapTestService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("backendServices", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_region_url_map_fmt.Errorf("Invalid value for service: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRegionUrlMapDefaultUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHostRedirect, err := expandComputeRegionUrlMapDefaultUrlRedirectHostRedirect(original["host_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["hostRedirect"] = transformedHostRedirect - } - - transformedHttpsRedirect, err := expandComputeRegionUrlMapDefaultUrlRedirectHttpsRedirect(original["https_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["httpsRedirect"] = transformedHttpsRedirect - } - - transformedPathRedirect, err := expandComputeRegionUrlMapDefaultUrlRedirectPathRedirect(original["path_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["pathRedirect"] = transformedPathRedirect - } - - transformedPrefixRedirect, err := expandComputeRegionUrlMapDefaultUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["prefixRedirect"] = transformedPrefixRedirect - } - - transformedRedirectResponseCode, err := expandComputeRegionUrlMapDefaultUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { - transformed["redirectResponseCode"] = transformedRedirectResponseCode - } - - transformedStripQuery, err := 
expandComputeRegionUrlMapDefaultUrlRedirectStripQuery(original["strip_query"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_region_url_map_reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { - transformed["stripQuery"] = transformedStripQuery - } - - return transformed, nil -} - -func expandComputeRegionUrlMapDefaultUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapDefaultUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapDefaultUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapDefaultUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapDefaultUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapDefaultUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRegionUrlMapRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_region_url_map_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeReservation() *resource_compute_reservation_schema.Resource { - return &resource_compute_reservation_schema.Resource{ - Create: resourceComputeReservationCreate, - Read: resourceComputeReservationRead, - Update: resourceComputeReservationUpdate, - Delete: resourceComputeReservationDelete, - - 
Importer: &resource_compute_reservation_schema.ResourceImporter{ - State: resourceComputeReservationImport, - }, - - Timeouts: &resource_compute_reservation_schema.ResourceTimeout{ - Create: resource_compute_reservation_schema.DefaultTimeout(4 * resource_compute_reservation_time.Minute), - Update: resource_compute_reservation_schema.DefaultTimeout(4 * resource_compute_reservation_time.Minute), - Delete: resource_compute_reservation_schema.DefaultTimeout(4 * resource_compute_reservation_time.Minute), - }, - - Schema: map[string]*resource_compute_reservation_schema.Schema{ - "name": { - Type: resource_compute_reservation_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "specific_reservation": { - Type: resource_compute_reservation_schema.TypeList, - Required: true, - Description: `Reservation for instances with specific machine shapes.`, - MaxItems: 1, - Elem: &resource_compute_reservation_schema.Resource{ - Schema: map[string]*resource_compute_reservation_schema.Schema{ - "count": { - Type: resource_compute_reservation_schema.TypeInt, - Required: true, - ValidateFunc: resource_compute_reservation_validation.IntAtLeast(1), - Description: `The number of resources that are allocated.`, - }, - "instance_properties": { - Type: resource_compute_reservation_schema.TypeList, - Required: true, - ForceNew: true, - Description: `The instance properties for the reservation.`, - MaxItems: 1, - Elem: &resource_compute_reservation_schema.Resource{ - Schema: map[string]*resource_compute_reservation_schema.Schema{ - 
"machine_type": { - Type: resource_compute_reservation_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the machine type to reserve.`, - }, - "guest_accelerators": { - Type: resource_compute_reservation_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Guest accelerator type and count.`, - Elem: &resource_compute_reservation_schema.Resource{ - Schema: map[string]*resource_compute_reservation_schema.Schema{ - "accelerator_count": { - Type: resource_compute_reservation_schema.TypeInt, - Required: true, - ForceNew: true, - Description: `The number of the guest accelerator cards exposed to -this instance.`, - }, - "accelerator_type": { - Type: resource_compute_reservation_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The full or partial URL of the accelerator type to -attach to this instance. For example: -'projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100' - -If you are creating an instance template, specify only the accelerator name.`, - }, - }, - }, - }, - "local_ssds": { - Type: resource_compute_reservation_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The amount of local ssd to reserve with each instance. This -reserves disks of type 'local-ssd'.`, - Elem: &resource_compute_reservation_schema.Resource{ - Schema: map[string]*resource_compute_reservation_schema.Schema{ - "disk_size_gb": { - Type: resource_compute_reservation_schema.TypeInt, - Required: true, - ForceNew: true, - Description: `The size of the disk in base-2 GB.`, - }, - "interface": { - Type: resource_compute_reservation_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_reservation_validation.StringInSlice([]string{"SCSI", "NVME", ""}, false), - Description: `The disk interface to use for attaching this disk. 
Default value: "SCSI" Possible values: ["SCSI", "NVME"]`, - Default: "SCSI", - }, - }, - }, - }, - "min_cpu_platform": { - Type: resource_compute_reservation_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The minimum CPU platform for the reservation. For example, -'"Intel Skylake"'. See -the CPU platform availability reference](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform#availablezones) -for information on available CPU platforms.`, - }, - }, - }, - }, - "in_use_count": { - Type: resource_compute_reservation_schema.TypeInt, - Computed: true, - Description: `How many instances are in use.`, - }, - }, - }, - }, - "zone": { - Type: resource_compute_reservation_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The zone where the reservation is made.`, - }, - "description": { - Type: resource_compute_reservation_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "specific_reservation_required": { - Type: resource_compute_reservation_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `When set to true, only VMs that target this reservation by name can -consume this reservation. Otherwise, it can be consumed by VMs with -affinity for any reservation. Defaults to false.`, - Default: false, - }, - "commitment": { - Type: resource_compute_reservation_schema.TypeString, - Computed: true, - Description: `Full or partial URL to a parent commitment. 
This field displays for -reservations that are tied to a commitment.`, - }, - "creation_timestamp": { - Type: resource_compute_reservation_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "status": { - Type: resource_compute_reservation_schema.TypeString, - Computed: true, - Description: `The status of the reservation.`, - }, - "project": { - Type: resource_compute_reservation_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_reservation_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeReservationCreate(d *resource_compute_reservation_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeReservationDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_reservation_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_reservation_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeReservationName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_reservation_reflect.ValueOf(nameProp)) && (ok || !resource_compute_reservation_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - specificReservationRequiredProp, err := expandComputeReservationSpecificReservationRequired(d.Get("specific_reservation_required"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("specific_reservation_required"); !isEmptyValue(resource_compute_reservation_reflect.ValueOf(specificReservationRequiredProp)) && (ok || 
!resource_compute_reservation_reflect.DeepEqual(v, specificReservationRequiredProp)) { - obj["specificReservationRequired"] = specificReservationRequiredProp - } - specificReservationProp, err := expandComputeReservationSpecificReservation(d.Get("specific_reservation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("specific_reservation"); !isEmptyValue(resource_compute_reservation_reflect.ValueOf(specificReservationProp)) && (ok || !resource_compute_reservation_reflect.DeepEqual(v, specificReservationProp)) { - obj["specificReservation"] = specificReservationProp - } - zoneProp, err := expandComputeReservationZone(d.Get("zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(resource_compute_reservation_reflect.ValueOf(zoneProp)) && (ok || !resource_compute_reservation_reflect.DeepEqual(v, zoneProp)) { - obj["zone"] = zoneProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations") - if err != nil { - return err - } - - resource_compute_reservation_log.Printf("[DEBUG] Creating new Reservation: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_reservation_fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_reservation_schema.TimeoutCreate)) - if err != nil { - return resource_compute_reservation_fmt.Errorf("Error creating Reservation: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/reservations/{{name}}") - if err != nil { - return resource_compute_reservation_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, 
project, "Creating Reservation", userAgent, - d.Timeout(resource_compute_reservation_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_reservation_fmt.Errorf("Error waiting to create Reservation: %s", err) - } - - resource_compute_reservation_log.Printf("[DEBUG] Finished creating Reservation %q: %#v", d.Id(), res) - - return resourceComputeReservationRead(d, meta) -} - -func resourceComputeReservationRead(d *resource_compute_reservation_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_reservation_fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_reservation_fmt.Sprintf("ComputeReservation %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_reservation_fmt.Errorf("Error reading Reservation: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeReservationCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_reservation_fmt.Errorf("Error reading Reservation: %s", err) - } - if err := d.Set("description", flattenComputeReservationDescription(res["description"], d, config)); err != nil { - return resource_compute_reservation_fmt.Errorf("Error reading Reservation: %s", err) - } - if err := d.Set("name", flattenComputeReservationName(res["name"], d, config)); err != nil { - 
return resource_compute_reservation_fmt.Errorf("Error reading Reservation: %s", err) - } - if err := d.Set("commitment", flattenComputeReservationCommitment(res["commitment"], d, config)); err != nil { - return resource_compute_reservation_fmt.Errorf("Error reading Reservation: %s", err) - } - if err := d.Set("specific_reservation_required", flattenComputeReservationSpecificReservationRequired(res["specificReservationRequired"], d, config)); err != nil { - return resource_compute_reservation_fmt.Errorf("Error reading Reservation: %s", err) - } - if err := d.Set("status", flattenComputeReservationStatus(res["status"], d, config)); err != nil { - return resource_compute_reservation_fmt.Errorf("Error reading Reservation: %s", err) - } - if err := d.Set("specific_reservation", flattenComputeReservationSpecificReservation(res["specificReservation"], d, config)); err != nil { - return resource_compute_reservation_fmt.Errorf("Error reading Reservation: %s", err) - } - if err := d.Set("zone", flattenComputeReservationZone(res["zone"], d, config)); err != nil { - return resource_compute_reservation_fmt.Errorf("Error reading Reservation: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_reservation_fmt.Errorf("Error reading Reservation: %s", err) - } - - return nil -} - -func resourceComputeReservationUpdate(d *resource_compute_reservation_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_reservation_fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("specific_reservation") { - obj := make(map[string]interface{}) - - specificReservationProp, err := 
expandComputeReservationSpecificReservation(d.Get("specific_reservation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("specific_reservation"); !isEmptyValue(resource_compute_reservation_reflect.ValueOf(v)) && (ok || !resource_compute_reservation_reflect.DeepEqual(v, specificReservationProp)) { - obj["specificReservation"] = specificReservationProp - } - - obj, err = resourceComputeReservationUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations/{{name}}/resize") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_reservation_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_reservation_fmt.Errorf("Error updating Reservation %q: %s", d.Id(), err) - } else { - resource_compute_reservation_log.Printf("[DEBUG] Finished updating Reservation %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating Reservation", userAgent, - d.Timeout(resource_compute_reservation_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeReservationRead(d, meta) -} - -func resourceComputeReservationDelete(d *resource_compute_reservation_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_reservation_fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/reservations/{{name}}") - if err != 
nil { - return err - } - - var obj map[string]interface{} - resource_compute_reservation_log.Printf("[DEBUG] Deleting Reservation %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_reservation_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Reservation") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting Reservation", userAgent, - d.Timeout(resource_compute_reservation_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_reservation_log.Printf("[DEBUG] Finished deleting Reservation %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeReservationImport(d *resource_compute_reservation_schema.ResourceData, meta interface{}) ([]*resource_compute_reservation_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/reservations/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/reservations/{{name}}") - if err != nil { - return nil, resource_compute_reservation_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_reservation_schema.ResourceData{d}, nil -} - -func flattenComputeReservationCreationTimestamp(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeReservationDescription(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeReservationName(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeReservationCommitment(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeReservationSpecificReservationRequired(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeReservationStatus(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeReservationSpecificReservation(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["count"] = - flattenComputeReservationSpecificReservationCount(original["count"], d, config) - transformed["in_use_count"] = - flattenComputeReservationSpecificReservationInUseCount(original["inUseCount"], d, config) - transformed["instance_properties"] = - flattenComputeReservationSpecificReservationInstanceProperties(original["instanceProperties"], d, config) - return []interface{}{transformed} -} - -func flattenComputeReservationSpecificReservationCount(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_reservation_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeReservationSpecificReservationInUseCount(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_reservation_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := 
int(floatVal) - return intVal - } - - return v -} - -func flattenComputeReservationSpecificReservationInstanceProperties(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["machine_type"] = - flattenComputeReservationSpecificReservationInstancePropertiesMachineType(original["machineType"], d, config) - transformed["min_cpu_platform"] = - flattenComputeReservationSpecificReservationInstancePropertiesMinCpuPlatform(original["minCpuPlatform"], d, config) - transformed["guest_accelerators"] = - flattenComputeReservationSpecificReservationInstancePropertiesGuestAccelerators(original["guestAccelerators"], d, config) - transformed["local_ssds"] = - flattenComputeReservationSpecificReservationInstancePropertiesLocalSsds(original["localSsds"], d, config) - return []interface{}{transformed} -} - -func flattenComputeReservationSpecificReservationInstancePropertiesMachineType(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeReservationSpecificReservationInstancePropertiesMinCpuPlatform(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeReservationSpecificReservationInstancePropertiesGuestAccelerators(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "accelerator_type": 
flattenComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorType(original["acceleratorType"], d, config), - "accelerator_count": flattenComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorCount(original["acceleratorCount"], d, config), - }) - } - return transformed -} - -func flattenComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorType(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorCount(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_reservation_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeReservationSpecificReservationInstancePropertiesLocalSsds(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "interface": flattenComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterface(original["interface"], d, config), - "disk_size_gb": flattenComputeReservationSpecificReservationInstancePropertiesLocalSsdsDiskSizeGb(original["diskSizeGb"], d, config), - }) - } - return transformed -} - -func flattenComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterface(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeReservationSpecificReservationInstancePropertiesLocalSsdsDiskSizeGb(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_reservation_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeReservationZone(v interface{}, d *resource_compute_reservation_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeReservationDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeReservationName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeReservationSpecificReservationRequired(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeReservationSpecificReservation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCount, err := expandComputeReservationSpecificReservationCount(original["count"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_reservation_reflect.ValueOf(transformedCount); val.IsValid() && !isEmptyValue(val) { - transformed["count"] = transformedCount - } - - transformedInUseCount, err := expandComputeReservationSpecificReservationInUseCount(original["in_use_count"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_reservation_reflect.ValueOf(transformedInUseCount); val.IsValid() && !isEmptyValue(val) { 
- transformed["inUseCount"] = transformedInUseCount - } - - transformedInstanceProperties, err := expandComputeReservationSpecificReservationInstanceProperties(original["instance_properties"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_reservation_reflect.ValueOf(transformedInstanceProperties); val.IsValid() && !isEmptyValue(val) { - transformed["instanceProperties"] = transformedInstanceProperties - } - - return transformed, nil -} - -func expandComputeReservationSpecificReservationCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeReservationSpecificReservationInUseCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeReservationSpecificReservationInstanceProperties(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMachineType, err := expandComputeReservationSpecificReservationInstancePropertiesMachineType(original["machine_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_reservation_reflect.ValueOf(transformedMachineType); val.IsValid() && !isEmptyValue(val) { - transformed["machineType"] = transformedMachineType - } - - transformedMinCpuPlatform, err := expandComputeReservationSpecificReservationInstancePropertiesMinCpuPlatform(original["min_cpu_platform"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_reservation_reflect.ValueOf(transformedMinCpuPlatform); val.IsValid() && !isEmptyValue(val) { - transformed["minCpuPlatform"] = transformedMinCpuPlatform - } - - transformedGuestAccelerators, err := 
expandComputeReservationSpecificReservationInstancePropertiesGuestAccelerators(original["guest_accelerators"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_reservation_reflect.ValueOf(transformedGuestAccelerators); val.IsValid() && !isEmptyValue(val) { - transformed["guestAccelerators"] = transformedGuestAccelerators - } - - transformedLocalSsds, err := expandComputeReservationSpecificReservationInstancePropertiesLocalSsds(original["local_ssds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_reservation_reflect.ValueOf(transformedLocalSsds); val.IsValid() && !isEmptyValue(val) { - transformed["localSsds"] = transformedLocalSsds - } - - return transformed, nil -} - -func expandComputeReservationSpecificReservationInstancePropertiesMachineType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeReservationSpecificReservationInstancePropertiesMinCpuPlatform(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeReservationSpecificReservationInstancePropertiesGuestAccelerators(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAcceleratorType, err := expandComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorType(original["accelerator_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_reservation_reflect.ValueOf(transformedAcceleratorType); val.IsValid() && !isEmptyValue(val) { - transformed["acceleratorType"] = transformedAcceleratorType - } - - transformedAcceleratorCount, err := 
expandComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorCount(original["accelerator_count"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_reservation_reflect.ValueOf(transformedAcceleratorCount); val.IsValid() && !isEmptyValue(val) { - transformed["acceleratorCount"] = transformedAcceleratorCount - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeReservationSpecificReservationInstancePropertiesGuestAcceleratorsAcceleratorCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeReservationSpecificReservationInstancePropertiesLocalSsds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInterface, err := expandComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterface(original["interface"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_reservation_reflect.ValueOf(transformedInterface); val.IsValid() && !isEmptyValue(val) { - transformed["interface"] = transformedInterface - } - - transformedDiskSizeGb, err := expandComputeReservationSpecificReservationInstancePropertiesLocalSsdsDiskSizeGb(original["disk_size_gb"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_reservation_reflect.ValueOf(transformedDiskSizeGb); val.IsValid() && !isEmptyValue(val) { - transformed["diskSizeGb"] = transformedDiskSizeGb - } - - req = append(req, transformed) - } - return req, nil -} - 
-func expandComputeReservationSpecificReservationInstancePropertiesLocalSsdsInterface(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeReservationSpecificReservationInstancePropertiesLocalSsdsDiskSizeGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeReservationZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_reservation_fmt.Errorf("Invalid value for zone: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeReservationUpdateEncoder(d *resource_compute_reservation_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - newObj := make(map[string]interface{}) - newObj["specificSkuCount"] = obj["specificReservation"].(map[string]interface{})["count"] - - return newObj, nil -} - -func resourceComputeResourcePolicy() *resource_compute_resource_policy_schema.Resource { - return &resource_compute_resource_policy_schema.Resource{ - Create: resourceComputeResourcePolicyCreate, - Read: resourceComputeResourcePolicyRead, - Delete: resourceComputeResourcePolicyDelete, - - Importer: &resource_compute_resource_policy_schema.ResourceImporter{ - State: resourceComputeResourcePolicyImport, - }, - - Timeouts: &resource_compute_resource_policy_schema.ResourceTimeout{ - Create: resource_compute_resource_policy_schema.DefaultTimeout(4 * resource_compute_resource_policy_time.Minute), - Delete: resource_compute_resource_policy_schema.DefaultTimeout(4 * resource_compute_resource_policy_time.Minute), - }, - - Schema: map[string]*resource_compute_resource_policy_schema.Schema{ - "name": { - Type: resource_compute_resource_policy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the resource, 
provided by the client when initially creating -the resource. The resource name must be 1-63 characters long, and comply -with RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])'? which means the -first character must be a lowercase letter, and all following characters -must be a dash, lowercase letter, or digit, except the last character, -which cannot be a dash.`, - }, - "description": { - Type: resource_compute_resource_policy_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. Provide this property when you create the resource.`, - }, - "group_placement_policy": { - Type: resource_compute_resource_policy_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Resource policy for instances used for placement configuration.`, - MaxItems: 1, - Elem: &resource_compute_resource_policy_schema.Resource{ - Schema: map[string]*resource_compute_resource_policy_schema.Schema{ - "availability_domain_count": { - Type: resource_compute_resource_policy_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The number of availability domains instances will be spread across. If two instances are in different -availability domain, they will not be put in the same low latency network`, - AtLeastOneOf: []string{"group_placement_policy.0.vm_count", "group_placement_policy.0.availability_domain_count"}, - }, - "collocation": { - Type: resource_compute_resource_policy_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_resource_policy_validation.StringInSlice([]string{"COLLOCATED", ""}, false), - Description: `Collocation specifies whether to place VMs inside the same availability domain on the same low-latency network. -Specify 'COLLOCATED' to enable collocation. Can only be specified with 'vm_count'. 
If compute instances are created -with a COLLOCATED policy, then exactly 'vm_count' instances must be created at the same time with the resource policy -attached. Possible values: ["COLLOCATED"]`, - }, - "vm_count": { - Type: resource_compute_resource_policy_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `Number of vms in this placement group.`, - AtLeastOneOf: []string{"group_placement_policy.0.vm_count", "group_placement_policy.0.availability_domain_count"}, - }, - }, - }, - ConflictsWith: []string{"snapshot_schedule_policy", "instance_schedule_policy"}, - }, - "instance_schedule_policy": { - Type: resource_compute_resource_policy_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Resource policy for scheduling instance operations.`, - MaxItems: 1, - Elem: &resource_compute_resource_policy_schema.Resource{ - Schema: map[string]*resource_compute_resource_policy_schema.Schema{ - "time_zone": { - Type: resource_compute_resource_policy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Specifies the time zone to be used in interpreting the schedule. The value of this field must be a time zone name -from the tz database: http://en.wikipedia.org/wiki/Tz_database.`, - }, - "expiration_time": { - Type: resource_compute_resource_policy_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The expiration time of the schedule. The timestamp is an RFC3339 string.`, - }, - "start_time": { - Type: resource_compute_resource_policy_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The start time of the schedule. 
The timestamp is an RFC3339 string.`, - }, - "vm_start_schedule": { - Type: resource_compute_resource_policy_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Specifies the schedule for starting instances.`, - MaxItems: 1, - Elem: &resource_compute_resource_policy_schema.Resource{ - Schema: map[string]*resource_compute_resource_policy_schema.Schema{ - "schedule": { - Type: resource_compute_resource_policy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Specifies the frequency for the operation, using the unix-cron format.`, - }, - }, - }, - AtLeastOneOf: []string{"instance_schedule_policy.0.vm_start_schedule", "instance_schedule_policy.0.vm_stop_schedule"}, - }, - "vm_stop_schedule": { - Type: resource_compute_resource_policy_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Specifies the schedule for stopping instances.`, - MaxItems: 1, - Elem: &resource_compute_resource_policy_schema.Resource{ - Schema: map[string]*resource_compute_resource_policy_schema.Schema{ - "schedule": { - Type: resource_compute_resource_policy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Specifies the frequency for the operation, using the unix-cron format.`, - }, - }, - }, - AtLeastOneOf: []string{"instance_schedule_policy.0.vm_start_schedule", "instance_schedule_policy.0.vm_stop_schedule"}, - }, - }, - }, - ConflictsWith: []string{"snapshot_schedule_policy", "group_placement_policy"}, - }, - "region": { - Type: resource_compute_resource_policy_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Region where resource policy resides.`, - }, - "snapshot_schedule_policy": { - Type: resource_compute_resource_policy_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Policy for creating snapshots of persistent disks.`, - MaxItems: 1, - Elem: &resource_compute_resource_policy_schema.Resource{ - Schema: 
map[string]*resource_compute_resource_policy_schema.Schema{ - "schedule": { - Type: resource_compute_resource_policy_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Contains one of an 'hourlySchedule', 'dailySchedule', or 'weeklySchedule'.`, - MaxItems: 1, - Elem: &resource_compute_resource_policy_schema.Resource{ - Schema: map[string]*resource_compute_resource_policy_schema.Schema{ - "daily_schedule": { - Type: resource_compute_resource_policy_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The policy will execute every nth day at the specified time.`, - MaxItems: 1, - Elem: &resource_compute_resource_policy_schema.Resource{ - Schema: map[string]*resource_compute_resource_policy_schema.Schema{ - "days_in_cycle": { - Type: resource_compute_resource_policy_schema.TypeInt, - Required: true, - ForceNew: true, - Description: `The number of days between snapshots.`, - }, - "start_time": { - Type: resource_compute_resource_policy_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateHourlyOnly, - Description: `This must be in UTC format that resolves to one of -00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. 
For example, -both 13:00-5 and 08:00 are valid.`, - }, - }, - }, - ExactlyOneOf: []string{"snapshot_schedule_policy.0.schedule.0.hourly_schedule", "snapshot_schedule_policy.0.schedule.0.daily_schedule", "snapshot_schedule_policy.0.schedule.0.weekly_schedule"}, - }, - "hourly_schedule": { - Type: resource_compute_resource_policy_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The policy will execute every nth hour starting at the specified time.`, - MaxItems: 1, - Elem: &resource_compute_resource_policy_schema.Resource{ - Schema: map[string]*resource_compute_resource_policy_schema.Schema{ - "hours_in_cycle": { - Type: resource_compute_resource_policy_schema.TypeInt, - Required: true, - ForceNew: true, - Description: `The number of hours between snapshots.`, - }, - "start_time": { - Type: resource_compute_resource_policy_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateHourlyOnly, - Description: `Time within the window to start the operations. -It must be in an hourly format "HH:MM", -where HH : [00-23] and MM : [00] GMT. 
-eg: 21:00`, - }, - }, - }, - ExactlyOneOf: []string{"snapshot_schedule_policy.0.schedule.0.hourly_schedule", "snapshot_schedule_policy.0.schedule.0.daily_schedule", "snapshot_schedule_policy.0.schedule.0.weekly_schedule"}, - }, - "weekly_schedule": { - Type: resource_compute_resource_policy_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Allows specifying a snapshot time for each day of the week.`, - MaxItems: 1, - Elem: &resource_compute_resource_policy_schema.Resource{ - Schema: map[string]*resource_compute_resource_policy_schema.Schema{ - "day_of_weeks": { - Type: resource_compute_resource_policy_schema.TypeSet, - Required: true, - ForceNew: true, - Description: `May contain up to seven (one for each day of the week) snapshot times.`, - MinItems: 1, - MaxItems: 7, - Elem: computeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksSchema(), - }, - }, - }, - ExactlyOneOf: []string{"snapshot_schedule_policy.0.schedule.0.hourly_schedule", "snapshot_schedule_policy.0.schedule.0.daily_schedule", "snapshot_schedule_policy.0.schedule.0.weekly_schedule"}, - }, - }, - }, - }, - "retention_policy": { - Type: resource_compute_resource_policy_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Retention policy applied to snapshots created by this resource policy.`, - MaxItems: 1, - Elem: &resource_compute_resource_policy_schema.Resource{ - Schema: map[string]*resource_compute_resource_policy_schema.Schema{ - "max_retention_days": { - Type: resource_compute_resource_policy_schema.TypeInt, - Required: true, - ForceNew: true, - Description: `Maximum age of the snapshot that is allowed to be kept.`, - }, - "on_source_disk_delete": { - Type: resource_compute_resource_policy_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_resource_policy_validation.StringInSlice([]string{"KEEP_AUTO_SNAPSHOTS", "APPLY_RETENTION_POLICY", ""}, false), - Description: `Specifies the behavior to apply to 
scheduled snapshots when -the source disk is deleted. Default value: "KEEP_AUTO_SNAPSHOTS" Possible values: ["KEEP_AUTO_SNAPSHOTS", "APPLY_RETENTION_POLICY"]`, - Default: "KEEP_AUTO_SNAPSHOTS", - }, - }, - }, - }, - "snapshot_properties": { - Type: resource_compute_resource_policy_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Properties with which the snapshots are created, such as labels.`, - MaxItems: 1, - Elem: &resource_compute_resource_policy_schema.Resource{ - Schema: map[string]*resource_compute_resource_policy_schema.Schema{ - "guest_flush": { - Type: resource_compute_resource_policy_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether to perform a 'guest aware' snapshot.`, - AtLeastOneOf: []string{"snapshot_schedule_policy.0.snapshot_properties.0.labels", "snapshot_schedule_policy.0.snapshot_properties.0.storage_locations", "snapshot_schedule_policy.0.snapshot_properties.0.guest_flush"}, - }, - "labels": { - Type: resource_compute_resource_policy_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `A set of key-value pairs.`, - Elem: &resource_compute_resource_policy_schema.Schema{Type: resource_compute_resource_policy_schema.TypeString}, - AtLeastOneOf: []string{"snapshot_schedule_policy.0.snapshot_properties.0.labels", "snapshot_schedule_policy.0.snapshot_properties.0.storage_locations", "snapshot_schedule_policy.0.snapshot_properties.0.guest_flush"}, - }, - "storage_locations": { - Type: resource_compute_resource_policy_schema.TypeSet, - Optional: true, - ForceNew: true, - Description: `Cloud Storage bucket location to store the auto snapshot -(regional or multi-regional)`, - MaxItems: 1, - Elem: &resource_compute_resource_policy_schema.Schema{ - Type: resource_compute_resource_policy_schema.TypeString, - }, - Set: resource_compute_resource_policy_schema.HashString, - AtLeastOneOf: []string{"snapshot_schedule_policy.0.snapshot_properties.0.labels", 
"snapshot_schedule_policy.0.snapshot_properties.0.storage_locations", "snapshot_schedule_policy.0.snapshot_properties.0.guest_flush"}, - }, - }, - }, - }, - }, - }, - ConflictsWith: []string{"group_placement_policy", "instance_schedule_policy"}, - }, - "project": { - Type: resource_compute_resource_policy_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_resource_policy_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func computeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksSchema() *resource_compute_resource_policy_schema.Resource { - return &resource_compute_resource_policy_schema.Resource{ - Schema: map[string]*resource_compute_resource_policy_schema.Schema{ - "day": { - Type: resource_compute_resource_policy_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_compute_resource_policy_validation.StringInSlice([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}, false), - Description: `The day of the week to create the snapshot. e.g. MONDAY Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, - }, - "start_time": { - Type: resource_compute_resource_policy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Time within the window to start the operations. 
-It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT.`, - }, - }, - } -} - -func resourceComputeResourcePolicyCreate(d *resource_compute_resource_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeResourcePolicyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_resource_policy_reflect.ValueOf(nameProp)) && (ok || !resource_compute_resource_policy_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeResourcePolicyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_resource_policy_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_resource_policy_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - snapshotSchedulePolicyProp, err := expandComputeResourcePolicySnapshotSchedulePolicy(d.Get("snapshot_schedule_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("snapshot_schedule_policy"); !isEmptyValue(resource_compute_resource_policy_reflect.ValueOf(snapshotSchedulePolicyProp)) && (ok || !resource_compute_resource_policy_reflect.DeepEqual(v, snapshotSchedulePolicyProp)) { - obj["snapshotSchedulePolicy"] = snapshotSchedulePolicyProp - } - groupPlacementPolicyProp, err := expandComputeResourcePolicyGroupPlacementPolicy(d.Get("group_placement_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("group_placement_policy"); !isEmptyValue(resource_compute_resource_policy_reflect.ValueOf(groupPlacementPolicyProp)) && (ok || !resource_compute_resource_policy_reflect.DeepEqual(v, groupPlacementPolicyProp)) { - 
obj["groupPlacementPolicy"] = groupPlacementPolicyProp - } - instanceSchedulePolicyProp, err := expandComputeResourcePolicyInstanceSchedulePolicy(d.Get("instance_schedule_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance_schedule_policy"); !isEmptyValue(resource_compute_resource_policy_reflect.ValueOf(instanceSchedulePolicyProp)) && (ok || !resource_compute_resource_policy_reflect.DeepEqual(v, instanceSchedulePolicyProp)) { - obj["instanceSchedulePolicy"] = instanceSchedulePolicyProp - } - regionProp, err := expandComputeResourcePolicyRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_resource_policy_reflect.ValueOf(regionProp)) && (ok || !resource_compute_resource_policy_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/resourcePolicies") - if err != nil { - return err - } - - resource_compute_resource_policy_log.Printf("[DEBUG] Creating new ResourcePolicy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_resource_policy_fmt.Errorf("Error fetching project for ResourcePolicy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_resource_policy_schema.TimeoutCreate)) - if err != nil { - return resource_compute_resource_policy_fmt.Errorf("Error creating ResourcePolicy: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}") - if err != nil { - return resource_compute_resource_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, 
res, project, "Creating ResourcePolicy", userAgent, - d.Timeout(resource_compute_resource_policy_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_resource_policy_fmt.Errorf("Error waiting to create ResourcePolicy: %s", err) - } - - resource_compute_resource_policy_log.Printf("[DEBUG] Finished creating ResourcePolicy %q: %#v", d.Id(), res) - - return resourceComputeResourcePolicyRead(d, meta) -} - -func resourceComputeResourcePolicyRead(d *resource_compute_resource_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_resource_policy_fmt.Errorf("Error fetching project for ResourcePolicy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_resource_policy_fmt.Sprintf("ComputeResourcePolicy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_resource_policy_fmt.Errorf("Error reading ResourcePolicy: %s", err) - } - - if err := d.Set("name", flattenComputeResourcePolicyName(res["name"], d, config)); err != nil { - return resource_compute_resource_policy_fmt.Errorf("Error reading ResourcePolicy: %s", err) - } - if err := d.Set("description", flattenComputeResourcePolicyDescription(res["description"], d, config)); err != nil { - return resource_compute_resource_policy_fmt.Errorf("Error reading ResourcePolicy: %s", err) - } - if err := d.Set("snapshot_schedule_policy", 
flattenComputeResourcePolicySnapshotSchedulePolicy(res["snapshotSchedulePolicy"], d, config)); err != nil { - return resource_compute_resource_policy_fmt.Errorf("Error reading ResourcePolicy: %s", err) - } - if err := d.Set("group_placement_policy", flattenComputeResourcePolicyGroupPlacementPolicy(res["groupPlacementPolicy"], d, config)); err != nil { - return resource_compute_resource_policy_fmt.Errorf("Error reading ResourcePolicy: %s", err) - } - if err := d.Set("instance_schedule_policy", flattenComputeResourcePolicyInstanceSchedulePolicy(res["instanceSchedulePolicy"], d, config)); err != nil { - return resource_compute_resource_policy_fmt.Errorf("Error reading ResourcePolicy: %s", err) - } - if err := d.Set("region", flattenComputeResourcePolicyRegion(res["region"], d, config)); err != nil { - return resource_compute_resource_policy_fmt.Errorf("Error reading ResourcePolicy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_resource_policy_fmt.Errorf("Error reading ResourcePolicy: %s", err) - } - - return nil -} - -func resourceComputeResourcePolicyDelete(d *resource_compute_resource_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_resource_policy_fmt.Errorf("Error fetching project for ResourcePolicy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_resource_policy_log.Printf("[DEBUG] Deleting ResourcePolicy %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_resource_policy_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ResourcePolicy") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting ResourcePolicy", userAgent, - d.Timeout(resource_compute_resource_policy_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_resource_policy_log.Printf("[DEBUG] Finished deleting ResourcePolicy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeResourcePolicyImport(d *resource_compute_resource_policy_schema.ResourceData, meta interface{}) ([]*resource_compute_resource_policy_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/resourcePolicies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}") - if err != nil { - return nil, resource_compute_resource_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_resource_policy_schema.ResourceData{d}, nil -} - -func flattenComputeResourcePolicyName(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeResourcePolicyDescription(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeResourcePolicySnapshotSchedulePolicy(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["schedule"] = - 
flattenComputeResourcePolicySnapshotSchedulePolicySchedule(original["schedule"], d, config) - transformed["retention_policy"] = - flattenComputeResourcePolicySnapshotSchedulePolicyRetentionPolicy(original["retentionPolicy"], d, config) - transformed["snapshot_properties"] = - flattenComputeResourcePolicySnapshotSchedulePolicySnapshotProperties(original["snapshotProperties"], d, config) - return []interface{}{transformed} -} - -func flattenComputeResourcePolicySnapshotSchedulePolicySchedule(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hourly_schedule"] = - flattenComputeResourcePolicySnapshotSchedulePolicyScheduleHourlySchedule(original["hourlySchedule"], d, config) - transformed["daily_schedule"] = - flattenComputeResourcePolicySnapshotSchedulePolicyScheduleDailySchedule(original["dailySchedule"], d, config) - transformed["weekly_schedule"] = - flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklySchedule(original["weeklySchedule"], d, config) - return []interface{}{transformed} -} - -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleHourlySchedule(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hours_in_cycle"] = - flattenComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleHoursInCycle(original["hoursInCycle"], d, config) - transformed["start_time"] = - flattenComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleStartTime(original["startTime"], d, config) - return []interface{}{transformed} -} - -func 
flattenComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleHoursInCycle(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_resource_policy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleStartTime(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleDailySchedule(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["days_in_cycle"] = - flattenComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleDaysInCycle(original["daysInCycle"], d, config) - transformed["start_time"] = - flattenComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleStartTime(original["startTime"], d, config) - return []interface{}{transformed} -} - -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleDaysInCycle(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_resource_policy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleStartTime(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklySchedule(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["day_of_weeks"] = - flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeks(original["dayOfWeeks"], d, config) - return []interface{}{transformed} -} - -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeks(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_compute_resource_policy_schema.NewSet(resource_compute_resource_policy_schema.HashResource(computeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "start_time": flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksStartTime(original["startTime"], d, config), - "day": flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksDay(original["day"], d, config), - }) - } - return transformed -} - -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksStartTime(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksDay(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeResourcePolicySnapshotSchedulePolicyRetentionPolicy(v interface{}, d 
*resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["max_retention_days"] = - flattenComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyMaxRetentionDays(original["maxRetentionDays"], d, config) - transformed["on_source_disk_delete"] = - flattenComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete(original["onSourceDiskDelete"], d, config) - return []interface{}{transformed} -} - -func flattenComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyMaxRetentionDays(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_resource_policy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeResourcePolicySnapshotSchedulePolicySnapshotProperties(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["labels"] = - flattenComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesLabels(original["labels"], d, config) - transformed["storage_locations"] = - flattenComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesStorageLocations(original["storageLocations"], d, config) - transformed["guest_flush"] = - 
flattenComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesGuestFlush(original["guestFlush"], d, config) - return []interface{}{transformed} -} - -func flattenComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesLabels(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesStorageLocations(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_resource_policy_schema.NewSet(resource_compute_resource_policy_schema.HashString, v.([]interface{})) -} - -func flattenComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesGuestFlush(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeResourcePolicyGroupPlacementPolicy(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["vm_count"] = - flattenComputeResourcePolicyGroupPlacementPolicyVmCount(original["vmCount"], d, config) - transformed["availability_domain_count"] = - flattenComputeResourcePolicyGroupPlacementPolicyAvailabilityDomainCount(original["availabilityDomainCount"], d, config) - transformed["collocation"] = - flattenComputeResourcePolicyGroupPlacementPolicyCollocation(original["collocation"], d, config) - return []interface{}{transformed} -} - -func flattenComputeResourcePolicyGroupPlacementPolicyVmCount(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_resource_policy_strconv.ParseInt(strVal, 10, 64); err == nil { - return 
intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeResourcePolicyGroupPlacementPolicyAvailabilityDomainCount(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_resource_policy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeResourcePolicyGroupPlacementPolicyCollocation(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeResourcePolicyInstanceSchedulePolicy(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["vm_start_schedule"] = - flattenComputeResourcePolicyInstanceSchedulePolicyVmStartSchedule(original["vmStartSchedule"], d, config) - transformed["vm_stop_schedule"] = - flattenComputeResourcePolicyInstanceSchedulePolicyVmStopSchedule(original["vmStopSchedule"], d, config) - transformed["time_zone"] = - flattenComputeResourcePolicyInstanceSchedulePolicyTimeZone(original["timeZone"], d, config) - transformed["start_time"] = - flattenComputeResourcePolicyInstanceSchedulePolicyStartTime(original["startTime"], d, config) - transformed["expiration_time"] = - flattenComputeResourcePolicyInstanceSchedulePolicyExpirationTime(original["expirationTime"], d, config) - return []interface{}{transformed} -} - -func flattenComputeResourcePolicyInstanceSchedulePolicyVmStartSchedule(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - 
original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["schedule"] = - flattenComputeResourcePolicyInstanceSchedulePolicyVmStartScheduleSchedule(original["schedule"], d, config) - return []interface{}{transformed} -} - -func flattenComputeResourcePolicyInstanceSchedulePolicyVmStartScheduleSchedule(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeResourcePolicyInstanceSchedulePolicyVmStopSchedule(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["schedule"] = - flattenComputeResourcePolicyInstanceSchedulePolicyVmStopScheduleSchedule(original["schedule"], d, config) - return []interface{}{transformed} -} - -func flattenComputeResourcePolicyInstanceSchedulePolicyVmStopScheduleSchedule(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeResourcePolicyInstanceSchedulePolicyTimeZone(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeResourcePolicyInstanceSchedulePolicyStartTime(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeResourcePolicyInstanceSchedulePolicyExpirationTime(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeResourcePolicyRegion(v interface{}, d *resource_compute_resource_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func 
expandComputeResourcePolicyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSchedule, err := expandComputeResourcePolicySnapshotSchedulePolicySchedule(original["schedule"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedSchedule); val.IsValid() && !isEmptyValue(val) { - transformed["schedule"] = transformedSchedule - } - - transformedRetentionPolicy, err := expandComputeResourcePolicySnapshotSchedulePolicyRetentionPolicy(original["retention_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedRetentionPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["retentionPolicy"] = transformedRetentionPolicy - } - - transformedSnapshotProperties, err := expandComputeResourcePolicySnapshotSchedulePolicySnapshotProperties(original["snapshot_properties"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedSnapshotProperties); val.IsValid() && !isEmptyValue(val) { - transformed["snapshotProperties"] = transformedSnapshotProperties - } - - return transformed, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicySchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original 
:= raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHourlySchedule, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleHourlySchedule(original["hourly_schedule"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedHourlySchedule); val.IsValid() && !isEmptyValue(val) { - transformed["hourlySchedule"] = transformedHourlySchedule - } - - transformedDailySchedule, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleDailySchedule(original["daily_schedule"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedDailySchedule); val.IsValid() && !isEmptyValue(val) { - transformed["dailySchedule"] = transformedDailySchedule - } - - transformedWeeklySchedule, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklySchedule(original["weekly_schedule"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedWeeklySchedule); val.IsValid() && !isEmptyValue(val) { - transformed["weeklySchedule"] = transformedWeeklySchedule - } - - return transformed, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleHourlySchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHoursInCycle, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleHoursInCycle(original["hours_in_cycle"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedHoursInCycle); val.IsValid() && !isEmptyValue(val) { - transformed["hoursInCycle"] = transformedHoursInCycle - } - - 
transformedStartTime, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleStartTime(original["start_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { - transformed["startTime"] = transformedStartTime - } - - return transformed, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleHoursInCycle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleHourlyScheduleStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleDailySchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDaysInCycle, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleDaysInCycle(original["days_in_cycle"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedDaysInCycle); val.IsValid() && !isEmptyValue(val) { - transformed["daysInCycle"] = transformedDaysInCycle - } - - transformedStartTime, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleStartTime(original["start_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { - transformed["startTime"] = transformedStartTime - } - - return transformed, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleDaysInCycle(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleDailyScheduleStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklySchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDayOfWeeks, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeks(original["day_of_weeks"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedDayOfWeeks); val.IsValid() && !isEmptyValue(val) { - transformed["dayOfWeeks"] = transformedDayOfWeeks - } - - return transformed, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_resource_policy_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedStartTime, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksStartTime(original["start_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { - transformed["startTime"] = transformedStartTime - } - - transformedDay, err := expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksDay(original["day"], d, config) - if err != nil { - 
return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { - transformed["day"] = transformedDay - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicyScheduleWeeklyScheduleDayOfWeeksDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicyRetentionPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaxRetentionDays, err := expandComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyMaxRetentionDays(original["max_retention_days"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedMaxRetentionDays); val.IsValid() && !isEmptyValue(val) { - transformed["maxRetentionDays"] = transformedMaxRetentionDays - } - - transformedOnSourceDiskDelete, err := expandComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete(original["on_source_disk_delete"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedOnSourceDiskDelete); val.IsValid() && !isEmptyValue(val) { - transformed["onSourceDiskDelete"] = transformedOnSourceDiskDelete - } - - return transformed, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyMaxRetentionDays(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, 
nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicyRetentionPolicyOnSourceDiskDelete(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicySnapshotProperties(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLabels, err := expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesLabels(original["labels"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - transformedStorageLocations, err := expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesStorageLocations(original["storage_locations"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedStorageLocations); val.IsValid() && !isEmptyValue(val) { - transformed["storageLocations"] = transformedStorageLocations - } - - transformedGuestFlush, err := expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesGuestFlush(original["guest_flush"], d, config) - if err != nil { - return nil, err - } else { - transformed["guestFlush"] = transformedGuestFlush - } - - return transformed, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func 
expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesStorageLocations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_resource_policy_schema.Set).List() - return v, nil -} - -func expandComputeResourcePolicySnapshotSchedulePolicySnapshotPropertiesGuestFlush(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicyGroupPlacementPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedVmCount, err := expandComputeResourcePolicyGroupPlacementPolicyVmCount(original["vm_count"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedVmCount); val.IsValid() && !isEmptyValue(val) { - transformed["vmCount"] = transformedVmCount - } - - transformedAvailabilityDomainCount, err := expandComputeResourcePolicyGroupPlacementPolicyAvailabilityDomainCount(original["availability_domain_count"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedAvailabilityDomainCount); val.IsValid() && !isEmptyValue(val) { - transformed["availabilityDomainCount"] = transformedAvailabilityDomainCount - } - - transformedCollocation, err := expandComputeResourcePolicyGroupPlacementPolicyCollocation(original["collocation"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedCollocation); val.IsValid() && !isEmptyValue(val) { - transformed["collocation"] = transformedCollocation - } - - return transformed, nil -} - -func expandComputeResourcePolicyGroupPlacementPolicyVmCount(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicyGroupPlacementPolicyAvailabilityDomainCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicyGroupPlacementPolicyCollocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicyInstanceSchedulePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedVmStartSchedule, err := expandComputeResourcePolicyInstanceSchedulePolicyVmStartSchedule(original["vm_start_schedule"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedVmStartSchedule); val.IsValid() && !isEmptyValue(val) { - transformed["vmStartSchedule"] = transformedVmStartSchedule - } - - transformedVmStopSchedule, err := expandComputeResourcePolicyInstanceSchedulePolicyVmStopSchedule(original["vm_stop_schedule"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedVmStopSchedule); val.IsValid() && !isEmptyValue(val) { - transformed["vmStopSchedule"] = transformedVmStopSchedule - } - - transformedTimeZone, err := expandComputeResourcePolicyInstanceSchedulePolicyTimeZone(original["time_zone"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedTimeZone); val.IsValid() && !isEmptyValue(val) { - transformed["timeZone"] = transformedTimeZone - } - - transformedStartTime, err := expandComputeResourcePolicyInstanceSchedulePolicyStartTime(original["start_time"], d, config) - if err != nil { - return nil, err - } else if val 
:= resource_compute_resource_policy_reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { - transformed["startTime"] = transformedStartTime - } - - transformedExpirationTime, err := expandComputeResourcePolicyInstanceSchedulePolicyExpirationTime(original["expiration_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedExpirationTime); val.IsValid() && !isEmptyValue(val) { - transformed["expirationTime"] = transformedExpirationTime - } - - return transformed, nil -} - -func expandComputeResourcePolicyInstanceSchedulePolicyVmStartSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSchedule, err := expandComputeResourcePolicyInstanceSchedulePolicyVmStartScheduleSchedule(original["schedule"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_resource_policy_reflect.ValueOf(transformedSchedule); val.IsValid() && !isEmptyValue(val) { - transformed["schedule"] = transformedSchedule - } - - return transformed, nil -} - -func expandComputeResourcePolicyInstanceSchedulePolicyVmStartScheduleSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicyInstanceSchedulePolicyVmStopSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSchedule, err := expandComputeResourcePolicyInstanceSchedulePolicyVmStopScheduleSchedule(original["schedule"], d, config) - if err != nil { - return nil, err - } else if val 
:= resource_compute_resource_policy_reflect.ValueOf(transformedSchedule); val.IsValid() && !isEmptyValue(val) { - transformed["schedule"] = transformedSchedule - } - - return transformed, nil -} - -func expandComputeResourcePolicyInstanceSchedulePolicyVmStopScheduleSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicyInstanceSchedulePolicyTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicyInstanceSchedulePolicyStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicyInstanceSchedulePolicyExpirationTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeResourcePolicyRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_resource_policy_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeRoute() *resource_compute_route_schema.Resource { - return &resource_compute_route_schema.Resource{ - Create: resourceComputeRouteCreate, - Read: resourceComputeRouteRead, - Delete: resourceComputeRouteDelete, - - Importer: &resource_compute_route_schema.ResourceImporter{ - State: resourceComputeRouteImport, - }, - - Timeouts: &resource_compute_route_schema.ResourceTimeout{ - Create: resource_compute_route_schema.DefaultTimeout(4 * resource_compute_route_time.Minute), - Delete: resource_compute_route_schema.DefaultTimeout(4 * resource_compute_route_time.Minute), - }, - - Schema: map[string]*resource_compute_route_schema.Schema{ - "dest_range": { - Type: resource_compute_route_schema.TypeString, - Required: true, - ForceNew: true, - 
Description: `The destination range of outgoing packets that this route applies to. -Only IPv4 is supported.`, - }, - "name": { - Type: resource_compute_route_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[a-z]([-a-z0-9]*[a-z0-9])?$`), - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the -last character, which cannot be a dash.`, - }, - "network": { - Type: resource_compute_route_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The network that this route applies to.`, - }, - "description": { - Type: resource_compute_route_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. Provide this property -when you create the resource.`, - }, - "next_hop_gateway": { - Type: resource_compute_route_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL to a gateway that should handle matching packets. 
-Currently, you can only specify the internet gateway, using a full or -partial valid URL: -* 'https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway' -* 'projects/project/global/gateways/default-internet-gateway' -* 'global/gateways/default-internet-gateway' -* The string 'default-internet-gateway'.`, - ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, - }, - "next_hop_ilb": { - Type: resource_compute_route_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareIpAddressOrSelfLinkOrResourceName, - Description: `The IP address or URL to a forwarding rule of type -loadBalancingScheme=INTERNAL that should handle matching -packets. - -With the GA provider you can only specify the forwarding -rule as a partial or full URL. For example, the following -are all valid values: -* 10.128.0.56 -* https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule -* regions/region/forwardingRules/forwardingRule - -When the beta provider, you can also specify the IP address -of a forwarding rule from the same VPC or any peered VPC. - -Note that this can only be used when the destinationRange is -a public (non-RFC 1918) IP CIDR range.`, - ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, - }, - "next_hop_instance": { - Type: resource_compute_route_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL to an instance that should handle matching packets. -You can specify this as a full or partial URL. 
For example: -* 'https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance' -* 'projects/project/zones/zone/instances/instance' -* 'zones/zone/instances/instance' -* Just the instance name, with the zone in 'next_hop_instance_zone'.`, - ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, - }, - "next_hop_ip": { - Type: resource_compute_route_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Network IP address of an instance that should handle matching packets.`, - ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, - }, - "next_hop_vpn_tunnel": { - Type: resource_compute_route_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL to a VpnTunnel that should handle matching packets.`, - ExactlyOneOf: []string{"next_hop_gateway", "next_hop_instance", "next_hop_ip", "next_hop_vpn_tunnel", "next_hop_ilb"}, - }, - "priority": { - Type: resource_compute_route_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The priority of this route. Priority is used to break ties in cases -where there is more than one matching route of equal prefix length. - -In the case of two routes with equal prefix length, the one with the -lowest-numbered priority value wins. - -Default value is 1000. 
Valid range is 0 through 65535.`, - Default: 1000, - }, - "tags": { - Type: resource_compute_route_schema.TypeSet, - Optional: true, - ForceNew: true, - Description: `A list of instance tags to which this route applies.`, - Elem: &resource_compute_route_schema.Schema{ - Type: resource_compute_route_schema.TypeString, - }, - Set: resource_compute_route_schema.HashString, - }, - "next_hop_network": { - Type: resource_compute_route_schema.TypeString, - Computed: true, - Description: `URL to a Network that should handle matching packets.`, - }, - "next_hop_instance_zone": { - Type: resource_compute_route_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: "The zone of the instance specified in next_hop_instance. Omit if next_hop_instance is specified as a URL.", - }, - "project": { - Type: resource_compute_route_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_route_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRouteCreate(d *resource_compute_route_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - destRangeProp, err := expandComputeRouteDestRange(d.Get("dest_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("dest_range"); !isEmptyValue(resource_compute_route_reflect.ValueOf(destRangeProp)) && (ok || !resource_compute_route_reflect.DeepEqual(v, destRangeProp)) { - obj["destRange"] = destRangeProp - } - descriptionProp, err := expandComputeRouteDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_route_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_route_reflect.DeepEqual(v, descriptionProp)) { - 
obj["description"] = descriptionProp - } - nameProp, err := expandComputeRouteName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_route_reflect.ValueOf(nameProp)) && (ok || !resource_compute_route_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - networkProp, err := expandComputeRouteNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_compute_route_reflect.ValueOf(networkProp)) && (ok || !resource_compute_route_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - priorityProp, err := expandComputeRoutePriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); ok || !resource_compute_route_reflect.DeepEqual(v, priorityProp) { - obj["priority"] = priorityProp - } - tagsProp, err := expandComputeRouteTags(d.Get("tags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tags"); !isEmptyValue(resource_compute_route_reflect.ValueOf(tagsProp)) && (ok || !resource_compute_route_reflect.DeepEqual(v, tagsProp)) { - obj["tags"] = tagsProp - } - nextHopGatewayProp, err := expandComputeRouteNextHopGateway(d.Get("next_hop_gateway"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("next_hop_gateway"); !isEmptyValue(resource_compute_route_reflect.ValueOf(nextHopGatewayProp)) && (ok || !resource_compute_route_reflect.DeepEqual(v, nextHopGatewayProp)) { - obj["nextHopGateway"] = nextHopGatewayProp - } - nextHopInstanceProp, err := expandComputeRouteNextHopInstance(d.Get("next_hop_instance"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("next_hop_instance"); !isEmptyValue(resource_compute_route_reflect.ValueOf(nextHopInstanceProp)) && (ok || !resource_compute_route_reflect.DeepEqual(v, nextHopInstanceProp)) { - obj["nextHopInstance"] = 
nextHopInstanceProp - } - nextHopIpProp, err := expandComputeRouteNextHopIp(d.Get("next_hop_ip"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("next_hop_ip"); !isEmptyValue(resource_compute_route_reflect.ValueOf(nextHopIpProp)) && (ok || !resource_compute_route_reflect.DeepEqual(v, nextHopIpProp)) { - obj["nextHopIp"] = nextHopIpProp - } - nextHopVpnTunnelProp, err := expandComputeRouteNextHopVpnTunnel(d.Get("next_hop_vpn_tunnel"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("next_hop_vpn_tunnel"); !isEmptyValue(resource_compute_route_reflect.ValueOf(nextHopVpnTunnelProp)) && (ok || !resource_compute_route_reflect.DeepEqual(v, nextHopVpnTunnelProp)) { - obj["nextHopVpnTunnel"] = nextHopVpnTunnelProp - } - nextHopIlbProp, err := expandComputeRouteNextHopIlb(d.Get("next_hop_ilb"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("next_hop_ilb"); !isEmptyValue(resource_compute_route_reflect.ValueOf(nextHopIlbProp)) && (ok || !resource_compute_route_reflect.DeepEqual(v, nextHopIlbProp)) { - obj["nextHopIlb"] = nextHopIlbProp - } - - lockName, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/peerings") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/routes") - if err != nil { - return err - } - - resource_compute_route_log.Printf("[DEBUG] Creating new Route: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_route_fmt.Errorf("Error fetching project for Route: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_route_schema.TimeoutCreate), 
isPeeringOperationInProgress) - if err != nil { - return resource_compute_route_fmt.Errorf("Error creating Route: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/routes/{{name}}") - if err != nil { - return resource_compute_route_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating Route", userAgent, - d.Timeout(resource_compute_route_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_route_fmt.Errorf("Error waiting to create Route: %s", err) - } - - resource_compute_route_log.Printf("[DEBUG] Finished creating Route %q: %#v", d.Id(), res) - - return resourceComputeRouteRead(d, meta) -} - -func resourceComputeRouteRead(d *resource_compute_route_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/routes/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_route_fmt.Errorf("Error fetching project for Route: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isPeeringOperationInProgress) - if err != nil { - return handleNotFoundError(err, d, resource_compute_route_fmt.Sprintf("ComputeRoute %q", d.Id())) - } - - res, err = resourceComputeRouteDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_route_log.Printf("[DEBUG] Removing ComputeRoute because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_compute_route_fmt.Errorf("Error reading Route: 
%s", err) - } - - if err := d.Set("dest_range", flattenComputeRouteDestRange(res["destRange"], d, config)); err != nil { - return resource_compute_route_fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("description", flattenComputeRouteDescription(res["description"], d, config)); err != nil { - return resource_compute_route_fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("name", flattenComputeRouteName(res["name"], d, config)); err != nil { - return resource_compute_route_fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("network", flattenComputeRouteNetwork(res["network"], d, config)); err != nil { - return resource_compute_route_fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("priority", flattenComputeRoutePriority(res["priority"], d, config)); err != nil { - return resource_compute_route_fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("tags", flattenComputeRouteTags(res["tags"], d, config)); err != nil { - return resource_compute_route_fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("next_hop_gateway", flattenComputeRouteNextHopGateway(res["nextHopGateway"], d, config)); err != nil { - return resource_compute_route_fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("next_hop_instance", flattenComputeRouteNextHopInstance(res["nextHopInstance"], d, config)); err != nil { - return resource_compute_route_fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("next_hop_ip", flattenComputeRouteNextHopIp(res["nextHopIp"], d, config)); err != nil { - return resource_compute_route_fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("next_hop_vpn_tunnel", flattenComputeRouteNextHopVpnTunnel(res["nextHopVpnTunnel"], d, config)); err != nil { - return resource_compute_route_fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("next_hop_network", flattenComputeRouteNextHopNetwork(res["nextHopNetwork"], d, config)); err != nil { - 
return resource_compute_route_fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("next_hop_ilb", flattenComputeRouteNextHopIlb(res["nextHopIlb"], d, config)); err != nil { - return resource_compute_route_fmt.Errorf("Error reading Route: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_route_fmt.Errorf("Error reading Route: %s", err) - } - - return nil -} - -func resourceComputeRouteDelete(d *resource_compute_route_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_route_fmt.Errorf("Error fetching project for Route: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "projects/{{project}}/global/networks/{{network}}/peerings") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/routes/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_route_log.Printf("[DEBUG] Deleting Route %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_route_schema.TimeoutDelete), isPeeringOperationInProgress) - if err != nil { - return handleNotFoundError(err, d, "Route") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting Route", userAgent, - d.Timeout(resource_compute_route_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_route_log.Printf("[DEBUG] Finished deleting Route %q: %#v", d.Id(), res) - return nil -} - -func 
resourceComputeRouteImport(d *resource_compute_route_schema.ResourceData, meta interface{}) ([]*resource_compute_route_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/routes/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/routes/{{name}}") - if err != nil { - return nil, resource_compute_route_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_route_schema.ResourceData{d}, nil -} - -func flattenComputeRouteDestRange(v interface{}, d *resource_compute_route_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouteDescription(v interface{}, d *resource_compute_route_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouteName(v interface{}, d *resource_compute_route_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouteNetwork(v interface{}, d *resource_compute_route_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRoutePriority(v interface{}, d *resource_compute_route_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_route_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRouteTags(v interface{}, d *resource_compute_route_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_route_schema.NewSet(resource_compute_route_schema.HashString, v.([]interface{})) -} - -func flattenComputeRouteNextHopGateway(v interface{}, d 
*resource_compute_route_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouteNextHopInstance(v interface{}, d *resource_compute_route_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRouteNextHopIp(v interface{}, d *resource_compute_route_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouteNextHopVpnTunnel(v interface{}, d *resource_compute_route_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRouteNextHopNetwork(v interface{}, d *resource_compute_route_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouteNextHopIlb(v interface{}, d *resource_compute_route_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeRouteDestRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouteDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouteName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouteNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_route_fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRoutePriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouteTags(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v.(*resource_compute_route_schema.Set).List(), nil -} - -func 
expandComputeRouteNextHopGateway(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == "default-internet-gateway" { - return replaceVars(d, config, "projects/{{project}}/global/gateways/default-internet-gateway") - } else { - return v, nil - } -} - -func expandComputeRouteNextHopInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == "" { - return v, nil - } - val, err := parseZonalFieldValue("instances", v.(string), "project", "next_hop_instance_zone", d, config, true) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - nextInstance, err := config.NewComputeClient(userAgent).Instances.Get(val.Project, val.Zone, val.Name).Do() - if err != nil { - return nil, err - } - return nextInstance.SelfLink, nil -} - -func expandComputeRouteNextHopIp(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouteNextHopVpnTunnel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("vpnTunnels", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_route_fmt.Errorf("Invalid value for next_hop_vpn_tunnel: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRouteNextHopIlb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeRouteDecoder(d *resource_compute_route_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if v, ok := res["nextHopInstance"]; ok { - val, err := parseZonalFieldValue("instances", v.(string), "project", "next_hop_instance_zone", d, meta.(*Config), true) - if err != nil { - return nil, err - } - if err := d.Set("next_hop_instance_zone", val.Zone); err != nil { - return nil, 
resource_compute_route_fmt.Errorf("Error setting next_hop_instance_zone: %s", err) - } - res["nextHopInstance"] = val.RelativeLink() - } - - return res, nil -} - -func resourceComputeRouterCustomDiff(_ resource_compute_router_context.Context, diff *resource_compute_router_schema.ResourceDiff, meta interface{}) error { - - block := diff.Get("bgp.0").(map[string]interface{}) - advertiseMode := block["advertise_mode"] - advertisedGroups := block["advertised_groups"].([]interface{}) - advertisedIPRanges := block["advertised_ip_ranges"].([]interface{}) - - if advertiseMode == "DEFAULT" && len(advertisedGroups) != 0 { - return resource_compute_router_fmt.Errorf("Error in bgp: advertised_groups cannot be specified when using advertise_mode DEFAULT") - } - if advertiseMode == "DEFAULT" && len(advertisedIPRanges) != 0 { - return resource_compute_router_fmt.Errorf("Error in bgp: advertised_ip_ranges cannot be specified when using advertise_mode DEFAULT") - } - - return nil -} - -func resourceComputeRouter() *resource_compute_router_schema.Resource { - return &resource_compute_router_schema.Resource{ - Create: resourceComputeRouterCreate, - Read: resourceComputeRouterRead, - Update: resourceComputeRouterUpdate, - Delete: resourceComputeRouterDelete, - - Importer: &resource_compute_router_schema.ResourceImporter{ - State: resourceComputeRouterImport, - }, - - Timeouts: &resource_compute_router_schema.ResourceTimeout{ - Create: resource_compute_router_schema.DefaultTimeout(4 * resource_compute_router_time.Minute), - Update: resource_compute_router_schema.DefaultTimeout(4 * resource_compute_router_time.Minute), - Delete: resource_compute_router_schema.DefaultTimeout(4 * resource_compute_router_time.Minute), - }, - - CustomizeDiff: resourceComputeRouterCustomDiff, - - Schema: map[string]*resource_compute_router_schema.Schema{ - "name": { - Type: resource_compute_router_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCPName, - Description: `Name of 
the resource. The name must be 1-63 characters long, and -comply with RFC1035. Specifically, the name must be 1-63 characters -long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' -which means the first character must be a lowercase letter, and all -following characters must be a dash, lowercase letter, or digit, -except the last character, which cannot be a dash.`, - }, - "network": { - Type: resource_compute_router_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the network to which this router belongs.`, - }, - "bgp": { - Type: resource_compute_router_schema.TypeList, - Optional: true, - Description: `BGP information specific to this router.`, - MaxItems: 1, - Elem: &resource_compute_router_schema.Resource{ - Schema: map[string]*resource_compute_router_schema.Schema{ - "asn": { - Type: resource_compute_router_schema.TypeInt, - Required: true, - ValidateFunc: validateRFC6996Asn, - Description: `Local BGP Autonomous System Number (ASN). Must be an RFC6996 -private ASN, either 16-bit or 32-bit. The value will be fixed for -this router resource. All VPN tunnels that link to this router -will have the same local ASN.`, - }, - "advertise_mode": { - Type: resource_compute_router_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_router_validation.StringInSlice([]string{"DEFAULT", "CUSTOM", ""}, false), - Description: `User-specified flag to indicate which mode to use for advertisement. Default value: "DEFAULT" Possible values: ["DEFAULT", "CUSTOM"]`, - Default: "DEFAULT", - }, - "advertised_groups": { - Type: resource_compute_router_schema.TypeList, - Optional: true, - Description: `User-specified list of prefix groups to advertise in custom mode. -This field can only be populated if advertiseMode is CUSTOM and -is advertised to all peers of the router. These groups will be -advertised in addition to any specified prefixes. 
Leave this field -blank to advertise no custom groups. - -This enum field has the one valid value: ALL_SUBNETS`, - Elem: &resource_compute_router_schema.Schema{ - Type: resource_compute_router_schema.TypeString, - }, - }, - "advertised_ip_ranges": { - Type: resource_compute_router_schema.TypeList, - Optional: true, - Description: `User-specified list of individual IP ranges to advertise in -custom mode. This field can only be populated if advertiseMode -is CUSTOM and is advertised to all peers of the router. These IP -ranges will be advertised in addition to any specified groups. -Leave this field blank to advertise no custom IP ranges.`, - Elem: &resource_compute_router_schema.Resource{ - Schema: map[string]*resource_compute_router_schema.Schema{ - "range": { - Type: resource_compute_router_schema.TypeString, - Required: true, - Description: `The IP range to advertise. The value must be a -CIDR-formatted string.`, - }, - "description": { - Type: resource_compute_router_schema.TypeString, - Optional: true, - Description: `User-specified description for the IP range.`, - }, - }, - }, - }, - }, - }, - }, - "description": { - Type: resource_compute_router_schema.TypeString, - Optional: true, - Description: `An optional description of this resource.`, - }, - "encrypted_interconnect_router": { - Type: resource_compute_router_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Field to indicate if a router is dedicated to use with encrypted -Interconnect Attachment (IPsec-encrypted Cloud Interconnect feature). 
- -Not currently available publicly.`, - }, - "region": { - Type: resource_compute_router_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Region where the router resides.`, - }, - "creation_timestamp": { - Type: resource_compute_router_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: resource_compute_router_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_router_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRouterCreate(d *resource_compute_router_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeRouterName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_router_reflect.ValueOf(nameProp)) && (ok || !resource_compute_router_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeRouterDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); ok || !resource_compute_router_reflect.DeepEqual(v, descriptionProp) { - obj["description"] = descriptionProp - } - networkProp, err := expandComputeRouterNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_compute_router_reflect.ValueOf(networkProp)) && (ok || !resource_compute_router_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - bgpProp, err := expandComputeRouterBgp(d.Get("bgp"), d, config) - if err != nil { - return err - } else if v, ok 
:= d.GetOkExists("bgp"); ok || !resource_compute_router_reflect.DeepEqual(v, bgpProp) { - obj["bgp"] = bgpProp - } - encryptedInterconnectRouterProp, err := expandComputeRouterEncryptedInterconnectRouter(d.Get("encrypted_interconnect_router"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("encrypted_interconnect_router"); !isEmptyValue(resource_compute_router_reflect.ValueOf(encryptedInterconnectRouterProp)) && (ok || !resource_compute_router_reflect.DeepEqual(v, encryptedInterconnectRouterProp)) { - obj["encryptedInterconnectRouter"] = encryptedInterconnectRouterProp - } - regionProp, err := expandComputeRouterRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_router_reflect.ValueOf(regionProp)) && (ok || !resource_compute_router_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - lockName, err := replaceVars(d, config, "router/{{region}}/{{name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers") - if err != nil { - return err - } - - resource_compute_router_log.Printf("[DEBUG] Creating new Router: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_router_fmt.Errorf("Error fetching project for Router: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_router_schema.TimeoutCreate)) - if err != nil { - return resource_compute_router_fmt.Errorf("Error creating Router: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{name}}") - if err != nil { - return 
resource_compute_router_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating Router", userAgent, - d.Timeout(resource_compute_router_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_router_fmt.Errorf("Error waiting to create Router: %s", err) - } - - resource_compute_router_log.Printf("[DEBUG] Finished creating Router %q: %#v", d.Id(), res) - - return resourceComputeRouterRead(d, meta) -} - -func resourceComputeRouterRead(d *resource_compute_router_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_router_fmt.Errorf("Error fetching project for Router: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_router_fmt.Sprintf("ComputeRouter %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_router_fmt.Errorf("Error reading Router: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeRouterCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_router_fmt.Errorf("Error reading Router: %s", err) - } - if err := d.Set("name", flattenComputeRouterName(res["name"], d, config)); err != nil { - return resource_compute_router_fmt.Errorf("Error reading Router: %s", err) - } - if err := d.Set("description", flattenComputeRouterDescription(res["description"], d, 
config)); err != nil { - return resource_compute_router_fmt.Errorf("Error reading Router: %s", err) - } - if err := d.Set("network", flattenComputeRouterNetwork(res["network"], d, config)); err != nil { - return resource_compute_router_fmt.Errorf("Error reading Router: %s", err) - } - if err := d.Set("bgp", flattenComputeRouterBgp(res["bgp"], d, config)); err != nil { - return resource_compute_router_fmt.Errorf("Error reading Router: %s", err) - } - if err := d.Set("encrypted_interconnect_router", flattenComputeRouterEncryptedInterconnectRouter(res["encryptedInterconnectRouter"], d, config)); err != nil { - return resource_compute_router_fmt.Errorf("Error reading Router: %s", err) - } - if err := d.Set("region", flattenComputeRouterRegion(res["region"], d, config)); err != nil { - return resource_compute_router_fmt.Errorf("Error reading Router: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_router_fmt.Errorf("Error reading Router: %s", err) - } - - return nil -} - -func resourceComputeRouterUpdate(d *resource_compute_router_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_router_fmt.Errorf("Error fetching project for Router: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeRouterDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); ok || !resource_compute_router_reflect.DeepEqual(v, descriptionProp) { - obj["description"] = descriptionProp - } - bgpProp, err := expandComputeRouterBgp(d.Get("bgp"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bgp"); ok || 
!resource_compute_router_reflect.DeepEqual(v, bgpProp) { - obj["bgp"] = bgpProp - } - - lockName, err := replaceVars(d, config, "router/{{region}}/{{name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{name}}") - if err != nil { - return err - } - - resource_compute_router_log.Printf("[DEBUG] Updating Router %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_router_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_router_fmt.Errorf("Error updating Router %q: %s", d.Id(), err) - } else { - resource_compute_router_log.Printf("[DEBUG] Finished updating Router %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating Router", userAgent, - d.Timeout(resource_compute_router_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeRouterRead(d, meta) -} - -func resourceComputeRouterDelete(d *resource_compute_router_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_router_fmt.Errorf("Error fetching project for Router: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "router/{{region}}/{{name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - 
resource_compute_router_log.Printf("[DEBUG] Deleting Router %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_router_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Router") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting Router", userAgent, - d.Timeout(resource_compute_router_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_router_log.Printf("[DEBUG] Finished deleting Router %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRouterImport(d *resource_compute_router_schema.ResourceData, meta interface{}) ([]*resource_compute_router_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/routers/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{name}}") - if err != nil { - return nil, resource_compute_router_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_router_schema.ResourceData{d}, nil -} - -func flattenComputeRouterCreationTimestamp(v interface{}, d *resource_compute_router_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouterName(v interface{}, d *resource_compute_router_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouterDescription(v interface{}, d *resource_compute_router_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouterNetwork(v interface{}, d *resource_compute_router_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return 
ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeRouterBgp(v interface{}, d *resource_compute_router_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["asn"] = - flattenComputeRouterBgpAsn(original["asn"], d, config) - transformed["advertise_mode"] = - flattenComputeRouterBgpAdvertiseMode(original["advertiseMode"], d, config) - transformed["advertised_groups"] = - flattenComputeRouterBgpAdvertisedGroups(original["advertisedGroups"], d, config) - transformed["advertised_ip_ranges"] = - flattenComputeRouterBgpAdvertisedIpRanges(original["advertisedIpRanges"], d, config) - return []interface{}{transformed} -} - -func flattenComputeRouterBgpAsn(v interface{}, d *resource_compute_router_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_router_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeRouterBgpAdvertiseMode(v interface{}, d *resource_compute_router_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouterBgpAdvertisedGroups(v interface{}, d *resource_compute_router_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouterBgpAdvertisedIpRanges(v interface{}, d *resource_compute_router_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "range": flattenComputeRouterBgpAdvertisedIpRangesRange(original["range"], d, config), - 
"description": flattenComputeRouterBgpAdvertisedIpRangesDescription(original["description"], d, config), - }) - } - return transformed -} - -func flattenComputeRouterBgpAdvertisedIpRangesRange(v interface{}, d *resource_compute_router_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouterBgpAdvertisedIpRangesDescription(v interface{}, d *resource_compute_router_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouterEncryptedInterconnectRouter(v interface{}, d *resource_compute_router_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeRouterRegion(v interface{}, d *resource_compute_router_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeRouterName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_router_fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeRouterBgp(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAsn, err := expandComputeRouterBgpAsn(original["asn"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_router_reflect.ValueOf(transformedAsn); val.IsValid() && !isEmptyValue(val) { - transformed["asn"] = 
transformedAsn - } - - transformedAdvertiseMode, err := expandComputeRouterBgpAdvertiseMode(original["advertise_mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_router_reflect.ValueOf(transformedAdvertiseMode); val.IsValid() && !isEmptyValue(val) { - transformed["advertiseMode"] = transformedAdvertiseMode - } - - transformedAdvertisedGroups, err := expandComputeRouterBgpAdvertisedGroups(original["advertised_groups"], d, config) - if err != nil { - return nil, err - } else { - transformed["advertisedGroups"] = transformedAdvertisedGroups - } - - transformedAdvertisedIpRanges, err := expandComputeRouterBgpAdvertisedIpRanges(original["advertised_ip_ranges"], d, config) - if err != nil { - return nil, err - } else { - transformed["advertisedIpRanges"] = transformedAdvertisedIpRanges - } - - return transformed, nil -} - -func expandComputeRouterBgpAsn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterBgpAdvertiseMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterBgpAdvertisedGroups(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterBgpAdvertisedIpRanges(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRange, err := expandComputeRouterBgpAdvertisedIpRangesRange(original["range"], d, config) - if err != nil { - return nil, err - } else { - transformed["range"] = transformedRange - } - - transformedDescription, err := expandComputeRouterBgpAdvertisedIpRangesDescription(original["description"], d, config) - if err != nil { - return nil, err - } else { - 
transformed["description"] = transformedDescription - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeRouterBgpAdvertisedIpRangesRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterBgpAdvertisedIpRangesDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterEncryptedInterconnectRouter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeRouterRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_router_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeRouterInterface() *resource_compute_router_interface_schema.Resource { - return &resource_compute_router_interface_schema.Resource{ - Create: resourceComputeRouterInterfaceCreate, - Read: resourceComputeRouterInterfaceRead, - Delete: resourceComputeRouterInterfaceDelete, - Importer: &resource_compute_router_interface_schema.ResourceImporter{ - State: resourceComputeRouterInterfaceImportState, - }, - - Timeouts: &resource_compute_router_interface_schema.ResourceTimeout{ - Create: resource_compute_router_interface_schema.DefaultTimeout(4 * resource_compute_router_interface_time.Minute), - Delete: resource_compute_router_interface_schema.DefaultTimeout(4 * resource_compute_router_interface_time.Minute), - }, - - Schema: map[string]*resource_compute_router_interface_schema.Schema{ - "name": { - Type: resource_compute_router_interface_schema.TypeString, - Required: true, - ForceNew: true, - Description: `A unique name for the interface, required by GCE. 
Changing this forces a new interface to be created.`, - }, - "router": { - Type: resource_compute_router_interface_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the router this interface will be attached to. Changing this forces a new interface to be created.`, - }, - "vpn_tunnel": { - Type: resource_compute_router_interface_schema.TypeString, - ConflictsWith: []string{"interconnect_attachment"}, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - AtLeastOneOf: []string{"vpn_tunnel", "interconnect_attachment", "ip_range"}, - Description: `The name or resource link to the VPN tunnel this interface will be linked to. Changing this forces a new interface to be created. Only one of vpn_tunnel and interconnect_attachment can be specified.`, - }, - "interconnect_attachment": { - Type: resource_compute_router_interface_schema.TypeString, - ConflictsWith: []string{"vpn_tunnel"}, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - AtLeastOneOf: []string{"vpn_tunnel", "interconnect_attachment", "ip_range"}, - Description: `The name or resource link to the VLAN interconnect for this interface. Changing this forces a new interface to be created. Only one of vpn_tunnel and interconnect_attachment can be specified.`, - }, - "ip_range": { - Type: resource_compute_router_interface_schema.TypeString, - Optional: true, - ForceNew: true, - AtLeastOneOf: []string{"vpn_tunnel", "interconnect_attachment", "ip_range"}, - Description: `IP address and range of the interface. The IP range must be in the RFC3927 link-local IP space. Changing this forces a new interface to be created.`, - }, - "project": { - Type: resource_compute_router_interface_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which this interface's router belongs. If it is not provided, the provider project is used. 
Changing this forces a new interface to be created.`, - }, - - "region": { - Type: resource_compute_router_interface_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The region this interface's router sits in. If not specified, the project region will be used. Changing this forces a new interface to be created.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRouterInterfaceCreate(d *resource_compute_router_interface_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - routerName := d.Get("router").(string) - ifaceName := d.Get("name").(string) - - routerLock := getRouterLockName(region, routerName) - mutexKV.Lock(routerLock) - defer mutexKV.Unlock(routerLock) - - routersService := config.NewComputeClient(userAgent).Routers - router, err := routersService.Get(project, region, routerName).Do() - if err != nil { - if gerr, ok := err.(*resource_compute_router_interface_googleapi.Error); ok && gerr.Code == 404 { - resource_compute_router_interface_log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName) - d.SetId("") - - return nil - } - - return resource_compute_router_interface_fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) - } - - ifaces := router.Interfaces - for _, iface := range ifaces { - if iface.Name == ifaceName { - d.SetId("") - return resource_compute_router_interface_fmt.Errorf("Router %s has interface %s already", routerName, ifaceName) - } - } - - iface := &resource_compute_router_interface_compute.RouterInterface{Name: ifaceName} - - if ipVal, ok := d.GetOk("ip_range"); ok { - iface.IpRange = ipVal.(string) - } - - if vpnVal, ok := 
d.GetOk("vpn_tunnel"); ok { - vpnTunnel, err := getVpnTunnelLink(config, project, region, vpnVal.(string), userAgent) - if err != nil { - return err - } - iface.LinkedVpnTunnel = vpnTunnel - } - - if icVal, ok := d.GetOk("interconnect_attachment"); ok { - interconnectAttachment, err := getInterconnectAttachmentLink(config, project, region, icVal.(string), userAgent) - if err != nil { - return err - } - iface.LinkedInterconnectAttachment = interconnectAttachment - } - - resource_compute_router_interface_log.Printf("[INFO] Adding interface %s", ifaceName) - ifaces = append(ifaces, iface) - patchRouter := &resource_compute_router_interface_compute.Router{ - Interfaces: ifaces, - } - - resource_compute_router_interface_log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, ifaces) - op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() - if err != nil { - return resource_compute_router_interface_fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) - } - d.SetId(resource_compute_router_interface_fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName)) - err = computeOperationWaitTime(config, op, project, "Patching router", userAgent, d.Timeout(resource_compute_router_interface_schema.TimeoutCreate)) - if err != nil { - d.SetId("") - return resource_compute_router_interface_fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) - } - - return resourceComputeRouterInterfaceRead(d, meta) -} - -func resourceComputeRouterInterfaceRead(d *resource_compute_router_interface_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - routerName := d.Get("router").(string) - ifaceName := d.Get("name").(string) - - 
routersService := config.NewComputeClient(userAgent).Routers - router, err := routersService.Get(project, region, routerName).Do() - if err != nil { - if gerr, ok := err.(*resource_compute_router_interface_googleapi.Error); ok && gerr.Code == 404 { - resource_compute_router_interface_log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName) - d.SetId("") - - return nil - } - - return resource_compute_router_interface_fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) - } - - for _, iface := range router.Interfaces { - - if iface.Name == ifaceName { - d.SetId(resource_compute_router_interface_fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName)) - if err := d.Set("vpn_tunnel", iface.LinkedVpnTunnel); err != nil { - return resource_compute_router_interface_fmt.Errorf("Error setting vpn_tunnel: %s", err) - } - if err := d.Set("interconnect_attachment", iface.LinkedInterconnectAttachment); err != nil { - return resource_compute_router_interface_fmt.Errorf("Error setting interconnect_attachment: %s", err) - } - if err := d.Set("ip_range", iface.IpRange); err != nil { - return resource_compute_router_interface_fmt.Errorf("Error setting ip_range: %s", err) - } - if err := d.Set("region", region); err != nil { - return resource_compute_router_interface_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("project", project); err != nil { - return resource_compute_router_interface_fmt.Errorf("Error setting project: %s", err) - } - return nil - } - } - - resource_compute_router_interface_log.Printf("[WARN] Removing router interface %s/%s/%s because it is gone", region, routerName, ifaceName) - d.SetId("") - return nil -} - -func resourceComputeRouterInterfaceDelete(d *resource_compute_router_interface_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - region, 
err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - routerName := d.Get("router").(string) - ifaceName := d.Get("name").(string) - - routerLock := getRouterLockName(region, routerName) - mutexKV.Lock(routerLock) - defer mutexKV.Unlock(routerLock) - - routersService := config.NewComputeClient(userAgent).Routers - router, err := routersService.Get(project, region, routerName).Do() - if err != nil { - if gerr, ok := err.(*resource_compute_router_interface_googleapi.Error); ok && gerr.Code == 404 { - resource_compute_router_interface_log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName) - - return nil - } - - return resource_compute_router_interface_fmt.Errorf("Error Reading Router %s: %s", routerName, err) - } - - var ifaceFound bool - - newIfaces := make([]*resource_compute_router_interface_compute.RouterInterface, 0, len(router.Interfaces)) - for _, iface := range router.Interfaces { - - if iface.Name == ifaceName { - ifaceFound = true - continue - } else { - newIfaces = append(newIfaces, iface) - } - } - - if !ifaceFound { - resource_compute_router_interface_log.Printf("[DEBUG] Router %s/%s had no interface %s already", region, routerName, ifaceName) - d.SetId("") - return nil - } - - resource_compute_router_interface_log.Printf( - "[INFO] Removing interface %s from router %s/%s", ifaceName, region, routerName) - patchRouter := &resource_compute_router_interface_compute.Router{ - Interfaces: newIfaces, - } - - if len(newIfaces) == 0 { - patchRouter.ForceSendFields = append(patchRouter.ForceSendFields, "Interfaces") - } - - resource_compute_router_interface_log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, newIfaces) - op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() - if err != nil { - return resource_compute_router_interface_fmt.Errorf("Error 
patching router %s/%s: %s", region, routerName, err) - } - - err = computeOperationWaitTime(config, op, project, "Patching router", userAgent, d.Timeout(resource_compute_router_interface_schema.TimeoutDelete)) - if err != nil { - return resource_compute_router_interface_fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) - } - - d.SetId("") - return nil -} - -func resourceComputeRouterInterfaceImportState(d *resource_compute_router_interface_schema.ResourceData, meta interface{}) ([]*resource_compute_router_interface_schema.ResourceData, error) { - parts := resource_compute_router_interface_strings.Split(d.Id(), "/") - if len(parts) != 3 { - return nil, resource_compute_router_interface_fmt.Errorf("Invalid router interface specifier. Expecting {region}/{router}/{interface}") - } - - if err := d.Set("region", parts[0]); err != nil { - return nil, resource_compute_router_interface_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("router", parts[1]); err != nil { - return nil, resource_compute_router_interface_fmt.Errorf("Error setting router: %s", err) - } - if err := d.Set("name", parts[2]); err != nil { - return nil, resource_compute_router_interface_fmt.Errorf("Error setting name: %s", err) - } - - return []*resource_compute_router_interface_schema.ResourceData{d}, nil -} - -func resourceNameSetFromSelfLinkSet(v interface{}) *resource_compute_router_nat_schema.Set { - if v == nil { - return resource_compute_router_nat_schema.NewSet(resource_compute_router_nat_schema.HashString, nil) - } - vSet := v.(*resource_compute_router_nat_schema.Set) - ls := make([]interface{}, 0, vSet.Len()) - for _, v := range vSet.List() { - if v == nil { - continue - } - ls = append(ls, GetResourceNameFromSelfLink(v.(string))) - } - return resource_compute_router_nat_schema.NewSet(resource_compute_router_nat_schema.HashString, ls) -} - -func resourceComputeRouterNatDrainNatIpsCustomDiff(_ resource_compute_router_nat_context.Context, diff 
*resource_compute_router_nat_schema.ResourceDiff, meta interface{}) error { - o, n := diff.GetChange("drain_nat_ips") - oSet := resourceNameSetFromSelfLinkSet(o) - nSet := resourceNameSetFromSelfLinkSet(n) - addDrainIps := nSet.Difference(oSet) - - if addDrainIps.Len() == 0 { - return nil - } - - if diff.Id() == "" { - return resource_compute_router_nat_fmt.Errorf("New RouterNat cannot have drain_nat_ips, got values %+v", addDrainIps.List()) - } - - o, n = diff.GetChange("nat_ips") - oNatSet := resourceNameSetFromSelfLinkSet(o) - nNatSet := resourceNameSetFromSelfLinkSet(n) - - for _, v := range addDrainIps.List() { - if !oNatSet.Contains(v) { - return resource_compute_router_nat_fmt.Errorf("drain_nat_ip %q was not previously set in nat_ips %+v", v.(string), oNatSet.List()) - } - if nNatSet.Contains(v) { - return resource_compute_router_nat_fmt.Errorf("drain_nat_ip %q cannot be drained if still set in nat_ips %+v", v.(string), nNatSet.List()) - } - } - return nil -} - -func computeRouterNatSubnetworkHash(v interface{}) int { - obj := v.(map[string]interface{}) - name := obj["name"] - sourceIpRanges := obj["source_ip_ranges_to_nat"] - sourceIpRangesHash := 0 - if sourceIpRanges != nil { - sourceIpSet := sourceIpRanges.(*resource_compute_router_nat_schema.Set) - - for _, ipRange := range sourceIpSet.List() { - sourceIpRangesHash += resource_compute_router_nat_schema.HashString(ipRange.(string)) - } - } - - secondaryIpRangeNames := obj["secondary_ip_range_names"] - secondaryIpRangeHash := 0 - if secondaryIpRangeNames != nil { - secondaryIpRangeSet := secondaryIpRangeNames.(*resource_compute_router_nat_schema.Set) - - for _, secondaryIp := range secondaryIpRangeSet.List() { - secondaryIpRangeHash += resource_compute_router_nat_schema.HashString(secondaryIp.(string)) - } - } - - return resource_compute_router_nat_schema.HashString(NameFromSelfLinkStateFunc(name)) + sourceIpRangesHash + secondaryIpRangeHash -} - -func computeRouterNatIPsHash(v interface{}) int { - val := 
(v.(string)) - newParts := resource_compute_router_nat_strings.Split(val, "/") - if len(newParts) == 1 { - return resource_compute_router_nat_schema.HashString(newParts[0]) - } - return resource_compute_router_nat_schema.HashString(GetResourceNameFromSelfLink(val)) -} - -func resourceComputeRouterNat() *resource_compute_router_nat_schema.Resource { - return &resource_compute_router_nat_schema.Resource{ - Create: resourceComputeRouterNatCreate, - Read: resourceComputeRouterNatRead, - Update: resourceComputeRouterNatUpdate, - Delete: resourceComputeRouterNatDelete, - - Importer: &resource_compute_router_nat_schema.ResourceImporter{ - State: resourceComputeRouterNatImport, - }, - - Timeouts: &resource_compute_router_nat_schema.ResourceTimeout{ - Create: resource_compute_router_nat_schema.DefaultTimeout(10 * resource_compute_router_nat_time.Minute), - Update: resource_compute_router_nat_schema.DefaultTimeout(10 * resource_compute_router_nat_time.Minute), - Delete: resource_compute_router_nat_schema.DefaultTimeout(10 * resource_compute_router_nat_time.Minute), - }, - - CustomizeDiff: resourceComputeRouterNatDrainNatIpsCustomDiff, - - Schema: map[string]*resource_compute_router_nat_schema.Schema{ - "name": { - Type: resource_compute_router_nat_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRFC1035Name(2, 63), - Description: `Name of the NAT service. The name must be 1-63 characters long and -comply with RFC1035.`, - }, - "nat_ip_allocate_option": { - Type: resource_compute_router_nat_schema.TypeString, - Required: true, - ValidateFunc: resource_compute_router_nat_validation.StringInSlice([]string{"MANUAL_ONLY", "AUTO_ONLY"}, false), - Description: `How external IPs should be allocated for this NAT. Valid values are -'AUTO_ONLY' for only allowing NAT IPs allocated by Google Cloud -Platform, or 'MANUAL_ONLY' for only user-allocated NAT IP addresses. 
Possible values: ["MANUAL_ONLY", "AUTO_ONLY"]`, - }, - "router": { - Type: resource_compute_router_nat_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the Cloud Router in which this NAT will be configured.`, - }, - "source_subnetwork_ip_ranges_to_nat": { - Type: resource_compute_router_nat_schema.TypeString, - Required: true, - ValidateFunc: resource_compute_router_nat_validation.StringInSlice([]string{"ALL_SUBNETWORKS_ALL_IP_RANGES", "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", "LIST_OF_SUBNETWORKS"}, false), - Description: `How NAT should be configured per Subnetwork. -If 'ALL_SUBNETWORKS_ALL_IP_RANGES', all of the -IP ranges in every Subnetwork are allowed to Nat. -If 'ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES', all of the primary IP -ranges in every Subnetwork are allowed to Nat. -'LIST_OF_SUBNETWORKS': A list of Subnetworks are allowed to Nat -(specified in the field subnetwork below). Note that if this field -contains ALL_SUBNETWORKS_ALL_IP_RANGES or -ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any -other RouterNat section in any Router for this network in this region. Possible values: ["ALL_SUBNETWORKS_ALL_IP_RANGES", "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", "LIST_OF_SUBNETWORKS"]`, - }, - "drain_nat_ips": { - Type: resource_compute_router_nat_schema.TypeSet, - Optional: true, - Description: `A list of URLs of the IP resources to be drained. These IPs must be -valid static external IPs that have been assigned to the NAT.`, - Elem: &resource_compute_router_nat_schema.Schema{ - Type: resource_compute_router_nat_schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "enable_endpoint_independent_mapping": { - Type: resource_compute_router_nat_schema.TypeBool, - Optional: true, - Description: `Specifies if endpoint independent mapping is enabled. This is enabled by default. 
For more information -see the [official documentation](https://cloud.google.com/nat/docs/overview#specs-rfcs).`, - Default: true, - }, - "icmp_idle_timeout_sec": { - Type: resource_compute_router_nat_schema.TypeInt, - Optional: true, - Description: `Timeout (in seconds) for ICMP connections. Defaults to 30s if not set.`, - Default: 30, - }, - "log_config": { - Type: resource_compute_router_nat_schema.TypeList, - Optional: true, - Description: `Configuration for logging on NAT`, - MaxItems: 1, - Elem: &resource_compute_router_nat_schema.Resource{ - Schema: map[string]*resource_compute_router_nat_schema.Schema{ - "enable": { - Type: resource_compute_router_nat_schema.TypeBool, - Required: true, - Description: `Indicates whether or not to export logs.`, - }, - "filter": { - Type: resource_compute_router_nat_schema.TypeString, - Required: true, - ValidateFunc: resource_compute_router_nat_validation.StringInSlice([]string{"ERRORS_ONLY", "TRANSLATIONS_ONLY", "ALL"}, false), - Description: `Specifies the desired filtering of logs on this NAT. Possible values: ["ERRORS_ONLY", "TRANSLATIONS_ONLY", "ALL"]`, - }, - }, - }, - }, - "min_ports_per_vm": { - Type: resource_compute_router_nat_schema.TypeInt, - Optional: true, - Description: `Minimum number of ports allocated to a VM from this NAT.`, - }, - "nat_ips": { - Type: resource_compute_router_nat_schema.TypeSet, - Optional: true, - Description: `Self-links of NAT IPs. 
Only valid if natIpAllocateOption -is set to MANUAL_ONLY.`, - Elem: &resource_compute_router_nat_schema.Schema{ - Type: resource_compute_router_nat_schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - Set: computeRouterNatIPsHash, - }, - "region": { - Type: resource_compute_router_nat_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Region where the router and NAT reside.`, - }, - "subnetwork": { - Type: resource_compute_router_nat_schema.TypeSet, - Optional: true, - Description: `One or more subnetwork NAT configurations. Only used if -'source_subnetwork_ip_ranges_to_nat' is set to 'LIST_OF_SUBNETWORKS'`, - Elem: computeRouterNatSubnetworkSchema(), - Set: computeRouterNatSubnetworkHash, - }, - "tcp_established_idle_timeout_sec": { - Type: resource_compute_router_nat_schema.TypeInt, - Optional: true, - Description: `Timeout (in seconds) for TCP established connections. -Defaults to 1200s if not set.`, - Default: 1200, - }, - "tcp_transitory_idle_timeout_sec": { - Type: resource_compute_router_nat_schema.TypeInt, - Optional: true, - Description: `Timeout (in seconds) for TCP transitory connections. -Defaults to 30s if not set.`, - Default: 30, - }, - "udp_idle_timeout_sec": { - Type: resource_compute_router_nat_schema.TypeInt, - Optional: true, - Description: `Timeout (in seconds) for UDP connections. 
Defaults to 30s if not set.`, - Default: 30, - }, - "project": { - Type: resource_compute_router_nat_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func computeRouterNatSubnetworkSchema() *resource_compute_router_nat_schema.Resource { - return &resource_compute_router_nat_schema.Resource{ - Schema: map[string]*resource_compute_router_nat_schema.Schema{ - "name": { - Type: resource_compute_router_nat_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Self-link of subnetwork to NAT`, - }, - "source_ip_ranges_to_nat": { - Type: resource_compute_router_nat_schema.TypeSet, - Required: true, - Description: `List of options for which source IPs in the subnetwork -should have NAT enabled. Supported values include: -'ALL_IP_RANGES', 'LIST_OF_SECONDARY_IP_RANGES', -'PRIMARY_IP_RANGE'.`, - MinItems: 1, - Elem: &resource_compute_router_nat_schema.Schema{ - Type: resource_compute_router_nat_schema.TypeString, - }, - Set: resource_compute_router_nat_schema.HashString, - }, - "secondary_ip_range_names": { - Type: resource_compute_router_nat_schema.TypeSet, - Optional: true, - Description: `List of the secondary ranges of the subnetwork that are allowed -to use NAT. 
This can be populated only if -'LIST_OF_SECONDARY_IP_RANGES' is one of the values in -sourceIpRangesToNat`, - Elem: &resource_compute_router_nat_schema.Schema{ - Type: resource_compute_router_nat_schema.TypeString, - }, - Set: resource_compute_router_nat_schema.HashString, - }, - }, - } -} - -func resourceComputeRouterNatCreate(d *resource_compute_router_nat_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputeRouterNatName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_router_nat_reflect.ValueOf(nameProp)) && (ok || !resource_compute_router_nat_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - natIpAllocateOptionProp, err := expandNestedComputeRouterNatNatIpAllocateOption(d.Get("nat_ip_allocate_option"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("nat_ip_allocate_option"); !isEmptyValue(resource_compute_router_nat_reflect.ValueOf(natIpAllocateOptionProp)) && (ok || !resource_compute_router_nat_reflect.DeepEqual(v, natIpAllocateOptionProp)) { - obj["natIpAllocateOption"] = natIpAllocateOptionProp - } - natIpsProp, err := expandNestedComputeRouterNatNatIps(d.Get("nat_ips"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("nat_ips"); ok || !resource_compute_router_nat_reflect.DeepEqual(v, natIpsProp) { - obj["natIps"] = natIpsProp - } - drainNatIpsProp, err := expandNestedComputeRouterNatDrainNatIps(d.Get("drain_nat_ips"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("drain_nat_ips"); ok || !resource_compute_router_nat_reflect.DeepEqual(v, drainNatIpsProp) { - obj["drainNatIps"] = drainNatIpsProp - } - sourceSubnetworkIpRangesToNatProp, err := 
expandNestedComputeRouterNatSourceSubnetworkIpRangesToNat(d.Get("source_subnetwork_ip_ranges_to_nat"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_subnetwork_ip_ranges_to_nat"); !isEmptyValue(resource_compute_router_nat_reflect.ValueOf(sourceSubnetworkIpRangesToNatProp)) && (ok || !resource_compute_router_nat_reflect.DeepEqual(v, sourceSubnetworkIpRangesToNatProp)) { - obj["sourceSubnetworkIpRangesToNat"] = sourceSubnetworkIpRangesToNatProp - } - subnetworksProp, err := expandNestedComputeRouterNatSubnetwork(d.Get("subnetwork"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("subnetwork"); ok || !resource_compute_router_nat_reflect.DeepEqual(v, subnetworksProp) { - obj["subnetworks"] = subnetworksProp - } - minPortsPerVmProp, err := expandNestedComputeRouterNatMinPortsPerVm(d.Get("min_ports_per_vm"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("min_ports_per_vm"); !isEmptyValue(resource_compute_router_nat_reflect.ValueOf(minPortsPerVmProp)) && (ok || !resource_compute_router_nat_reflect.DeepEqual(v, minPortsPerVmProp)) { - obj["minPortsPerVm"] = minPortsPerVmProp - } - udpIdleTimeoutSecProp, err := expandNestedComputeRouterNatUdpIdleTimeoutSec(d.Get("udp_idle_timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("udp_idle_timeout_sec"); !isEmptyValue(resource_compute_router_nat_reflect.ValueOf(udpIdleTimeoutSecProp)) && (ok || !resource_compute_router_nat_reflect.DeepEqual(v, udpIdleTimeoutSecProp)) { - obj["udpIdleTimeoutSec"] = udpIdleTimeoutSecProp - } - icmpIdleTimeoutSecProp, err := expandNestedComputeRouterNatIcmpIdleTimeoutSec(d.Get("icmp_idle_timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("icmp_idle_timeout_sec"); !isEmptyValue(resource_compute_router_nat_reflect.ValueOf(icmpIdleTimeoutSecProp)) && (ok || !resource_compute_router_nat_reflect.DeepEqual(v, 
icmpIdleTimeoutSecProp)) { - obj["icmpIdleTimeoutSec"] = icmpIdleTimeoutSecProp - } - tcpEstablishedIdleTimeoutSecProp, err := expandNestedComputeRouterNatTcpEstablishedIdleTimeoutSec(d.Get("tcp_established_idle_timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tcp_established_idle_timeout_sec"); !isEmptyValue(resource_compute_router_nat_reflect.ValueOf(tcpEstablishedIdleTimeoutSecProp)) && (ok || !resource_compute_router_nat_reflect.DeepEqual(v, tcpEstablishedIdleTimeoutSecProp)) { - obj["tcpEstablishedIdleTimeoutSec"] = tcpEstablishedIdleTimeoutSecProp - } - tcpTransitoryIdleTimeoutSecProp, err := expandNestedComputeRouterNatTcpTransitoryIdleTimeoutSec(d.Get("tcp_transitory_idle_timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tcp_transitory_idle_timeout_sec"); !isEmptyValue(resource_compute_router_nat_reflect.ValueOf(tcpTransitoryIdleTimeoutSecProp)) && (ok || !resource_compute_router_nat_reflect.DeepEqual(v, tcpTransitoryIdleTimeoutSecProp)) { - obj["tcpTransitoryIdleTimeoutSec"] = tcpTransitoryIdleTimeoutSecProp - } - logConfigProp, err := expandNestedComputeRouterNatLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); ok || !resource_compute_router_nat_reflect.DeepEqual(v, logConfigProp) { - obj["logConfig"] = logConfigProp - } - enableEndpointIndependentMappingProp, err := expandNestedComputeRouterNatEnableEndpointIndependentMapping(d.Get("enable_endpoint_independent_mapping"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_endpoint_independent_mapping"); ok || !resource_compute_router_nat_reflect.DeepEqual(v, enableEndpointIndependentMappingProp) { - obj["enableEndpointIndependentMapping"] = enableEndpointIndependentMappingProp - } - - lockName, err := replaceVars(d, config, "router/{{region}}/{{router}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) 
- defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") - if err != nil { - return err - } - - resource_compute_router_nat_log.Printf("[DEBUG] Creating new RouterNat: %#v", obj) - - obj, err = resourceComputeRouterNatPatchCreateEncoder(d, meta, obj) - if err != nil { - return err - } - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_router_nat_fmt.Errorf("Error fetching project for RouterNat: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_router_nat_schema.TimeoutCreate)) - if err != nil { - return resource_compute_router_nat_fmt.Errorf("Error creating RouterNat: %s", err) - } - - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{router}}/{{name}}") - if err != nil { - return resource_compute_router_nat_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating RouterNat", userAgent, - d.Timeout(resource_compute_router_nat_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_router_nat_fmt.Errorf("Error waiting to create RouterNat: %s", err) - } - - resource_compute_router_nat_log.Printf("[DEBUG] Finished creating RouterNat %q: %#v", d.Id(), res) - - return resourceComputeRouterNatRead(d, meta) -} - -func resourceComputeRouterNatRead(d *resource_compute_router_nat_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") - if err != nil { - return err - } - - 
billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_router_nat_fmt.Errorf("Error fetching project for RouterNat: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_router_nat_fmt.Sprintf("ComputeRouterNat %q", d.Id())) - } - - res, err = flattenNestedComputeRouterNat(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_router_nat_log.Printf("[DEBUG] Removing ComputeRouterNat because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_compute_router_nat_fmt.Errorf("Error reading RouterNat: %s", err) - } - - if err := d.Set("name", flattenNestedComputeRouterNatName(res["name"], d, config)); err != nil { - return resource_compute_router_nat_fmt.Errorf("Error reading RouterNat: %s", err) - } - if err := d.Set("nat_ip_allocate_option", flattenNestedComputeRouterNatNatIpAllocateOption(res["natIpAllocateOption"], d, config)); err != nil { - return resource_compute_router_nat_fmt.Errorf("Error reading RouterNat: %s", err) - } - if err := d.Set("nat_ips", flattenNestedComputeRouterNatNatIps(res["natIps"], d, config)); err != nil { - return resource_compute_router_nat_fmt.Errorf("Error reading RouterNat: %s", err) - } - if err := d.Set("drain_nat_ips", flattenNestedComputeRouterNatDrainNatIps(res["drainNatIps"], d, config)); err != nil { - return resource_compute_router_nat_fmt.Errorf("Error reading RouterNat: %s", err) - } - if err := d.Set("source_subnetwork_ip_ranges_to_nat", flattenNestedComputeRouterNatSourceSubnetworkIpRangesToNat(res["sourceSubnetworkIpRangesToNat"], d, config)); err != nil { - return resource_compute_router_nat_fmt.Errorf("Error reading RouterNat: %s", err) - 
} - if err := d.Set("subnetwork", flattenNestedComputeRouterNatSubnetwork(res["subnetworks"], d, config)); err != nil { - return resource_compute_router_nat_fmt.Errorf("Error reading RouterNat: %s", err) - } - if err := d.Set("min_ports_per_vm", flattenNestedComputeRouterNatMinPortsPerVm(res["minPortsPerVm"], d, config)); err != nil { - return resource_compute_router_nat_fmt.Errorf("Error reading RouterNat: %s", err) - } - if err := d.Set("udp_idle_timeout_sec", flattenNestedComputeRouterNatUdpIdleTimeoutSec(res["udpIdleTimeoutSec"], d, config)); err != nil { - return resource_compute_router_nat_fmt.Errorf("Error reading RouterNat: %s", err) - } - if err := d.Set("icmp_idle_timeout_sec", flattenNestedComputeRouterNatIcmpIdleTimeoutSec(res["icmpIdleTimeoutSec"], d, config)); err != nil { - return resource_compute_router_nat_fmt.Errorf("Error reading RouterNat: %s", err) - } - if err := d.Set("tcp_established_idle_timeout_sec", flattenNestedComputeRouterNatTcpEstablishedIdleTimeoutSec(res["tcpEstablishedIdleTimeoutSec"], d, config)); err != nil { - return resource_compute_router_nat_fmt.Errorf("Error reading RouterNat: %s", err) - } - if err := d.Set("tcp_transitory_idle_timeout_sec", flattenNestedComputeRouterNatTcpTransitoryIdleTimeoutSec(res["tcpTransitoryIdleTimeoutSec"], d, config)); err != nil { - return resource_compute_router_nat_fmt.Errorf("Error reading RouterNat: %s", err) - } - if err := d.Set("log_config", flattenNestedComputeRouterNatLogConfig(res["logConfig"], d, config)); err != nil { - return resource_compute_router_nat_fmt.Errorf("Error reading RouterNat: %s", err) - } - if err := d.Set("enable_endpoint_independent_mapping", flattenNestedComputeRouterNatEnableEndpointIndependentMapping(res["enableEndpointIndependentMapping"], d, config)); err != nil { - return resource_compute_router_nat_fmt.Errorf("Error reading RouterNat: %s", err) - } - - return nil -} - -func resourceComputeRouterNatUpdate(d *resource_compute_router_nat_schema.ResourceData, meta 
interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_router_nat_fmt.Errorf("Error fetching project for RouterNat: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - natIpAllocateOptionProp, err := expandNestedComputeRouterNatNatIpAllocateOption(d.Get("nat_ip_allocate_option"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("nat_ip_allocate_option"); !isEmptyValue(resource_compute_router_nat_reflect.ValueOf(v)) && (ok || !resource_compute_router_nat_reflect.DeepEqual(v, natIpAllocateOptionProp)) { - obj["natIpAllocateOption"] = natIpAllocateOptionProp - } - natIpsProp, err := expandNestedComputeRouterNatNatIps(d.Get("nat_ips"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("nat_ips"); ok || !resource_compute_router_nat_reflect.DeepEqual(v, natIpsProp) { - obj["natIps"] = natIpsProp - } - drainNatIpsProp, err := expandNestedComputeRouterNatDrainNatIps(d.Get("drain_nat_ips"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("drain_nat_ips"); ok || !resource_compute_router_nat_reflect.DeepEqual(v, drainNatIpsProp) { - obj["drainNatIps"] = drainNatIpsProp - } - sourceSubnetworkIpRangesToNatProp, err := expandNestedComputeRouterNatSourceSubnetworkIpRangesToNat(d.Get("source_subnetwork_ip_ranges_to_nat"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_subnetwork_ip_ranges_to_nat"); !isEmptyValue(resource_compute_router_nat_reflect.ValueOf(v)) && (ok || !resource_compute_router_nat_reflect.DeepEqual(v, sourceSubnetworkIpRangesToNatProp)) { - obj["sourceSubnetworkIpRangesToNat"] = sourceSubnetworkIpRangesToNatProp - } - subnetworksProp, err := expandNestedComputeRouterNatSubnetwork(d.Get("subnetwork"), d, config) - 
if err != nil { - return err - } else if v, ok := d.GetOkExists("subnetwork"); ok || !resource_compute_router_nat_reflect.DeepEqual(v, subnetworksProp) { - obj["subnetworks"] = subnetworksProp - } - minPortsPerVmProp, err := expandNestedComputeRouterNatMinPortsPerVm(d.Get("min_ports_per_vm"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("min_ports_per_vm"); !isEmptyValue(resource_compute_router_nat_reflect.ValueOf(v)) && (ok || !resource_compute_router_nat_reflect.DeepEqual(v, minPortsPerVmProp)) { - obj["minPortsPerVm"] = minPortsPerVmProp - } - udpIdleTimeoutSecProp, err := expandNestedComputeRouterNatUdpIdleTimeoutSec(d.Get("udp_idle_timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("udp_idle_timeout_sec"); !isEmptyValue(resource_compute_router_nat_reflect.ValueOf(v)) && (ok || !resource_compute_router_nat_reflect.DeepEqual(v, udpIdleTimeoutSecProp)) { - obj["udpIdleTimeoutSec"] = udpIdleTimeoutSecProp - } - icmpIdleTimeoutSecProp, err := expandNestedComputeRouterNatIcmpIdleTimeoutSec(d.Get("icmp_idle_timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("icmp_idle_timeout_sec"); !isEmptyValue(resource_compute_router_nat_reflect.ValueOf(v)) && (ok || !resource_compute_router_nat_reflect.DeepEqual(v, icmpIdleTimeoutSecProp)) { - obj["icmpIdleTimeoutSec"] = icmpIdleTimeoutSecProp - } - tcpEstablishedIdleTimeoutSecProp, err := expandNestedComputeRouterNatTcpEstablishedIdleTimeoutSec(d.Get("tcp_established_idle_timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tcp_established_idle_timeout_sec"); !isEmptyValue(resource_compute_router_nat_reflect.ValueOf(v)) && (ok || !resource_compute_router_nat_reflect.DeepEqual(v, tcpEstablishedIdleTimeoutSecProp)) { - obj["tcpEstablishedIdleTimeoutSec"] = tcpEstablishedIdleTimeoutSecProp - } - tcpTransitoryIdleTimeoutSecProp, err := 
expandNestedComputeRouterNatTcpTransitoryIdleTimeoutSec(d.Get("tcp_transitory_idle_timeout_sec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tcp_transitory_idle_timeout_sec"); !isEmptyValue(resource_compute_router_nat_reflect.ValueOf(v)) && (ok || !resource_compute_router_nat_reflect.DeepEqual(v, tcpTransitoryIdleTimeoutSecProp)) { - obj["tcpTransitoryIdleTimeoutSec"] = tcpTransitoryIdleTimeoutSecProp - } - logConfigProp, err := expandNestedComputeRouterNatLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); ok || !resource_compute_router_nat_reflect.DeepEqual(v, logConfigProp) { - obj["logConfig"] = logConfigProp - } - enableEndpointIndependentMappingProp, err := expandNestedComputeRouterNatEnableEndpointIndependentMapping(d.Get("enable_endpoint_independent_mapping"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_endpoint_independent_mapping"); ok || !resource_compute_router_nat_reflect.DeepEqual(v, enableEndpointIndependentMappingProp) { - obj["enableEndpointIndependentMapping"] = enableEndpointIndependentMappingProp - } - - lockName, err := replaceVars(d, config, "router/{{region}}/{{router}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") - if err != nil { - return err - } - - resource_compute_router_nat_log.Printf("[DEBUG] Updating RouterNat %q: %#v", d.Id(), obj) - - obj, err = resourceComputeRouterNatPatchUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_router_nat_schema.TimeoutUpdate)) - - if err != nil { - return 
resource_compute_router_nat_fmt.Errorf("Error updating RouterNat %q: %s", d.Id(), err) - } else { - resource_compute_router_nat_log.Printf("[DEBUG] Finished updating RouterNat %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating RouterNat", userAgent, - d.Timeout(resource_compute_router_nat_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeRouterNatRead(d, meta) -} - -func resourceComputeRouterNatDelete(d *resource_compute_router_nat_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_router_nat_fmt.Errorf("Error fetching project for RouterNat: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "router/{{region}}/{{router}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - obj, err = resourceComputeRouterNatPatchDeleteEncoder(d, meta, obj) - if err != nil { - return handleNotFoundError(err, d, "RouterNat") - } - resource_compute_router_nat_log.Printf("[DEBUG] Deleting RouterNat %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_router_nat_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RouterNat") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting RouterNat", userAgent, - d.Timeout(resource_compute_router_nat_schema.TimeoutDelete)) - - if err != nil { - return err - } - - 
resource_compute_router_nat_log.Printf("[DEBUG] Finished deleting RouterNat %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRouterNatImport(d *resource_compute_router_nat_schema.ResourceData, meta interface{}) ([]*resource_compute_router_nat_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/routers/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{project}}/{{region}}/{{router}}/{{name}}") - if err != nil { - return nil, resource_compute_router_nat_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_router_nat_schema.ResourceData{d}, nil -} - -func flattenNestedComputeRouterNatName(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterNatNatIpAllocateOption(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterNatNatIps(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenNestedComputeRouterNatDrainNatIps(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenNestedComputeRouterNatSourceSubnetworkIpRangesToNat(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterNatSubnetwork(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { 
- if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_compute_router_nat_schema.NewSet(computeRouterNatSubnetworkHash, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "name": flattenNestedComputeRouterNatSubnetworkName(original["name"], d, config), - "source_ip_ranges_to_nat": flattenNestedComputeRouterNatSubnetworkSourceIpRangesToNat(original["sourceIpRangesToNat"], d, config), - "secondary_ip_range_names": flattenNestedComputeRouterNatSubnetworkSecondaryIpRangeNames(original["secondaryIpRangeNames"], d, config), - }) - } - return transformed -} - -func flattenNestedComputeRouterNatSubnetworkName(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenNestedComputeRouterNatSubnetworkSourceIpRangesToNat(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_router_nat_schema.NewSet(resource_compute_router_nat_schema.HashString, v.([]interface{})) -} - -func flattenNestedComputeRouterNatSubnetworkSecondaryIpRangeNames(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_router_nat_schema.NewSet(resource_compute_router_nat_schema.HashString, v.([]interface{})) -} - -func flattenNestedComputeRouterNatMinPortsPerVm(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_router_nat_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func 
flattenNestedComputeRouterNatUdpIdleTimeoutSec(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(resource_compute_router_nat_reflect.ValueOf(v)) { - return 30 - } - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_router_nat_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - return v -} - -func flattenNestedComputeRouterNatIcmpIdleTimeoutSec(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(resource_compute_router_nat_reflect.ValueOf(v)) { - return 30 - } - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_router_nat_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - return v -} - -func flattenNestedComputeRouterNatTcpEstablishedIdleTimeoutSec(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(resource_compute_router_nat_reflect.ValueOf(v)) { - return 1200 - } - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_router_nat_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - return v -} - -func flattenNestedComputeRouterNatTcpTransitoryIdleTimeoutSec(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(resource_compute_router_nat_reflect.ValueOf(v)) { - return 30 - } - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_router_nat_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - return v -} - -func flattenNestedComputeRouterNatLogConfig(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["enable"] = - flattenNestedComputeRouterNatLogConfigEnable(original["enable"], d, config) - transformed["filter"] = - flattenNestedComputeRouterNatLogConfigFilter(original["filter"], d, config) - return []interface{}{transformed} -} - -func flattenNestedComputeRouterNatLogConfigEnable(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterNatLogConfigFilter(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterNatEnableEndpointIndependentMapping(v interface{}, d *resource_compute_router_nat_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedComputeRouterNatName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterNatNatIpAllocateOption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterNatNatIps(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_router_nat_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - return nil, resource_compute_router_nat_fmt.Errorf("Invalid value for nat_ips: nil") - } - f, err := parseRegionalFieldValue("addresses", raw.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_router_nat_fmt.Errorf("Invalid value for nat_ips: %s", err) - } - req = append(req, f.RelativeLink()) - } - return req, nil -} - -func expandNestedComputeRouterNatDrainNatIps(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_router_nat_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, 
raw := range l { - if raw == nil { - return nil, resource_compute_router_nat_fmt.Errorf("Invalid value for drain_nat_ips: nil") - } - f, err := parseRegionalFieldValue("addresses", raw.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_router_nat_fmt.Errorf("Invalid value for drain_nat_ips: %s", err) - } - req = append(req, f.RelativeLink()) - } - return req, nil -} - -func expandNestedComputeRouterNatSourceSubnetworkIpRangesToNat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterNatSubnetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_router_nat_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandNestedComputeRouterNatSubnetworkName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_router_nat_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedSourceIpRangesToNat, err := expandNestedComputeRouterNatSubnetworkSourceIpRangesToNat(original["source_ip_ranges_to_nat"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_router_nat_reflect.ValueOf(transformedSourceIpRangesToNat); val.IsValid() && !isEmptyValue(val) { - transformed["sourceIpRangesToNat"] = transformedSourceIpRangesToNat - } - - transformedSecondaryIpRangeNames, err := expandNestedComputeRouterNatSubnetworkSecondaryIpRangeNames(original["secondary_ip_range_names"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_router_nat_reflect.ValueOf(transformedSecondaryIpRangeNames); val.IsValid() && !isEmptyValue(val) 
{ - transformed["secondaryIpRangeNames"] = transformedSecondaryIpRangeNames - } - - req = append(req, transformed) - } - return req, nil -} - -func expandNestedComputeRouterNatSubnetworkName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("subnetworks", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_router_nat_fmt.Errorf("Invalid value for name: %s", err) - } - return f.RelativeLink(), nil -} - -func expandNestedComputeRouterNatSubnetworkSourceIpRangesToNat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_router_nat_schema.Set).List() - return v, nil -} - -func expandNestedComputeRouterNatSubnetworkSecondaryIpRangeNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_router_nat_schema.Set).List() - return v, nil -} - -func expandNestedComputeRouterNatMinPortsPerVm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterNatUdpIdleTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterNatIcmpIdleTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterNatTcpEstablishedIdleTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterNatTcpTransitoryIdleTimeoutSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterNatLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnable, err := expandNestedComputeRouterNatLogConfigEnable(original["enable"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_router_nat_reflect.ValueOf(transformedEnable); val.IsValid() && !isEmptyValue(val) { - transformed["enable"] = transformedEnable - } - - transformedFilter, err := expandNestedComputeRouterNatLogConfigFilter(original["filter"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_router_nat_reflect.ValueOf(transformedFilter); val.IsValid() && !isEmptyValue(val) { - transformed["filter"] = transformedFilter - } - - return transformed, nil -} - -func expandNestedComputeRouterNatLogConfigEnable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterNatLogConfigFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterNatEnableEndpointIndependentMapping(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func flattenNestedComputeRouterNat(d *resource_compute_router_nat_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["nats"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_compute_router_nat_fmt.Errorf("expected list or map for value nats. 
Actual value: %v", v) - } - - _, item, err := resourceComputeRouterNatFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeRouterNatFindNestedObjectInList(d *resource_compute_router_nat_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName, err := expandNestedComputeRouterNatName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputeRouterNatName(expectedName, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemName := flattenNestedComputeRouterNatName(item["name"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_router_nat_reflect.ValueOf(itemName)) && isEmptyValue(resource_compute_router_nat_reflect.ValueOf(expectedFlattenedName))) && !resource_compute_router_nat_reflect.DeepEqual(itemName, expectedFlattenedName) { - resource_compute_router_nat_log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - resource_compute_router_nat_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func resourceComputeRouterNatPatchCreateEncoder(d *resource_compute_router_nat_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceComputeRouterNatListForPatch(d, meta) - if err != nil { - return nil, err - } - - _, found, err := resourceComputeRouterNatFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - - if found != nil { - return nil, resource_compute_router_nat_fmt.Errorf("Unable to create RouterNat, existing object already found: %+v", found) - } - - res := map[string]interface{}{ - "nats": append(currItems, obj), - } 
- - return res, nil -} - -func resourceComputeRouterNatPatchUpdateEncoder(d *resource_compute_router_nat_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - items, err := resourceComputeRouterNatListForPatch(d, meta) - if err != nil { - return nil, err - } - - idx, item, err := resourceComputeRouterNatFindNestedObjectInList(d, meta, items) - if err != nil { - return nil, err - } - - if item == nil { - return nil, resource_compute_router_nat_fmt.Errorf("Unable to update RouterNat %q - not found in list", d.Id()) - } - - for k, v := range obj { - item[k] = v - } - items[idx] = item - - res := map[string]interface{}{ - "nats": items, - } - - return res, nil -} - -func resourceComputeRouterNatPatchDeleteEncoder(d *resource_compute_router_nat_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceComputeRouterNatListForPatch(d, meta) - if err != nil { - return nil, err - } - - idx, item, err := resourceComputeRouterNatFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - if item == nil { - - return nil, &resource_compute_router_nat_googleapi.Error{ - Code: 404, - Message: "RouterNat not found in list", - } - } - - updatedItems := append(currItems[:idx], currItems[idx+1:]...) 
- res := map[string]interface{}{ - "nats": updatedItems, - } - - return res, nil -} - -func resourceComputeRouterNatListForPatch(d *resource_compute_router_nat_schema.ResourceData, meta interface{}) ([]interface{}, error) { - config := meta.(*Config) - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") - if err != nil { - return nil, err - } - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return nil, err - } - - var v interface{} - var ok bool - - v, ok = res["nats"] - if ok && v != nil { - ls, lsOk := v.([]interface{}) - if !lsOk { - return nil, resource_compute_router_nat_fmt.Errorf(`expected list for nested field "nats"`) - } - return ls, nil - } - return nil, nil -} - -func resourceComputeRouterBgpPeer() *resource_compute_router_peer_schema.Resource { - return &resource_compute_router_peer_schema.Resource{ - Create: resourceComputeRouterBgpPeerCreate, - Read: resourceComputeRouterBgpPeerRead, - Update: resourceComputeRouterBgpPeerUpdate, - Delete: resourceComputeRouterBgpPeerDelete, - - Importer: &resource_compute_router_peer_schema.ResourceImporter{ - State: resourceComputeRouterBgpPeerImport, - }, - - Timeouts: &resource_compute_router_peer_schema.ResourceTimeout{ - Create: resource_compute_router_peer_schema.DefaultTimeout(10 * resource_compute_router_peer_time.Minute), - Update: resource_compute_router_peer_schema.DefaultTimeout(10 * resource_compute_router_peer_time.Minute), - Delete: resource_compute_router_peer_schema.DefaultTimeout(10 * resource_compute_router_peer_time.Minute), - }, - - Schema: map[string]*resource_compute_router_peer_schema.Schema{ - "interface": { - Type: resource_compute_router_peer_schema.TypeString, - Required: true, - ForceNew: true, - 
Description: `Name of the interface the BGP peer is associated with.`, - }, - "name": { - Type: resource_compute_router_peer_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRFC1035Name(2, 63), - Description: `Name of this BGP peer. The name must be 1-63 characters long, -and comply with RFC1035. Specifically, the name must be 1-63 characters -long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which -means the first character must be a lowercase letter, and all -following characters must be a dash, lowercase letter, or digit, -except the last character, which cannot be a dash.`, - }, - "peer_asn": { - Type: resource_compute_router_peer_schema.TypeInt, - Required: true, - Description: `Peer BGP Autonomous System Number (ASN). -Each BGP interface may use a different value.`, - }, - "peer_ip_address": { - Type: resource_compute_router_peer_schema.TypeString, - Required: true, - Description: `IP address of the BGP interface outside Google Cloud Platform. -Only IPv4 is supported.`, - }, - "router": { - Type: resource_compute_router_peer_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the Cloud Router in which this BgpPeer will be configured.`, - }, - "advertise_mode": { - Type: resource_compute_router_peer_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_router_peer_validation.StringInSlice([]string{"DEFAULT", "CUSTOM", ""}, false), - Description: `User-specified flag to indicate which mode to use for advertisement. 
-Valid values of this enum field are: 'DEFAULT', 'CUSTOM' Default value: "DEFAULT" Possible values: ["DEFAULT", "CUSTOM"]`, - Default: "DEFAULT", - }, - "advertised_groups": { - Type: resource_compute_router_peer_schema.TypeList, - Optional: true, - Description: `User-specified list of prefix groups to advertise in custom -mode, which can take one of the following options: - -* 'ALL_SUBNETS': Advertises all available subnets, including peer VPC subnets. -* 'ALL_VPC_SUBNETS': Advertises the router's own VPC subnets. -* 'ALL_PEER_VPC_SUBNETS': Advertises peer subnets of the router's VPC network. - - -Note that this field can only be populated if advertiseMode is 'CUSTOM' -and overrides the list defined for the router (in the "bgp" message). -These groups are advertised in addition to any specified prefixes. -Leave this field blank to advertise no custom groups.`, - Elem: &resource_compute_router_peer_schema.Schema{ - Type: resource_compute_router_peer_schema.TypeString, - }, - }, - "advertised_ip_ranges": { - Type: resource_compute_router_peer_schema.TypeList, - Optional: true, - Description: `User-specified list of individual IP ranges to advertise in -custom mode. This field can only be populated if advertiseMode -is 'CUSTOM' and is advertised to all peers of the router. These IP -ranges will be advertised in addition to any specified groups. -Leave this field blank to advertise no custom IP ranges.`, - Elem: &resource_compute_router_peer_schema.Resource{ - Schema: map[string]*resource_compute_router_peer_schema.Schema{ - "range": { - Type: resource_compute_router_peer_schema.TypeString, - Required: true, - Description: `The IP range to advertise. 
The value must be a -CIDR-formatted string.`, - }, - "description": { - Type: resource_compute_router_peer_schema.TypeString, - Optional: true, - Description: `User-specified description for the IP range.`, - }, - }, - }, - }, - "advertised_route_priority": { - Type: resource_compute_router_peer_schema.TypeInt, - Optional: true, - Description: `The priority of routes advertised to this BGP peer. -Where there is more than one matching route of maximum -length, the routes with the lowest priority value win.`, - }, - "bfd": { - Type: resource_compute_router_peer_schema.TypeList, - Computed: true, - Optional: true, - Description: `BFD configuration for the BGP peering.`, - MaxItems: 1, - Elem: &resource_compute_router_peer_schema.Resource{ - Schema: map[string]*resource_compute_router_peer_schema.Schema{ - "session_initialization_mode": { - Type: resource_compute_router_peer_schema.TypeString, - Required: true, - ValidateFunc: resource_compute_router_peer_validation.StringInSlice([]string{"ACTIVE", "DISABLED", "PASSIVE"}, false), - Description: `The BFD session initialization mode for this BGP peer. -If set to 'ACTIVE', the Cloud Router will initiate the BFD session -for this BGP peer. If set to 'PASSIVE', the Cloud Router will wait -for the peer router to initiate the BFD session for this BGP peer. -If set to 'DISABLED', BFD is disabled for this BGP peer. Possible values: ["ACTIVE", "DISABLED", "PASSIVE"]`, - }, - "min_receive_interval": { - Type: resource_compute_router_peer_schema.TypeInt, - Optional: true, - Description: `The minimum interval, in milliseconds, between BFD control packets -received from the peer router. The actual value is negotiated -between the two routers and is equal to the greater of this value -and the transmit interval of the other router. 
If set, this value -must be between 1000 and 30000.`, - Default: 1000, - }, - "min_transmit_interval": { - Type: resource_compute_router_peer_schema.TypeInt, - Optional: true, - Description: `The minimum interval, in milliseconds, between BFD control packets -transmitted to the peer router. The actual value is negotiated -between the two routers and is equal to the greater of this value -and the corresponding receive interval of the other router. If set, -this value must be between 1000 and 30000.`, - Default: 1000, - }, - "multiplier": { - Type: resource_compute_router_peer_schema.TypeInt, - Optional: true, - Description: `The number of consecutive BFD packets that must be missed before -BFD declares that a peer is unavailable. If set, the value must -be a value between 5 and 16.`, - Default: 5, - }, - }, - }, - }, - "enable": { - Type: resource_compute_router_peer_schema.TypeBool, - Optional: true, - Description: `The status of the BGP peer connection. If set to false, any active session -with the peer is terminated and all associated routing information is removed. -If set to true, the peer connection can be established with routing information. -The default is true.`, - Default: true, - }, - "ip_address": { - Type: resource_compute_router_peer_schema.TypeString, - Computed: true, - Optional: true, - Description: `IP address of the interface inside Google Cloud Platform. -Only IPv4 is supported.`, - }, - "region": { - Type: resource_compute_router_peer_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Region where the router and BgpPeer reside. -If it is not provided, the provider region is used.`, - }, - "management_type": { - Type: resource_compute_router_peer_schema.TypeString, - Computed: true, - Description: `The resource that configures and manages this BGP peer. 
- -* 'MANAGED_BY_USER' is the default value and can be managed by -you or other users -* 'MANAGED_BY_ATTACHMENT' is a BGP peer that is configured and -managed by Cloud Interconnect, specifically by an -InterconnectAttachment of type PARTNER. Google automatically -creates, updates, and deletes this type of BGP peer when the -PARTNER InterconnectAttachment is created, updated, -or deleted.`, - }, - "project": { - Type: resource_compute_router_peer_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeRouterBgpPeerCreate(d *resource_compute_router_peer_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNestedComputeRouterBgpPeerName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_router_peer_reflect.ValueOf(nameProp)) && (ok || !resource_compute_router_peer_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - interfaceNameProp, err := expandNestedComputeRouterBgpPeerInterface(d.Get("interface"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("interface"); !isEmptyValue(resource_compute_router_peer_reflect.ValueOf(interfaceNameProp)) && (ok || !resource_compute_router_peer_reflect.DeepEqual(v, interfaceNameProp)) { - obj["interfaceName"] = interfaceNameProp - } - ipAddressProp, err := expandNestedComputeRouterBgpPeerIpAddress(d.Get("ip_address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_address"); !isEmptyValue(resource_compute_router_peer_reflect.ValueOf(ipAddressProp)) && (ok || !resource_compute_router_peer_reflect.DeepEqual(v, ipAddressProp)) { - obj["ipAddress"] = ipAddressProp - } - peerIpAddressProp, err := 
expandNestedComputeRouterBgpPeerPeerIpAddress(d.Get("peer_ip_address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peer_ip_address"); !isEmptyValue(resource_compute_router_peer_reflect.ValueOf(peerIpAddressProp)) && (ok || !resource_compute_router_peer_reflect.DeepEqual(v, peerIpAddressProp)) { - obj["peerIpAddress"] = peerIpAddressProp - } - peerAsnProp, err := expandNestedComputeRouterBgpPeerPeerAsn(d.Get("peer_asn"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peer_asn"); !isEmptyValue(resource_compute_router_peer_reflect.ValueOf(peerAsnProp)) && (ok || !resource_compute_router_peer_reflect.DeepEqual(v, peerAsnProp)) { - obj["peerAsn"] = peerAsnProp - } - advertisedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(d.Get("advertised_route_priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("advertised_route_priority"); ok || !resource_compute_router_peer_reflect.DeepEqual(v, advertisedRoutePriorityProp) { - obj["advertisedRoutePriority"] = advertisedRoutePriorityProp - } - advertiseModeProp, err := expandNestedComputeRouterBgpPeerAdvertiseMode(d.Get("advertise_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("advertise_mode"); !isEmptyValue(resource_compute_router_peer_reflect.ValueOf(advertiseModeProp)) && (ok || !resource_compute_router_peer_reflect.DeepEqual(v, advertiseModeProp)) { - obj["advertiseMode"] = advertiseModeProp - } - advertisedGroupsProp, err := expandNestedComputeRouterBgpPeerAdvertisedGroups(d.Get("advertised_groups"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("advertised_groups"); ok || !resource_compute_router_peer_reflect.DeepEqual(v, advertisedGroupsProp) { - obj["advertisedGroups"] = advertisedGroupsProp - } - advertisedIpRangesProp, err := expandNestedComputeRouterBgpPeerAdvertisedIpRanges(d.Get("advertised_ip_ranges"), d, config) - 
if err != nil { - return err - } else if v, ok := d.GetOkExists("advertised_ip_ranges"); ok || !resource_compute_router_peer_reflect.DeepEqual(v, advertisedIpRangesProp) { - obj["advertisedIpRanges"] = advertisedIpRangesProp - } - bfdProp, err := expandNestedComputeRouterBgpPeerBfd(d.Get("bfd"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bfd"); !isEmptyValue(resource_compute_router_peer_reflect.ValueOf(bfdProp)) && (ok || !resource_compute_router_peer_reflect.DeepEqual(v, bfdProp)) { - obj["bfd"] = bfdProp - } - enableProp, err := expandNestedComputeRouterBgpPeerEnable(d.Get("enable"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable"); ok || !resource_compute_router_peer_reflect.DeepEqual(v, enableProp) { - obj["enable"] = enableProp - } - - lockName, err := replaceVars(d, config, "router/{{region}}/{{router}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") - if err != nil { - return err - } - - resource_compute_router_peer_log.Printf("[DEBUG] Creating new RouterBgpPeer: %#v", obj) - - obj, err = resourceComputeRouterBgpPeerPatchCreateEncoder(d, meta, obj) - if err != nil { - return err - } - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_router_peer_fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_router_peer_schema.TimeoutCreate)) - if err != nil { - return resource_compute_router_peer_fmt.Errorf("Error creating RouterBgpPeer: %s", err) - } - - id, err := replaceVars(d, config, 
"projects/{{project}}/regions/{{region}}/routers/{{router}}/{{name}}") - if err != nil { - return resource_compute_router_peer_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating RouterBgpPeer", userAgent, - d.Timeout(resource_compute_router_peer_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_router_peer_fmt.Errorf("Error waiting to create RouterBgpPeer: %s", err) - } - - resource_compute_router_peer_log.Printf("[DEBUG] Finished creating RouterBgpPeer %q: %#v", d.Id(), res) - - return resourceComputeRouterBgpPeerRead(d, meta) -} - -func resourceComputeRouterBgpPeerRead(d *resource_compute_router_peer_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_router_peer_fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_router_peer_fmt.Sprintf("ComputeRouterBgpPeer %q", d.Id())) - } - - res, err = flattenNestedComputeRouterBgpPeer(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_router_peer_log.Printf("[DEBUG] Removing ComputeRouterBgpPeer because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_compute_router_peer_fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - - if 
err := d.Set("name", flattenNestedComputeRouterBgpPeerName(res["name"], d, config)); err != nil { - return resource_compute_router_peer_fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("interface", flattenNestedComputeRouterBgpPeerInterface(res["interfaceName"], d, config)); err != nil { - return resource_compute_router_peer_fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("ip_address", flattenNestedComputeRouterBgpPeerIpAddress(res["ipAddress"], d, config)); err != nil { - return resource_compute_router_peer_fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("peer_ip_address", flattenNestedComputeRouterBgpPeerPeerIpAddress(res["peerIpAddress"], d, config)); err != nil { - return resource_compute_router_peer_fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("peer_asn", flattenNestedComputeRouterBgpPeerPeerAsn(res["peerAsn"], d, config)); err != nil { - return resource_compute_router_peer_fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("advertised_route_priority", flattenNestedComputeRouterBgpPeerAdvertisedRoutePriority(res["advertisedRoutePriority"], d, config)); err != nil { - return resource_compute_router_peer_fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("advertise_mode", flattenNestedComputeRouterBgpPeerAdvertiseMode(res["advertiseMode"], d, config)); err != nil { - return resource_compute_router_peer_fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("advertised_groups", flattenNestedComputeRouterBgpPeerAdvertisedGroups(res["advertisedGroups"], d, config)); err != nil { - return resource_compute_router_peer_fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("advertised_ip_ranges", flattenNestedComputeRouterBgpPeerAdvertisedIpRanges(res["advertisedIpRanges"], d, config)); err != nil { - return resource_compute_router_peer_fmt.Errorf("Error reading RouterBgpPeer: %s", err) - 
} - if err := d.Set("management_type", flattenNestedComputeRouterBgpPeerManagementType(res["managementType"], d, config)); err != nil { - return resource_compute_router_peer_fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("bfd", flattenNestedComputeRouterBgpPeerBfd(res["bfd"], d, config)); err != nil { - return resource_compute_router_peer_fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - if err := d.Set("enable", flattenNestedComputeRouterBgpPeerEnable(res["enable"], d, config)); err != nil { - return resource_compute_router_peer_fmt.Errorf("Error reading RouterBgpPeer: %s", err) - } - - return nil -} - -func resourceComputeRouterBgpPeerUpdate(d *resource_compute_router_peer_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_router_peer_fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - ipAddressProp, err := expandNestedComputeRouterBgpPeerIpAddress(d.Get("ip_address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_address"); !isEmptyValue(resource_compute_router_peer_reflect.ValueOf(v)) && (ok || !resource_compute_router_peer_reflect.DeepEqual(v, ipAddressProp)) { - obj["ipAddress"] = ipAddressProp - } - peerIpAddressProp, err := expandNestedComputeRouterBgpPeerPeerIpAddress(d.Get("peer_ip_address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peer_ip_address"); !isEmptyValue(resource_compute_router_peer_reflect.ValueOf(v)) && (ok || !resource_compute_router_peer_reflect.DeepEqual(v, peerIpAddressProp)) { - obj["peerIpAddress"] = peerIpAddressProp - } - peerAsnProp, err := expandNestedComputeRouterBgpPeerPeerAsn(d.Get("peer_asn"), d, config) - if err 
!= nil { - return err - } else if v, ok := d.GetOkExists("peer_asn"); !isEmptyValue(resource_compute_router_peer_reflect.ValueOf(v)) && (ok || !resource_compute_router_peer_reflect.DeepEqual(v, peerAsnProp)) { - obj["peerAsn"] = peerAsnProp - } - advertisedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(d.Get("advertised_route_priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("advertised_route_priority"); ok || !resource_compute_router_peer_reflect.DeepEqual(v, advertisedRoutePriorityProp) { - obj["advertisedRoutePriority"] = advertisedRoutePriorityProp - } - advertiseModeProp, err := expandNestedComputeRouterBgpPeerAdvertiseMode(d.Get("advertise_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("advertise_mode"); !isEmptyValue(resource_compute_router_peer_reflect.ValueOf(v)) && (ok || !resource_compute_router_peer_reflect.DeepEqual(v, advertiseModeProp)) { - obj["advertiseMode"] = advertiseModeProp - } - advertisedGroupsProp, err := expandNestedComputeRouterBgpPeerAdvertisedGroups(d.Get("advertised_groups"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("advertised_groups"); ok || !resource_compute_router_peer_reflect.DeepEqual(v, advertisedGroupsProp) { - obj["advertisedGroups"] = advertisedGroupsProp - } - advertisedIpRangesProp, err := expandNestedComputeRouterBgpPeerAdvertisedIpRanges(d.Get("advertised_ip_ranges"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("advertised_ip_ranges"); ok || !resource_compute_router_peer_reflect.DeepEqual(v, advertisedIpRangesProp) { - obj["advertisedIpRanges"] = advertisedIpRangesProp - } - bfdProp, err := expandNestedComputeRouterBgpPeerBfd(d.Get("bfd"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bfd"); !isEmptyValue(resource_compute_router_peer_reflect.ValueOf(v)) && (ok || !resource_compute_router_peer_reflect.DeepEqual(v, 
bfdProp)) { - obj["bfd"] = bfdProp - } - enableProp, err := expandNestedComputeRouterBgpPeerEnable(d.Get("enable"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable"); ok || !resource_compute_router_peer_reflect.DeepEqual(v, enableProp) { - obj["enable"] = enableProp - } - - lockName, err := replaceVars(d, config, "router/{{region}}/{{router}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") - if err != nil { - return err - } - - resource_compute_router_peer_log.Printf("[DEBUG] Updating RouterBgpPeer %q: %#v", d.Id(), obj) - - obj, err = resourceComputeRouterBgpPeerPatchUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_router_peer_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_router_peer_fmt.Errorf("Error updating RouterBgpPeer %q: %s", d.Id(), err) - } else { - resource_compute_router_peer_log.Printf("[DEBUG] Finished updating RouterBgpPeer %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating RouterBgpPeer", userAgent, - d.Timeout(resource_compute_router_peer_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeRouterBgpPeerRead(d, meta) -} - -func resourceComputeRouterBgpPeerDelete(d *resource_compute_router_peer_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_router_peer_fmt.Errorf("Error fetching project for 
RouterBgpPeer: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "router/{{region}}/{{router}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - obj, err = resourceComputeRouterBgpPeerPatchDeleteEncoder(d, meta, obj) - if err != nil { - return handleNotFoundError(err, d, "RouterBgpPeer") - } - resource_compute_router_peer_log.Printf("[DEBUG] Deleting RouterBgpPeer %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_router_peer_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "RouterBgpPeer") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting RouterBgpPeer", userAgent, - d.Timeout(resource_compute_router_peer_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_router_peer_log.Printf("[DEBUG] Finished deleting RouterBgpPeer %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeRouterBgpPeerImport(d *resource_compute_router_peer_schema.ResourceData, meta interface{}) ([]*resource_compute_router_peer_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/routers/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{router}}/{{name}}") - if err != nil { - return nil, resource_compute_router_peer_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - 
return []*resource_compute_router_peer_schema.ResourceData{d}, nil -} - -func flattenNestedComputeRouterBgpPeerName(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerInterface(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerIpAddress(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerPeerIpAddress(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerPeerAsn(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_router_peer_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenNestedComputeRouterBgpPeerAdvertisedRoutePriority(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_router_peer_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenNestedComputeRouterBgpPeerAdvertiseMode(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(resource_compute_router_peer_reflect.ValueOf(v)) { - return "DEFAULT" - } - - return v -} - -func flattenNestedComputeRouterBgpPeerAdvertisedGroups(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - return v -} - 
-func flattenNestedComputeRouterBgpPeerAdvertisedIpRanges(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "range": flattenNestedComputeRouterBgpPeerAdvertisedIpRangesRange(original["range"], d, config), - "description": flattenNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(original["description"], d, config), - }) - } - return transformed -} - -func flattenNestedComputeRouterBgpPeerAdvertisedIpRangesRange(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerManagementType(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerBfd(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["session_initialization_mode"] = - flattenNestedComputeRouterBgpPeerBfdSessionInitializationMode(original["sessionInitializationMode"], d, config) - transformed["min_transmit_interval"] = - flattenNestedComputeRouterBgpPeerBfdMinTransmitInterval(original["minTransmitInterval"], d, config) - transformed["min_receive_interval"] = - flattenNestedComputeRouterBgpPeerBfdMinReceiveInterval(original["minReceiveInterval"], d, config) - transformed["multiplier"] = - 
flattenNestedComputeRouterBgpPeerBfdMultiplier(original["multiplier"], d, config) - return []interface{}{transformed} -} - -func flattenNestedComputeRouterBgpPeerBfdSessionInitializationMode(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedComputeRouterBgpPeerBfdMinTransmitInterval(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_router_peer_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenNestedComputeRouterBgpPeerBfdMinReceiveInterval(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_router_peer_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenNestedComputeRouterBgpPeerBfdMultiplier(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_router_peer_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenNestedComputeRouterBgpPeerEnable(v interface{}, d *resource_compute_router_peer_schema.ResourceData, config *Config) interface{} { - if v == nil { - return true - } - b, err := resource_compute_router_peer_strconv.ParseBool(v.(string)) - if err != nil { - - return v - } - return b -} - -func expandNestedComputeRouterBgpPeerName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, 
nil -} - -func expandNestedComputeRouterBgpPeerInterface(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerIpAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerPeerIpAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerPeerAsn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerAdvertiseMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerAdvertisedGroups(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerAdvertisedIpRanges(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRange, err := expandNestedComputeRouterBgpPeerAdvertisedIpRangesRange(original["range"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_router_peer_reflect.ValueOf(transformedRange); val.IsValid() && !isEmptyValue(val) { - transformed["range"] = transformedRange - } - - transformedDescription, err := expandNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(original["description"], d, config) - if err != nil { - return nil, err - } else { - transformed["description"] = transformedDescription - } - - req = append(req, 
transformed) - } - return req, nil -} - -func expandNestedComputeRouterBgpPeerAdvertisedIpRangesRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerBfd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSessionInitializationMode, err := expandNestedComputeRouterBgpPeerBfdSessionInitializationMode(original["session_initialization_mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_router_peer_reflect.ValueOf(transformedSessionInitializationMode); val.IsValid() && !isEmptyValue(val) { - transformed["sessionInitializationMode"] = transformedSessionInitializationMode - } - - transformedMinTransmitInterval, err := expandNestedComputeRouterBgpPeerBfdMinTransmitInterval(original["min_transmit_interval"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_router_peer_reflect.ValueOf(transformedMinTransmitInterval); val.IsValid() && !isEmptyValue(val) { - transformed["minTransmitInterval"] = transformedMinTransmitInterval - } - - transformedMinReceiveInterval, err := expandNestedComputeRouterBgpPeerBfdMinReceiveInterval(original["min_receive_interval"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_router_peer_reflect.ValueOf(transformedMinReceiveInterval); val.IsValid() && !isEmptyValue(val) { - transformed["minReceiveInterval"] = transformedMinReceiveInterval - } - - transformedMultiplier, err := expandNestedComputeRouterBgpPeerBfdMultiplier(original["multiplier"], d, config) - if err != 
nil { - return nil, err - } else if val := resource_compute_router_peer_reflect.ValueOf(transformedMultiplier); val.IsValid() && !isEmptyValue(val) { - transformed["multiplier"] = transformedMultiplier - } - - return transformed, nil -} - -func expandNestedComputeRouterBgpPeerBfdSessionInitializationMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerBfdMinTransmitInterval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerBfdMinReceiveInterval(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerBfdMultiplier(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedComputeRouterBgpPeerEnable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - return nil, nil - } - - return resource_compute_router_peer_strings.ToUpper(resource_compute_router_peer_strconv.FormatBool(v.(bool))), nil -} - -func flattenNestedComputeRouterBgpPeer(d *resource_compute_router_peer_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["bgpPeers"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_compute_router_peer_fmt.Errorf("expected list or map for value bgpPeers. 
Actual value: %v", v) - } - - _, item, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceComputeRouterBgpPeerFindNestedObjectInList(d *resource_compute_router_peer_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName, err := expandNestedComputeRouterBgpPeerName(d.Get("name"), d, meta.(*Config)) - if err != nil { - return -1, nil, err - } - expectedFlattenedName := flattenNestedComputeRouterBgpPeerName(expectedName, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemName := flattenNestedComputeRouterBgpPeerName(item["name"], d, meta.(*Config)) - - if !(isEmptyValue(resource_compute_router_peer_reflect.ValueOf(itemName)) && isEmptyValue(resource_compute_router_peer_reflect.ValueOf(expectedFlattenedName))) && !resource_compute_router_peer_reflect.DeepEqual(itemName, expectedFlattenedName) { - resource_compute_router_peer_log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - resource_compute_router_peer_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func resourceComputeRouterBgpPeerPatchCreateEncoder(d *resource_compute_router_peer_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceComputeRouterBgpPeerListForPatch(d, meta) - if err != nil { - return nil, err - } - - _, found, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - - if found != nil { - return nil, resource_compute_router_peer_fmt.Errorf("Unable to create RouterBgpPeer, existing object already found: %+v", found) - } - - res := 
map[string]interface{}{ - "bgpPeers": append(currItems, obj), - } - - return res, nil -} - -func resourceComputeRouterBgpPeerPatchUpdateEncoder(d *resource_compute_router_peer_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - items, err := resourceComputeRouterBgpPeerListForPatch(d, meta) - if err != nil { - return nil, err - } - - idx, item, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, items) - if err != nil { - return nil, err - } - - if item == nil { - return nil, resource_compute_router_peer_fmt.Errorf("Unable to update RouterBgpPeer %q - not found in list", d.Id()) - } - - for k, v := range obj { - item[k] = v - } - items[idx] = item - - res := map[string]interface{}{ - "bgpPeers": items, - } - - return res, nil -} - -func resourceComputeRouterBgpPeerPatchDeleteEncoder(d *resource_compute_router_peer_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - currItems, err := resourceComputeRouterBgpPeerListForPatch(d, meta) - if err != nil { - return nil, err - } - - idx, item, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, currItems) - if err != nil { - return nil, err - } - if item == nil { - - return nil, &resource_compute_router_peer_googleapi.Error{ - Code: 404, - Message: "RouterBgpPeer not found in list", - } - } - - updatedItems := append(currItems[:idx], currItems[idx+1:]...) 
- res := map[string]interface{}{ - "bgpPeers": updatedItems, - } - - return res, nil -} - -func resourceComputeRouterBgpPeerListForPatch(d *resource_compute_router_peer_schema.ResourceData, meta interface{}) ([]interface{}, error) { - config := meta.(*Config) - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") - if err != nil { - return nil, err - } - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return nil, err - } - - var v interface{} - var ok bool - - v, ok = res["bgpPeers"] - if ok && v != nil { - ls, lsOk := v.([]interface{}) - if !lsOk { - return nil, resource_compute_router_peer_fmt.Errorf(`expected list for nested field "bgpPeers"`) - } - return ls, nil - } - return nil, nil -} - -func resourceComputeSecurityPolicy() *resource_compute_security_policy_schema.Resource { - return &resource_compute_security_policy_schema.Resource{ - Create: resourceComputeSecurityPolicyCreate, - Read: resourceComputeSecurityPolicyRead, - Update: resourceComputeSecurityPolicyUpdate, - Delete: resourceComputeSecurityPolicyDelete, - Importer: &resource_compute_security_policy_schema.ResourceImporter{ - State: resourceSecurityPolicyStateImporter, - }, - CustomizeDiff: rulesCustomizeDiff, - - Timeouts: &resource_compute_security_policy_schema.ResourceTimeout{ - Create: resource_compute_security_policy_schema.DefaultTimeout(4 * resource_compute_security_policy_time.Minute), - Update: resource_compute_security_policy_schema.DefaultTimeout(4 * resource_compute_security_policy_time.Minute), - Delete: resource_compute_security_policy_schema.DefaultTimeout(4 * resource_compute_security_policy_time.Minute), - }, - - Schema: map[string]*resource_compute_security_policy_schema.Schema{ - "name": { - 
Type: resource_compute_security_policy_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCPName, - Description: `The name of the security policy.`, - }, - - "description": { - Type: resource_compute_security_policy_schema.TypeString, - Optional: true, - Description: `An optional description of this security policy. Max size is 2048.`, - }, - - "project": { - Type: resource_compute_security_policy_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The project in which the resource belongs. If it is not provided, the provider project is used.`, - }, - - "rule": { - Type: resource_compute_security_policy_schema.TypeSet, - Optional: true, - Computed: true, - Elem: &resource_compute_security_policy_schema.Resource{ - Schema: map[string]*resource_compute_security_policy_schema.Schema{ - "action": { - Type: resource_compute_security_policy_schema.TypeString, - Required: true, - ValidateFunc: resource_compute_security_policy_validation.StringInSlice([]string{"allow", "deny(403)", "deny(404)", "deny(502)"}, false), - Description: `Action to take when match matches the request. Valid values: "allow" : allow access to target, "deny(status)" : deny access to target, returns the HTTP response code specified (valid values are 403, 404 and 502)`, - }, - - "priority": { - Type: resource_compute_security_policy_schema.TypeInt, - Required: true, - Description: `An unique positive integer indicating the priority of evaluation for a rule. 
Rules are evaluated from highest priority (lowest numerically) to lowest priority (highest numerically) in order.`, - }, - - "match": { - Type: resource_compute_security_policy_schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &resource_compute_security_policy_schema.Resource{ - Schema: map[string]*resource_compute_security_policy_schema.Schema{ - "config": { - Type: resource_compute_security_policy_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &resource_compute_security_policy_schema.Resource{ - Schema: map[string]*resource_compute_security_policy_schema.Schema{ - "src_ip_ranges": { - Type: resource_compute_security_policy_schema.TypeSet, - Required: true, - MinItems: 1, - MaxItems: 10, - Elem: &resource_compute_security_policy_schema.Schema{Type: resource_compute_security_policy_schema.TypeString}, - Description: `Set of IP addresses or ranges (IPV4 or IPV6) in CIDR notation to match against inbound traffic. There is a limit of 10 IP ranges per rule. A value of '*' matches all IPs (can be used to override the default behavior).`, - }, - }, - }, - Description: `The configuration options available when specifying versioned_expr. This field must be specified if versioned_expr is specified and cannot be specified if versioned_expr is not specified.`, - }, - - "versioned_expr": { - Type: resource_compute_security_policy_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_security_policy_validation.StringInSlice([]string{"SRC_IPS_V1"}, false), - Description: `Predefined rule expression. If this field is specified, config must also be specified. 
Available options: SRC_IPS_V1: Must specify the corresponding src_ip_ranges field in config.`, - }, - - "expr": { - Type: resource_compute_security_policy_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &resource_compute_security_policy_schema.Resource{ - Schema: map[string]*resource_compute_security_policy_schema.Schema{ - "expression": { - Type: resource_compute_security_policy_schema.TypeString, - Required: true, - Description: `Textual representation of an expression in Common Expression Language syntax. The application context of the containing message determines which well-known feature set of CEL is supported.`, - }, - }, - }, - Description: `User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header.`, - }, - }, - }, - Description: `A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding action is enforced.`, - }, - - "description": { - Type: resource_compute_security_policy_schema.TypeString, - Optional: true, - Description: `An optional description of this rule. Max size is 64.`, - }, - - "preview": { - Type: resource_compute_security_policy_schema.TypeBool, - Optional: true, - Computed: true, - Description: `When set to true, the action specified above is not enforced. Stackdriver logs for requests that trigger a preview action are annotated as such.`, - }, - }, - }, - Description: `The set of rules that belong to this policy. There must always be a default rule (rule with priority 2147483647 and match "*"). 
If no rules are provided when creating a security policy, a default rule with action "allow" will be added.`, - }, - - "fingerprint": { - Type: resource_compute_security_policy_schema.TypeString, - Computed: true, - Description: `Fingerprint of this resource.`, - }, - - "self_link": { - Type: resource_compute_security_policy_schema.TypeString, - Computed: true, - Description: `The URI of the created resource.`, - }, - }, - - UseJSONNumber: true, - } -} - -func rulesCustomizeDiff(_ resource_compute_security_policy_context.Context, diff *resource_compute_security_policy_schema.ResourceDiff, _ interface{}) error { - _, n := diff.GetChange("rule") - nSet := n.(*resource_compute_security_policy_schema.Set) - - nPriorities := map[int64]bool{} - for _, rule := range nSet.List() { - priority := int64(rule.(map[string]interface{})["priority"].(int)) - if nPriorities[priority] { - return resource_compute_security_policy_fmt.Errorf("Two rules have the same priority, please update one of the priorities to be different.") - } - nPriorities[priority] = true - } - - return nil -} - -func resourceComputeSecurityPolicyCreate(d *resource_compute_security_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - sp := d.Get("name").(string) - securityPolicy := &resource_compute_security_policy_compute.SecurityPolicy{ - Name: sp, - Description: d.Get("description").(string), - } - if v, ok := d.GetOk("rule"); ok { - securityPolicy.Rules = expandSecurityPolicyRules(v.(*resource_compute_security_policy_schema.Set).List()) - } - - resource_compute_security_policy_log.Printf("[DEBUG] SecurityPolicy insert request: %#v", securityPolicy) - - client := config.NewComputeClient(userAgent) - - op, err := client.SecurityPolicies.Insert(project, securityPolicy).Do() - - if err != nil { - return 
resource_compute_security_policy_errwrap.Wrapf("Error creating SecurityPolicy: {{err}}", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/securityPolicies/{{name}}") - if err != nil { - return resource_compute_security_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime(config, op, project, resource_compute_security_policy_fmt.Sprintf("Creating SecurityPolicy %q", sp), userAgent, d.Timeout(resource_compute_security_policy_schema.TimeoutCreate)) - if err != nil { - return err - } - - return resourceComputeSecurityPolicyRead(d, meta) -} - -func resourceComputeSecurityPolicyRead(d *resource_compute_security_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - sp := d.Get("name").(string) - - client := config.NewComputeClient(userAgent) - - securityPolicy, err := client.SecurityPolicies.Get(project, sp).Do() - if err != nil { - return handleNotFoundError(err, d, resource_compute_security_policy_fmt.Sprintf("SecurityPolicy %q", d.Id())) - } - - if err := d.Set("name", securityPolicy.Name); err != nil { - return resource_compute_security_policy_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("description", securityPolicy.Description); err != nil { - return resource_compute_security_policy_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("rule", flattenSecurityPolicyRules(securityPolicy.Rules)); err != nil { - return err - } - if err := d.Set("fingerprint", securityPolicy.Fingerprint); err != nil { - return resource_compute_security_policy_fmt.Errorf("Error setting fingerprint: %s", err) - } - if err := d.Set("project", project); err != nil { - return resource_compute_security_policy_fmt.Errorf("Error setting project: %s", err) - } - if err := 
d.Set("self_link", ConvertSelfLinkToV1(securityPolicy.SelfLink)); err != nil { - return resource_compute_security_policy_fmt.Errorf("Error setting self_link: %s", err) - } - - return nil -} - -func resourceComputeSecurityPolicyUpdate(d *resource_compute_security_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - sp := d.Get("name").(string) - - if d.HasChange("description") { - securityPolicy := &resource_compute_security_policy_compute.SecurityPolicy{ - Description: d.Get("description").(string), - Fingerprint: d.Get("fingerprint").(string), - ForceSendFields: []string{"Description"}, - } - - client := config.NewComputeClient(userAgent) - - op, err := client.SecurityPolicies.Patch(project, sp, securityPolicy).Do() - - if err != nil { - return resource_compute_security_policy_errwrap.Wrapf(resource_compute_security_policy_fmt.Sprintf("Error updating SecurityPolicy %q: {{err}}", sp), err) - } - - err = computeOperationWaitTime(config, op, project, resource_compute_security_policy_fmt.Sprintf("Updating SecurityPolicy %q", sp), userAgent, d.Timeout(resource_compute_security_policy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - if d.HasChange("rule") { - o, n := d.GetChange("rule") - oSet := o.(*resource_compute_security_policy_schema.Set) - nSet := n.(*resource_compute_security_policy_schema.Set) - - oPriorities := map[int64]bool{} - nPriorities := map[int64]bool{} - for _, rule := range oSet.List() { - oPriorities[int64(rule.(map[string]interface{})["priority"].(int))] = true - } - - for _, rule := range nSet.List() { - priority := int64(rule.(map[string]interface{})["priority"].(int)) - nPriorities[priority] = true - if !oPriorities[priority] { - client := config.NewComputeClient(userAgent) - - op, err := 
client.SecurityPolicies.AddRule(project, sp, expandSecurityPolicyRule(rule)).Do() - - if err != nil { - return resource_compute_security_policy_errwrap.Wrapf(resource_compute_security_policy_fmt.Sprintf("Error updating SecurityPolicy %q: {{err}}", sp), err) - } - - err = computeOperationWaitTime(config, op, project, resource_compute_security_policy_fmt.Sprintf("Updating SecurityPolicy %q", sp), userAgent, d.Timeout(resource_compute_security_policy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } else if !oSet.Contains(rule) { - client := config.NewComputeClient(userAgent) - - op, err := client.SecurityPolicies.PatchRule(project, sp, expandSecurityPolicyRule(rule)).Priority(priority).Do() - - if err != nil { - return resource_compute_security_policy_errwrap.Wrapf(resource_compute_security_policy_fmt.Sprintf("Error updating SecurityPolicy %q: {{err}}", sp), err) - } - - err = computeOperationWaitTime(config, op, project, resource_compute_security_policy_fmt.Sprintf("Updating SecurityPolicy %q", sp), userAgent, d.Timeout(resource_compute_security_policy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - } - - for _, rule := range oSet.List() { - priority := int64(rule.(map[string]interface{})["priority"].(int)) - if !nPriorities[priority] { - client := config.NewComputeClient(userAgent) - - op, err := client.SecurityPolicies.RemoveRule(project, sp).Priority(priority).Do() - - if err != nil { - return resource_compute_security_policy_errwrap.Wrapf(resource_compute_security_policy_fmt.Sprintf("Error updating SecurityPolicy %q: {{err}}", sp), err) - } - - err = computeOperationWaitTime(config, op, project, resource_compute_security_policy_fmt.Sprintf("Updating SecurityPolicy %q", sp), userAgent, d.Timeout(resource_compute_security_policy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - } - } - - return resourceComputeSecurityPolicyRead(d, meta) -} - -func resourceComputeSecurityPolicyDelete(d 
*resource_compute_security_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - client := config.NewComputeClient(userAgent) - - op, err := client.SecurityPolicies.Delete(project, d.Get("name").(string)).Do() - if err != nil { - return resource_compute_security_policy_errwrap.Wrapf("Error deleting SecurityPolicy: {{err}}", err) - } - - err = computeOperationWaitTime(config, op, project, "Deleting SecurityPolicy", userAgent, d.Timeout(resource_compute_security_policy_schema.TimeoutDelete)) - if err != nil { - return err - } - - d.SetId("") - return nil -} - -func expandSecurityPolicyRules(configured []interface{}) []*resource_compute_security_policy_compute.SecurityPolicyRule { - rules := make([]*resource_compute_security_policy_compute.SecurityPolicyRule, 0, len(configured)) - for _, raw := range configured { - rules = append(rules, expandSecurityPolicyRule(raw)) - } - return rules -} - -func expandSecurityPolicyRule(raw interface{}) *resource_compute_security_policy_compute.SecurityPolicyRule { - data := raw.(map[string]interface{}) - return &resource_compute_security_policy_compute.SecurityPolicyRule{ - Description: data["description"].(string), - Priority: int64(data["priority"].(int)), - Action: data["action"].(string), - Preview: data["preview"].(bool), - Match: expandSecurityPolicyMatch(data["match"].([]interface{})), - ForceSendFields: []string{"Description", "Preview"}, - } -} - -func expandSecurityPolicyMatch(configured []interface{}) *resource_compute_security_policy_compute.SecurityPolicyRuleMatcher { - if len(configured) == 0 || configured[0] == nil { - return nil - } - - data := configured[0].(map[string]interface{}) - return &resource_compute_security_policy_compute.SecurityPolicyRuleMatcher{ - VersionedExpr: data["versioned_expr"].(string), - 
Config: expandSecurityPolicyMatchConfig(data["config"].([]interface{})), - Expr: expandSecurityPolicyMatchExpr(data["expr"].([]interface{})), - } -} - -func expandSecurityPolicyMatchConfig(configured []interface{}) *resource_compute_security_policy_compute.SecurityPolicyRuleMatcherConfig { - if len(configured) == 0 || configured[0] == nil { - return nil - } - - data := configured[0].(map[string]interface{}) - return &resource_compute_security_policy_compute.SecurityPolicyRuleMatcherConfig{ - SrcIpRanges: convertStringArr(data["src_ip_ranges"].(*resource_compute_security_policy_schema.Set).List()), - } -} - -func expandSecurityPolicyMatchExpr(expr []interface{}) *resource_compute_security_policy_compute.Expr { - if len(expr) == 0 || expr[0] == nil { - return nil - } - - data := expr[0].(map[string]interface{}) - return &resource_compute_security_policy_compute.Expr{ - Expression: data["expression"].(string), - } -} - -func flattenSecurityPolicyRules(rules []*resource_compute_security_policy_compute.SecurityPolicyRule) []map[string]interface{} { - rulesSchema := make([]map[string]interface{}, 0, len(rules)) - for _, rule := range rules { - data := map[string]interface{}{ - "description": rule.Description, - "priority": rule.Priority, - "action": rule.Action, - "preview": rule.Preview, - "match": flattenMatch(rule.Match), - } - - rulesSchema = append(rulesSchema, data) - } - return rulesSchema -} - -func flattenMatch(match *resource_compute_security_policy_compute.SecurityPolicyRuleMatcher) []map[string]interface{} { - if match == nil { - return nil - } - - data := map[string]interface{}{ - "versioned_expr": match.VersionedExpr, - "config": flattenMatchConfig(match.Config), - "expr": flattenMatchExpr(match), - } - - return []map[string]interface{}{data} -} - -func flattenMatchConfig(conf *resource_compute_security_policy_compute.SecurityPolicyRuleMatcherConfig) []map[string]interface{} { - if conf == nil { - return nil - } - - data := map[string]interface{}{ - 
"src_ip_ranges": resource_compute_security_policy_schema.NewSet(resource_compute_security_policy_schema.HashString, convertStringArrToInterface(conf.SrcIpRanges)), - } - - return []map[string]interface{}{data} -} - -func flattenMatchExpr(match *resource_compute_security_policy_compute.SecurityPolicyRuleMatcher) []map[string]interface{} { - if match.Expr == nil { - return nil - } - - data := map[string]interface{}{ - "expression": match.Expr.Expression, - } - - return []map[string]interface{}{data} -} - -func resourceSecurityPolicyStateImporter(d *resource_compute_security_policy_schema.ResourceData, meta interface{}) ([]*resource_compute_security_policy_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{"projects/(?P[^/]+)/global/securityPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/securityPolicies/{{name}}") - if err != nil { - return nil, resource_compute_security_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_security_policy_schema.ResourceData{d}, nil -} - -func resourceComputeServiceAttachment() *resource_compute_service_attachment_schema.Resource { - return &resource_compute_service_attachment_schema.Resource{ - Create: resourceComputeServiceAttachmentCreate, - Read: resourceComputeServiceAttachmentRead, - Update: resourceComputeServiceAttachmentUpdate, - Delete: resourceComputeServiceAttachmentDelete, - - Importer: &resource_compute_service_attachment_schema.ResourceImporter{ - State: resourceComputeServiceAttachmentImport, - }, - - Timeouts: &resource_compute_service_attachment_schema.ResourceTimeout{ - Create: resource_compute_service_attachment_schema.DefaultTimeout(4 * resource_compute_service_attachment_time.Minute), - Update: resource_compute_service_attachment_schema.DefaultTimeout(4 * 
resource_compute_service_attachment_time.Minute), - Delete: resource_compute_service_attachment_schema.DefaultTimeout(4 * resource_compute_service_attachment_time.Minute), - }, - - Schema: map[string]*resource_compute_service_attachment_schema.Schema{ - "connection_preference": { - Type: resource_compute_service_attachment_schema.TypeString, - Required: true, - Description: `The connection preference to use for this service attachment. Valid -values include "ACCEPT_AUTOMATIC", "ACCEPT_MANUAL".`, - }, - "enable_proxy_protocol": { - Type: resource_compute_service_attachment_schema.TypeBool, - Required: true, - ForceNew: true, - Description: `If true, enable the proxy protocol which is for supplying client TCP/IP -address data in TCP connections that traverse proxies on their way to -destination servers.`, - }, - "name": { - Type: resource_compute_service_attachment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. The name must be 1-63 characters long, and -comply with RFC1035. Specifically, the name must be 1-63 characters -long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
-which means the first character must be a lowercase letter, and all -following characters must be a dash, lowercase letter, or digit, -except the last character, which cannot be a dash.`, - }, - "nat_subnets": { - Type: resource_compute_service_attachment_schema.TypeList, - Required: true, - Description: `An array of subnets that is provided for NAT in this service attachment.`, - Elem: &resource_compute_service_attachment_schema.Schema{ - Type: resource_compute_service_attachment_schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "target_service": { - Type: resource_compute_service_attachment_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The URL of a forwarding rule that represents the service identified by -this service attachment.`, - }, - "consumer_accept_lists": { - Type: resource_compute_service_attachment_schema.TypeList, - Optional: true, - Description: `An array of projects that are allowed to connect to this service -attachment.`, - Elem: &resource_compute_service_attachment_schema.Resource{ - Schema: map[string]*resource_compute_service_attachment_schema.Schema{ - "connection_limit": { - Type: resource_compute_service_attachment_schema.TypeInt, - Required: true, - Description: `The number of consumer forwarding rules the consumer project can -create.`, - }, - "project_id_or_num": { - Type: resource_compute_service_attachment_schema.TypeString, - Required: true, - Description: `A project that is allowed to connect to this service attachment.`, - }, - }, - }, - }, - "consumer_reject_lists": { - Type: resource_compute_service_attachment_schema.TypeList, - Optional: true, - Description: `An array of projects that are not allowed to connect to this service -attachment.`, - Elem: &resource_compute_service_attachment_schema.Schema{ - Type: resource_compute_service_attachment_schema.TypeString, - }, - }, - "description": { - Type: 
resource_compute_service_attachment_schema.TypeString, - Optional: true, - Description: `An optional description of this resource.`, - }, - "region": { - Type: resource_compute_service_attachment_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the region where the resource resides.`, - }, - "connected_endpoints": { - Type: resource_compute_service_attachment_schema.TypeList, - Computed: true, - Description: `An array of the consumer forwarding rules connected to this service -attachment.`, - Elem: &resource_compute_service_attachment_schema.Resource{ - Schema: map[string]*resource_compute_service_attachment_schema.Schema{ - "endpoint": { - Type: resource_compute_service_attachment_schema.TypeString, - Computed: true, - Description: `The URL of the consumer forwarding rule.`, - }, - "status": { - Type: resource_compute_service_attachment_schema.TypeString, - Computed: true, - Description: `The status of the connection from the consumer forwarding rule to -this service attachment.`, - }, - }, - }, - }, - "fingerprint": { - Type: resource_compute_service_attachment_schema.TypeString, - Computed: true, - Description: `Fingerprint of this resource. 
This field is used internally during -updates of this resource.`, - }, - "project": { - Type: resource_compute_service_attachment_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_service_attachment_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeServiceAttachmentCreate(d *resource_compute_service_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeServiceAttachmentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_service_attachment_reflect.ValueOf(nameProp)) && (ok || !resource_compute_service_attachment_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeServiceAttachmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_service_attachment_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_service_attachment_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - fingerprintProp, err := expandComputeServiceAttachmentFingerprint(d.Get("fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(resource_compute_service_attachment_reflect.ValueOf(fingerprintProp)) && (ok || !resource_compute_service_attachment_reflect.DeepEqual(v, fingerprintProp)) { - obj["fingerprint"] = fingerprintProp - } - connectionPreferenceProp, err := expandComputeServiceAttachmentConnectionPreference(d.Get("connection_preference"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("connection_preference"); !isEmptyValue(resource_compute_service_attachment_reflect.ValueOf(connectionPreferenceProp)) && (ok || !resource_compute_service_attachment_reflect.DeepEqual(v, connectionPreferenceProp)) { - obj["connectionPreference"] = connectionPreferenceProp - } - targetServiceProp, err := expandComputeServiceAttachmentTargetService(d.Get("target_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_service"); !isEmptyValue(resource_compute_service_attachment_reflect.ValueOf(targetServiceProp)) && (ok || !resource_compute_service_attachment_reflect.DeepEqual(v, targetServiceProp)) { - obj["targetService"] = targetServiceProp - } - natSubnetsProp, err := expandComputeServiceAttachmentNatSubnets(d.Get("nat_subnets"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("nat_subnets"); ok || !resource_compute_service_attachment_reflect.DeepEqual(v, natSubnetsProp) { - obj["natSubnets"] = natSubnetsProp - } - enableProxyProtocolProp, err := expandComputeServiceAttachmentEnableProxyProtocol(d.Get("enable_proxy_protocol"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_proxy_protocol"); !isEmptyValue(resource_compute_service_attachment_reflect.ValueOf(enableProxyProtocolProp)) && (ok || !resource_compute_service_attachment_reflect.DeepEqual(v, enableProxyProtocolProp)) { - obj["enableProxyProtocol"] = enableProxyProtocolProp - } - consumerRejectListsProp, err := expandComputeServiceAttachmentConsumerRejectLists(d.Get("consumer_reject_lists"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("consumer_reject_lists"); ok || !resource_compute_service_attachment_reflect.DeepEqual(v, consumerRejectListsProp) { - obj["consumerRejectLists"] = consumerRejectListsProp - } - consumerAcceptListsProp, err := expandComputeServiceAttachmentConsumerAcceptLists(d.Get("consumer_accept_lists"), d, config) - if err != nil { - return 
err - } else if v, ok := d.GetOkExists("consumer_accept_lists"); ok || !resource_compute_service_attachment_reflect.DeepEqual(v, consumerAcceptListsProp) { - obj["consumerAcceptLists"] = consumerAcceptListsProp - } - regionProp, err := expandComputeServiceAttachmentRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_service_attachment_reflect.ValueOf(regionProp)) && (ok || !resource_compute_service_attachment_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/serviceAttachments") - if err != nil { - return err - } - - resource_compute_service_attachment_log.Printf("[DEBUG] Creating new ServiceAttachment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error fetching project for ServiceAttachment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_service_attachment_schema.TimeoutCreate)) - if err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error creating ServiceAttachment: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/serviceAttachments/{{name}}") - if err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating ServiceAttachment", userAgent, - d.Timeout(resource_compute_service_attachment_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_service_attachment_fmt.Errorf("Error waiting to create ServiceAttachment: %s", err) - } - - 
resource_compute_service_attachment_log.Printf("[DEBUG] Finished creating ServiceAttachment %q: %#v", d.Id(), res) - - return resourceComputeServiceAttachmentRead(d, meta) -} - -func resourceComputeServiceAttachmentRead(d *resource_compute_service_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/serviceAttachments/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error fetching project for ServiceAttachment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_service_attachment_fmt.Sprintf("ComputeServiceAttachment %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - - if err := d.Set("name", flattenComputeServiceAttachmentName(res["name"], d, config)); err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("description", flattenComputeServiceAttachmentDescription(res["description"], d, config)); err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("fingerprint", flattenComputeServiceAttachmentFingerprint(res["fingerprint"], d, config)); err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("connection_preference", 
flattenComputeServiceAttachmentConnectionPreference(res["connectionPreference"], d, config)); err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("connected_endpoints", flattenComputeServiceAttachmentConnectedEndpoints(res["connectedEndpoints"], d, config)); err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("target_service", flattenComputeServiceAttachmentTargetService(res["targetService"], d, config)); err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("nat_subnets", flattenComputeServiceAttachmentNatSubnets(res["natSubnets"], d, config)); err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("enable_proxy_protocol", flattenComputeServiceAttachmentEnableProxyProtocol(res["enableProxyProtocol"], d, config)); err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("consumer_reject_lists", flattenComputeServiceAttachmentConsumerRejectLists(res["consumerRejectLists"], d, config)); err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("consumer_accept_lists", flattenComputeServiceAttachmentConsumerAcceptLists(res["consumerAcceptLists"], d, config)); err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("region", flattenComputeServiceAttachmentRegion(res["region"], d, config)); err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return 
resource_compute_service_attachment_fmt.Errorf("Error reading ServiceAttachment: %s", err) - } - - return nil -} - -func resourceComputeServiceAttachmentUpdate(d *resource_compute_service_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error fetching project for ServiceAttachment: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeServiceAttachmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_service_attachment_reflect.ValueOf(v)) && (ok || !resource_compute_service_attachment_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - fingerprintProp, err := expandComputeServiceAttachmentFingerprint(d.Get("fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(resource_compute_service_attachment_reflect.ValueOf(v)) && (ok || !resource_compute_service_attachment_reflect.DeepEqual(v, fingerprintProp)) { - obj["fingerprint"] = fingerprintProp - } - connectionPreferenceProp, err := expandComputeServiceAttachmentConnectionPreference(d.Get("connection_preference"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("connection_preference"); !isEmptyValue(resource_compute_service_attachment_reflect.ValueOf(v)) && (ok || !resource_compute_service_attachment_reflect.DeepEqual(v, connectionPreferenceProp)) { - obj["connectionPreference"] = connectionPreferenceProp - } - natSubnetsProp, err := expandComputeServiceAttachmentNatSubnets(d.Get("nat_subnets"), d, config) - if err != nil { - return err - } else 
if v, ok := d.GetOkExists("nat_subnets"); ok || !resource_compute_service_attachment_reflect.DeepEqual(v, natSubnetsProp) { - obj["natSubnets"] = natSubnetsProp - } - consumerRejectListsProp, err := expandComputeServiceAttachmentConsumerRejectLists(d.Get("consumer_reject_lists"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("consumer_reject_lists"); ok || !resource_compute_service_attachment_reflect.DeepEqual(v, consumerRejectListsProp) { - obj["consumerRejectLists"] = consumerRejectListsProp - } - consumerAcceptListsProp, err := expandComputeServiceAttachmentConsumerAcceptLists(d.Get("consumer_accept_lists"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("consumer_accept_lists"); ok || !resource_compute_service_attachment_reflect.DeepEqual(v, consumerAcceptListsProp) { - obj["consumerAcceptLists"] = consumerAcceptListsProp - } - - obj, err = resourceComputeServiceAttachmentUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/serviceAttachments/{{name}}") - if err != nil { - return err - } - - resource_compute_service_attachment_log.Printf("[DEBUG] Updating ServiceAttachment %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_service_attachment_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error updating ServiceAttachment %q: %s", d.Id(), err) - } else { - resource_compute_service_attachment_log.Printf("[DEBUG] Finished updating ServiceAttachment %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating ServiceAttachment", userAgent, - d.Timeout(resource_compute_service_attachment_schema.TimeoutUpdate)) - - if err != nil { - 
return err - } - - return resourceComputeServiceAttachmentRead(d, meta) -} - -func resourceComputeServiceAttachmentDelete(d *resource_compute_service_attachment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_service_attachment_fmt.Errorf("Error fetching project for ServiceAttachment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/serviceAttachments/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_service_attachment_log.Printf("[DEBUG] Deleting ServiceAttachment %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_service_attachment_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ServiceAttachment") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting ServiceAttachment", userAgent, - d.Timeout(resource_compute_service_attachment_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_service_attachment_log.Printf("[DEBUG] Finished deleting ServiceAttachment %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeServiceAttachmentImport(d *resource_compute_service_attachment_schema.ResourceData, meta interface{}) ([]*resource_compute_service_attachment_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/serviceAttachments/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, 
err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/serviceAttachments/{{name}}") - if err != nil { - return nil, resource_compute_service_attachment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_service_attachment_schema.ResourceData{d}, nil -} - -func flattenComputeServiceAttachmentName(v interface{}, d *resource_compute_service_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentDescription(v interface{}, d *resource_compute_service_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentFingerprint(v interface{}, d *resource_compute_service_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentConnectionPreference(v interface{}, d *resource_compute_service_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentConnectedEndpoints(v interface{}, d *resource_compute_service_attachment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "endpoint": flattenComputeServiceAttachmentConnectedEndpointsEndpoint(original["endpoint"], d, config), - "status": flattenComputeServiceAttachmentConnectedEndpointsStatus(original["status"], d, config), - }) - } - return transformed -} - -func flattenComputeServiceAttachmentConnectedEndpointsEndpoint(v interface{}, d *resource_compute_service_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentConnectedEndpointsStatus(v interface{}, d *resource_compute_service_attachment_schema.ResourceData, 
config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentTargetService(v interface{}, d *resource_compute_service_attachment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeServiceAttachmentNatSubnets(v interface{}, d *resource_compute_service_attachment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeServiceAttachmentEnableProxyProtocol(v interface{}, d *resource_compute_service_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentConsumerRejectLists(v interface{}, d *resource_compute_service_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentConsumerAcceptLists(v interface{}, d *resource_compute_service_attachment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "project_id_or_num": flattenComputeServiceAttachmentConsumerAcceptListsProjectIdOrNum(original["projectIdOrNum"], d, config), - "connection_limit": flattenComputeServiceAttachmentConsumerAcceptListsConnectionLimit(original["connectionLimit"], d, config), - }) - } - return transformed -} - -func flattenComputeServiceAttachmentConsumerAcceptListsProjectIdOrNum(v interface{}, d *resource_compute_service_attachment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeServiceAttachmentConsumerAcceptListsConnectionLimit(v interface{}, d *resource_compute_service_attachment_schema.ResourceData, config *Config) interface{} { - - 
if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_service_attachment_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeServiceAttachmentRegion(v interface{}, d *resource_compute_service_attachment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeServiceAttachmentName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeServiceAttachmentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeServiceAttachmentFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeServiceAttachmentConnectionPreference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeServiceAttachmentTargetService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("forwardingRules", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_service_attachment_fmt.Errorf("Invalid value for target_service: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeServiceAttachmentNatSubnets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - return nil, resource_compute_service_attachment_fmt.Errorf("Invalid value for nat_subnets: nil") - } - f, err := parseRegionalFieldValue("subnetworks", raw.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, 
resource_compute_service_attachment_fmt.Errorf("Invalid value for nat_subnets: %s", err) - } - req = append(req, f.RelativeLink()) - } - return req, nil -} - -func expandComputeServiceAttachmentEnableProxyProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeServiceAttachmentConsumerRejectLists(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeServiceAttachmentConsumerAcceptLists(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectIdOrNum, err := expandComputeServiceAttachmentConsumerAcceptListsProjectIdOrNum(original["project_id_or_num"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_service_attachment_reflect.ValueOf(transformedProjectIdOrNum); val.IsValid() && !isEmptyValue(val) { - transformed["projectIdOrNum"] = transformedProjectIdOrNum - } - - transformedConnectionLimit, err := expandComputeServiceAttachmentConsumerAcceptListsConnectionLimit(original["connection_limit"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_service_attachment_reflect.ValueOf(transformedConnectionLimit); val.IsValid() && !isEmptyValue(val) { - transformed["connectionLimit"] = transformedConnectionLimit - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeServiceAttachmentConsumerAcceptListsProjectIdOrNum(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeServiceAttachmentConsumerAcceptListsConnectionLimit(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeServiceAttachmentRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_service_attachment_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeServiceAttachmentUpdateEncoder(d *resource_compute_service_attachment_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - nameProp := d.Get("name") - if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_service_attachment_reflect.ValueOf(v)) && (ok || !resource_compute_service_attachment_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - enableProxyProtocolProp := d.Get("enable_proxy_protocol") - if v, ok := d.GetOkExists("enable_proxy_protocol"); !isEmptyValue(resource_compute_service_attachment_reflect.ValueOf(v)) && (ok || !resource_compute_service_attachment_reflect.DeepEqual(v, enableProxyProtocolProp)) { - obj["enableProxyProtocol"] = enableProxyProtocolProp - } - - return obj, nil -} - -func resourceComputeSharedVpcHostProject() *resource_compute_shared_vpc_host_project_schema.Resource { - return &resource_compute_shared_vpc_host_project_schema.Resource{ - Create: resourceComputeSharedVpcHostProjectCreate, - Read: resourceComputeSharedVpcHostProjectRead, - Delete: resourceComputeSharedVpcHostProjectDelete, - Importer: &resource_compute_shared_vpc_host_project_schema.ResourceImporter{ - State: resource_compute_shared_vpc_host_project_schema.ImportStatePassthrough, - }, - - Timeouts: &resource_compute_shared_vpc_host_project_schema.ResourceTimeout{ - Create: resource_compute_shared_vpc_host_project_schema.DefaultTimeout(4 * resource_compute_shared_vpc_host_project_time.Minute), - Delete: resource_compute_shared_vpc_host_project_schema.DefaultTimeout(4 * resource_compute_shared_vpc_host_project_time.Minute), - 
}, - - Schema: map[string]*resource_compute_shared_vpc_host_project_schema.Schema{ - "project": { - Type: resource_compute_shared_vpc_host_project_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the project that will serve as a Shared VPC host project`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeSharedVpcHostProjectCreate(d *resource_compute_shared_vpc_host_project_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - hostProject := d.Get("project").(string) - op, err := config.NewComputeClient(userAgent).Projects.EnableXpnHost(hostProject).Do() - if err != nil { - return resource_compute_shared_vpc_host_project_fmt.Errorf("Error enabling Shared VPC Host %q: %s", hostProject, err) - } - - d.SetId(hostProject) - - err = computeOperationWaitTime(config, op, hostProject, "Enabling Shared VPC Host", userAgent, d.Timeout(resource_compute_shared_vpc_host_project_schema.TimeoutCreate)) - if err != nil { - d.SetId("") - return err - } - - return nil -} - -func resourceComputeSharedVpcHostProjectRead(d *resource_compute_shared_vpc_host_project_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - hostProject := d.Id() - - project, err := config.NewComputeClient(userAgent).Projects.Get(hostProject).Do() - if err != nil { - return handleNotFoundError(err, d, resource_compute_shared_vpc_host_project_fmt.Sprintf("Project data for project %q", hostProject)) - } - - if project.XpnProjectStatus != "HOST" { - resource_compute_shared_vpc_host_project_log.Printf("[WARN] Removing Shared VPC host resource %q because it's not enabled server-side", hostProject) - d.SetId("") - } - - if err := d.Set("project", hostProject); err != nil { - return 
resource_compute_shared_vpc_host_project_fmt.Errorf("Error setting project: %s", err) - } - - return nil -} - -func resourceComputeSharedVpcHostProjectDelete(d *resource_compute_shared_vpc_host_project_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - hostProject := d.Get("project").(string) - - op, err := config.NewComputeClient(userAgent).Projects.DisableXpnHost(hostProject).Do() - if err != nil { - return resource_compute_shared_vpc_host_project_fmt.Errorf("Error disabling Shared VPC Host %q: %s", hostProject, err) - } - - err = computeOperationWaitTime(config, op, hostProject, "Disabling Shared VPC Host", userAgent, d.Timeout(resource_compute_shared_vpc_host_project_schema.TimeoutDelete)) - if err != nil { - return err - } - - d.SetId("") - return nil -} - -func resourceComputeSharedVpcServiceProject() *resource_compute_shared_vpc_service_project_schema.Resource { - return &resource_compute_shared_vpc_service_project_schema.Resource{ - Create: resourceComputeSharedVpcServiceProjectCreate, - Read: resourceComputeSharedVpcServiceProjectRead, - Delete: resourceComputeSharedVpcServiceProjectDelete, - Importer: &resource_compute_shared_vpc_service_project_schema.ResourceImporter{ - State: resource_compute_shared_vpc_service_project_schema.ImportStatePassthrough, - }, - - Timeouts: &resource_compute_shared_vpc_service_project_schema.ResourceTimeout{ - Create: resource_compute_shared_vpc_service_project_schema.DefaultTimeout(4 * resource_compute_shared_vpc_service_project_time.Minute), - Delete: resource_compute_shared_vpc_service_project_schema.DefaultTimeout(4 * resource_compute_shared_vpc_service_project_time.Minute), - }, - - Schema: map[string]*resource_compute_shared_vpc_service_project_schema.Schema{ - "host_project": { - Type: resource_compute_shared_vpc_service_project_schema.TypeString, - Required: true, - ForceNew: true, - 
Description: `The ID of a host project to associate.`, - }, - "service_project": { - Type: resource_compute_shared_vpc_service_project_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the project that will serve as a Shared VPC service project.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeSharedVpcServiceProjectCreate(d *resource_compute_shared_vpc_service_project_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - hostProject := d.Get("host_project").(string) - serviceProject := d.Get("service_project").(string) - - req := &resource_compute_shared_vpc_service_project_compute.ProjectsEnableXpnResourceRequest{ - XpnResource: &resource_compute_shared_vpc_service_project_compute.XpnResourceId{ - Id: serviceProject, - Type: "PROJECT", - }, - } - op, err := config.NewComputeClient(userAgent).Projects.EnableXpnResource(hostProject, req).Do() - if err != nil { - return err - } - err = computeOperationWaitTime(config, op, hostProject, "Enabling Shared VPC Resource", userAgent, d.Timeout(resource_compute_shared_vpc_service_project_schema.TimeoutCreate)) - if err != nil { - return err - } - - d.SetId(resource_compute_shared_vpc_service_project_fmt.Sprintf("%s/%s", hostProject, serviceProject)) - - return nil -} - -func resourceComputeSharedVpcServiceProjectRead(d *resource_compute_shared_vpc_service_project_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - split := resource_compute_shared_vpc_service_project_strings.Split(d.Id(), "/") - if len(split) != 2 { - return resource_compute_shared_vpc_service_project_fmt.Errorf("Error parsing resource ID %s", d.Id()) - } - hostProject := split[0] - serviceProject := split[1] - - associatedHostProject, err := 
config.NewComputeClient(userAgent).Projects.GetXpnHost(serviceProject).Do() - if err != nil { - resource_compute_shared_vpc_service_project_log.Printf("[WARN] Removing shared VPC service. The service project is not associated with any host") - - d.SetId("") - return nil - } - - if hostProject != associatedHostProject.Name { - resource_compute_shared_vpc_service_project_log.Printf("[WARN] Removing shared VPC service. Expected associated host project to be '%s', got '%s'", hostProject, associatedHostProject.Name) - d.SetId("") - return nil - } - - if err := d.Set("host_project", hostProject); err != nil { - return resource_compute_shared_vpc_service_project_fmt.Errorf("Error setting host_project: %s", err) - } - if err := d.Set("service_project", serviceProject); err != nil { - return resource_compute_shared_vpc_service_project_fmt.Errorf("Error setting service_project: %s", err) - } - - return nil -} - -func resourceComputeSharedVpcServiceProjectDelete(d *resource_compute_shared_vpc_service_project_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - hostProject := d.Get("host_project").(string) - serviceProject := d.Get("service_project").(string) - - if err := disableXpnResource(d, config, hostProject, serviceProject); err != nil { - - if !isDisabledXpnResourceError(err) { - return resource_compute_shared_vpc_service_project_fmt.Errorf("Error disabling Shared VPC Resource %q: %s", serviceProject, err) - } - } - - return nil -} - -func disableXpnResource(d *resource_compute_shared_vpc_service_project_schema.ResourceData, config *Config, hostProject, project string) error { - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - req := &resource_compute_shared_vpc_service_project_compute.ProjectsDisableXpnResourceRequest{ - XpnResource: &resource_compute_shared_vpc_service_project_compute.XpnResourceId{ - Id: project, - Type: "PROJECT", - }, - } - op, err := 
config.NewComputeClient(userAgent).Projects.DisableXpnResource(hostProject, req).Do() - if err != nil { - return err - } - err = computeOperationWaitTime(config, op, hostProject, "Disabling Shared VPC Resource", userAgent, d.Timeout(resource_compute_shared_vpc_service_project_schema.TimeoutDelete)) - if err != nil { - return err - } - return nil -} - -func isDisabledXpnResourceError(err error) bool { - if gerr, ok := err.(*resource_compute_shared_vpc_service_project_googleapi.Error); ok { - if gerr.Code == 400 && len(gerr.Errors) > 0 && gerr.Errors[0].Reason == "invalidResourceUsage" { - return true - } - } - return false -} - -func resourceComputeSnapshot() *resource_compute_snapshot_schema.Resource { - return &resource_compute_snapshot_schema.Resource{ - Create: resourceComputeSnapshotCreate, - Read: resourceComputeSnapshotRead, - Update: resourceComputeSnapshotUpdate, - Delete: resourceComputeSnapshotDelete, - - Importer: &resource_compute_snapshot_schema.ResourceImporter{ - State: resourceComputeSnapshotImport, - }, - - Timeouts: &resource_compute_snapshot_schema.ResourceTimeout{ - Create: resource_compute_snapshot_schema.DefaultTimeout(5 * resource_compute_snapshot_time.Minute), - Update: resource_compute_snapshot_schema.DefaultTimeout(5 * resource_compute_snapshot_time.Minute), - Delete: resource_compute_snapshot_schema.DefaultTimeout(5 * resource_compute_snapshot_time.Minute), - }, - - Schema: map[string]*resource_compute_snapshot_schema.Schema{ - "name": { - Type: resource_compute_snapshot_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource; provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "source_disk": { - Type: resource_compute_snapshot_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the disk used to create this snapshot.`, - }, - "description": { - Type: resource_compute_snapshot_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "labels": { - Type: resource_compute_snapshot_schema.TypeMap, - Optional: true, - Description: `Labels to apply to this Snapshot.`, - Elem: &resource_compute_snapshot_schema.Schema{Type: resource_compute_snapshot_schema.TypeString}, - }, - "snapshot_encryption_key": { - Type: resource_compute_snapshot_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The customer-supplied encryption key of the snapshot. Required if the -source snapshot is protected by a customer-supplied encryption key.`, - MaxItems: 1, - Elem: &resource_compute_snapshot_schema.Resource{ - Schema: map[string]*resource_compute_snapshot_schema.Schema{ - "kms_key_self_link": { - Type: resource_compute_snapshot_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The name of the encryption key that is stored in Google Cloud KMS.`, - }, - "kms_key_service_account": { - Type: resource_compute_snapshot_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The service account used for the encryption request for the given KMS key. 
-If absent, the Compute Engine Service Agent service account is used.`, - }, - "raw_key": { - Type: resource_compute_snapshot_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a 256-bit customer-supplied encryption key, encoded in -RFC 4648 base64 to either encrypt or decrypt this resource.`, - Sensitive: true, - }, - "sha256": { - Type: resource_compute_snapshot_schema.TypeString, - Computed: true, - Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied -encryption key that protects this resource.`, - }, - }, - }, - }, - "source_disk_encryption_key": { - Type: resource_compute_snapshot_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The customer-supplied encryption key of the source snapshot. Required -if the source snapshot is protected by a customer-supplied encryption -key.`, - MaxItems: 1, - Elem: &resource_compute_snapshot_schema.Resource{ - Schema: map[string]*resource_compute_snapshot_schema.Schema{ - "kms_key_service_account": { - Type: resource_compute_snapshot_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The service account used for the encryption request for the given KMS key. 
-If absent, the Compute Engine Service Agent service account is used.`, - }, - "raw_key": { - Type: resource_compute_snapshot_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a 256-bit customer-supplied encryption key, encoded in -RFC 4648 base64 to either encrypt or decrypt this resource.`, - Sensitive: true, - }, - }, - }, - }, - "storage_locations": { - Type: resource_compute_snapshot_schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Cloud Storage bucket storage location of the snapshot (regional or multi-regional).`, - Elem: &resource_compute_snapshot_schema.Schema{ - Type: resource_compute_snapshot_schema.TypeString, - }, - }, - "zone": { - Type: resource_compute_snapshot_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the zone where the disk is hosted.`, - }, - "creation_timestamp": { - Type: resource_compute_snapshot_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "disk_size_gb": { - Type: resource_compute_snapshot_schema.TypeInt, - Computed: true, - Description: `Size of the snapshot, specified in GB.`, - }, - "label_fingerprint": { - Type: resource_compute_snapshot_schema.TypeString, - Computed: true, - Description: `The fingerprint used for optimistic locking of this resource. Used -internally during updates.`, - }, - "licenses": { - Type: resource_compute_snapshot_schema.TypeList, - Computed: true, - Description: `A list of public visible licenses that apply to this snapshot. This -can be because the original image had licenses attached (such as a -Windows image). 
snapshotEncryptionKey nested object Encrypts the -snapshot using a customer-supplied encryption key.`, - Elem: &resource_compute_snapshot_schema.Schema{ - Type: resource_compute_snapshot_schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "snapshot_id": { - Type: resource_compute_snapshot_schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "storage_bytes": { - Type: resource_compute_snapshot_schema.TypeInt, - Computed: true, - Description: `A size of the storage used by the snapshot. As snapshots share -storage, this number is expected to change with snapshot -creation/deletion.`, - }, - "project": { - Type: resource_compute_snapshot_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_snapshot_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeSnapshotCreate(d *resource_compute_snapshot_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeSnapshotName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_snapshot_reflect.ValueOf(nameProp)) && (ok || !resource_compute_snapshot_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeSnapshotDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_snapshot_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_snapshot_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - storageLocationsProp, err := expandComputeSnapshotStorageLocations(d.Get("storage_locations"), d, config) - 
if err != nil { - return err - } else if v, ok := d.GetOkExists("storage_locations"); !isEmptyValue(resource_compute_snapshot_reflect.ValueOf(storageLocationsProp)) && (ok || !resource_compute_snapshot_reflect.DeepEqual(v, storageLocationsProp)) { - obj["storageLocations"] = storageLocationsProp - } - labelsProp, err := expandComputeSnapshotLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_compute_snapshot_reflect.ValueOf(labelsProp)) && (ok || !resource_compute_snapshot_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - labelFingerprintProp, err := expandComputeSnapshotLabelFingerprint(d.Get("label_fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(resource_compute_snapshot_reflect.ValueOf(labelFingerprintProp)) && (ok || !resource_compute_snapshot_reflect.DeepEqual(v, labelFingerprintProp)) { - obj["labelFingerprint"] = labelFingerprintProp - } - sourceDiskProp, err := expandComputeSnapshotSourceDisk(d.Get("source_disk"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_disk"); !isEmptyValue(resource_compute_snapshot_reflect.ValueOf(sourceDiskProp)) && (ok || !resource_compute_snapshot_reflect.DeepEqual(v, sourceDiskProp)) { - obj["sourceDisk"] = sourceDiskProp - } - zoneProp, err := expandComputeSnapshotZone(d.Get("zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(resource_compute_snapshot_reflect.ValueOf(zoneProp)) && (ok || !resource_compute_snapshot_reflect.DeepEqual(v, zoneProp)) { - obj["zone"] = zoneProp - } - snapshotEncryptionKeyProp, err := expandComputeSnapshotSnapshotEncryptionKey(d.Get("snapshot_encryption_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("snapshot_encryption_key"); 
!isEmptyValue(resource_compute_snapshot_reflect.ValueOf(snapshotEncryptionKeyProp)) && (ok || !resource_compute_snapshot_reflect.DeepEqual(v, snapshotEncryptionKeyProp)) { - obj["snapshotEncryptionKey"] = snapshotEncryptionKeyProp - } - sourceDiskEncryptionKeyProp, err := expandComputeSnapshotSourceDiskEncryptionKey(d.Get("source_disk_encryption_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_disk_encryption_key"); !isEmptyValue(resource_compute_snapshot_reflect.ValueOf(sourceDiskEncryptionKeyProp)) && (ok || !resource_compute_snapshot_reflect.DeepEqual(v, sourceDiskEncryptionKeyProp)) { - obj["sourceDiskEncryptionKey"] = sourceDiskEncryptionKeyProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}PRE_CREATE_REPLACE_ME/createSnapshot") - if err != nil { - return err - } - - resource_compute_snapshot_log.Printf("[DEBUG] Creating new Snapshot: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_snapshot_fmt.Errorf("Error fetching project for Snapshot: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - url = resource_compute_snapshot_regexp.MustCompile("PRE_CREATE_REPLACE_ME").ReplaceAllLiteralString(url, sourceDiskProp.(string)) - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_snapshot_schema.TimeoutCreate)) - if err != nil { - return resource_compute_snapshot_fmt.Errorf("Error creating Snapshot: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/snapshots/{{name}}") - if err != nil { - return resource_compute_snapshot_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating Snapshot", userAgent, - d.Timeout(resource_compute_snapshot_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - 
return resource_compute_snapshot_fmt.Errorf("Error waiting to create Snapshot: %s", err) - } - - resource_compute_snapshot_log.Printf("[DEBUG] Finished creating Snapshot %q: %#v", d.Id(), res) - - return resourceComputeSnapshotRead(d, meta) -} - -func resourceComputeSnapshotRead(d *resource_compute_snapshot_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/snapshots/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_snapshot_fmt.Errorf("Error fetching project for Snapshot: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_snapshot_fmt.Sprintf("ComputeSnapshot %q", d.Id())) - } - - res, err = resourceComputeSnapshotDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_compute_snapshot_log.Printf("[DEBUG] Removing ComputeSnapshot because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_compute_snapshot_fmt.Errorf("Error reading Snapshot: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeSnapshotCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_snapshot_fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("snapshot_id", flattenComputeSnapshotSnapshotId(res["id"], d, config)); err != nil { - return resource_compute_snapshot_fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("disk_size_gb", 
flattenComputeSnapshotDiskSizeGb(res["diskSizeGb"], d, config)); err != nil { - return resource_compute_snapshot_fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("name", flattenComputeSnapshotName(res["name"], d, config)); err != nil { - return resource_compute_snapshot_fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("description", flattenComputeSnapshotDescription(res["description"], d, config)); err != nil { - return resource_compute_snapshot_fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("storage_bytes", flattenComputeSnapshotStorageBytes(res["storageBytes"], d, config)); err != nil { - return resource_compute_snapshot_fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("storage_locations", flattenComputeSnapshotStorageLocations(res["storageLocations"], d, config)); err != nil { - return resource_compute_snapshot_fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("licenses", flattenComputeSnapshotLicenses(res["licenses"], d, config)); err != nil { - return resource_compute_snapshot_fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("labels", flattenComputeSnapshotLabels(res["labels"], d, config)); err != nil { - return resource_compute_snapshot_fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("label_fingerprint", flattenComputeSnapshotLabelFingerprint(res["labelFingerprint"], d, config)); err != nil { - return resource_compute_snapshot_fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("source_disk", flattenComputeSnapshotSourceDisk(res["sourceDisk"], d, config)); err != nil { - return resource_compute_snapshot_fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("snapshot_encryption_key", flattenComputeSnapshotSnapshotEncryptionKey(res["snapshotEncryptionKey"], d, config)); err != nil { - return resource_compute_snapshot_fmt.Errorf("Error reading Snapshot: %s", err) - } - if err := d.Set("self_link", 
ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_snapshot_fmt.Errorf("Error reading Snapshot: %s", err) - } - - return nil -} - -func resourceComputeSnapshotUpdate(d *resource_compute_snapshot_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_snapshot_fmt.Errorf("Error fetching project for Snapshot: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("labels") || d.HasChange("label_fingerprint") { - obj := make(map[string]interface{}) - - labelsProp, err := expandComputeSnapshotLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_compute_snapshot_reflect.ValueOf(v)) && (ok || !resource_compute_snapshot_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - labelFingerprintProp, err := expandComputeSnapshotLabelFingerprint(d.Get("label_fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(resource_compute_snapshot_reflect.ValueOf(v)) && (ok || !resource_compute_snapshot_reflect.DeepEqual(v, labelFingerprintProp)) { - obj["labelFingerprint"] = labelFingerprintProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/snapshots/{{name}}/setLabels") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_snapshot_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_snapshot_fmt.Errorf("Error updating Snapshot %q: %s", d.Id(), err) - } else { - 
resource_compute_snapshot_log.Printf("[DEBUG] Finished updating Snapshot %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating Snapshot", userAgent, - d.Timeout(resource_compute_snapshot_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeSnapshotRead(d, meta) -} - -func resourceComputeSnapshotDelete(d *resource_compute_snapshot_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_snapshot_fmt.Errorf("Error fetching project for Snapshot: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/snapshots/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_snapshot_log.Printf("[DEBUG] Deleting Snapshot %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_snapshot_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Snapshot") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting Snapshot", userAgent, - d.Timeout(resource_compute_snapshot_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_snapshot_log.Printf("[DEBUG] Finished deleting Snapshot %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeSnapshotImport(d *resource_compute_snapshot_schema.ResourceData, meta interface{}) ([]*resource_compute_snapshot_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/snapshots/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - 
"(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/snapshots/{{name}}") - if err != nil { - return nil, resource_compute_snapshot_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_snapshot_schema.ResourceData{d}, nil -} - -func flattenComputeSnapshotCreationTimestamp(v interface{}, d *resource_compute_snapshot_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSnapshotSnapshotId(v interface{}, d *resource_compute_snapshot_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_snapshot_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeSnapshotDiskSizeGb(v interface{}, d *resource_compute_snapshot_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_snapshot_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeSnapshotName(v interface{}, d *resource_compute_snapshot_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSnapshotDescription(v interface{}, d *resource_compute_snapshot_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSnapshotStorageBytes(v interface{}, d *resource_compute_snapshot_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_snapshot_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func 
flattenComputeSnapshotStorageLocations(v interface{}, d *resource_compute_snapshot_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSnapshotLicenses(v interface{}, d *resource_compute_snapshot_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeSnapshotLabels(v interface{}, d *resource_compute_snapshot_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSnapshotLabelFingerprint(v interface{}, d *resource_compute_snapshot_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSnapshotSourceDisk(v interface{}, d *resource_compute_snapshot_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeSnapshotSnapshotEncryptionKey(v interface{}, d *resource_compute_snapshot_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["raw_key"] = - flattenComputeSnapshotSnapshotEncryptionKeyRawKey(original["rawKey"], d, config) - transformed["sha256"] = - flattenComputeSnapshotSnapshotEncryptionKeySha256(original["sha256"], d, config) - transformed["kms_key_self_link"] = - flattenComputeSnapshotSnapshotEncryptionKeyKmsKeySelfLink(original["kmsKeyName"], d, config) - transformed["kms_key_service_account"] = - flattenComputeSnapshotSnapshotEncryptionKeyKmsKeyServiceAccount(original["kmsKeyServiceAccount"], d, config) - return []interface{}{transformed} -} - -func flattenComputeSnapshotSnapshotEncryptionKeyRawKey(v interface{}, d *resource_compute_snapshot_schema.ResourceData, config *Config) interface{} { - return d.Get("snapshot_encryption_key.0.raw_key") -} - -func 
flattenComputeSnapshotSnapshotEncryptionKeySha256(v interface{}, d *resource_compute_snapshot_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSnapshotSnapshotEncryptionKeyKmsKeySelfLink(v interface{}, d *resource_compute_snapshot_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSnapshotSnapshotEncryptionKeyKmsKeyServiceAccount(v interface{}, d *resource_compute_snapshot_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeSnapshotName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotStorageLocations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandComputeSnapshotLabelFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotSourceDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseZonalFieldValue("disks", v.(string), "project", "zone", d, config, true) - if err != nil { - return nil, resource_compute_snapshot_fmt.Errorf("Invalid value for source_disk: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeSnapshotZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true) - if err != nil { - return nil, 
resource_compute_snapshot_fmt.Errorf("Invalid value for zone: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeSnapshotSnapshotEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRawKey, err := expandComputeSnapshotSnapshotEncryptionKeyRawKey(original["raw_key"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_snapshot_reflect.ValueOf(transformedRawKey); val.IsValid() && !isEmptyValue(val) { - transformed["rawKey"] = transformedRawKey - } - - transformedSha256, err := expandComputeSnapshotSnapshotEncryptionKeySha256(original["sha256"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_snapshot_reflect.ValueOf(transformedSha256); val.IsValid() && !isEmptyValue(val) { - transformed["sha256"] = transformedSha256 - } - - transformedKmsKeySelfLink, err := expandComputeSnapshotSnapshotEncryptionKeyKmsKeySelfLink(original["kms_key_self_link"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_snapshot_reflect.ValueOf(transformedKmsKeySelfLink); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeySelfLink - } - - transformedKmsKeyServiceAccount, err := expandComputeSnapshotSnapshotEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_snapshot_reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount - } - - return transformed, nil -} - -func expandComputeSnapshotSnapshotEncryptionKeyRawKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - 
-func expandComputeSnapshotSnapshotEncryptionKeySha256(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotSnapshotEncryptionKeyKmsKeySelfLink(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotSnapshotEncryptionKeyKmsKeyServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotSourceDiskEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRawKey, err := expandComputeSnapshotSourceDiskEncryptionKeyRawKey(original["raw_key"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_snapshot_reflect.ValueOf(transformedRawKey); val.IsValid() && !isEmptyValue(val) { - transformed["rawKey"] = transformedRawKey - } - - transformedKmsKeyServiceAccount, err := expandComputeSnapshotSourceDiskEncryptionKeyKmsKeyServiceAccount(original["kms_key_service_account"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_snapshot_reflect.ValueOf(transformedKmsKeyServiceAccount); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyServiceAccount"] = transformedKmsKeyServiceAccount - } - - return transformed, nil -} - -func expandComputeSnapshotSourceDiskEncryptionKeyRawKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSnapshotSourceDiskEncryptionKeyKmsKeyServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeSnapshotDecoder(d *resource_compute_snapshot_schema.ResourceData, meta interface{}, res 
map[string]interface{}) (map[string]interface{}, error) { - if v, ok := res["snapshotEncryptionKey"]; ok { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformed["rawKey"] = d.Get("snapshot_encryption_key.0.raw_key") - transformed["sha256"] = original["sha256"] - - if kmsKeyName, ok := original["kmsKeyName"]; ok { - - transformed["kmsKeyName"] = resource_compute_snapshot_strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] - } - - if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { - transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount - } - - res["snapshotEncryptionKey"] = transformed - } - - if v, ok := res["sourceDiskEncryptionKey"]; ok { - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformed["rawKey"] = d.Get("source_disk_encryption_key.0.raw_key") - transformed["sha256"] = original["sha256"] - - if kmsKeyName, ok := original["kmsKeyName"]; ok { - - transformed["kmsKeyName"] = resource_compute_snapshot_strings.Split(kmsKeyName.(string), "/cryptoKeyVersions")[0] - } - - if kmsKeyServiceAccount, ok := original["kmsKeyServiceAccount"]; ok { - transformed["kmsKeyServiceAccount"] = kmsKeyServiceAccount - } - - res["sourceDiskEncryptionKey"] = transformed - } - - return res, nil -} - -func resourceComputeSslCertificate() *resource_compute_ssl_certificate_schema.Resource { - return &resource_compute_ssl_certificate_schema.Resource{ - Create: resourceComputeSslCertificateCreate, - Read: resourceComputeSslCertificateRead, - Delete: resourceComputeSslCertificateDelete, - - Importer: &resource_compute_ssl_certificate_schema.ResourceImporter{ - State: resourceComputeSslCertificateImport, - }, - - Timeouts: &resource_compute_ssl_certificate_schema.ResourceTimeout{ - Create: resource_compute_ssl_certificate_schema.DefaultTimeout(4 * resource_compute_ssl_certificate_time.Minute), - Delete: resource_compute_ssl_certificate_schema.DefaultTimeout(4 * 
resource_compute_ssl_certificate_time.Minute), - }, - - Schema: map[string]*resource_compute_ssl_certificate_schema.Schema{ - "certificate": { - Type: resource_compute_ssl_certificate_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The certificate in PEM format. -The certificate chain must be no greater than 5 certs long. -The chain must include at least one intermediate cert.`, - Sensitive: true, - }, - "private_key": { - Type: resource_compute_ssl_certificate_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: sha256DiffSuppress, - Description: `The write-only private key in PEM format.`, - Sensitive: true, - }, - "description": { - Type: resource_compute_ssl_certificate_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "name": { - Type: resource_compute_ssl_certificate_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateGCPName, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash. 
- - -These are in the same namespace as the managed SSL certificates.`, - }, - "certificate_id": { - Type: resource_compute_ssl_certificate_schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "creation_timestamp": { - Type: resource_compute_ssl_certificate_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "name_prefix": { - Type: resource_compute_ssl_certificate_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name"}, - Description: "Creates a unique name beginning with the specified prefix. Conflicts with name.", - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - - value := v.(string) - if len(value) > 37 { - errors = append(errors, resource_compute_ssl_certificate_fmt.Errorf( - "%q cannot be longer than 37 characters, name is limited to 63", k)) - } - return - }, - }, - "project": { - Type: resource_compute_ssl_certificate_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_ssl_certificate_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeSslCertificateCreate(d *resource_compute_ssl_certificate_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - certificateProp, err := expandComputeSslCertificateCertificate(d.Get("certificate"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("certificate"); !isEmptyValue(resource_compute_ssl_certificate_reflect.ValueOf(certificateProp)) && (ok || !resource_compute_ssl_certificate_reflect.DeepEqual(v, certificateProp)) { - obj["certificate"] = certificateProp - } - descriptionProp, err := 
expandComputeSslCertificateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_ssl_certificate_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_ssl_certificate_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeSslCertificateName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_ssl_certificate_reflect.ValueOf(nameProp)) && (ok || !resource_compute_ssl_certificate_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - privateKeyProp, err := expandComputeSslCertificatePrivateKey(d.Get("private_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("private_key"); !isEmptyValue(resource_compute_ssl_certificate_reflect.ValueOf(privateKeyProp)) && (ok || !resource_compute_ssl_certificate_reflect.DeepEqual(v, privateKeyProp)) { - obj["privateKey"] = privateKeyProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates") - if err != nil { - return err - } - - resource_compute_ssl_certificate_log.Printf("[DEBUG] Creating new SslCertificate: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_ssl_certificate_fmt.Errorf("Error fetching project for SslCertificate: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_ssl_certificate_schema.TimeoutCreate)) - if err != nil { - return resource_compute_ssl_certificate_fmt.Errorf("Error creating SslCertificate: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/sslCertificates/{{name}}") - if 
err != nil { - return resource_compute_ssl_certificate_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating SslCertificate", userAgent, - d.Timeout(resource_compute_ssl_certificate_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_ssl_certificate_fmt.Errorf("Error waiting to create SslCertificate: %s", err) - } - - resource_compute_ssl_certificate_log.Printf("[DEBUG] Finished creating SslCertificate %q: %#v", d.Id(), res) - - return resourceComputeSslCertificateRead(d, meta) -} - -func resourceComputeSslCertificateRead(d *resource_compute_ssl_certificate_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_ssl_certificate_fmt.Errorf("Error fetching project for SslCertificate: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_ssl_certificate_fmt.Sprintf("ComputeSslCertificate %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_ssl_certificate_fmt.Errorf("Error reading SslCertificate: %s", err) - } - - if err := d.Set("certificate", flattenComputeSslCertificateCertificate(res["certificate"], d, config)); err != nil { - return resource_compute_ssl_certificate_fmt.Errorf("Error reading SslCertificate: %s", err) - } - if err := d.Set("creation_timestamp", 
flattenComputeSslCertificateCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_ssl_certificate_fmt.Errorf("Error reading SslCertificate: %s", err) - } - if err := d.Set("description", flattenComputeSslCertificateDescription(res["description"], d, config)); err != nil { - return resource_compute_ssl_certificate_fmt.Errorf("Error reading SslCertificate: %s", err) - } - if err := d.Set("certificate_id", flattenComputeSslCertificateCertificateId(res["id"], d, config)); err != nil { - return resource_compute_ssl_certificate_fmt.Errorf("Error reading SslCertificate: %s", err) - } - if err := d.Set("name", flattenComputeSslCertificateName(res["name"], d, config)); err != nil { - return resource_compute_ssl_certificate_fmt.Errorf("Error reading SslCertificate: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_ssl_certificate_fmt.Errorf("Error reading SslCertificate: %s", err) - } - - return nil -} - -func resourceComputeSslCertificateDelete(d *resource_compute_ssl_certificate_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_ssl_certificate_fmt.Errorf("Error fetching project for SslCertificate: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslCertificates/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_ssl_certificate_log.Printf("[DEBUG] Deleting SslCertificate %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, 
d.Timeout(resource_compute_ssl_certificate_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "SslCertificate") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting SslCertificate", userAgent, - d.Timeout(resource_compute_ssl_certificate_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_ssl_certificate_log.Printf("[DEBUG] Finished deleting SslCertificate %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeSslCertificateImport(d *resource_compute_ssl_certificate_schema.ResourceData, meta interface{}) ([]*resource_compute_ssl_certificate_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/sslCertificates/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/sslCertificates/{{name}}") - if err != nil { - return nil, resource_compute_ssl_certificate_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_ssl_certificate_schema.ResourceData{d}, nil -} - -func flattenComputeSslCertificateCertificate(v interface{}, d *resource_compute_ssl_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslCertificateCreationTimestamp(v interface{}, d *resource_compute_ssl_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslCertificateDescription(v interface{}, d *resource_compute_ssl_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslCertificateCertificateId(v interface{}, d *resource_compute_ssl_certificate_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_ssl_certificate_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if 
floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeSslCertificateName(v interface{}, d *resource_compute_ssl_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeSslCertificateCertificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSslCertificateDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSslCertificateName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - var certName string - if v, ok := d.GetOk("name"); ok { - certName = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - certName = resource_compute_ssl_certificate_resource.PrefixedUniqueId(v.(string)) - } else { - certName = resource_compute_ssl_certificate_resource.UniqueId() - } - - if err := d.Set("name", certName); err != nil { - return nil, resource_compute_ssl_certificate_fmt.Errorf("Error setting name: %s", err) - } - - return certName, nil -} - -func expandComputeSslCertificatePrivateKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func sslPolicyCustomizeDiff(_ resource_compute_ssl_policy_context.Context, diff *resource_compute_ssl_policy_schema.ResourceDiff, v interface{}) error { - profile := diff.Get("profile") - customFeaturesCount := diff.Get("custom_features.#") - - if diff.HasChange("profile") || diff.HasChange("custom_features") { - if profile.(string) == "CUSTOM" { - if customFeaturesCount.(int) == 0 { - return resource_compute_ssl_policy_fmt.Errorf("Error in SSL Policy %s: the profile is set to %s but no custom_features are set.", diff.Get("name"), profile.(string)) - } - } else { - if customFeaturesCount != 0 { - return resource_compute_ssl_policy_fmt.Errorf("Error in SSL Policy %s: the profile is set to %s but using 
custom_features requires the profile to be CUSTOM.", diff.Get("name"), profile.(string)) - } - } - return nil - } - return nil -} - -func resourceComputeSslPolicy() *resource_compute_ssl_policy_schema.Resource { - return &resource_compute_ssl_policy_schema.Resource{ - Create: resourceComputeSslPolicyCreate, - Read: resourceComputeSslPolicyRead, - Update: resourceComputeSslPolicyUpdate, - Delete: resourceComputeSslPolicyDelete, - - Importer: &resource_compute_ssl_policy_schema.ResourceImporter{ - State: resourceComputeSslPolicyImport, - }, - - Timeouts: &resource_compute_ssl_policy_schema.ResourceTimeout{ - Create: resource_compute_ssl_policy_schema.DefaultTimeout(4 * resource_compute_ssl_policy_time.Minute), - Update: resource_compute_ssl_policy_schema.DefaultTimeout(4 * resource_compute_ssl_policy_time.Minute), - Delete: resource_compute_ssl_policy_schema.DefaultTimeout(4 * resource_compute_ssl_policy_time.Minute), - }, - - CustomizeDiff: sslPolicyCustomizeDiff, - - Schema: map[string]*resource_compute_ssl_policy_schema.Schema{ - "name": { - Type: resource_compute_ssl_policy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "custom_features": { - Type: resource_compute_ssl_policy_schema.TypeSet, - Optional: true, - Description: `Profile specifies the set of SSL features that can be used by the -load balancer when negotiating SSL with clients. This can be one of -'COMPATIBLE', 'MODERN', 'RESTRICTED', or 'CUSTOM'. 
If using 'CUSTOM', -the set of SSL features to enable must be specified in the -'customFeatures' field. - -See the [official documentation](https://cloud.google.com/compute/docs/load-balancing/ssl-policies#profilefeaturesupport) -for which ciphers are available to use. **Note**: this argument -*must* be present when using the 'CUSTOM' profile. This argument -*must not* be present when using any other profile.`, - Elem: &resource_compute_ssl_policy_schema.Schema{ - Type: resource_compute_ssl_policy_schema.TypeString, - }, - Set: resource_compute_ssl_policy_schema.HashString, - }, - "description": { - Type: resource_compute_ssl_policy_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "min_tls_version": { - Type: resource_compute_ssl_policy_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_ssl_policy_validation.StringInSlice([]string{"TLS_1_0", "TLS_1_1", "TLS_1_2", ""}, false), - Description: `The minimum version of SSL protocol that can be used by the clients -to establish a connection with the load balancer. Default value: "TLS_1_0" Possible values: ["TLS_1_0", "TLS_1_1", "TLS_1_2"]`, - Default: "TLS_1_0", - }, - "profile": { - Type: resource_compute_ssl_policy_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_ssl_policy_validation.StringInSlice([]string{"COMPATIBLE", "MODERN", "RESTRICTED", "CUSTOM", ""}, false), - Description: `Profile specifies the set of SSL features that can be used by the -load balancer when negotiating SSL with clients. If using 'CUSTOM', -the set of SSL features to enable must be specified in the -'customFeatures' field. - -See the [official documentation](https://cloud.google.com/compute/docs/load-balancing/ssl-policies#profilefeaturesupport) -for information on what cipher suites each profile provides. If -'CUSTOM' is used, the 'custom_features' attribute **must be set**. 
Default value: "COMPATIBLE" Possible values: ["COMPATIBLE", "MODERN", "RESTRICTED", "CUSTOM"]`, - Default: "COMPATIBLE", - }, - "creation_timestamp": { - Type: resource_compute_ssl_policy_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "enabled_features": { - Type: resource_compute_ssl_policy_schema.TypeSet, - Computed: true, - Description: `The list of features enabled in the SSL policy.`, - Elem: &resource_compute_ssl_policy_schema.Schema{ - Type: resource_compute_ssl_policy_schema.TypeString, - }, - Set: resource_compute_ssl_policy_schema.HashString, - }, - "fingerprint": { - Type: resource_compute_ssl_policy_schema.TypeString, - Computed: true, - Description: `Fingerprint of this resource. A hash of the contents stored in this -object. This field is used in optimistic locking.`, - }, - "project": { - Type: resource_compute_ssl_policy_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_ssl_policy_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeSslPolicyCreate(d *resource_compute_ssl_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeSslPolicyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_ssl_policy_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_ssl_policy_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeSslPolicyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_ssl_policy_reflect.ValueOf(nameProp)) && (ok || 
!resource_compute_ssl_policy_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - profileProp, err := expandComputeSslPolicyProfile(d.Get("profile"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("profile"); !isEmptyValue(resource_compute_ssl_policy_reflect.ValueOf(profileProp)) && (ok || !resource_compute_ssl_policy_reflect.DeepEqual(v, profileProp)) { - obj["profile"] = profileProp - } - minTlsVersionProp, err := expandComputeSslPolicyMinTlsVersion(d.Get("min_tls_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("min_tls_version"); !isEmptyValue(resource_compute_ssl_policy_reflect.ValueOf(minTlsVersionProp)) && (ok || !resource_compute_ssl_policy_reflect.DeepEqual(v, minTlsVersionProp)) { - obj["minTlsVersion"] = minTlsVersionProp - } - customFeaturesProp, err := expandComputeSslPolicyCustomFeatures(d.Get("custom_features"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("custom_features"); !isEmptyValue(resource_compute_ssl_policy_reflect.ValueOf(customFeaturesProp)) && (ok || !resource_compute_ssl_policy_reflect.DeepEqual(v, customFeaturesProp)) { - obj["customFeatures"] = customFeaturesProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslPolicies") - if err != nil { - return err - } - - resource_compute_ssl_policy_log.Printf("[DEBUG] Creating new SslPolicy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error fetching project for SslPolicy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_ssl_policy_schema.TimeoutCreate)) - if err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error creating SslPolicy: %s", 
err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/sslPolicies/{{name}}") - if err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating SslPolicy", userAgent, - d.Timeout(resource_compute_ssl_policy_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_ssl_policy_fmt.Errorf("Error waiting to create SslPolicy: %s", err) - } - - resource_compute_ssl_policy_log.Printf("[DEBUG] Finished creating SslPolicy %q: %#v", d.Id(), res) - - return resourceComputeSslPolicyRead(d, meta) -} - -func resourceComputeSslPolicyRead(d *resource_compute_ssl_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslPolicies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error fetching project for SslPolicy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_ssl_policy_fmt.Sprintf("ComputeSslPolicy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error reading SslPolicy: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeSslPolicyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error reading SslPolicy: %s", err) - } - if err := d.Set("description", 
flattenComputeSslPolicyDescription(res["description"], d, config)); err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error reading SslPolicy: %s", err) - } - if err := d.Set("name", flattenComputeSslPolicyName(res["name"], d, config)); err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error reading SslPolicy: %s", err) - } - if err := d.Set("profile", flattenComputeSslPolicyProfile(res["profile"], d, config)); err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error reading SslPolicy: %s", err) - } - if err := d.Set("min_tls_version", flattenComputeSslPolicyMinTlsVersion(res["minTlsVersion"], d, config)); err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error reading SslPolicy: %s", err) - } - if err := d.Set("enabled_features", flattenComputeSslPolicyEnabledFeatures(res["enabledFeatures"], d, config)); err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error reading SslPolicy: %s", err) - } - if err := d.Set("custom_features", flattenComputeSslPolicyCustomFeatures(res["customFeatures"], d, config)); err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error reading SslPolicy: %s", err) - } - if err := d.Set("fingerprint", flattenComputeSslPolicyFingerprint(res["fingerprint"], d, config)); err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error reading SslPolicy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error reading SslPolicy: %s", err) - } - - return nil -} - -func resourceComputeSslPolicyUpdate(d *resource_compute_ssl_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error fetching project for SslPolicy: %s", 
err) - } - billingProject = project - - obj := make(map[string]interface{}) - profileProp, err := expandComputeSslPolicyProfile(d.Get("profile"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("profile"); !isEmptyValue(resource_compute_ssl_policy_reflect.ValueOf(v)) && (ok || !resource_compute_ssl_policy_reflect.DeepEqual(v, profileProp)) { - obj["profile"] = profileProp - } - minTlsVersionProp, err := expandComputeSslPolicyMinTlsVersion(d.Get("min_tls_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("min_tls_version"); !isEmptyValue(resource_compute_ssl_policy_reflect.ValueOf(v)) && (ok || !resource_compute_ssl_policy_reflect.DeepEqual(v, minTlsVersionProp)) { - obj["minTlsVersion"] = minTlsVersionProp - } - customFeaturesProp, err := expandComputeSslPolicyCustomFeatures(d.Get("custom_features"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("custom_features"); !isEmptyValue(resource_compute_ssl_policy_reflect.ValueOf(v)) && (ok || !resource_compute_ssl_policy_reflect.DeepEqual(v, customFeaturesProp)) { - obj["customFeatures"] = customFeaturesProp - } - - obj, err = resourceComputeSslPolicyUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslPolicies/{{name}}") - if err != nil { - return err - } - - resource_compute_ssl_policy_log.Printf("[DEBUG] Updating SslPolicy %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_ssl_policy_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error updating SslPolicy %q: %s", d.Id(), err) - } else { - resource_compute_ssl_policy_log.Printf("[DEBUG] Finished updating SslPolicy %q: %#v", d.Id(), res) - } - - err = 
computeOperationWaitTime( - config, res, project, "Updating SslPolicy", userAgent, - d.Timeout(resource_compute_ssl_policy_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeSslPolicyRead(d, meta) -} - -func resourceComputeSslPolicyDelete(d *resource_compute_ssl_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_ssl_policy_fmt.Errorf("Error fetching project for SslPolicy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/sslPolicies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_ssl_policy_log.Printf("[DEBUG] Deleting SslPolicy %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_ssl_policy_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "SslPolicy") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting SslPolicy", userAgent, - d.Timeout(resource_compute_ssl_policy_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_ssl_policy_log.Printf("[DEBUG] Finished deleting SslPolicy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeSslPolicyImport(d *resource_compute_ssl_policy_schema.ResourceData, meta interface{}) ([]*resource_compute_ssl_policy_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/sslPolicies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, 
"projects/{{project}}/global/sslPolicies/{{name}}") - if err != nil { - return nil, resource_compute_ssl_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_ssl_policy_schema.ResourceData{d}, nil -} - -func flattenComputeSslPolicyCreationTimestamp(v interface{}, d *resource_compute_ssl_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslPolicyDescription(v interface{}, d *resource_compute_ssl_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslPolicyName(v interface{}, d *resource_compute_ssl_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslPolicyProfile(v interface{}, d *resource_compute_ssl_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslPolicyMinTlsVersion(v interface{}, d *resource_compute_ssl_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSslPolicyEnabledFeatures(v interface{}, d *resource_compute_ssl_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_ssl_policy_schema.NewSet(resource_compute_ssl_policy_schema.HashString, v.([]interface{})) -} - -func flattenComputeSslPolicyCustomFeatures(v interface{}, d *resource_compute_ssl_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_ssl_policy_schema.NewSet(resource_compute_ssl_policy_schema.HashString, v.([]interface{})) -} - -func flattenComputeSslPolicyFingerprint(v interface{}, d *resource_compute_ssl_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeSslPolicyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSslPolicyName(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandComputeSslPolicyProfile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSslPolicyMinTlsVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSslPolicyCustomFeatures(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_ssl_policy_schema.Set).List() - return v, nil -} - -func resourceComputeSslPolicyUpdateEncoder(d *resource_compute_ssl_policy_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - obj["fingerprint"] = d.Get("fingerprint") - - if v, ok := obj["customFeatures"]; ok && len(v.([]interface{})) == 0 { - obj["customFeatures"] = nil - } - - return obj, nil -} - -func isShrinkageIpCidr(_ resource_compute_subnetwork_context.Context, old, new, _ interface{}) bool { - _, oldCidr, oldErr := resource_compute_subnetwork_net.ParseCIDR(old.(string)) - _, newCidr, newErr := resource_compute_subnetwork_net.ParseCIDR(new.(string)) - - if oldErr != nil || newErr != nil { - - return false - } - - oldStart, oldEnd := resource_compute_subnetwork_cidr.AddressRange(oldCidr) - - if newCidr.Contains(oldStart) && newCidr.Contains(oldEnd) { - - return false - } - - return true -} - -func resourceComputeSubnetwork() *resource_compute_subnetwork_schema.Resource { - return &resource_compute_subnetwork_schema.Resource{ - Create: resourceComputeSubnetworkCreate, - Read: resourceComputeSubnetworkRead, - Update: resourceComputeSubnetworkUpdate, - Delete: resourceComputeSubnetworkDelete, - - Importer: &resource_compute_subnetwork_schema.ResourceImporter{ - State: resourceComputeSubnetworkImport, - }, - - Timeouts: &resource_compute_subnetwork_schema.ResourceTimeout{ - Create: resource_compute_subnetwork_schema.DefaultTimeout(6 * resource_compute_subnetwork_time.Minute), - Update: 
resource_compute_subnetwork_schema.DefaultTimeout(6 * resource_compute_subnetwork_time.Minute), - Delete: resource_compute_subnetwork_schema.DefaultTimeout(6 * resource_compute_subnetwork_time.Minute), - }, - - CustomizeDiff: resource_compute_subnetwork_customdiff.All( - resource_compute_subnetwork_customdiff.ForceNewIfChange("ip_cidr_range", isShrinkageIpCidr), - resourceComputeSubnetworkSecondaryIpRangeSetStyleDiff, - ), - - Schema: map[string]*resource_compute_subnetwork_schema.Schema{ - "ip_cidr_range": { - Type: resource_compute_subnetwork_schema.TypeString, - Required: true, - ValidateFunc: validateIpCidrRange, - Description: `The range of internal addresses that are owned by this subnetwork. -Provide this property when you create the subnetwork. For example, -10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and -non-overlapping within a network. Only IPv4 is supported.`, - }, - "name": { - Type: resource_compute_subnetwork_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateGCPName, - Description: `The name of the resource, provided by the client when initially -creating the resource. The name must be 1-63 characters long, and -comply with RFC1035. Specifically, the name must be 1-63 characters -long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which -means the first character must be a lowercase letter, and all -following characters must be a dash, lowercase letter, or digit, -except the last character, which cannot be a dash.`, - }, - "network": { - Type: resource_compute_subnetwork_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The network this subnet belongs to. -Only networks that are in the distributed mode can have subnetworks.`, - }, - "description": { - Type: resource_compute_subnetwork_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource. 
Provide this property when -you create the resource. This field can be set only at resource -creation time.`, - }, - "ipv6_access_type": { - Type: resource_compute_subnetwork_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_subnetwork_validation.StringInSlice([]string{"EXTERNAL", ""}, false), - Description: `The access type of IPv6 address this subnet holds. It's immutable and can only be specified during creation -or the first time the subnet is updated into IPV4_IPV6 dual stack. If the ipv6_type is EXTERNAL then this subnet -cannot enable direct path. Possible values: ["EXTERNAL"]`, - }, - "log_config": { - Type: resource_compute_subnetwork_schema.TypeList, - Optional: true, - Description: `Denotes the logging options for the subnetwork flow logs. If logging is enabled -logs will be exported to Stackdriver. This field cannot be set if the 'purpose' of this -subnetwork is 'INTERNAL_HTTPS_LOAD_BALANCER'`, - MaxItems: 1, - Elem: &resource_compute_subnetwork_schema.Resource{ - Schema: map[string]*resource_compute_subnetwork_schema.Schema{ - "aggregation_interval": { - Type: resource_compute_subnetwork_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_subnetwork_validation.StringInSlice([]string{"INTERVAL_5_SEC", "INTERVAL_30_SEC", "INTERVAL_1_MIN", "INTERVAL_5_MIN", "INTERVAL_10_MIN", "INTERVAL_15_MIN", ""}, false), - Description: `Can only be specified if VPC flow logging for this subnetwork is enabled. -Toggles the aggregation interval for collecting flow logs. Increasing the -interval time will reduce the amount of generated flow logs for long -lasting connections. Default is an interval of 5 seconds per connection. 
Default value: "INTERVAL_5_SEC" Possible values: ["INTERVAL_5_SEC", "INTERVAL_30_SEC", "INTERVAL_1_MIN", "INTERVAL_5_MIN", "INTERVAL_10_MIN", "INTERVAL_15_MIN"]`, - Default: "INTERVAL_5_SEC", - AtLeastOneOf: []string{"log_config.0.aggregation_interval", "log_config.0.flow_sampling", "log_config.0.metadata", "log_config.0.filter_expr"}, - }, - "filter_expr": { - Type: resource_compute_subnetwork_schema.TypeString, - Optional: true, - Description: `Export filter used to define which VPC flow logs should be logged, as as CEL expression. See -https://cloud.google.com/vpc/docs/flow-logs#filtering for details on how to format this field. -The default value is 'true', which evaluates to include everything.`, - Default: "true", - AtLeastOneOf: []string{"log_config.0.aggregation_interval", "log_config.0.flow_sampling", "log_config.0.metadata", "log_config.0.filter_expr"}, - }, - "flow_sampling": { - Type: resource_compute_subnetwork_schema.TypeFloat, - Optional: true, - Description: `Can only be specified if VPC flow logging for this subnetwork is enabled. -The value of the field must be in [0, 1]. Set the sampling rate of VPC -flow logs within the subnetwork where 1.0 means all collected logs are -reported and 0.0 means no logs are reported. Default is 0.5 which means -half of all collected logs are reported.`, - Default: 0.5, - AtLeastOneOf: []string{"log_config.0.aggregation_interval", "log_config.0.flow_sampling", "log_config.0.metadata", "log_config.0.filter_expr"}, - }, - "metadata": { - Type: resource_compute_subnetwork_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_subnetwork_validation.StringInSlice([]string{"EXCLUDE_ALL_METADATA", "INCLUDE_ALL_METADATA", "CUSTOM_METADATA", ""}, false), - Description: `Can only be specified if VPC flow logging for this subnetwork is enabled. -Configures whether metadata fields should be added to the reported VPC -flow logs. 
Default value: "INCLUDE_ALL_METADATA" Possible values: ["EXCLUDE_ALL_METADATA", "INCLUDE_ALL_METADATA", "CUSTOM_METADATA"]`, - Default: "INCLUDE_ALL_METADATA", - AtLeastOneOf: []string{"log_config.0.aggregation_interval", "log_config.0.flow_sampling", "log_config.0.metadata", "log_config.0.filter_expr"}, - }, - "metadata_fields": { - Type: resource_compute_subnetwork_schema.TypeSet, - Optional: true, - Description: `List of metadata fields that should be added to reported logs. -Can only be specified if VPC flow logs for this subnetwork is enabled and "metadata" is set to CUSTOM_METADATA.`, - Elem: &resource_compute_subnetwork_schema.Schema{ - Type: resource_compute_subnetwork_schema.TypeString, - }, - Set: resource_compute_subnetwork_schema.HashString, - }, - }, - }, - }, - "private_ip_google_access": { - Type: resource_compute_subnetwork_schema.TypeBool, - Optional: true, - Description: `When enabled, VMs in this subnetwork without external IP addresses can -access Google APIs and services by using Private Google Access.`, - }, - "private_ipv6_google_access": { - Type: resource_compute_subnetwork_schema.TypeString, - Computed: true, - Optional: true, - Description: `The private IPv6 google access type for the VMs in this subnet.`, - }, - "purpose": { - Type: resource_compute_subnetwork_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The purpose of the resource. This field can be either PRIVATE -or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to -INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is -reserved for Internal HTTP(S) Load Balancing. If unspecified, the -purpose defaults to PRIVATE. 
- -If set to INTERNAL_HTTPS_LOAD_BALANCER you must also set 'role'.`, - }, - "region": { - Type: resource_compute_subnetwork_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The GCP region for this subnetwork.`, - }, - "role": { - Type: resource_compute_subnetwork_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_subnetwork_validation.StringInSlice([]string{"ACTIVE", "BACKUP", ""}, false), - Description: `The role of subnetwork. Currently, this field is only used when -purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE -or BACKUP. An ACTIVE subnetwork is one that is currently being used -for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that -is ready to be promoted to ACTIVE or is currently draining. Possible values: ["ACTIVE", "BACKUP"]`, - }, - "secondary_ip_range": { - Type: resource_compute_subnetwork_schema.TypeList, - Computed: true, - Optional: true, - ConfigMode: resource_compute_subnetwork_schema.SchemaConfigModeAttr, - Description: `An array of configurations for secondary IP ranges for VM instances -contained in this subnetwork. The primary IP of such VM must belong -to the primary ipCidrRange of the subnetwork. The alias IPs may belong -to either primary or secondary ranges. - -**Note**: This field uses [attr-as-block mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html) to avoid -breaking users during the 0.12 upgrade. 
To explicitly send a list -of zero objects you must use the following syntax: -'example=[]' -For more details about this behavior, see [this section](https://www.terraform.io/docs/configuration/attr-as-blocks.html#defining-a-fixed-object-collection-value).`, - Elem: &resource_compute_subnetwork_schema.Resource{ - Schema: map[string]*resource_compute_subnetwork_schema.Schema{ - "ip_cidr_range": { - Type: resource_compute_subnetwork_schema.TypeString, - Required: true, - ValidateFunc: validateIpCidrRange, - Description: `The range of IP addresses belonging to this subnetwork secondary -range. Provide this property when you create the subnetwork. -Ranges must be unique and non-overlapping with all primary and -secondary IP ranges within a network. Only IPv4 is supported.`, - }, - "range_name": { - Type: resource_compute_subnetwork_schema.TypeString, - Required: true, - ValidateFunc: validateGCPName, - Description: `The name associated with this subnetwork secondary range, used -when adding an alias IP range to a VM instance. The name must -be 1-63 characters long, and comply with RFC1035. The name -must be unique within the subnetwork.`, - }, - }, - }, - }, - "stack_type": { - Type: resource_compute_subnetwork_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_compute_subnetwork_validation.StringInSlice([]string{"IPV4_ONLY", "IPV4_IPV6", ""}, false), - Description: `The stack type for this subnet to identify whether the IPv6 feature is enabled or not. -If not specified IPV4_ONLY will be used. 
Possible values: ["IPV4_ONLY", "IPV4_IPV6"]`, - }, - "creation_timestamp": { - Type: resource_compute_subnetwork_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "external_ipv6_prefix": { - Type: resource_compute_subnetwork_schema.TypeString, - Computed: true, - Description: `The range of external IPv6 addresses that are owned by this subnetwork.`, - }, - "gateway_address": { - Type: resource_compute_subnetwork_schema.TypeString, - Computed: true, - Description: `The gateway address for default routes to reach destination addresses -outside this subnetwork.`, - }, - "ipv6_cidr_range": { - Type: resource_compute_subnetwork_schema.TypeString, - Computed: true, - Description: `The range of internal IPv6 addresses that are owned by this subnetwork.`, - }, - "fingerprint": { - Type: resource_compute_subnetwork_schema.TypeString, - Computed: true, - Description: "Fingerprint of this resource. This field is used internally during updates of this resource.", - Deprecated: "This field is not useful for users, and has been removed as an output.", - }, - "project": { - Type: resource_compute_subnetwork_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_subnetwork_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeSubnetworkSecondaryIpRangeSetStyleDiff(_ resource_compute_subnetwork_context.Context, diff *resource_compute_subnetwork_schema.ResourceDiff, meta interface{}) error { - keys := diff.GetChangedKeysPrefix("secondary_ip_range") - if len(keys) == 0 { - return nil - } - oldCount, newCount := diff.GetChange("secondary_ip_range.#") - var count int - - if oldCount.(int) < newCount.(int) { - count = newCount.(int) - } else { - count = oldCount.(int) - } - - if count < 1 { - return nil - } - old := make([]interface{}, count) - new := make([]interface{}, count) - for i := 0; i < count; i++ { - o, n := 
diff.GetChange(resource_compute_subnetwork_fmt.Sprintf("secondary_ip_range.%d", i)) - - if o != nil { - old = append(old, o) - } - if n != nil { - new = append(new, n) - } - } - - oldSet := resource_compute_subnetwork_schema.NewSet(resource_compute_subnetwork_schema.HashResource(resourceComputeSubnetwork().Schema["secondary_ip_range"].Elem.(*resource_compute_subnetwork_schema.Resource)), old) - newSet := resource_compute_subnetwork_schema.NewSet(resource_compute_subnetwork_schema.HashResource(resourceComputeSubnetwork().Schema["secondary_ip_range"].Elem.(*resource_compute_subnetwork_schema.Resource)), new) - - if oldSet.Equal(newSet) { - if err := diff.Clear("secondary_ip_range"); err != nil { - return err - } - } - - return nil -} - -func resourceComputeSubnetworkCreate(d *resource_compute_subnetwork_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeSubnetworkDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_subnetwork_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_subnetwork_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - ipCidrRangeProp, err := expandComputeSubnetworkIpCidrRange(d.Get("ip_cidr_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_cidr_range"); !isEmptyValue(resource_compute_subnetwork_reflect.ValueOf(ipCidrRangeProp)) && (ok || !resource_compute_subnetwork_reflect.DeepEqual(v, ipCidrRangeProp)) { - obj["ipCidrRange"] = ipCidrRangeProp - } - nameProp, err := expandComputeSubnetworkName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_subnetwork_reflect.ValueOf(nameProp)) 
&& (ok || !resource_compute_subnetwork_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - networkProp, err := expandComputeSubnetworkNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_compute_subnetwork_reflect.ValueOf(networkProp)) && (ok || !resource_compute_subnetwork_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - purposeProp, err := expandComputeSubnetworkPurpose(d.Get("purpose"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("purpose"); !isEmptyValue(resource_compute_subnetwork_reflect.ValueOf(purposeProp)) && (ok || !resource_compute_subnetwork_reflect.DeepEqual(v, purposeProp)) { - obj["purpose"] = purposeProp - } - roleProp, err := expandComputeSubnetworkRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("role"); !isEmptyValue(resource_compute_subnetwork_reflect.ValueOf(roleProp)) && (ok || !resource_compute_subnetwork_reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp - } - secondaryIpRangesProp, err := expandComputeSubnetworkSecondaryIpRange(d.Get("secondary_ip_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("secondary_ip_range"); ok || !resource_compute_subnetwork_reflect.DeepEqual(v, secondaryIpRangesProp) { - obj["secondaryIpRanges"] = secondaryIpRangesProp - } - privateIpGoogleAccessProp, err := expandComputeSubnetworkPrivateIpGoogleAccess(d.Get("private_ip_google_access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("private_ip_google_access"); !isEmptyValue(resource_compute_subnetwork_reflect.ValueOf(privateIpGoogleAccessProp)) && (ok || !resource_compute_subnetwork_reflect.DeepEqual(v, privateIpGoogleAccessProp)) { - obj["privateIpGoogleAccess"] = privateIpGoogleAccessProp - } - privateIpv6GoogleAccessProp, err := 
expandComputeSubnetworkPrivateIpv6GoogleAccess(d.Get("private_ipv6_google_access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("private_ipv6_google_access"); !isEmptyValue(resource_compute_subnetwork_reflect.ValueOf(privateIpv6GoogleAccessProp)) && (ok || !resource_compute_subnetwork_reflect.DeepEqual(v, privateIpv6GoogleAccessProp)) { - obj["privateIpv6GoogleAccess"] = privateIpv6GoogleAccessProp - } - regionProp, err := expandComputeSubnetworkRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_subnetwork_reflect.ValueOf(regionProp)) && (ok || !resource_compute_subnetwork_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - logConfigProp, err := expandComputeSubnetworkLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); ok || !resource_compute_subnetwork_reflect.DeepEqual(v, logConfigProp) { - obj["logConfig"] = logConfigProp - } - stackTypeProp, err := expandComputeSubnetworkStackType(d.Get("stack_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("stack_type"); !isEmptyValue(resource_compute_subnetwork_reflect.ValueOf(stackTypeProp)) && (ok || !resource_compute_subnetwork_reflect.DeepEqual(v, stackTypeProp)) { - obj["stackType"] = stackTypeProp - } - ipv6AccessTypeProp, err := expandComputeSubnetworkIpv6AccessType(d.Get("ipv6_access_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ipv6_access_type"); !isEmptyValue(resource_compute_subnetwork_reflect.ValueOf(ipv6AccessTypeProp)) && (ok || !resource_compute_subnetwork_reflect.DeepEqual(v, ipv6AccessTypeProp)) { - obj["ipv6AccessType"] = ipv6AccessTypeProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks") - if err != nil { - return err - } - - 
resource_compute_subnetwork_log.Printf("[DEBUG] Creating new Subnetwork: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error fetching project for Subnetwork: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_subnetwork_schema.TimeoutCreate)) - if err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error creating Subnetwork: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating Subnetwork", userAgent, - d.Timeout(resource_compute_subnetwork_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_subnetwork_fmt.Errorf("Error waiting to create Subnetwork: %s", err) - } - - resource_compute_subnetwork_log.Printf("[DEBUG] Finished creating Subnetwork %q: %#v", d.Id(), res) - - return resourceComputeSubnetworkRead(d, meta) -} - -func resourceComputeSubnetworkRead(d *resource_compute_subnetwork_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error fetching project for Subnetwork: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - 
billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_subnetwork_fmt.Sprintf("ComputeSubnetwork %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeSubnetworkCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("description", flattenComputeSubnetworkDescription(res["description"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("gateway_address", flattenComputeSubnetworkGatewayAddress(res["gatewayAddress"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("ip_cidr_range", flattenComputeSubnetworkIpCidrRange(res["ipCidrRange"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("name", flattenComputeSubnetworkName(res["name"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("network", flattenComputeSubnetworkNetwork(res["network"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("purpose", flattenComputeSubnetworkPurpose(res["purpose"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("role", flattenComputeSubnetworkRole(res["role"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := 
d.Set("secondary_ip_range", flattenComputeSubnetworkSecondaryIpRange(res["secondaryIpRanges"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("private_ip_google_access", flattenComputeSubnetworkPrivateIpGoogleAccess(res["privateIpGoogleAccess"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("private_ipv6_google_access", flattenComputeSubnetworkPrivateIpv6GoogleAccess(res["privateIpv6GoogleAccess"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("region", flattenComputeSubnetworkRegion(res["region"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("log_config", flattenComputeSubnetworkLogConfig(res["logConfig"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("stack_type", flattenComputeSubnetworkStackType(res["stackType"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("ipv6_access_type", flattenComputeSubnetworkIpv6AccessType(res["ipv6AccessType"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("ipv6_cidr_range", flattenComputeSubnetworkIpv6CidrRange(res["ipv6CidrRange"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("external_ipv6_prefix", flattenComputeSubnetworkExternalIpv6Prefix(res["externalIpv6Prefix"], d, config)); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - if err := d.Set("self_link", 
ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error reading Subnetwork: %s", err) - } - - return nil -} - -func resourceComputeSubnetworkUpdate(d *resource_compute_subnetwork_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error fetching project for Subnetwork: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("ip_cidr_range") { - obj := make(map[string]interface{}) - - ipCidrRangeProp, err := expandComputeSubnetworkIpCidrRange(d.Get("ip_cidr_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_cidr_range"); !isEmptyValue(resource_compute_subnetwork_reflect.ValueOf(v)) && (ok || !resource_compute_subnetwork_reflect.DeepEqual(v, ipCidrRangeProp)) { - obj["ipCidrRange"] = ipCidrRangeProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}/expandIpCidrRange") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_subnetwork_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) - } else { - resource_compute_subnetwork_log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating Subnetwork", userAgent, - d.Timeout(resource_compute_subnetwork_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("private_ip_google_access") { - obj := 
make(map[string]interface{}) - - privateIpGoogleAccessProp, err := expandComputeSubnetworkPrivateIpGoogleAccess(d.Get("private_ip_google_access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("private_ip_google_access"); !isEmptyValue(resource_compute_subnetwork_reflect.ValueOf(v)) && (ok || !resource_compute_subnetwork_reflect.DeepEqual(v, privateIpGoogleAccessProp)) { - obj["privateIpGoogleAccess"] = privateIpGoogleAccessProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}/setPrivateIpGoogleAccess") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_subnetwork_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) - } else { - resource_compute_subnetwork_log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating Subnetwork", userAgent, - d.Timeout(resource_compute_subnetwork_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("private_ipv6_google_access") || d.HasChange("stack_type") { - obj := make(map[string]interface{}) - - getUrl, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - getRes, err := sendRequest(config, "GET", billingProject, getUrl, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_subnetwork_fmt.Sprintf("ComputeSubnetwork %q", d.Id())) - } - - obj["fingerprint"] = getRes["fingerprint"] - - privateIpv6GoogleAccessProp, err := 
expandComputeSubnetworkPrivateIpv6GoogleAccess(d.Get("private_ipv6_google_access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("private_ipv6_google_access"); !isEmptyValue(resource_compute_subnetwork_reflect.ValueOf(v)) && (ok || !resource_compute_subnetwork_reflect.DeepEqual(v, privateIpv6GoogleAccessProp)) { - obj["privateIpv6GoogleAccess"] = privateIpv6GoogleAccessProp - } - stackTypeProp, err := expandComputeSubnetworkStackType(d.Get("stack_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("stack_type"); !isEmptyValue(resource_compute_subnetwork_reflect.ValueOf(v)) && (ok || !resource_compute_subnetwork_reflect.DeepEqual(v, stackTypeProp)) { - obj["stackType"] = stackTypeProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_subnetwork_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) - } else { - resource_compute_subnetwork_log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating Subnetwork", userAgent, - d.Timeout(resource_compute_subnetwork_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("log_config") { - obj := make(map[string]interface{}) - - getUrl, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - getRes, err := sendRequest(config, "GET", billingProject, getUrl, userAgent, nil) - if 
err != nil { - return handleNotFoundError(err, d, resource_compute_subnetwork_fmt.Sprintf("ComputeSubnetwork %q", d.Id())) - } - - obj["fingerprint"] = getRes["fingerprint"] - - logConfigProp, err := expandComputeSubnetworkLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); ok || !resource_compute_subnetwork_reflect.DeepEqual(v, logConfigProp) { - obj["logConfig"] = logConfigProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_subnetwork_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) - } else { - resource_compute_subnetwork_log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating Subnetwork", userAgent, - d.Timeout(resource_compute_subnetwork_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("role") { - obj := make(map[string]interface{}) - - getUrl, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - getRes, err := sendRequest(config, "GET", billingProject, getUrl, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_subnetwork_fmt.Sprintf("ComputeSubnetwork %q", d.Id())) - } - - obj["fingerprint"] = getRes["fingerprint"] - - roleProp, err := expandComputeSubnetworkRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("role"); !isEmptyValue(resource_compute_subnetwork_reflect.ValueOf(v)) && (ok || !resource_compute_subnetwork_reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_subnetwork_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) - } else { - resource_compute_subnetwork_log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating Subnetwork", userAgent, - d.Timeout(resource_compute_subnetwork_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("secondary_ip_range") { - obj := make(map[string]interface{}) - - getUrl, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - getRes, err := sendRequest(config, "GET", billingProject, getUrl, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_subnetwork_fmt.Sprintf("ComputeSubnetwork %q", d.Id())) - } - - obj["fingerprint"] = getRes["fingerprint"] - - secondaryIpRangesProp, err := expandComputeSubnetworkSecondaryIpRange(d.Get("secondary_ip_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("secondary_ip_range"); ok || !resource_compute_subnetwork_reflect.DeepEqual(v, secondaryIpRangesProp) { - obj["secondaryIpRanges"] = secondaryIpRangesProp - } - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_subnetwork_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error updating Subnetwork %q: %s", d.Id(), err) - } else { - resource_compute_subnetwork_log.Printf("[DEBUG] Finished updating Subnetwork %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating Subnetwork", userAgent, - d.Timeout(resource_compute_subnetwork_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeSubnetworkRead(d, meta) -} - -func resourceComputeSubnetworkDelete(d *resource_compute_subnetwork_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_subnetwork_fmt.Errorf("Error fetching project for Subnetwork: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_subnetwork_log.Printf("[DEBUG] Deleting Subnetwork %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_subnetwork_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Subnetwork") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting Subnetwork", 
userAgent, - d.Timeout(resource_compute_subnetwork_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_subnetwork_log.Printf("[DEBUG] Finished deleting Subnetwork %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeSubnetworkImport(d *resource_compute_subnetwork_schema.ResourceData, meta interface{}) ([]*resource_compute_subnetwork_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/subnetworks/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/subnetworks/{{name}}") - if err != nil { - return nil, resource_compute_subnetwork_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_subnetwork_schema.ResourceData{d}, nil -} - -func flattenComputeSubnetworkCreationTimestamp(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkDescription(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkGatewayAddress(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkIpCidrRange(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkName(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkNetwork(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeSubnetworkPurpose(v interface{}, 
d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkRole(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkSecondaryIpRange(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "range_name": flattenComputeSubnetworkSecondaryIpRangeRangeName(original["rangeName"], d, config), - "ip_cidr_range": flattenComputeSubnetworkSecondaryIpRangeIpCidrRange(original["ipCidrRange"], d, config), - }) - } - return transformed -} - -func flattenComputeSubnetworkSecondaryIpRangeRangeName(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkSecondaryIpRangeIpCidrRange(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkPrivateIpGoogleAccess(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkPrivateIpv6GoogleAccess(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkRegion(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenComputeSubnetworkLogConfig(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) 
- if len(original) == 0 { - return nil - } - - v, ok := original["enable"] - if ok && !v.(bool) { - return nil - } - - transformed := make(map[string]interface{}) - transformed["flow_sampling"] = original["flowSampling"] - transformed["aggregation_interval"] = original["aggregationInterval"] - transformed["metadata"] = original["metadata"] - if original["metadata"].(string) == "CUSTOM_METADATA" { - transformed["metadata_fields"] = original["metadataFields"] - } else { - - transformed["metadata_fields"] = nil - } - transformed["filter_expr"] = original["filterExpr"] - - return []interface{}{transformed} -} - -func flattenComputeSubnetworkStackType(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkIpv6AccessType(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkIpv6CidrRange(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeSubnetworkExternalIpv6Prefix(v interface{}, d *resource_compute_subnetwork_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeSubnetworkDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkIpCidrRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_subnetwork_fmt.Errorf("Invalid value for network: %s", err) - } - return 
f.RelativeLink(), nil -} - -func expandComputeSubnetworkPurpose(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkRole(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkSecondaryIpRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRangeName, err := expandComputeSubnetworkSecondaryIpRangeRangeName(original["range_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_subnetwork_reflect.ValueOf(transformedRangeName); val.IsValid() && !isEmptyValue(val) { - transformed["rangeName"] = transformedRangeName - } - - transformedIpCidrRange, err := expandComputeSubnetworkSecondaryIpRangeIpCidrRange(original["ip_cidr_range"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_subnetwork_reflect.ValueOf(transformedIpCidrRange); val.IsValid() && !isEmptyValue(val) { - transformed["ipCidrRange"] = transformedIpCidrRange - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeSubnetworkSecondaryIpRangeRangeName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkSecondaryIpRangeIpCidrRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkPrivateIpGoogleAccess(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkPrivateIpv6GoogleAccess(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeSubnetworkRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_subnetwork_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeSubnetworkLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - transformed := make(map[string]interface{}) - if len(l) == 0 || l[0] == nil { - purpose, ok := d.GetOkExists("purpose") - - if ok && purpose.(string) == "INTERNAL_HTTPS_LOAD_BALANCER" { - - return nil, nil - } - - transformed["enable"] = false - return transformed, nil - } - - raw := l[0] - original := raw.(map[string]interface{}) - - transformed["enable"] = true - transformed["aggregationInterval"] = original["aggregation_interval"] - transformed["flowSampling"] = original["flow_sampling"] - transformed["metadata"] = original["metadata"] - transformed["filterExpr"] = original["filter_expr"] - - transformed["metadataFields"] = original["metadata_fields"].(*resource_compute_subnetwork_schema.Set).List() - - return transformed, nil -} - -func expandComputeSubnetworkStackType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeSubnetworkIpv6AccessType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeTargetGrpcProxy() *resource_compute_target_grpc_proxy_schema.Resource { - return &resource_compute_target_grpc_proxy_schema.Resource{ - Create: resourceComputeTargetGrpcProxyCreate, - Read: resourceComputeTargetGrpcProxyRead, - Update: resourceComputeTargetGrpcProxyUpdate, - Delete: resourceComputeTargetGrpcProxyDelete, - - Importer: &resource_compute_target_grpc_proxy_schema.ResourceImporter{ - State: resourceComputeTargetGrpcProxyImport, - }, - - Timeouts: 
&resource_compute_target_grpc_proxy_schema.ResourceTimeout{ - Create: resource_compute_target_grpc_proxy_schema.DefaultTimeout(4 * resource_compute_target_grpc_proxy_time.Minute), - Update: resource_compute_target_grpc_proxy_schema.DefaultTimeout(4 * resource_compute_target_grpc_proxy_time.Minute), - Delete: resource_compute_target_grpc_proxy_schema.DefaultTimeout(4 * resource_compute_target_grpc_proxy_time.Minute), - }, - - Schema: map[string]*resource_compute_target_grpc_proxy_schema.Schema{ - "name": { - Type: resource_compute_target_grpc_proxy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource -is created. The name must be 1-63 characters long, and comply -with RFC1035. Specifically, the name must be 1-63 characters long -and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which -means the first character must be a lowercase letter, and all -following characters must be a dash, lowercase letter, or digit, -except the last character, which cannot be a dash.`, - }, - "description": { - Type: resource_compute_target_grpc_proxy_schema.TypeString, - Optional: true, - Description: `An optional description of this resource.`, - }, - "url_map": { - Type: resource_compute_target_grpc_proxy_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL to the UrlMap resource that defines the mapping from URL to -the BackendService. The protocol field in the BackendService -must be set to GRPC.`, - }, - "validate_for_proxyless": { - Type: resource_compute_target_grpc_proxy_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `If true, indicates that the BackendServices referenced by -the urlMap may be accessed by gRPC applications without using -a sidecar proxy. This will enable configuration checks on urlMap -and its referenced BackendServices to not allow unsupported features. 
-A gRPC application must use "xds:///" scheme in the target URI -of the service it is connecting to. If false, indicates that the -BackendServices referenced by the urlMap will be accessed by gRPC -applications via a sidecar proxy. In this case, a gRPC application -must not use "xds:///" scheme in the target URI of the service -it is connecting to`, - }, - "creation_timestamp": { - Type: resource_compute_target_grpc_proxy_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "fingerprint": { - Type: resource_compute_target_grpc_proxy_schema.TypeString, - Computed: true, - Description: `Fingerprint of this resource. A hash of the contents stored in -this object. This field is used in optimistic locking. This field -will be ignored when inserting a TargetGrpcProxy. An up-to-date -fingerprint must be provided in order to patch/update the -TargetGrpcProxy; otherwise, the request will fail with error -412 conditionNotMet. To see the latest fingerprint, make a get() -request to retrieve the TargetGrpcProxy. 
A base64-encoded string.`, - }, - "self_link_with_id": { - Type: resource_compute_target_grpc_proxy_schema.TypeString, - Computed: true, - Description: `Server-defined URL with id for the resource.`, - }, - "project": { - Type: resource_compute_target_grpc_proxy_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_target_grpc_proxy_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeTargetGrpcProxyCreate(d *resource_compute_target_grpc_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeTargetGrpcProxyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_target_grpc_proxy_reflect.ValueOf(nameProp)) && (ok || !resource_compute_target_grpc_proxy_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeTargetGrpcProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_target_grpc_proxy_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_target_grpc_proxy_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - urlMapProp, err := expandComputeTargetGrpcProxyUrlMap(d.Get("url_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(resource_compute_target_grpc_proxy_reflect.ValueOf(urlMapProp)) && (ok || !resource_compute_target_grpc_proxy_reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - validateForProxylessProp, err := expandComputeTargetGrpcProxyValidateForProxyless(d.Get("validate_for_proxyless"), d, config) - if err 
!= nil { - return err - } else if v, ok := d.GetOkExists("validate_for_proxyless"); !isEmptyValue(resource_compute_target_grpc_proxy_reflect.ValueOf(validateForProxylessProp)) && (ok || !resource_compute_target_grpc_proxy_reflect.DeepEqual(v, validateForProxylessProp)) { - obj["validateForProxyless"] = validateForProxylessProp - } - fingerprintProp, err := expandComputeTargetGrpcProxyFingerprint(d.Get("fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(resource_compute_target_grpc_proxy_reflect.ValueOf(fingerprintProp)) && (ok || !resource_compute_target_grpc_proxy_reflect.DeepEqual(v, fingerprintProp)) { - obj["fingerprint"] = fingerprintProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetGrpcProxies") - if err != nil { - return err - } - - resource_compute_target_grpc_proxy_log.Printf("[DEBUG] Creating new TargetGrpcProxy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_grpc_proxy_fmt.Errorf("Error fetching project for TargetGrpcProxy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_grpc_proxy_schema.TimeoutCreate)) - if err != nil { - return resource_compute_target_grpc_proxy_fmt.Errorf("Error creating TargetGrpcProxy: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/targetGrpcProxies/{{name}}") - if err != nil { - return resource_compute_target_grpc_proxy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating TargetGrpcProxy", userAgent, - d.Timeout(resource_compute_target_grpc_proxy_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return 
resource_compute_target_grpc_proxy_fmt.Errorf("Error waiting to create TargetGrpcProxy: %s", err) - } - - resource_compute_target_grpc_proxy_log.Printf("[DEBUG] Finished creating TargetGrpcProxy %q: %#v", d.Id(), res) - - return resourceComputeTargetGrpcProxyRead(d, meta) -} - -func resourceComputeTargetGrpcProxyRead(d *resource_compute_target_grpc_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetGrpcProxies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_grpc_proxy_fmt.Errorf("Error fetching project for TargetGrpcProxy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_target_grpc_proxy_fmt.Sprintf("ComputeTargetGrpcProxy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_target_grpc_proxy_fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeTargetGrpcProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_target_grpc_proxy_fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - if err := d.Set("name", flattenComputeTargetGrpcProxyName(res["name"], d, config)); err != nil { - return resource_compute_target_grpc_proxy_fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - if err := d.Set("description", flattenComputeTargetGrpcProxyDescription(res["description"], d, config)); err != nil { - return resource_compute_target_grpc_proxy_fmt.Errorf("Error 
reading TargetGrpcProxy: %s", err) - } - if err := d.Set("self_link_with_id", flattenComputeTargetGrpcProxySelfLinkWithId(res["selfLinkWithId"], d, config)); err != nil { - return resource_compute_target_grpc_proxy_fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - if err := d.Set("url_map", flattenComputeTargetGrpcProxyUrlMap(res["urlMap"], d, config)); err != nil { - return resource_compute_target_grpc_proxy_fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - if err := d.Set("validate_for_proxyless", flattenComputeTargetGrpcProxyValidateForProxyless(res["validateForProxyless"], d, config)); err != nil { - return resource_compute_target_grpc_proxy_fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - if err := d.Set("fingerprint", flattenComputeTargetGrpcProxyFingerprint(res["fingerprint"], d, config)); err != nil { - return resource_compute_target_grpc_proxy_fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_target_grpc_proxy_fmt.Errorf("Error reading TargetGrpcProxy: %s", err) - } - - return nil -} - -func resourceComputeTargetGrpcProxyUpdate(d *resource_compute_target_grpc_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_grpc_proxy_fmt.Errorf("Error fetching project for TargetGrpcProxy: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeTargetGrpcProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_target_grpc_proxy_reflect.ValueOf(v)) && (ok || 
!resource_compute_target_grpc_proxy_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - fingerprintProp, err := expandComputeTargetGrpcProxyFingerprint(d.Get("fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(resource_compute_target_grpc_proxy_reflect.ValueOf(v)) && (ok || !resource_compute_target_grpc_proxy_reflect.DeepEqual(v, fingerprintProp)) { - obj["fingerprint"] = fingerprintProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetGrpcProxies/{{name}}") - if err != nil { - return err - } - - resource_compute_target_grpc_proxy_log.Printf("[DEBUG] Updating TargetGrpcProxy %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_grpc_proxy_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_target_grpc_proxy_fmt.Errorf("Error updating TargetGrpcProxy %q: %s", d.Id(), err) - } else { - resource_compute_target_grpc_proxy_log.Printf("[DEBUG] Finished updating TargetGrpcProxy %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating TargetGrpcProxy", userAgent, - d.Timeout(resource_compute_target_grpc_proxy_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeTargetGrpcProxyRead(d, meta) -} - -func resourceComputeTargetGrpcProxyDelete(d *resource_compute_target_grpc_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_grpc_proxy_fmt.Errorf("Error fetching project for TargetGrpcProxy: %s", err) - } - 
billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetGrpcProxies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_target_grpc_proxy_log.Printf("[DEBUG] Deleting TargetGrpcProxy %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_grpc_proxy_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TargetGrpcProxy") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting TargetGrpcProxy", userAgent, - d.Timeout(resource_compute_target_grpc_proxy_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_target_grpc_proxy_log.Printf("[DEBUG] Finished deleting TargetGrpcProxy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeTargetGrpcProxyImport(d *resource_compute_target_grpc_proxy_schema.ResourceData, meta interface{}) ([]*resource_compute_target_grpc_proxy_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/targetGrpcProxies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/targetGrpcProxies/{{name}}") - if err != nil { - return nil, resource_compute_target_grpc_proxy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_target_grpc_proxy_schema.ResourceData{d}, nil -} - -func flattenComputeTargetGrpcProxyCreationTimestamp(v interface{}, d *resource_compute_target_grpc_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetGrpcProxyName(v interface{}, d *resource_compute_target_grpc_proxy_schema.ResourceData, config *Config) interface{} 
{ - return v -} - -func flattenComputeTargetGrpcProxyDescription(v interface{}, d *resource_compute_target_grpc_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetGrpcProxySelfLinkWithId(v interface{}, d *resource_compute_target_grpc_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetGrpcProxyUrlMap(v interface{}, d *resource_compute_target_grpc_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetGrpcProxyValidateForProxyless(v interface{}, d *resource_compute_target_grpc_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetGrpcProxyFingerprint(v interface{}, d *resource_compute_target_grpc_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeTargetGrpcProxyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetGrpcProxyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetGrpcProxyUrlMap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetGrpcProxyValidateForProxyless(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetGrpcProxyFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeTargetHttpProxy() *resource_compute_target_http_proxy_schema.Resource { - return &resource_compute_target_http_proxy_schema.Resource{ - Create: resourceComputeTargetHttpProxyCreate, - Read: resourceComputeTargetHttpProxyRead, - Update: resourceComputeTargetHttpProxyUpdate, - Delete: resourceComputeTargetHttpProxyDelete, - - Importer: 
&resource_compute_target_http_proxy_schema.ResourceImporter{ - State: resourceComputeTargetHttpProxyImport, - }, - - Timeouts: &resource_compute_target_http_proxy_schema.ResourceTimeout{ - Create: resource_compute_target_http_proxy_schema.DefaultTimeout(4 * resource_compute_target_http_proxy_time.Minute), - Update: resource_compute_target_http_proxy_schema.DefaultTimeout(4 * resource_compute_target_http_proxy_time.Minute), - Delete: resource_compute_target_http_proxy_schema.DefaultTimeout(4 * resource_compute_target_http_proxy_time.Minute), - }, - - Schema: map[string]*resource_compute_target_http_proxy_schema.Schema{ - "name": { - Type: resource_compute_target_http_proxy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "url_map": { - Type: resource_compute_target_http_proxy_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the UrlMap resource that defines the mapping from URL -to the BackendService.`, - }, - "description": { - Type: resource_compute_target_http_proxy_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "proxy_bind": { - Type: resource_compute_target_http_proxy_schema.TypeBool, - Computed: true, - Optional: true, - ForceNew: true, - Description: `This field only applies when the forwarding rule that references -this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED.`, - }, - "creation_timestamp": { - Type: resource_compute_target_http_proxy_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "proxy_id": { - Type: resource_compute_target_http_proxy_schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: resource_compute_target_http_proxy_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_target_http_proxy_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeTargetHttpProxyCreate(d *resource_compute_target_http_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeTargetHttpProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("description"); !isEmptyValue(resource_compute_target_http_proxy_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_target_http_proxy_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeTargetHttpProxyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_target_http_proxy_reflect.ValueOf(nameProp)) && (ok || !resource_compute_target_http_proxy_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - urlMapProp, err := expandComputeTargetHttpProxyUrlMap(d.Get("url_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(resource_compute_target_http_proxy_reflect.ValueOf(urlMapProp)) && (ok || !resource_compute_target_http_proxy_reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - proxyBindProp, err := expandComputeTargetHttpProxyProxyBind(d.Get("proxy_bind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("proxy_bind"); !isEmptyValue(resource_compute_target_http_proxy_reflect.ValueOf(proxyBindProp)) && (ok || !resource_compute_target_http_proxy_reflect.DeepEqual(v, proxyBindProp)) { - obj["proxyBind"] = proxyBindProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpProxies") - if err != nil { - return err - } - - resource_compute_target_http_proxy_log.Printf("[DEBUG] Creating new TargetHttpProxy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_http_proxy_fmt.Errorf("Error fetching project for TargetHttpProxy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, 
d.Timeout(resource_compute_target_http_proxy_schema.TimeoutCreate)) - if err != nil { - return resource_compute_target_http_proxy_fmt.Errorf("Error creating TargetHttpProxy: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/targetHttpProxies/{{name}}") - if err != nil { - return resource_compute_target_http_proxy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating TargetHttpProxy", userAgent, - d.Timeout(resource_compute_target_http_proxy_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_target_http_proxy_fmt.Errorf("Error waiting to create TargetHttpProxy: %s", err) - } - - resource_compute_target_http_proxy_log.Printf("[DEBUG] Finished creating TargetHttpProxy %q: %#v", d.Id(), res) - - return resourceComputeTargetHttpProxyRead(d, meta) -} - -func resourceComputeTargetHttpProxyRead(d *resource_compute_target_http_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpProxies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_http_proxy_fmt.Errorf("Error fetching project for TargetHttpProxy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_target_http_proxy_fmt.Sprintf("ComputeTargetHttpProxy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_target_http_proxy_fmt.Errorf("Error reading TargetHttpProxy: %s", err) - } - 
- if err := d.Set("creation_timestamp", flattenComputeTargetHttpProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_target_http_proxy_fmt.Errorf("Error reading TargetHttpProxy: %s", err) - } - if err := d.Set("description", flattenComputeTargetHttpProxyDescription(res["description"], d, config)); err != nil { - return resource_compute_target_http_proxy_fmt.Errorf("Error reading TargetHttpProxy: %s", err) - } - if err := d.Set("proxy_id", flattenComputeTargetHttpProxyProxyId(res["id"], d, config)); err != nil { - return resource_compute_target_http_proxy_fmt.Errorf("Error reading TargetHttpProxy: %s", err) - } - if err := d.Set("name", flattenComputeTargetHttpProxyName(res["name"], d, config)); err != nil { - return resource_compute_target_http_proxy_fmt.Errorf("Error reading TargetHttpProxy: %s", err) - } - if err := d.Set("url_map", flattenComputeTargetHttpProxyUrlMap(res["urlMap"], d, config)); err != nil { - return resource_compute_target_http_proxy_fmt.Errorf("Error reading TargetHttpProxy: %s", err) - } - if err := d.Set("proxy_bind", flattenComputeTargetHttpProxyProxyBind(res["proxyBind"], d, config)); err != nil { - return resource_compute_target_http_proxy_fmt.Errorf("Error reading TargetHttpProxy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_target_http_proxy_fmt.Errorf("Error reading TargetHttpProxy: %s", err) - } - - return nil -} - -func resourceComputeTargetHttpProxyUpdate(d *resource_compute_target_http_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_http_proxy_fmt.Errorf("Error fetching project for TargetHttpProxy: %s", err) - } - billingProject = project - - 
d.Partial(true) - - if d.HasChange("url_map") { - obj := make(map[string]interface{}) - - urlMapProp, err := expandComputeTargetHttpProxyUrlMap(d.Get("url_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(resource_compute_target_http_proxy_reflect.ValueOf(v)) && (ok || !resource_compute_target_http_proxy_reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/targetHttpProxies/{{name}}/setUrlMap") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_http_proxy_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_target_http_proxy_fmt.Errorf("Error updating TargetHttpProxy %q: %s", d.Id(), err) - } else { - resource_compute_target_http_proxy_log.Printf("[DEBUG] Finished updating TargetHttpProxy %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating TargetHttpProxy", userAgent, - d.Timeout(resource_compute_target_http_proxy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeTargetHttpProxyRead(d, meta) -} - -func resourceComputeTargetHttpProxyDelete(d *resource_compute_target_http_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_http_proxy_fmt.Errorf("Error fetching project for TargetHttpProxy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpProxies/{{name}}") - if err != nil { - 
return err - } - - var obj map[string]interface{} - resource_compute_target_http_proxy_log.Printf("[DEBUG] Deleting TargetHttpProxy %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_http_proxy_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TargetHttpProxy") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting TargetHttpProxy", userAgent, - d.Timeout(resource_compute_target_http_proxy_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_target_http_proxy_log.Printf("[DEBUG] Finished deleting TargetHttpProxy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeTargetHttpProxyImport(d *resource_compute_target_http_proxy_schema.ResourceData, meta interface{}) ([]*resource_compute_target_http_proxy_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/targetHttpProxies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/targetHttpProxies/{{name}}") - if err != nil { - return nil, resource_compute_target_http_proxy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_target_http_proxy_schema.ResourceData{d}, nil -} - -func flattenComputeTargetHttpProxyCreationTimestamp(v interface{}, d *resource_compute_target_http_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetHttpProxyDescription(v interface{}, d *resource_compute_target_http_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetHttpProxyProxyId(v interface{}, d *resource_compute_target_http_proxy_schema.ResourceData, config *Config) 
interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_target_http_proxy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeTargetHttpProxyName(v interface{}, d *resource_compute_target_http_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetHttpProxyUrlMap(v interface{}, d *resource_compute_target_http_proxy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeTargetHttpProxyProxyBind(v interface{}, d *resource_compute_target_http_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeTargetHttpProxyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetHttpProxyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetHttpProxyUrlMap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("urlMaps", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_target_http_proxy_fmt.Errorf("Invalid value for url_map: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeTargetHttpProxyProxyBind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeTargetHttpsProxy() *resource_compute_target_https_proxy_schema.Resource { - return &resource_compute_target_https_proxy_schema.Resource{ - Create: resourceComputeTargetHttpsProxyCreate, - Read: resourceComputeTargetHttpsProxyRead, - Update: resourceComputeTargetHttpsProxyUpdate, - Delete: resourceComputeTargetHttpsProxyDelete, - - Importer: 
&resource_compute_target_https_proxy_schema.ResourceImporter{ - State: resourceComputeTargetHttpsProxyImport, - }, - - Timeouts: &resource_compute_target_https_proxy_schema.ResourceTimeout{ - Create: resource_compute_target_https_proxy_schema.DefaultTimeout(4 * resource_compute_target_https_proxy_time.Minute), - Update: resource_compute_target_https_proxy_schema.DefaultTimeout(4 * resource_compute_target_https_proxy_time.Minute), - Delete: resource_compute_target_https_proxy_schema.DefaultTimeout(4 * resource_compute_target_https_proxy_time.Minute), - }, - - Schema: map[string]*resource_compute_target_https_proxy_schema.Schema{ - "name": { - Type: resource_compute_target_https_proxy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "ssl_certificates": { - Type: resource_compute_target_https_proxy_schema.TypeList, - Required: true, - Description: `A list of SslCertificate resources that are used to authenticate -connections between users and the load balancer. 
At least one SSL -certificate must be specified.`, - Elem: &resource_compute_target_https_proxy_schema.Schema{ - Type: resource_compute_target_https_proxy_schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "url_map": { - Type: resource_compute_target_https_proxy_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the UrlMap resource that defines the mapping from URL -to the BackendService.`, - }, - "description": { - Type: resource_compute_target_https_proxy_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "proxy_bind": { - Type: resource_compute_target_https_proxy_schema.TypeBool, - Computed: true, - Optional: true, - ForceNew: true, - Description: `This field only applies when the forwarding rule that references -this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED.`, - }, - "quic_override": { - Type: resource_compute_target_https_proxy_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_target_https_proxy_validation.StringInSlice([]string{"NONE", "ENABLE", "DISABLE", ""}, false), - Description: `Specifies the QUIC override policy for this resource. This determines -whether the load balancer will attempt to negotiate QUIC with clients -or not. Can specify one of NONE, ENABLE, or DISABLE. If NONE is -specified, uses the QUIC policy with no user overrides, which is -equivalent to DISABLE. Default value: "NONE" Possible values: ["NONE", "ENABLE", "DISABLE"]`, - Default: "NONE", - }, - "ssl_policy": { - Type: resource_compute_target_https_proxy_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the SslPolicy resource that will be associated with -the TargetHttpsProxy resource. 
If not set, the TargetHttpsProxy -resource will not have any SSL policy configured.`, - }, - "creation_timestamp": { - Type: resource_compute_target_https_proxy_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "proxy_id": { - Type: resource_compute_target_https_proxy_schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: resource_compute_target_https_proxy_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_target_https_proxy_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeTargetHttpsProxyCreate(d *resource_compute_target_https_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeTargetHttpsProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_target_https_proxy_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_target_https_proxy_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeTargetHttpsProxyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_target_https_proxy_reflect.ValueOf(nameProp)) && (ok || !resource_compute_target_https_proxy_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - quicOverrideProp, err := expandComputeTargetHttpsProxyQuicOverride(d.Get("quic_override"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("quic_override"); 
!isEmptyValue(resource_compute_target_https_proxy_reflect.ValueOf(quicOverrideProp)) && (ok || !resource_compute_target_https_proxy_reflect.DeepEqual(v, quicOverrideProp)) { - obj["quicOverride"] = quicOverrideProp - } - sslCertificatesProp, err := expandComputeTargetHttpsProxySslCertificates(d.Get("ssl_certificates"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_certificates"); !isEmptyValue(resource_compute_target_https_proxy_reflect.ValueOf(sslCertificatesProp)) && (ok || !resource_compute_target_https_proxy_reflect.DeepEqual(v, sslCertificatesProp)) { - obj["sslCertificates"] = sslCertificatesProp - } - sslPolicyProp, err := expandComputeTargetHttpsProxySslPolicy(d.Get("ssl_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_policy"); !isEmptyValue(resource_compute_target_https_proxy_reflect.ValueOf(sslPolicyProp)) && (ok || !resource_compute_target_https_proxy_reflect.DeepEqual(v, sslPolicyProp)) { - obj["sslPolicy"] = sslPolicyProp - } - urlMapProp, err := expandComputeTargetHttpsProxyUrlMap(d.Get("url_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(resource_compute_target_https_proxy_reflect.ValueOf(urlMapProp)) && (ok || !resource_compute_target_https_proxy_reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - proxyBindProp, err := expandComputeTargetHttpsProxyProxyBind(d.Get("proxy_bind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("proxy_bind"); !isEmptyValue(resource_compute_target_https_proxy_reflect.ValueOf(proxyBindProp)) && (ok || !resource_compute_target_https_proxy_reflect.DeepEqual(v, proxyBindProp)) { - obj["proxyBind"] = proxyBindProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies") - if err != nil { - return err - } - - resource_compute_target_https_proxy_log.Printf("[DEBUG] Creating new 
TargetHttpsProxy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error fetching project for TargetHttpsProxy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_https_proxy_schema.TimeoutCreate)) - if err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error creating TargetHttpsProxy: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/targetHttpsProxies/{{name}}") - if err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating TargetHttpsProxy", userAgent, - d.Timeout(resource_compute_target_https_proxy_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_target_https_proxy_fmt.Errorf("Error waiting to create TargetHttpsProxy: %s", err) - } - - resource_compute_target_https_proxy_log.Printf("[DEBUG] Finished creating TargetHttpsProxy %q: %#v", d.Id(), res) - - return resourceComputeTargetHttpsProxyRead(d, meta) -} - -func resourceComputeTargetHttpsProxyRead(d *resource_compute_target_https_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error fetching project for TargetHttpsProxy: %s", err) - } - billingProject = project - - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_target_https_proxy_fmt.Sprintf("ComputeTargetHttpsProxy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeTargetHttpsProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("description", flattenComputeTargetHttpsProxyDescription(res["description"], d, config)); err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("proxy_id", flattenComputeTargetHttpsProxyProxyId(res["id"], d, config)); err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("name", flattenComputeTargetHttpsProxyName(res["name"], d, config)); err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("quic_override", flattenComputeTargetHttpsProxyQuicOverride(res["quicOverride"], d, config)); err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("ssl_certificates", flattenComputeTargetHttpsProxySslCertificates(res["sslCertificates"], d, config)); err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("ssl_policy", flattenComputeTargetHttpsProxySslPolicy(res["sslPolicy"], d, config)); err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error reading 
TargetHttpsProxy: %s", err) - } - if err := d.Set("url_map", flattenComputeTargetHttpsProxyUrlMap(res["urlMap"], d, config)); err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("proxy_bind", flattenComputeTargetHttpsProxyProxyBind(res["proxyBind"], d, config)); err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error reading TargetHttpsProxy: %s", err) - } - - return nil -} - -func resourceComputeTargetHttpsProxyUpdate(d *resource_compute_target_https_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error fetching project for TargetHttpsProxy: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("quic_override") { - obj := make(map[string]interface{}) - - quicOverrideProp, err := expandComputeTargetHttpsProxyQuicOverride(d.Get("quic_override"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("quic_override"); !isEmptyValue(resource_compute_target_https_proxy_reflect.ValueOf(v)) && (ok || !resource_compute_target_https_proxy_reflect.DeepEqual(v, quicOverrideProp)) { - obj["quicOverride"] = quicOverrideProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}/setQuicOverride") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, 
d.Timeout(resource_compute_target_https_proxy_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error updating TargetHttpsProxy %q: %s", d.Id(), err) - } else { - resource_compute_target_https_proxy_log.Printf("[DEBUG] Finished updating TargetHttpsProxy %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating TargetHttpsProxy", userAgent, - d.Timeout(resource_compute_target_https_proxy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("ssl_certificates") { - obj := make(map[string]interface{}) - - sslCertificatesProp, err := expandComputeTargetHttpsProxySslCertificates(d.Get("ssl_certificates"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_certificates"); !isEmptyValue(resource_compute_target_https_proxy_reflect.ValueOf(v)) && (ok || !resource_compute_target_https_proxy_reflect.DeepEqual(v, sslCertificatesProp)) { - obj["sslCertificates"] = sslCertificatesProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/targetHttpsProxies/{{name}}/setSslCertificates") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_https_proxy_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error updating TargetHttpsProxy %q: %s", d.Id(), err) - } else { - resource_compute_target_https_proxy_log.Printf("[DEBUG] Finished updating TargetHttpsProxy %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating TargetHttpsProxy", userAgent, - d.Timeout(resource_compute_target_https_proxy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("ssl_policy") { - obj := make(map[string]interface{}) - - 
sslPolicyProp, err := expandComputeTargetHttpsProxySslPolicy(d.Get("ssl_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_policy"); !isEmptyValue(resource_compute_target_https_proxy_reflect.ValueOf(v)) && (ok || !resource_compute_target_https_proxy_reflect.DeepEqual(v, sslPolicyProp)) { - obj["sslPolicy"] = sslPolicyProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}/setSslPolicy") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_https_proxy_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error updating TargetHttpsProxy %q: %s", d.Id(), err) - } else { - resource_compute_target_https_proxy_log.Printf("[DEBUG] Finished updating TargetHttpsProxy %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating TargetHttpsProxy", userAgent, - d.Timeout(resource_compute_target_https_proxy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("url_map") { - obj := make(map[string]interface{}) - - urlMapProp, err := expandComputeTargetHttpsProxyUrlMap(d.Get("url_map"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("url_map"); !isEmptyValue(resource_compute_target_https_proxy_reflect.ValueOf(v)) && (ok || !resource_compute_target_https_proxy_reflect.DeepEqual(v, urlMapProp)) { - obj["urlMap"] = urlMapProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/targetHttpsProxies/{{name}}/setUrlMap") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, 
userAgent, obj, d.Timeout(resource_compute_target_https_proxy_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error updating TargetHttpsProxy %q: %s", d.Id(), err) - } else { - resource_compute_target_https_proxy_log.Printf("[DEBUG] Finished updating TargetHttpsProxy %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating TargetHttpsProxy", userAgent, - d.Timeout(resource_compute_target_https_proxy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeTargetHttpsProxyRead(d, meta) -} - -func resourceComputeTargetHttpsProxyDelete(d *resource_compute_target_https_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_https_proxy_fmt.Errorf("Error fetching project for TargetHttpsProxy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetHttpsProxies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_target_https_proxy_log.Printf("[DEBUG] Deleting TargetHttpsProxy %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_https_proxy_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TargetHttpsProxy") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting TargetHttpsProxy", userAgent, - d.Timeout(resource_compute_target_https_proxy_schema.TimeoutDelete)) - - if err != nil { - return err - } - - 
resource_compute_target_https_proxy_log.Printf("[DEBUG] Finished deleting TargetHttpsProxy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeTargetHttpsProxyImport(d *resource_compute_target_https_proxy_schema.ResourceData, meta interface{}) ([]*resource_compute_target_https_proxy_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/targetHttpsProxies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/targetHttpsProxies/{{name}}") - if err != nil { - return nil, resource_compute_target_https_proxy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_target_https_proxy_schema.ResourceData{d}, nil -} - -func flattenComputeTargetHttpsProxyCreationTimestamp(v interface{}, d *resource_compute_target_https_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetHttpsProxyDescription(v interface{}, d *resource_compute_target_https_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetHttpsProxyProxyId(v interface{}, d *resource_compute_target_https_proxy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_target_https_proxy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeTargetHttpsProxyName(v interface{}, d *resource_compute_target_https_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetHttpsProxyQuicOverride(v interface{}, d *resource_compute_target_https_proxy_schema.ResourceData, config *Config) interface{} { - if v == nil || 
isEmptyValue(resource_compute_target_https_proxy_reflect.ValueOf(v)) { - return "NONE" - } - - return v -} - -func flattenComputeTargetHttpsProxySslCertificates(v interface{}, d *resource_compute_target_https_proxy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeTargetHttpsProxySslPolicy(v interface{}, d *resource_compute_target_https_proxy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeTargetHttpsProxyUrlMap(v interface{}, d *resource_compute_target_https_proxy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeTargetHttpsProxyProxyBind(v interface{}, d *resource_compute_target_https_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeTargetHttpsProxyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetHttpsProxyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetHttpsProxyQuicOverride(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetHttpsProxySslCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - return nil, resource_compute_target_https_proxy_fmt.Errorf("Invalid value for ssl_certificates: nil") - } - f, err := parseGlobalFieldValue("sslCertificates", raw.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_target_https_proxy_fmt.Errorf("Invalid value for ssl_certificates: %s", err) - 
} - req = append(req, f.RelativeLink()) - } - return req, nil -} - -func expandComputeTargetHttpsProxySslPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("sslPolicies", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_target_https_proxy_fmt.Errorf("Invalid value for ssl_policy: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeTargetHttpsProxyUrlMap(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("urlMaps", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_target_https_proxy_fmt.Errorf("Invalid value for url_map: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeTargetHttpsProxyProxyBind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeTargetInstance() *resource_compute_target_instance_schema.Resource { - return &resource_compute_target_instance_schema.Resource{ - Create: resourceComputeTargetInstanceCreate, - Read: resourceComputeTargetInstanceRead, - Delete: resourceComputeTargetInstanceDelete, - - Importer: &resource_compute_target_instance_schema.ResourceImporter{ - State: resourceComputeTargetInstanceImport, - }, - - Timeouts: &resource_compute_target_instance_schema.ResourceTimeout{ - Create: resource_compute_target_instance_schema.DefaultTimeout(4 * resource_compute_target_instance_time.Minute), - Delete: resource_compute_target_instance_schema.DefaultTimeout(4 * resource_compute_target_instance_time.Minute), - }, - - Schema: map[string]*resource_compute_target_instance_schema.Schema{ - "instance": { - Type: resource_compute_target_instance_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Compute instance VM handling traffic for this target instance. 
-Accepts the instance self-link, relative path -(e.g. 'projects/project/zones/zone/instances/instance') or name. If -name is given, the zone will default to the given zone or -the provider-default zone and the project will default to the -provider-level project.`, - }, - "name": { - Type: resource_compute_target_instance_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "description": { - Type: resource_compute_target_instance_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "nat_policy": { - Type: resource_compute_target_instance_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_compute_target_instance_validation.StringInSlice([]string{"NO_NAT", ""}, false), - Description: `NAT option controlling how IPs are NAT'ed to the instance. -Currently only NO_NAT (default value) is supported. 
Default value: "NO_NAT" Possible values: ["NO_NAT"]`, - Default: "NO_NAT", - }, - "zone": { - Type: resource_compute_target_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the zone where the target instance resides.`, - }, - "creation_timestamp": { - Type: resource_compute_target_instance_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "project": { - Type: resource_compute_target_instance_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_target_instance_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeTargetInstanceCreate(d *resource_compute_target_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeTargetInstanceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_target_instance_reflect.ValueOf(nameProp)) && (ok || !resource_compute_target_instance_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeTargetInstanceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_target_instance_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_target_instance_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - instanceProp, err := expandComputeTargetInstanceInstance(d.Get("instance"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance"); 
!isEmptyValue(resource_compute_target_instance_reflect.ValueOf(instanceProp)) && (ok || !resource_compute_target_instance_reflect.DeepEqual(v, instanceProp)) { - obj["instance"] = instanceProp - } - natPolicyProp, err := expandComputeTargetInstanceNatPolicy(d.Get("nat_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("nat_policy"); !isEmptyValue(resource_compute_target_instance_reflect.ValueOf(natPolicyProp)) && (ok || !resource_compute_target_instance_reflect.DeepEqual(v, natPolicyProp)) { - obj["natPolicy"] = natPolicyProp - } - zoneProp, err := expandComputeTargetInstanceZone(d.Get("zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(resource_compute_target_instance_reflect.ValueOf(zoneProp)) && (ok || !resource_compute_target_instance_reflect.DeepEqual(v, zoneProp)) { - obj["zone"] = zoneProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/targetInstances") - if err != nil { - return err - } - - resource_compute_target_instance_log.Printf("[DEBUG] Creating new TargetInstance: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_instance_fmt.Errorf("Error fetching project for TargetInstance: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_instance_schema.TimeoutCreate)) - if err != nil { - return resource_compute_target_instance_fmt.Errorf("Error creating TargetInstance: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/targetInstances/{{name}}") - if err != nil { - return resource_compute_target_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - 
config, res, project, "Creating TargetInstance", userAgent, - d.Timeout(resource_compute_target_instance_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_target_instance_fmt.Errorf("Error waiting to create TargetInstance: %s", err) - } - - resource_compute_target_instance_log.Printf("[DEBUG] Finished creating TargetInstance %q: %#v", d.Id(), res) - - return resourceComputeTargetInstanceRead(d, meta) -} - -func resourceComputeTargetInstanceRead(d *resource_compute_target_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/targetInstances/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_instance_fmt.Errorf("Error fetching project for TargetInstance: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_target_instance_fmt.Sprintf("ComputeTargetInstance %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_target_instance_fmt.Errorf("Error reading TargetInstance: %s", err) - } - - if err := d.Set("name", flattenComputeTargetInstanceName(res["name"], d, config)); err != nil { - return resource_compute_target_instance_fmt.Errorf("Error reading TargetInstance: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeTargetInstanceCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_target_instance_fmt.Errorf("Error reading TargetInstance: %s", err) - } - if err := 
d.Set("description", flattenComputeTargetInstanceDescription(res["description"], d, config)); err != nil { - return resource_compute_target_instance_fmt.Errorf("Error reading TargetInstance: %s", err) - } - if err := d.Set("instance", flattenComputeTargetInstanceInstance(res["instance"], d, config)); err != nil { - return resource_compute_target_instance_fmt.Errorf("Error reading TargetInstance: %s", err) - } - if err := d.Set("nat_policy", flattenComputeTargetInstanceNatPolicy(res["natPolicy"], d, config)); err != nil { - return resource_compute_target_instance_fmt.Errorf("Error reading TargetInstance: %s", err) - } - if err := d.Set("zone", flattenComputeTargetInstanceZone(res["zone"], d, config)); err != nil { - return resource_compute_target_instance_fmt.Errorf("Error reading TargetInstance: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_target_instance_fmt.Errorf("Error reading TargetInstance: %s", err) - } - - return nil -} - -func resourceComputeTargetInstanceDelete(d *resource_compute_target_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_instance_fmt.Errorf("Error fetching project for TargetInstance: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/targetInstances/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_target_instance_log.Printf("[DEBUG] Deleting TargetInstance %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, 
d.Timeout(resource_compute_target_instance_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TargetInstance") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting TargetInstance", userAgent, - d.Timeout(resource_compute_target_instance_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_target_instance_log.Printf("[DEBUG] Finished deleting TargetInstance %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeTargetInstanceImport(d *resource_compute_target_instance_schema.ResourceData, meta interface{}) ([]*resource_compute_target_instance_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/zones/(?P[^/]+)/targetInstances/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/zones/{{zone}}/targetInstances/{{name}}") - if err != nil { - return nil, resource_compute_target_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_target_instance_schema.ResourceData{d}, nil -} - -func flattenComputeTargetInstanceName(v interface{}, d *resource_compute_target_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetInstanceCreationTimestamp(v interface{}, d *resource_compute_target_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetInstanceDescription(v interface{}, d *resource_compute_target_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetInstanceInstance(v interface{}, d *resource_compute_target_instance_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeTargetInstanceNatPolicy(v interface{}, 
d *resource_compute_target_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetInstanceZone(v interface{}, d *resource_compute_target_instance_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeTargetInstanceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetInstanceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetInstanceInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - if v == nil || v.(string) == "" { - - return "", nil - } else if resource_compute_target_instance_strings.HasPrefix(v.(string), "https://") { - - return v, nil - } else if resource_compute_target_instance_strings.HasPrefix(v.(string), "projects/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if resource_compute_target_instance_strings.HasPrefix(v.(string), "regions/") || resource_compute_target_instance_strings.HasPrefix(v.(string), "zones/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil -} - -func expandComputeTargetInstanceNatPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetInstanceZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true) - if err != nil { - return nil, 
resource_compute_target_instance_fmt.Errorf("Invalid value for zone: %s", err) - } - return f.RelativeLink(), nil -} - -var instancesSelfLinkPattern = resource_compute_target_pool_regexp.MustCompile(resource_compute_target_pool_fmt.Sprintf(zonalLinkBasePattern, "instances")) - -func resourceComputeTargetPool() *resource_compute_target_pool_schema.Resource { - return &resource_compute_target_pool_schema.Resource{ - Create: resourceComputeTargetPoolCreate, - Read: resourceComputeTargetPoolRead, - Delete: resourceComputeTargetPoolDelete, - Update: resourceComputeTargetPoolUpdate, - Importer: &resource_compute_target_pool_schema.ResourceImporter{ - State: resourceTargetPoolStateImporter, - }, - - Timeouts: &resource_compute_target_pool_schema.ResourceTimeout{ - Create: resource_compute_target_pool_schema.DefaultTimeout(4 * resource_compute_target_pool_time.Minute), - Update: resource_compute_target_pool_schema.DefaultTimeout(4 * resource_compute_target_pool_time.Minute), - Delete: resource_compute_target_pool_schema.DefaultTimeout(4 * resource_compute_target_pool_time.Minute), - }, - - Schema: map[string]*resource_compute_target_pool_schema.Schema{ - "name": { - Type: resource_compute_target_pool_schema.TypeString, - Required: true, - ForceNew: true, - Description: `A unique name for the resource, required by GCE. Changing this forces a new resource to be created.`, - }, - - "backup_pool": { - Type: resource_compute_target_pool_schema.TypeString, - Optional: true, - ForceNew: false, - Description: `URL to the backup target pool. 
Must also set failover_ratio.`, - }, - - "description": { - Type: resource_compute_target_pool_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Textual description field.`, - }, - - "failover_ratio": { - Type: resource_compute_target_pool_schema.TypeFloat, - Optional: true, - ForceNew: true, - Description: `Ratio (0 to 1) of failed nodes before using the backup pool (which must also be set).`, - }, - - "health_checks": { - Type: resource_compute_target_pool_schema.TypeList, - Optional: true, - ForceNew: false, - MaxItems: 1, - Elem: &resource_compute_target_pool_schema.Schema{ - Type: resource_compute_target_pool_schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - Description: `List of zero or one health check name or self_link. Only legacy google_compute_http_health_check is supported.`, - }, - - "instances": { - Type: resource_compute_target_pool_schema.TypeSet, - Optional: true, - Computed: true, - ForceNew: false, - Elem: &resource_compute_target_pool_schema.Schema{ - Type: resource_compute_target_pool_schema.TypeString, - StateFunc: func(v interface{}) string { - return canonicalizeInstanceRef(v.(string)) - }, - }, - Set: func(v interface{}) int { - return resource_compute_target_pool_schema.HashString(canonicalizeInstanceRef(v.(string))) - }, - Description: `List of instances in the pool. They can be given as URLs, or in the form of "zone/name". Note that the instances need not exist at the time of target pool creation, so there is no need to use the Terraform interpolators to create a dependency on the instances from the target pool.`, - }, - - "project": { - Type: resource_compute_target_pool_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, - }, - - "region": { - Type: resource_compute_target_pool_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: `Where the target pool resides. Defaults to project region.`, - }, - - "self_link": { - Type: resource_compute_target_pool_schema.TypeString, - Computed: true, - Description: `The URI of the created resource.`, - }, - - "session_affinity": { - Type: resource_compute_target_pool_schema.TypeString, - Optional: true, - ForceNew: true, - Default: "NONE", - Description: `How to distribute load. Options are "NONE" (no affinity). "CLIENT_IP" (hash of the source/dest addresses / ports), and "CLIENT_IP_PROTO" also includes the protocol (default "NONE").`, - }, - }, - UseJSONNumber: true, - } -} - -func canonicalizeInstanceRef(instanceRef string) string { - - parts := instancesSelfLinkPattern.FindStringSubmatch(instanceRef) - - if len(parts) < 4 { - return instanceRef - } - - return resource_compute_target_pool_fmt.Sprintf("%s/%s", parts[2], parts[3]) - -} - -func convertHealthChecks(healthChecks []interface{}, d *resource_compute_target_pool_schema.ResourceData, config *Config) ([]string, error) { - if len(healthChecks) == 0 { - return []string{}, nil - } - - hc, err := ParseHttpHealthCheckFieldValue(healthChecks[0].(string), d, config) - if err != nil { - return nil, err - } - - return []string{hc.RelativeLink()}, nil -} - -func convertInstancesToUrls(d *resource_compute_target_pool_schema.ResourceData, config *Config, project string, names *resource_compute_target_pool_schema.Set) ([]string, error) { - urls := make([]string, len(names.List())) - for i, nameI := range names.List() { - name := nameI.(string) - - if resource_compute_target_pool_strings.HasPrefix(name, "https://") { - urls[i] = name - } else { - splitName := resource_compute_target_pool_strings.Split(name, "/") - if len(splitName) != 2 { - return nil, resource_compute_target_pool_fmt.Errorf("Invalid instance 
name, require URL or zone/name: %s", name) - } else { - url, err := replaceVars(d, config, resource_compute_target_pool_fmt.Sprintf( - "{{ComputeBasePath}}projects/%s/zones/%s/instances/%s", - project, splitName[0], splitName[1])) - if err != nil { - return nil, err - } - urls[i] = url - } - } - } - return urls, nil -} - -func resourceComputeTargetPoolCreate(d *resource_compute_target_pool_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - hchkUrls, err := convertHealthChecks(d.Get("health_checks").([]interface{}), d, config) - if err != nil { - return err - } - - instanceUrls, err := convertInstancesToUrls(d, config, project, d.Get("instances").(*resource_compute_target_pool_schema.Set)) - if err != nil { - return err - } - - tpool := &resource_compute_target_pool_compute.TargetPool{ - BackupPool: d.Get("backup_pool").(string), - Description: d.Get("description").(string), - HealthChecks: hchkUrls, - Instances: instanceUrls, - Name: d.Get("name").(string), - SessionAffinity: d.Get("session_affinity").(string), - } - if d.Get("failover_ratio") != nil { - tpool.FailoverRatio = d.Get("failover_ratio").(float64) - } - resource_compute_target_pool_log.Printf("[DEBUG] TargetPool insert request: %#v", tpool) - op, err := config.NewComputeClient(userAgent).TargetPools.Insert( - project, region, tpool).Do() - if err != nil { - if gerr, ok := err.(*resource_compute_target_pool_googleapi.Error); ok && gerr.Code == 404 && resource_compute_target_pool_strings.Contains(gerr.Message, "httpHealthChecks") { - return resource_compute_target_pool_fmt.Errorf("Health check %s is not a valid HTTP health check", d.Get("health_checks").([]interface{})[0]) - } - return resource_compute_target_pool_fmt.Errorf("Error 
creating TargetPool: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetPools/{{name}}") - if err != nil { - return resource_compute_target_pool_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime(config, op, project, "Creating Target Pool", userAgent, d.Timeout(resource_compute_target_pool_schema.TimeoutCreate)) - if err != nil { - return err - } - return resourceComputeTargetPoolRead(d, meta) -} - -func resourceComputeTargetPoolUpdate(d *resource_compute_target_pool_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - - d.Partial(true) - - if d.HasChange("health_checks") { - - from_, to_ := d.GetChange("health_checks") - fromUrls, err := convertHealthChecks(from_.([]interface{}), d, config) - if err != nil { - return err - } - toUrls, err := convertHealthChecks(to_.([]interface{}), d, config) - if err != nil { - return err - } - add, remove := calcAddRemove(fromUrls, toUrls) - - removeReq := &resource_compute_target_pool_compute.TargetPoolsRemoveHealthCheckRequest{ - HealthChecks: make([]*resource_compute_target_pool_compute.HealthCheckReference, len(remove)), - } - for i, v := range remove { - removeReq.HealthChecks[i] = &resource_compute_target_pool_compute.HealthCheckReference{HealthCheck: v} - } - op, err := config.NewComputeClient(userAgent).TargetPools.RemoveHealthCheck( - project, region, name, removeReq).Do() - if err != nil { - return resource_compute_target_pool_fmt.Errorf("Error updating health_check: %s", err) - } - - err = computeOperationWaitTime(config, op, project, "Updating Target Pool", userAgent, 
d.Timeout(resource_compute_target_pool_schema.TimeoutUpdate)) - if err != nil { - return err - } - addReq := &resource_compute_target_pool_compute.TargetPoolsAddHealthCheckRequest{ - HealthChecks: make([]*resource_compute_target_pool_compute.HealthCheckReference, len(add)), - } - for i, v := range add { - addReq.HealthChecks[i] = &resource_compute_target_pool_compute.HealthCheckReference{HealthCheck: v} - } - op, err = config.NewComputeClient(userAgent).TargetPools.AddHealthCheck( - project, region, name, addReq).Do() - if err != nil { - return resource_compute_target_pool_fmt.Errorf("Error updating health_check: %s", err) - } - - err = computeOperationWaitTime(config, op, project, "Updating Target Pool", userAgent, d.Timeout(resource_compute_target_pool_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - if d.HasChange("instances") { - - old_, new_ := d.GetChange("instances") - old := old_.(*resource_compute_target_pool_schema.Set) - new := new_.(*resource_compute_target_pool_schema.Set) - - addUrls, err := convertInstancesToUrls(d, config, project, new.Difference(old)) - if err != nil { - return err - } - removeUrls, err := convertInstancesToUrls(d, config, project, old.Difference(new)) - if err != nil { - return err - } - - addReq := &resource_compute_target_pool_compute.TargetPoolsAddInstanceRequest{ - Instances: make([]*resource_compute_target_pool_compute.InstanceReference, len(addUrls)), - } - for i, v := range addUrls { - addReq.Instances[i] = &resource_compute_target_pool_compute.InstanceReference{Instance: v} - } - op, err := config.NewComputeClient(userAgent).TargetPools.AddInstance( - project, region, name, addReq).Do() - if err != nil { - return resource_compute_target_pool_fmt.Errorf("Error updating instances: %s", err) - } - - err = computeOperationWaitTime(config, op, project, "Updating Target Pool", userAgent, d.Timeout(resource_compute_target_pool_schema.TimeoutUpdate)) - if err != nil { - return err - } - removeReq := 
&resource_compute_target_pool_compute.TargetPoolsRemoveInstanceRequest{ - Instances: make([]*resource_compute_target_pool_compute.InstanceReference, len(removeUrls)), - } - for i, v := range removeUrls { - removeReq.Instances[i] = &resource_compute_target_pool_compute.InstanceReference{Instance: v} - } - op, err = config.NewComputeClient(userAgent).TargetPools.RemoveInstance( - project, region, name, removeReq).Do() - if err != nil { - return resource_compute_target_pool_fmt.Errorf("Error updating instances: %s", err) - } - err = computeOperationWaitTime(config, op, project, "Updating Target Pool", userAgent, d.Timeout(resource_compute_target_pool_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - if d.HasChange("backup_pool") { - bpool_name := d.Get("backup_pool").(string) - tref := &resource_compute_target_pool_compute.TargetReference{ - Target: bpool_name, - } - op, err := config.NewComputeClient(userAgent).TargetPools.SetBackup( - project, region, name, tref).Do() - if err != nil { - return resource_compute_target_pool_fmt.Errorf("Error updating backup_pool: %s", err) - } - - err = computeOperationWaitTime(config, op, project, "Updating Target Pool", userAgent, d.Timeout(resource_compute_target_pool_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeTargetPoolRead(d, meta) -} - -func convertInstancesFromUrls(urls []string) []string { - result := make([]string, 0, len(urls)) - for _, url := range urls { - urlArray := resource_compute_target_pool_strings.Split(url, "/") - instance := resource_compute_target_pool_fmt.Sprintf("%s/%s", urlArray[len(urlArray)-3], urlArray[len(urlArray)-1]) - result = append(result, instance) - } - return result -} - -func resourceComputeTargetPoolRead(d *resource_compute_target_pool_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - 
region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - tpool, err := config.NewComputeClient(userAgent).TargetPools.Get( - project, region, d.Get("name").(string)).Do() - if err != nil { - return handleNotFoundError(err, d, resource_compute_target_pool_fmt.Sprintf("Target Pool %q", d.Get("name").(string))) - } - - if err := d.Set("self_link", tpool.SelfLink); err != nil { - return resource_compute_target_pool_fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("backup_pool", tpool.BackupPool); err != nil { - return resource_compute_target_pool_fmt.Errorf("Error setting backup_pool: %s", err) - } - if err := d.Set("description", tpool.Description); err != nil { - return resource_compute_target_pool_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("failover_ratio", tpool.FailoverRatio); err != nil { - return resource_compute_target_pool_fmt.Errorf("Error setting failover_ratio: %s", err) - } - if err := d.Set("health_checks", tpool.HealthChecks); err != nil { - return resource_compute_target_pool_fmt.Errorf("Error setting health_checks: %s", err) - } - if tpool.Instances != nil { - if err := d.Set("instances", convertInstancesFromUrls(tpool.Instances)); err != nil { - return resource_compute_target_pool_fmt.Errorf("Error setting instances: %s", err) - } - } else { - if err := d.Set("instances", nil); err != nil { - return resource_compute_target_pool_fmt.Errorf("Error setting instances: %s", err) - } - } - if err := d.Set("name", tpool.Name); err != nil { - return resource_compute_target_pool_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("region", GetResourceNameFromSelfLink(tpool.Region)); err != nil { - return resource_compute_target_pool_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("session_affinity", tpool.SessionAffinity); err != nil { - return resource_compute_target_pool_fmt.Errorf("Error 
setting session_affinity: %s", err) - } - if err := d.Set("project", project); err != nil { - return resource_compute_target_pool_fmt.Errorf("Error setting project: %s", err) - } - return nil -} - -func resourceComputeTargetPoolDelete(d *resource_compute_target_pool_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - op, err := config.NewComputeClient(userAgent).TargetPools.Delete( - project, region, d.Get("name").(string)).Do() - if err != nil { - return resource_compute_target_pool_fmt.Errorf("Error deleting TargetPool: %s", err) - } - - err = computeOperationWaitTime(config, op, project, "Deleting Target Pool", userAgent, d.Timeout(resource_compute_target_pool_schema.TimeoutDelete)) - if err != nil { - return err - } - d.SetId("") - return nil -} - -func resourceTargetPoolStateImporter(d *resource_compute_target_pool_schema.ResourceData, meta interface{}) ([]*resource_compute_target_pool_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/targetPools/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetPools/{{name}}") - if err != nil { - return nil, resource_compute_target_pool_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_target_pool_schema.ResourceData{d}, nil -} - -func resourceComputeTargetSslProxy() *resource_compute_target_ssl_proxy_schema.Resource { - return &resource_compute_target_ssl_proxy_schema.Resource{ - Create: resourceComputeTargetSslProxyCreate, - Read: 
resourceComputeTargetSslProxyRead, - Update: resourceComputeTargetSslProxyUpdate, - Delete: resourceComputeTargetSslProxyDelete, - - Importer: &resource_compute_target_ssl_proxy_schema.ResourceImporter{ - State: resourceComputeTargetSslProxyImport, - }, - - Timeouts: &resource_compute_target_ssl_proxy_schema.ResourceTimeout{ - Create: resource_compute_target_ssl_proxy_schema.DefaultTimeout(4 * resource_compute_target_ssl_proxy_time.Minute), - Update: resource_compute_target_ssl_proxy_schema.DefaultTimeout(4 * resource_compute_target_ssl_proxy_time.Minute), - Delete: resource_compute_target_ssl_proxy_schema.DefaultTimeout(4 * resource_compute_target_ssl_proxy_time.Minute), - }, - - Schema: map[string]*resource_compute_target_ssl_proxy_schema.Schema{ - "backend_service": { - Type: resource_compute_target_ssl_proxy_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the BackendService resource.`, - }, - "name": { - Type: resource_compute_target_ssl_proxy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "ssl_certificates": { - Type: resource_compute_target_ssl_proxy_schema.TypeList, - Required: true, - Description: `A list of SslCertificate resources that are used to authenticate -connections between users and the load balancer. 
At least one -SSL certificate must be specified.`, - Elem: &resource_compute_target_ssl_proxy_schema.Schema{ - Type: resource_compute_target_ssl_proxy_schema.TypeString, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - }, - "description": { - Type: resource_compute_target_ssl_proxy_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "proxy_header": { - Type: resource_compute_target_ssl_proxy_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_target_ssl_proxy_validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), - Description: `Specifies the type of proxy header to append before sending data to -the backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, - Default: "NONE", - }, - "ssl_policy": { - Type: resource_compute_target_ssl_proxy_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the SslPolicy resource that will be associated with -the TargetSslProxy resource. 
If not set, the TargetSslProxy -resource will not have any SSL policy configured.`, - }, - "creation_timestamp": { - Type: resource_compute_target_ssl_proxy_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "proxy_id": { - Type: resource_compute_target_ssl_proxy_schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: resource_compute_target_ssl_proxy_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_target_ssl_proxy_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeTargetSslProxyCreate(d *resource_compute_target_ssl_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeTargetSslProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_target_ssl_proxy_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_target_ssl_proxy_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeTargetSslProxyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_target_ssl_proxy_reflect.ValueOf(nameProp)) && (ok || !resource_compute_target_ssl_proxy_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - proxyHeaderProp, err := expandComputeTargetSslProxyProxyHeader(d.Get("proxy_header"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("proxy_header"); !isEmptyValue(resource_compute_target_ssl_proxy_reflect.ValueOf(proxyHeaderProp)) && (ok || 
!resource_compute_target_ssl_proxy_reflect.DeepEqual(v, proxyHeaderProp)) { - obj["proxyHeader"] = proxyHeaderProp - } - serviceProp, err := expandComputeTargetSslProxyBackendService(d.Get("backend_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend_service"); !isEmptyValue(resource_compute_target_ssl_proxy_reflect.ValueOf(serviceProp)) && (ok || !resource_compute_target_ssl_proxy_reflect.DeepEqual(v, serviceProp)) { - obj["service"] = serviceProp - } - sslCertificatesProp, err := expandComputeTargetSslProxySslCertificates(d.Get("ssl_certificates"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_certificates"); !isEmptyValue(resource_compute_target_ssl_proxy_reflect.ValueOf(sslCertificatesProp)) && (ok || !resource_compute_target_ssl_proxy_reflect.DeepEqual(v, sslCertificatesProp)) { - obj["sslCertificates"] = sslCertificatesProp - } - sslPolicyProp, err := expandComputeTargetSslProxySslPolicy(d.Get("ssl_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_policy"); !isEmptyValue(resource_compute_target_ssl_proxy_reflect.ValueOf(sslPolicyProp)) && (ok || !resource_compute_target_ssl_proxy_reflect.DeepEqual(v, sslPolicyProp)) { - obj["sslPolicy"] = sslPolicyProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies") - if err != nil { - return err - } - - resource_compute_target_ssl_proxy_log.Printf("[DEBUG] Creating new TargetSslProxy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error fetching project for TargetSslProxy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, 
d.Timeout(resource_compute_target_ssl_proxy_schema.TimeoutCreate)) - if err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error creating TargetSslProxy: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/targetSslProxies/{{name}}") - if err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating TargetSslProxy", userAgent, - d.Timeout(resource_compute_target_ssl_proxy_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_target_ssl_proxy_fmt.Errorf("Error waiting to create TargetSslProxy: %s", err) - } - - resource_compute_target_ssl_proxy_log.Printf("[DEBUG] Finished creating TargetSslProxy %q: %#v", d.Id(), res) - - return resourceComputeTargetSslProxyRead(d, meta) -} - -func resourceComputeTargetSslProxyRead(d *resource_compute_target_ssl_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error fetching project for TargetSslProxy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_target_ssl_proxy_fmt.Sprintf("ComputeTargetSslProxy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - - if err := 
d.Set("creation_timestamp", flattenComputeTargetSslProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - if err := d.Set("description", flattenComputeTargetSslProxyDescription(res["description"], d, config)); err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - if err := d.Set("proxy_id", flattenComputeTargetSslProxyProxyId(res["id"], d, config)); err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - if err := d.Set("name", flattenComputeTargetSslProxyName(res["name"], d, config)); err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - if err := d.Set("proxy_header", flattenComputeTargetSslProxyProxyHeader(res["proxyHeader"], d, config)); err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - if err := d.Set("backend_service", flattenComputeTargetSslProxyBackendService(res["service"], d, config)); err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - if err := d.Set("ssl_certificates", flattenComputeTargetSslProxySslCertificates(res["sslCertificates"], d, config)); err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - if err := d.Set("ssl_policy", flattenComputeTargetSslProxySslPolicy(res["sslPolicy"], d, config)); err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error reading TargetSslProxy: %s", err) - } - - return nil -} - -func resourceComputeTargetSslProxyUpdate(d 
*resource_compute_target_ssl_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error fetching project for TargetSslProxy: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("proxy_header") { - obj := make(map[string]interface{}) - - proxyHeaderProp, err := expandComputeTargetSslProxyProxyHeader(d.Get("proxy_header"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("proxy_header"); !isEmptyValue(resource_compute_target_ssl_proxy_reflect.ValueOf(v)) && (ok || !resource_compute_target_ssl_proxy_reflect.DeepEqual(v, proxyHeaderProp)) { - obj["proxyHeader"] = proxyHeaderProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}/setProxyHeader") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_ssl_proxy_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error updating TargetSslProxy %q: %s", d.Id(), err) - } else { - resource_compute_target_ssl_proxy_log.Printf("[DEBUG] Finished updating TargetSslProxy %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating TargetSslProxy", userAgent, - d.Timeout(resource_compute_target_ssl_proxy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("backend_service") { - obj := make(map[string]interface{}) - - serviceProp, err := expandComputeTargetSslProxyBackendService(d.Get("backend_service"), d, config) - if err != nil { - return err - } else if 
v, ok := d.GetOkExists("backend_service"); !isEmptyValue(resource_compute_target_ssl_proxy_reflect.ValueOf(v)) && (ok || !resource_compute_target_ssl_proxy_reflect.DeepEqual(v, serviceProp)) { - obj["service"] = serviceProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}/setBackendService") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_ssl_proxy_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error updating TargetSslProxy %q: %s", d.Id(), err) - } else { - resource_compute_target_ssl_proxy_log.Printf("[DEBUG] Finished updating TargetSslProxy %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating TargetSslProxy", userAgent, - d.Timeout(resource_compute_target_ssl_proxy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("ssl_certificates") { - obj := make(map[string]interface{}) - - sslCertificatesProp, err := expandComputeTargetSslProxySslCertificates(d.Get("ssl_certificates"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_certificates"); !isEmptyValue(resource_compute_target_ssl_proxy_reflect.ValueOf(v)) && (ok || !resource_compute_target_ssl_proxy_reflect.DeepEqual(v, sslCertificatesProp)) { - obj["sslCertificates"] = sslCertificatesProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}/setSslCertificates") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, 
d.Timeout(resource_compute_target_ssl_proxy_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error updating TargetSslProxy %q: %s", d.Id(), err) - } else { - resource_compute_target_ssl_proxy_log.Printf("[DEBUG] Finished updating TargetSslProxy %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating TargetSslProxy", userAgent, - d.Timeout(resource_compute_target_ssl_proxy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("ssl_policy") { - obj := make(map[string]interface{}) - - sslPolicyProp, err := expandComputeTargetSslProxySslPolicy(d.Get("ssl_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_policy"); !isEmptyValue(resource_compute_target_ssl_proxy_reflect.ValueOf(v)) && (ok || !resource_compute_target_ssl_proxy_reflect.DeepEqual(v, sslPolicyProp)) { - obj["sslPolicy"] = sslPolicyProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}/setSslPolicy") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_ssl_proxy_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error updating TargetSslProxy %q: %s", d.Id(), err) - } else { - resource_compute_target_ssl_proxy_log.Printf("[DEBUG] Finished updating TargetSslProxy %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating TargetSslProxy", userAgent, - d.Timeout(resource_compute_target_ssl_proxy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeTargetSslProxyRead(d, meta) -} - -func resourceComputeTargetSslProxyDelete(d 
*resource_compute_target_ssl_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_ssl_proxy_fmt.Errorf("Error fetching project for TargetSslProxy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetSslProxies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_target_ssl_proxy_log.Printf("[DEBUG] Deleting TargetSslProxy %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_ssl_proxy_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TargetSslProxy") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting TargetSslProxy", userAgent, - d.Timeout(resource_compute_target_ssl_proxy_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_target_ssl_proxy_log.Printf("[DEBUG] Finished deleting TargetSslProxy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeTargetSslProxyImport(d *resource_compute_target_ssl_proxy_schema.ResourceData, meta interface{}) ([]*resource_compute_target_ssl_proxy_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/targetSslProxies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/targetSslProxies/{{name}}") - if err != nil { - return nil, resource_compute_target_ssl_proxy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - 
return []*resource_compute_target_ssl_proxy_schema.ResourceData{d}, nil -} - -func flattenComputeTargetSslProxyCreationTimestamp(v interface{}, d *resource_compute_target_ssl_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetSslProxyDescription(v interface{}, d *resource_compute_target_ssl_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetSslProxyProxyId(v interface{}, d *resource_compute_target_ssl_proxy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_target_ssl_proxy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeTargetSslProxyName(v interface{}, d *resource_compute_target_ssl_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetSslProxyProxyHeader(v interface{}, d *resource_compute_target_ssl_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetSslProxyBackendService(v interface{}, d *resource_compute_target_ssl_proxy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeTargetSslProxySslCertificates(v interface{}, d *resource_compute_target_ssl_proxy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) -} - -func flattenComputeTargetSslProxySslPolicy(v interface{}, d *resource_compute_target_ssl_proxy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandComputeTargetSslProxyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, 
nil -} - -func expandComputeTargetSslProxyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetSslProxyProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetSslProxyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_target_ssl_proxy_fmt.Errorf("Invalid value for backend_service: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeTargetSslProxySslCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - return nil, resource_compute_target_ssl_proxy_fmt.Errorf("Invalid value for ssl_certificates: nil") - } - f, err := parseGlobalFieldValue("sslCertificates", raw.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_target_ssl_proxy_fmt.Errorf("Invalid value for ssl_certificates: %s", err) - } - req = append(req, f.RelativeLink()) - } - return req, nil -} - -func expandComputeTargetSslProxySslPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("sslPolicies", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_target_ssl_proxy_fmt.Errorf("Invalid value for ssl_policy: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeTargetTcpProxy() *resource_compute_target_tcp_proxy_schema.Resource { - return &resource_compute_target_tcp_proxy_schema.Resource{ - Create: resourceComputeTargetTcpProxyCreate, - Read: resourceComputeTargetTcpProxyRead, - Update: resourceComputeTargetTcpProxyUpdate, - Delete: 
resourceComputeTargetTcpProxyDelete, - - Importer: &resource_compute_target_tcp_proxy_schema.ResourceImporter{ - State: resourceComputeTargetTcpProxyImport, - }, - - Timeouts: &resource_compute_target_tcp_proxy_schema.ResourceTimeout{ - Create: resource_compute_target_tcp_proxy_schema.DefaultTimeout(4 * resource_compute_target_tcp_proxy_time.Minute), - Update: resource_compute_target_tcp_proxy_schema.DefaultTimeout(4 * resource_compute_target_tcp_proxy_time.Minute), - Delete: resource_compute_target_tcp_proxy_schema.DefaultTimeout(4 * resource_compute_target_tcp_proxy_time.Minute), - }, - - Schema: map[string]*resource_compute_target_tcp_proxy_schema.Schema{ - "backend_service": { - Type: resource_compute_target_tcp_proxy_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the BackendService resource.`, - }, - "name": { - Type: resource_compute_target_tcp_proxy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and match -the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' 
which means the -first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "description": { - Type: resource_compute_target_tcp_proxy_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "proxy_bind": { - Type: resource_compute_target_tcp_proxy_schema.TypeBool, - Computed: true, - Optional: true, - ForceNew: true, - Description: `This field only applies when the forwarding rule that references -this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED.`, - }, - "proxy_header": { - Type: resource_compute_target_tcp_proxy_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_target_tcp_proxy_validation.StringInSlice([]string{"NONE", "PROXY_V1", ""}, false), - Description: `Specifies the type of proxy header to append before sending data to -the backend. Default value: "NONE" Possible values: ["NONE", "PROXY_V1"]`, - Default: "NONE", - }, - "creation_timestamp": { - Type: resource_compute_target_tcp_proxy_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "proxy_id": { - Type: resource_compute_target_tcp_proxy_schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: resource_compute_target_tcp_proxy_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_target_tcp_proxy_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeTargetTcpProxyCreate(d *resource_compute_target_tcp_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := 
expandComputeTargetTcpProxyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_target_tcp_proxy_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_target_tcp_proxy_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeTargetTcpProxyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_target_tcp_proxy_reflect.ValueOf(nameProp)) && (ok || !resource_compute_target_tcp_proxy_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - proxyHeaderProp, err := expandComputeTargetTcpProxyProxyHeader(d.Get("proxy_header"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("proxy_header"); !isEmptyValue(resource_compute_target_tcp_proxy_reflect.ValueOf(proxyHeaderProp)) && (ok || !resource_compute_target_tcp_proxy_reflect.DeepEqual(v, proxyHeaderProp)) { - obj["proxyHeader"] = proxyHeaderProp - } - serviceProp, err := expandComputeTargetTcpProxyBackendService(d.Get("backend_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend_service"); !isEmptyValue(resource_compute_target_tcp_proxy_reflect.ValueOf(serviceProp)) && (ok || !resource_compute_target_tcp_proxy_reflect.DeepEqual(v, serviceProp)) { - obj["service"] = serviceProp - } - proxyBindProp, err := expandComputeTargetTcpProxyProxyBind(d.Get("proxy_bind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("proxy_bind"); !isEmptyValue(resource_compute_target_tcp_proxy_reflect.ValueOf(proxyBindProp)) && (ok || !resource_compute_target_tcp_proxy_reflect.DeepEqual(v, proxyBindProp)) { - obj["proxyBind"] = proxyBindProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetTcpProxies") - if err != nil { - return err - } - - 
resource_compute_target_tcp_proxy_log.Printf("[DEBUG] Creating new TargetTcpProxy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error fetching project for TargetTcpProxy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_tcp_proxy_schema.TimeoutCreate)) - if err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error creating TargetTcpProxy: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/targetTcpProxies/{{name}}") - if err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating TargetTcpProxy", userAgent, - d.Timeout(resource_compute_target_tcp_proxy_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_target_tcp_proxy_fmt.Errorf("Error waiting to create TargetTcpProxy: %s", err) - } - - resource_compute_target_tcp_proxy_log.Printf("[DEBUG] Finished creating TargetTcpProxy %q: %#v", d.Id(), res) - - return resourceComputeTargetTcpProxyRead(d, meta) -} - -func resourceComputeTargetTcpProxyRead(d *resource_compute_target_tcp_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetTcpProxies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error fetching project for TargetTcpProxy: %s", err) - } - billingProject 
= project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_target_tcp_proxy_fmt.Sprintf("ComputeTargetTcpProxy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeTargetTcpProxyCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - if err := d.Set("description", flattenComputeTargetTcpProxyDescription(res["description"], d, config)); err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - if err := d.Set("proxy_id", flattenComputeTargetTcpProxyProxyId(res["id"], d, config)); err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - if err := d.Set("name", flattenComputeTargetTcpProxyName(res["name"], d, config)); err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - if err := d.Set("proxy_header", flattenComputeTargetTcpProxyProxyHeader(res["proxyHeader"], d, config)); err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - if err := d.Set("backend_service", flattenComputeTargetTcpProxyBackendService(res["service"], d, config)); err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - if err := d.Set("proxy_bind", flattenComputeTargetTcpProxyProxyBind(res["proxyBind"], d, config)); err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - if err := 
d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error reading TargetTcpProxy: %s", err) - } - - return nil -} - -func resourceComputeTargetTcpProxyUpdate(d *resource_compute_target_tcp_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error fetching project for TargetTcpProxy: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("proxy_header") { - obj := make(map[string]interface{}) - - proxyHeaderProp, err := expandComputeTargetTcpProxyProxyHeader(d.Get("proxy_header"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("proxy_header"); !isEmptyValue(resource_compute_target_tcp_proxy_reflect.ValueOf(v)) && (ok || !resource_compute_target_tcp_proxy_reflect.DeepEqual(v, proxyHeaderProp)) { - obj["proxyHeader"] = proxyHeaderProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetTcpProxies/{{name}}/setProxyHeader") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_tcp_proxy_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error updating TargetTcpProxy %q: %s", d.Id(), err) - } else { - resource_compute_target_tcp_proxy_log.Printf("[DEBUG] Finished updating TargetTcpProxy %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating TargetTcpProxy", userAgent, - d.Timeout(resource_compute_target_tcp_proxy_schema.TimeoutUpdate)) - if err != nil { 
- return err - } - } - if d.HasChange("backend_service") { - obj := make(map[string]interface{}) - - serviceProp, err := expandComputeTargetTcpProxyBackendService(d.Get("backend_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("backend_service"); !isEmptyValue(resource_compute_target_tcp_proxy_reflect.ValueOf(v)) && (ok || !resource_compute_target_tcp_proxy_reflect.DeepEqual(v, serviceProp)) { - obj["service"] = serviceProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/targetTcpProxies/{{name}}/setBackendService") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_tcp_proxy_schema.TimeoutUpdate)) - if err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error updating TargetTcpProxy %q: %s", d.Id(), err) - } else { - resource_compute_target_tcp_proxy_log.Printf("[DEBUG] Finished updating TargetTcpProxy %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating TargetTcpProxy", userAgent, - d.Timeout(resource_compute_target_tcp_proxy_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceComputeTargetTcpProxyRead(d, meta) -} - -func resourceComputeTargetTcpProxyDelete(d *resource_compute_target_tcp_proxy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_target_tcp_proxy_fmt.Errorf("Error fetching project for TargetTcpProxy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/global/targetTcpProxies/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_target_tcp_proxy_log.Printf("[DEBUG] Deleting TargetTcpProxy %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_target_tcp_proxy_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TargetTcpProxy") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting TargetTcpProxy", userAgent, - d.Timeout(resource_compute_target_tcp_proxy_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_target_tcp_proxy_log.Printf("[DEBUG] Finished deleting TargetTcpProxy %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeTargetTcpProxyImport(d *resource_compute_target_tcp_proxy_schema.ResourceData, meta interface{}) ([]*resource_compute_target_tcp_proxy_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/targetTcpProxies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/targetTcpProxies/{{name}}") - if err != nil { - return nil, resource_compute_target_tcp_proxy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_target_tcp_proxy_schema.ResourceData{d}, nil -} - -func flattenComputeTargetTcpProxyCreationTimestamp(v interface{}, d *resource_compute_target_tcp_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetTcpProxyDescription(v interface{}, d *resource_compute_target_tcp_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetTcpProxyProxyId(v interface{}, d 
*resource_compute_target_tcp_proxy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_target_tcp_proxy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeTargetTcpProxyName(v interface{}, d *resource_compute_target_tcp_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetTcpProxyProxyHeader(v interface{}, d *resource_compute_target_tcp_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeTargetTcpProxyBackendService(v interface{}, d *resource_compute_target_tcp_proxy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeTargetTcpProxyProxyBind(v interface{}, d *resource_compute_target_tcp_proxy_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeTargetTcpProxyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetTcpProxyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetTcpProxyProxyHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeTargetTcpProxyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_target_tcp_proxy_fmt.Errorf("Invalid value for backend_service: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeTargetTcpProxyProxyBind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) 
{ - return v, nil -} - -func resourceComputeUrlMap() *resource_compute_url_map_schema.Resource { - return &resource_compute_url_map_schema.Resource{ - Create: resourceComputeUrlMapCreate, - Read: resourceComputeUrlMapRead, - Update: resourceComputeUrlMapUpdate, - Delete: resourceComputeUrlMapDelete, - - Importer: &resource_compute_url_map_schema.ResourceImporter{ - State: resourceComputeUrlMapImport, - }, - - Timeouts: &resource_compute_url_map_schema.ResourceTimeout{ - Create: resource_compute_url_map_schema.DefaultTimeout(4 * resource_compute_url_map_time.Minute), - Update: resource_compute_url_map_schema.DefaultTimeout(4 * resource_compute_url_map_time.Minute), - Delete: resource_compute_url_map_schema.DefaultTimeout(4 * resource_compute_url_map_time.Minute), - }, - - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "name": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is created. The -name must be 1-63 characters long, and comply with RFC1035. Specifically, the -name must be 1-63 characters long and match the regular expression -'[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a lowercase -letter, and all following characters must be a dash, lowercase letter, or digit, -except the last character, which cannot be a dash.`, - }, - "default_route_action": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions -like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. -If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService -is set, defaultRouteAction cannot contain any weightedBackendServices. 
- -Only one of defaultRouteAction or defaultUrlRedirect must be set.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "cors_policy": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The specification for allowing client side cross-origin requests. Please see -[W3C Recommendation for Cross Origin Resource Sharing](https://www.w3.org/TR/cors/)`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "allow_credentials": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `In response to a preflight request, setting this to true indicates that the actual request can include user credentials. -This translates to the Access-Control-Allow-Credentials header.`, - Default: false, - AtLeastOneOf: []string{"default_route_action.0.cors_policy.0.allow_origins", "default_route_action.0.cors_policy.0.allow_origin_regexes", "default_route_action.0.cors_policy.0.allow_methods", "default_route_action.0.cors_policy.0.allow_headers", "default_route_action.0.cors_policy.0.expose_headers", "default_route_action.0.cors_policy.0.max_age", "default_route_action.0.cors_policy.0.allow_credentials", "default_route_action.0.cors_policy.0.disabled"}, - }, - "allow_headers": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Allow-Headers header.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - AtLeastOneOf: []string{"default_route_action.0.cors_policy.0.allow_origins", "default_route_action.0.cors_policy.0.allow_origin_regexes", "default_route_action.0.cors_policy.0.allow_methods", "default_route_action.0.cors_policy.0.allow_headers", "default_route_action.0.cors_policy.0.expose_headers", 
"default_route_action.0.cors_policy.0.max_age", "default_route_action.0.cors_policy.0.allow_credentials", "default_route_action.0.cors_policy.0.disabled"}, - }, - "allow_methods": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Allow-Methods header.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - AtLeastOneOf: []string{"default_route_action.0.cors_policy.0.allow_origins", "default_route_action.0.cors_policy.0.allow_origin_regexes", "default_route_action.0.cors_policy.0.allow_methods", "default_route_action.0.cors_policy.0.allow_headers", "default_route_action.0.cors_policy.0.expose_headers", "default_route_action.0.cors_policy.0.max_age", "default_route_action.0.cors_policy.0.allow_credentials", "default_route_action.0.cors_policy.0.disabled"}, - }, - "allow_origin_regexes": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the regular expression patterns that match allowed origins. 
For regular expression grammar -please see en.cppreference.com/w/cpp/regex/ecmascript -An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - AtLeastOneOf: []string{"default_route_action.0.cors_policy.0.allow_origins", "default_route_action.0.cors_policy.0.allow_origin_regexes", "default_route_action.0.cors_policy.0.allow_methods", "default_route_action.0.cors_policy.0.allow_headers", "default_route_action.0.cors_policy.0.expose_headers", "default_route_action.0.cors_policy.0.max_age", "default_route_action.0.cors_policy.0.allow_credentials", "default_route_action.0.cors_policy.0.disabled"}, - }, - "allow_origins": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the list of origins that will be allowed to do CORS requests. -An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - AtLeastOneOf: []string{"default_route_action.0.cors_policy.0.allow_origins", "default_route_action.0.cors_policy.0.allow_origin_regexes", "default_route_action.0.cors_policy.0.allow_methods", "default_route_action.0.cors_policy.0.allow_headers", "default_route_action.0.cors_policy.0.expose_headers", "default_route_action.0.cors_policy.0.max_age", "default_route_action.0.cors_policy.0.allow_credentials", "default_route_action.0.cors_policy.0.disabled"}, - }, - "disabled": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `If true, specifies the CORS policy is disabled. 
The default value is false, which indicates that the CORS policy is in effect.`, - Default: false, - AtLeastOneOf: []string{"default_route_action.0.cors_policy.0.allow_origins", "default_route_action.0.cors_policy.0.allow_origin_regexes", "default_route_action.0.cors_policy.0.allow_methods", "default_route_action.0.cors_policy.0.allow_headers", "default_route_action.0.cors_policy.0.expose_headers", "default_route_action.0.cors_policy.0.max_age", "default_route_action.0.cors_policy.0.allow_credentials", "default_route_action.0.cors_policy.0.disabled"}, - }, - "expose_headers": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Expose-Headers header.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - AtLeastOneOf: []string{"default_route_action.0.cors_policy.0.allow_origins", "default_route_action.0.cors_policy.0.allow_origin_regexes", "default_route_action.0.cors_policy.0.allow_methods", "default_route_action.0.cors_policy.0.allow_headers", "default_route_action.0.cors_policy.0.expose_headers", "default_route_action.0.cors_policy.0.max_age", "default_route_action.0.cors_policy.0.allow_credentials", "default_route_action.0.cors_policy.0.disabled"}, - }, - "max_age": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Specifies how long results of a preflight request can be cached in seconds. 
-This translates to the Access-Control-Max-Age header.`, - AtLeastOneOf: []string{"default_route_action.0.cors_policy.0.allow_origins", "default_route_action.0.cors_policy.0.allow_origin_regexes", "default_route_action.0.cors_policy.0.allow_methods", "default_route_action.0.cors_policy.0.allow_headers", "default_route_action.0.cors_policy.0.expose_headers", "default_route_action.0.cors_policy.0.max_age", "default_route_action.0.cors_policy.0.allow_credentials", "default_route_action.0.cors_policy.0.disabled"}, - }, - }, - }, - AtLeastOneOf: []string{"default_route_action.0.weighted_backend_services", "default_route_action.0.url_rewrite", "default_route_action.0.timeout", "default_route_action.0.retry_policy", "default_route_action.0.request_mirror_policy", "default_route_action.0.cors_policy", "default_route_action.0.fault_injection_policy"}, - }, - "fault_injection_policy": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. -As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a -percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted -by the Loadbalancer for a percentage of requests. 
- -timeout and retryPolicy will be ignored by clients that are configured with a faultInjectionPolicy.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "abort": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The specification for how client requests are aborted as part of fault injection.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "http_status": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - ValidateFunc: resource_compute_url_map_validation.IntBetween(200, 599), - Description: `The HTTP status code used to abort the request. -The value must be between 200 and 599 inclusive.`, - AtLeastOneOf: []string{"default_route_action.0.fault_injection_policy.0.abort.0.http_status", "default_route_action.0.fault_injection_policy.0.abort.0.percentage"}, - }, - "percentage": { - Type: resource_compute_url_map_schema.TypeFloat, - Optional: true, - ValidateFunc: resource_compute_url_map_validation.FloatBetween(0, 100), - Description: `The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection. 
-The value must be between 0.0 and 100.0 inclusive.`, - AtLeastOneOf: []string{"default_route_action.0.fault_injection_policy.0.abort.0.http_status", "default_route_action.0.fault_injection_policy.0.abort.0.percentage"}, - }, - }, - }, - AtLeastOneOf: []string{"default_route_action.0.fault_injection_policy.0.delay", "default_route_action.0.fault_injection_policy.0.abort"}, - }, - "delay": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The specification for how client requests are delayed as part of fault injection, before being sent to a backend service.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "fixed_delay": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the value of the fixed delay interval.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "nanos": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are -represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive.`, - AtLeastOneOf: []string{"default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay.0.seconds", "default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay.0.nanos"}, - }, - "seconds": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. 
-Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years`, - AtLeastOneOf: []string{"default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay.0.seconds", "default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay.0.nanos"}, - }, - }, - }, - AtLeastOneOf: []string{"default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay", "default_route_action.0.fault_injection_policy.0.delay.0.percentage"}, - }, - "percentage": { - Type: resource_compute_url_map_schema.TypeFloat, - Optional: true, - ValidateFunc: resource_compute_url_map_validation.FloatBetween(0, 100), - Description: `The percentage of traffic (connections/operations/requests) on which delay will be introduced as part of fault injection. -The value must be between 0.0 and 100.0 inclusive.`, - AtLeastOneOf: []string{"default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay", "default_route_action.0.fault_injection_policy.0.delay.0.percentage"}, - }, - }, - }, - AtLeastOneOf: []string{"default_route_action.0.fault_injection_policy.0.delay", "default_route_action.0.fault_injection_policy.0.abort"}, - }, - }, - }, - AtLeastOneOf: []string{"default_route_action.0.weighted_backend_services", "default_route_action.0.url_rewrite", "default_route_action.0.timeout", "default_route_action.0.retry_policy", "default_route_action.0.request_mirror_policy", "default_route_action.0.cors_policy", "default_route_action.0.fault_injection_policy"}, - }, - "request_mirror_policy": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. -Loadbalancer does not wait for responses from the shadow service. 
Prior to sending traffic to the shadow service, -the host / authority header is suffixed with -shadow.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "backend_service": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The full or partial URL to the BackendService resource being mirrored to.`, - }, - }, - }, - AtLeastOneOf: []string{"default_route_action.0.weighted_backend_services", "default_route_action.0.url_rewrite", "default_route_action.0.timeout", "default_route_action.0.retry_policy", "default_route_action.0.request_mirror_policy", "default_route_action.0.cors_policy", "default_route_action.0.fault_injection_policy"}, - }, - "retry_policy": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the retry policy associated with this route.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "num_retries": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - ValidateFunc: resource_compute_url_map_validation.IntAtLeast(1), - Description: `Specifies the allowed number retries. This number must be > 0. If not specified, defaults to 1.`, - Default: 1, - AtLeastOneOf: []string{"default_route_action.0.retry_policy.0.retry_conditions", "default_route_action.0.retry_policy.0.num_retries", "default_route_action.0.retry_policy.0.per_try_timeout"}, - }, - "per_try_timeout": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies a non-zero timeout per retry attempt. - -If not specified, will use the timeout set in HttpRouteAction. 
If timeout in HttpRouteAction is not set, -will use the largest timeout among all backend services associated with the route.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "nanos": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are -represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive.`, - AtLeastOneOf: []string{"default_route_action.0.retry_policy.0.per_try_timeout.0.seconds", "default_route_action.0.retry_policy.0.per_try_timeout.0.nanos"}, - }, - "seconds": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. -Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years`, - AtLeastOneOf: []string{"default_route_action.0.retry_policy.0.per_try_timeout.0.seconds", "default_route_action.0.retry_policy.0.per_try_timeout.0.nanos"}, - }, - }, - }, - AtLeastOneOf: []string{"default_route_action.0.retry_policy.0.retry_conditions", "default_route_action.0.retry_policy.0.num_retries", "default_route_action.0.retry_policy.0.per_try_timeout"}, - }, - "retry_conditions": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specfies one or more conditions when this retry rule applies. Valid values are: - -* 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, - or if the backend service does not respond at all, example: disconnects, reset, read timeout, -* connection failure, and refused streams. -* gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. 
-* connect-failure: Loadbalancer will retry on failures connecting to backend services, - for example due to connection timeouts. -* retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. - Currently the only retriable error supported is 409. -* refused-stream:Loadbalancer will retry if the backend service resets the stream with a REFUSED_STREAM error code. - This reset type indicates that it is safe to retry. -* cancelled: Loadbalancer will retry if the gRPC status code in the response header is set to cancelled -* deadline-exceeded: Loadbalancer will retry if the gRPC status code in the response header is set to deadline-exceeded -* resource-exhausted: Loadbalancer will retry if the gRPC status code in the response header is set to resource-exhausted -* unavailable: Loadbalancer will retry if the gRPC status code in the response header is set to unavailable`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - AtLeastOneOf: []string{"default_route_action.0.retry_policy.0.retry_conditions", "default_route_action.0.retry_policy.0.num_retries", "default_route_action.0.retry_policy.0.per_try_timeout"}, - }, - }, - }, - AtLeastOneOf: []string{"default_route_action.0.weighted_backend_services", "default_route_action.0.url_rewrite", "default_route_action.0.timeout", "default_route_action.0.retry_policy", "default_route_action.0.request_mirror_policy", "default_route_action.0.cors_policy", "default_route_action.0.fault_injection_policy"}, - }, - "timeout": { - Type: resource_compute_url_map_schema.TypeList, - Computed: true, - Optional: true, - Description: `Specifies the timeout for the selected route. Timeout is computed from the time the request has been -fully processed (i.e. end-of-stream) up until the response has been completely processed. Timeout includes all retries. 
- -If not specified, will use the largest timeout among all backend services associated with the route.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "nanos": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented -with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive.`, - AtLeastOneOf: []string{"default_route_action.0.timeout.0.seconds", "default_route_action.0.timeout.0.nanos"}, - }, - "seconds": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. -Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years`, - AtLeastOneOf: []string{"default_route_action.0.timeout.0.seconds", "default_route_action.0.timeout.0.nanos"}, - }, - }, - }, - AtLeastOneOf: []string{"default_route_action.0.weighted_backend_services", "default_route_action.0.url_rewrite", "default_route_action.0.timeout", "default_route_action.0.retry_policy", "default_route_action.0.request_mirror_policy", "default_route_action.0.cors_policy", "default_route_action.0.fault_injection_policy"}, - }, - "url_rewrite": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The spec to modify the URL of the request, prior to forwarding the request to the matched service.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "host_rewrite": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `Prior to forwarding the request to the selected service, the request's host header is replaced -with contents of hostRewrite. 
- -The value must be between 1 and 255 characters.`, - AtLeastOneOf: []string{"default_route_action.0.url_rewrite.0.path_prefix_rewrite", "default_route_action.0.url_rewrite.0.host_rewrite"}, - }, - "path_prefix_rewrite": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `Prior to forwarding the request to the selected backend service, the matching portion of the -request's path is replaced by pathPrefixRewrite. - -The value must be between 1 and 1024 characters.`, - AtLeastOneOf: []string{"default_route_action.0.url_rewrite.0.path_prefix_rewrite", "default_route_action.0.url_rewrite.0.host_rewrite"}, - }, - }, - }, - AtLeastOneOf: []string{"default_route_action.0.weighted_backend_services", "default_route_action.0.url_rewrite", "default_route_action.0.timeout", "default_route_action.0.retry_policy", "default_route_action.0.request_mirror_policy", "default_route_action.0.cors_policy", "default_route_action.0.fault_injection_policy"}, - }, - "weighted_backend_services": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of weighted backend services to send traffic to when a route match occurs. -The weights determine the fraction of traffic that flows to their corresponding backend service. -If all traffic needs to go to a single backend service, there must be one weightedBackendService -with weight set to a non 0 number. 
- -Once a backendService is identified and before forwarding the request to the backend service, -advanced routing actions like Url rewrites and header transformations are applied depending on -additional settings specified in this HttpRouteAction.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "backend_service": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The full or partial URL to the default BackendService resource. Before forwarding the -request to backendService, the loadbalancer applies any relevant headerActions -specified as part of this backendServiceWeight.`, - }, - "header_action": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies changes to request and response headers that need to take effect for -the selected backendService. - -headerAction specified here take effect before headerAction in the enclosing -HttpRouteRule, PathMatcher and UrlMap.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "request_headers_to_add": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add to a matching request prior to forwarding the request to the backendService.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The name of the header to add.`, - }, - "header_value": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `If false, headerValue is appended to any values that already exist for the 
header. -If true, headerValue is set for the header, discarding any values that were set for that header.`, - Default: false, - }, - }, - }, - }, - "request_headers_to_remove": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the request prior to -forwarding the request to the backendService.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "response_headers_to_add": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add the response prior to sending the response back to the client.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The name of the header to add.`, - }, - "header_value": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `If false, headerValue is appended to any values that already exist for the header. 
-If true, headerValue is set for the header, discarding any values that were set for that header.`, - Default: false, - }, - }, - }, - }, - "response_headers_to_remove": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the response prior to sending the -response back to the client.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - }, - }, - }, - "weight": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - ValidateFunc: resource_compute_url_map_validation.IntBetween(0, 1000), - Description: `Specifies the fraction of traffic sent to backendService, computed as -weight / (sum of all weightedBackendService weights in routeAction) . - -The selection of a backend service is determined only for new traffic. Once a user's request -has been directed to a backendService, subsequent requests will be sent to the same backendService -as determined by the BackendService's session affinity policy. 
- -The value must be between 0 and 1000`, - }, - }, - }, - AtLeastOneOf: []string{"default_route_action.0.weighted_backend_services", "default_route_action.0.url_rewrite", "default_route_action.0.timeout", "default_route_action.0.retry_policy", "default_route_action.0.request_mirror_policy", "default_route_action.0.cors_policy", "default_route_action.0.fault_injection_policy"}, - ExactlyOneOf: []string{"default_service", "default_url_redirect", "default_route_action.0.weighted_backend_services"}, - }, - }, - }, - ConflictsWith: []string{"default_url_redirect"}, - }, - "default_service": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The backend service or backend bucket to use when none of the given rules match.`, - ExactlyOneOf: []string{"default_service", "default_url_redirect", "default_route_action.0.weighted_backend_services"}, - }, - "default_url_redirect": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `When none of the specified hostRules match, the request is redirected to a URL specified -by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService or -defaultRouteAction must not be set.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "strip_query": { - Type: resource_compute_url_map_schema.TypeBool, - Required: true, - Description: `If set to true, any accompanying query portion of the original URL is removed prior -to redirecting the request. If set to false, the query portion of the original URL is -retained. The default is set to false. - This field is required to ensure an empty block is not set. 
The normal default value is false.`, - }, - "host_redirect": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The host that will be used in the redirect response instead of the one that was -supplied in the request. The value must be between 1 and 255 characters.`, - }, - "https_redirect": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `If set to true, the URL scheme in the redirected request is set to https. If set to -false, the URL scheme of the redirected request will remain the same as that of the -request. This must only be set for UrlMaps used in TargetHttpProxys. Setting this -true for TargetHttpsProxy is not permitted. The default is set to false.`, - Default: false, - }, - "path_redirect": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The path that will be used in the redirect response instead of the one that was -supplied in the request. pathRedirect cannot be supplied together with -prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the -original request will be used for the redirect. The value must be between 1 and 1024 -characters.`, - }, - "prefix_redirect": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, -retaining the remaining portion of the URL before redirecting the request. -prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or -neither. If neither is supplied, the path of the original request will be used for -the redirect. 
The value must be between 1 and 1024 characters.`, - }, - "redirect_response_code": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_url_map_validation.StringInSlice([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}, false), - Description: `The HTTP Status code to use for this RedirectAction. Supported values are: - -* MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. - -* FOUND, which corresponds to 302. - -* SEE_OTHER which corresponds to 303. - -* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method -will be retained. - -* PERMANENT_REDIRECT, which corresponds to 308. In this case, -the request method will be retained. Possible values: ["FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT"]`, - }, - }, - }, - ConflictsWith: []string{"default_route_action"}, - ExactlyOneOf: []string{"default_service", "default_url_redirect", "default_route_action.0.weighted_backend_services"}, - }, - "description": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `An optional description of this resource. Provide this property when you create -the resource.`, - }, - "header_action": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies changes to request and response headers that need to take effect for -the selected backendService. 
The headerAction specified here take effect after -headerAction specified under pathMatcher.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "request_headers_to_add": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add to a matching request prior to forwarding the request to the -backendService.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The name of the header.`, - }, - "header_value": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_url_map_schema.TypeBool, - Required: true, - Description: `If false, headerValue is appended to any values that already exist for the -header. 
If true, headerValue is set for the header, discarding any values that -were set for that header.`, - }, - }, - }, - AtLeastOneOf: []string{"header_action.0.request_headers_to_add", "header_action.0.request_headers_to_remove", "header_action.0.response_headers_to_add", "header_action.0.response_headers_to_remove"}, - }, - "request_headers_to_remove": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the request -prior to forwarding the request to the backendService.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - AtLeastOneOf: []string{"header_action.0.request_headers_to_add", "header_action.0.request_headers_to_remove", "header_action.0.response_headers_to_add", "header_action.0.response_headers_to_remove"}, - }, - "response_headers_to_add": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add the response prior to sending the response back to the client.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The name of the header.`, - }, - "header_value": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_url_map_schema.TypeBool, - Required: true, - Description: `If false, headerValue is appended to any values that already exist for the -header. 
If true, headerValue is set for the header, discarding any values that -were set for that header.`, - }, - }, - }, - AtLeastOneOf: []string{"header_action.0.request_headers_to_add", "header_action.0.request_headers_to_remove", "header_action.0.response_headers_to_add", "header_action.0.response_headers_to_remove"}, - }, - "response_headers_to_remove": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the response -prior to sending the response back to the client.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - AtLeastOneOf: []string{"header_action.0.request_headers_to_add", "header_action.0.request_headers_to_remove", "header_action.0.response_headers_to_add", "header_action.0.response_headers_to_remove"}, - }, - }, - }, - }, - "host_rule": { - Type: resource_compute_url_map_schema.TypeSet, - Optional: true, - Description: `The list of HostRules to use against the URL.`, - Elem: computeUrlMapHostRuleSchema(), - }, - "path_matcher": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The list of named PathMatchers to use against the URL.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "name": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The name to which this PathMatcher is referred by the HostRule.`, - }, - "default_route_action": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs -advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request -to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. 
-Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. - -Only one of defaultRouteAction or defaultUrlRedirect must be set.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "cors_policy": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The specification for allowing client side cross-origin requests. Please see -[W3C Recommendation for Cross Origin Resource Sharing](https://www.w3.org/TR/cors/)`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "allow_credentials": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `In response to a preflight request, setting this to true indicates that the actual request can include user credentials. -This translates to the Access-Control-Allow-Credentials header.`, - Default: false, - }, - "allow_headers": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Allow-Headers header.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "allow_methods": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Allow-Methods header.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "allow_origin_regexes": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the regular expression patterns that match allowed origins. 
For regular expression grammar -please see en.cppreference.com/w/cpp/regex/ecmascript -An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "allow_origins": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the list of origins that will be allowed to do CORS requests. -An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "disabled": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `If true, specifies the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect.`, - Default: false, - }, - "expose_headers": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Expose-Headers header.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "max_age": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Specifies how long results of a preflight request can be cached in seconds. -This translates to the Access-Control-Max-Age header.`, - }, - }, - }, - }, - "fault_injection_policy": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. -As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a -percentage of requests before sending those request to the backend service. 
Similarly requests from clients can be aborted -by the Loadbalancer for a percentage of requests. - -timeout and retryPolicy will be ignored by clients that are configured with a faultInjectionPolicy.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "abort": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The specification for how client requests are aborted as part of fault injection.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "http_status": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - ValidateFunc: resource_compute_url_map_validation.IntBetween(200, 599), - Description: `The HTTP status code used to abort the request. -The value must be between 200 and 599 inclusive.`, - }, - "percentage": { - Type: resource_compute_url_map_schema.TypeFloat, - Optional: true, - ValidateFunc: resource_compute_url_map_validation.FloatBetween(0, 100), - Description: `The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection. 
-The value must be between 0.0 and 100.0 inclusive.`, - }, - }, - }, - }, - "delay": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The specification for how client requests are delayed as part of fault injection, before being sent to a backend service.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "fixed_delay": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the value of the fixed delay interval.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "nanos": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are -represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive.`, - }, - "seconds": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. -Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years`, - }, - }, - }, - }, - "percentage": { - Type: resource_compute_url_map_schema.TypeFloat, - Optional: true, - ValidateFunc: resource_compute_url_map_validation.FloatBetween(0, 100), - Description: `The percentage of traffic (connections/operations/requests) on which delay will be introduced as part of fault injection. -The value must be between 0.0 and 100.0 inclusive.`, - }, - }, - }, - }, - }, - }, - }, - "request_mirror_policy": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. 
-Loadbalancer does not wait for responses from the shadow service. Prior to sending traffic to the shadow service, -the host / authority header is suffixed with -shadow.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "backend_service": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The full or partial URL to the BackendService resource being mirrored to.`, - }, - }, - }, - }, - "retry_policy": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the retry policy associated with this route.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "num_retries": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - ValidateFunc: resource_compute_url_map_validation.IntAtLeast(1), - Description: `Specifies the allowed number retries. This number must be > 0. If not specified, defaults to 1.`, - Default: 1, - }, - "per_try_timeout": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies a non-zero timeout per retry attempt. - -If not specified, will use the timeout set in HttpRouteAction. If timeout in HttpRouteAction is not set, -will use the largest timeout among all backend services associated with the route.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "nanos": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are -represented with a 0 seconds field and a positive nanos field. 
Must be from 0 to 999,999,999 inclusive.`, - }, - "seconds": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. -Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years`, - }, - }, - }, - }, - "retry_conditions": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specfies one or more conditions when this retry rule applies. Valid values are: - -* 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, - or if the backend service does not respond at all, example: disconnects, reset, read timeout, -* connection failure, and refused streams. -* gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. -* connect-failure: Loadbalancer will retry on failures connecting to backend services, - for example due to connection timeouts. -* retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. - Currently the only retriable error supported is 409. -* refused-stream:Loadbalancer will retry if the backend service resets the stream with a REFUSED_STREAM error code. - This reset type indicates that it is safe to retry. 
-* cancelled: Loadbalancer will retry if the gRPC status code in the response header is set to cancelled -* deadline-exceeded: Loadbalancer will retry if the gRPC status code in the response header is set to deadline-exceeded -* resource-exhausted: Loadbalancer will retry if the gRPC status code in the response header is set to resource-exhausted -* unavailable: Loadbalancer will retry if the gRPC status code in the response header is set to unavailable`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - }, - }, - }, - "timeout": { - Type: resource_compute_url_map_schema.TypeList, - Computed: true, - Optional: true, - Description: `Specifies the timeout for the selected route. Timeout is computed from the time the request has been -fully processed (i.e. end-of-stream) up until the response has been completely processed. Timeout includes all retries. - -If not specified, will use the largest timeout among all backend services associated with the route.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "nanos": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented -with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive.`, - }, - "seconds": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. 
-Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years`, - }, - }, - }, - }, - "url_rewrite": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The spec to modify the URL of the request, prior to forwarding the request to the matched service.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "host_rewrite": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `Prior to forwarding the request to the selected service, the request's host header is replaced -with contents of hostRewrite. - -The value must be between 1 and 255 characters.`, - }, - "path_prefix_rewrite": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `Prior to forwarding the request to the selected backend service, the matching portion of the -request's path is replaced by pathPrefixRewrite. - -The value must be between 1 and 1024 characters.`, - }, - }, - }, - }, - "weighted_backend_services": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of weighted backend services to send traffic to when a route match occurs. -The weights determine the fraction of traffic that flows to their corresponding backend service. -If all traffic needs to go to a single backend service, there must be one weightedBackendService -with weight set to a non 0 number. 
- -Once a backendService is identified and before forwarding the request to the backend service, -advanced routing actions like Url rewrites and header transformations are applied depending on -additional settings specified in this HttpRouteAction.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "backend_service": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The full or partial URL to the default BackendService resource. Before forwarding the -request to backendService, the loadbalancer applies any relevant headerActions -specified as part of this backendServiceWeight.`, - }, - "header_action": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies changes to request and response headers that need to take effect for -the selected backendService. - -headerAction specified here take effect before headerAction in the enclosing -HttpRouteRule, PathMatcher and UrlMap.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "request_headers_to_add": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add to a matching request prior to forwarding the request to the backendService.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The name of the header to add.`, - }, - "header_value": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `If false, headerValue is appended to any values that already exist for the 
header. -If true, headerValue is set for the header, discarding any values that were set for that header.`, - Default: false, - }, - }, - }, - }, - "request_headers_to_remove": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the request prior to -forwarding the request to the backendService.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "response_headers_to_add": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add the response prior to sending the response back to the client.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The name of the header to add.`, - }, - "header_value": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `If false, headerValue is appended to any values that already exist for the header. 
-If true, headerValue is set for the header, discarding any values that were set for that header.`, - Default: false, - }, - }, - }, - }, - "response_headers_to_remove": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the response prior to sending the -response back to the client.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - }, - }, - }, - "weight": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - ValidateFunc: resource_compute_url_map_validation.IntBetween(0, 1000), - Description: `Specifies the fraction of traffic sent to backendService, computed as -weight / (sum of all weightedBackendService weights in routeAction) . - -The selection of a backend service is determined only for new traffic. Once a user's request -has been directed to a backendService, subsequent requests will be sent to the same backendService -as determined by the BackendService's session affinity policy. - -The value must be between 0 and 1000`, - }, - }, - }, - }, - }, - }, - }, - "default_service": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The backend service or backend bucket to use when none of the given paths match.`, - }, - "default_url_redirect": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `When none of the specified hostRules match, the request is redirected to a URL specified -by defaultUrlRedirect. 
If defaultUrlRedirect is specified, defaultService or -defaultRouteAction must not be set.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "strip_query": { - Type: resource_compute_url_map_schema.TypeBool, - Required: true, - Description: `If set to true, any accompanying query portion of the original URL is removed prior -to redirecting the request. If set to false, the query portion of the original URL is -retained. - This field is required to ensure an empty block is not set. The normal default value is false.`, - }, - "host_redirect": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The host that will be used in the redirect response instead of the one that was -supplied in the request. The value must be between 1 and 255 characters.`, - }, - "https_redirect": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `If set to true, the URL scheme in the redirected request is set to https. If set to -false, the URL scheme of the redirected request will remain the same as that of the -request. This must only be set for UrlMaps used in TargetHttpProxys. Setting this -true for TargetHttpsProxy is not permitted. The default is set to false.`, - Default: false, - }, - "path_redirect": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The path that will be used in the redirect response instead of the one that was -supplied in the request. pathRedirect cannot be supplied together with -prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the -original request will be used for the redirect. 
The value must be between 1 and 1024 -characters.`, - }, - "prefix_redirect": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, -retaining the remaining portion of the URL before redirecting the request. -prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or -neither. If neither is supplied, the path of the original request will be used for -the redirect. The value must be between 1 and 1024 characters.`, - }, - "redirect_response_code": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_url_map_validation.StringInSlice([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}, false), - Description: `The HTTP Status code to use for this RedirectAction. Supported values are: - -* MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. - -* FOUND, which corresponds to 302. - -* SEE_OTHER which corresponds to 303. - -* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method -will be retained. - -* PERMANENT_REDIRECT, which corresponds to 308. In this case, -the request method will be retained. Possible values: ["FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT"]`, - }, - }, - }, - }, - "description": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `An optional description of this resource. Provide this property when you create -the resource.`, - }, - "header_action": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies changes to request and response headers that need to take effect for -the selected backendService. 
HeaderAction specified here are applied after the -matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "request_headers_to_add": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add to a matching request prior to forwarding the request to the -backendService.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The name of the header.`, - }, - "header_value": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_url_map_schema.TypeBool, - Required: true, - Description: `If false, headerValue is appended to any values that already exist for the -header. 
If true, headerValue is set for the header, discarding any values that -were set for that header.`, - }, - }, - }, - }, - "request_headers_to_remove": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the request -prior to forwarding the request to the backendService.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "response_headers_to_add": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add the response prior to sending the response back to the client.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The name of the header.`, - }, - "header_value": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_url_map_schema.TypeBool, - Required: true, - Description: `If false, headerValue is appended to any values that already exist for the -header. If true, headerValue is set for the header, discarding any values that -were set for that header.`, - }, - }, - }, - }, - "response_headers_to_remove": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the response -prior to sending the response back to the client.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - }, - }, - }, - "path_rule": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The list of path rules. Use this list instead of routeRules when routing based -on simple path matching is all that's required. 
The order by which path rules -are specified does not matter. Matches are always done on the longest-path-first -basis. For example: a pathRule with a path /a/b/c/* will match before /a/b/* -irrespective of the order in which those paths appear in this list. Within a -given pathMatcher, only one of pathRules or routeRules must be set.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "paths": { - Type: resource_compute_url_map_schema.TypeSet, - Required: true, - Description: `The list of path patterns to match. Each must start with / and the only place a -\* is allowed is at the end following a /. The string fed to the path matcher -does not include any text after the first ? or #, and those chars are not -allowed here.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - Set: resource_compute_url_map_schema.HashString, - }, - "route_action": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `In response to a matching path, the load balancer performs advanced routing -actions like URL rewrites, header transformations, etc. prior to forwarding the -request to the selected backend. If routeAction specifies any -weightedBackendServices, service must not be set. Conversely if service is set, -routeAction cannot contain any weightedBackendServices. Only one of routeAction -or urlRedirect must be set.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "cors_policy": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The specification for allowing client side cross-origin requests. 
Please see W3C -Recommendation for Cross Origin Resource Sharing`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "disabled": { - Type: resource_compute_url_map_schema.TypeBool, - Required: true, - Description: `If true, specifies the CORS policy is disabled.`, - }, - "allow_credentials": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `In response to a preflight request, setting this to true indicates that the -actual request can include user credentials. This translates to the Access- -Control-Allow-Credentials header. Defaults to false.`, - Default: false, - }, - "allow_headers": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Allow-Headers header.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "allow_methods": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Allow-Methods header.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "allow_origin_regexes": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the regular expression patterns that match allowed origins. For -regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript -An origin is allowed if it matches either allow_origins or allow_origin_regex.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "allow_origins": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the list of origins that will be allowed to do CORS requests. 
An -origin is allowed if it matches either allow_origins or allow_origin_regex.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "expose_headers": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Expose-Headers header.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "max_age": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Specifies how long the results of a preflight request can be cached. This -translates to the content for the Access-Control-Max-Age header.`, - }, - }, - }, - }, - "fault_injection_policy": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The specification for fault injection introduced into traffic to test the -resiliency of clients to backend service failure. As part of fault injection, -when clients send requests to a backend service, delays can be introduced by -Loadbalancer on a percentage of requests before sending those request to the -backend service. Similarly requests from clients can be aborted by the -Loadbalancer for a percentage of requests. timeout and retry_policy will be -ignored by clients that are configured with a fault_injection_policy.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "abort": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The specification for how client requests are aborted as part of fault -injection.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "http_status": { - Type: resource_compute_url_map_schema.TypeInt, - Required: true, - Description: `The HTTP status code used to abort the request. 
The value must be between 200 -and 599 inclusive.`, - }, - "percentage": { - Type: resource_compute_url_map_schema.TypeFloat, - Required: true, - Description: `The percentage of traffic (connections/operations/requests) which will be -aborted as part of fault injection. The value must be between 0.0 and 100.0 -inclusive.`, - }, - }, - }, - }, - "delay": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The specification for how client requests are delayed as part of fault -injection, before being sent to a backend service.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "fixed_delay": { - Type: resource_compute_url_map_schema.TypeList, - Required: true, - Description: `Specifies the value of the fixed delay interval.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "seconds": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 -inclusive.`, - }, - "nanos": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations -less than one second are represented with a 0 'seconds' field and a positive -'nanos' field. Must be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - }, - "percentage": { - Type: resource_compute_url_map_schema.TypeFloat, - Required: true, - Description: `The percentage of traffic (connections/operations/requests) on which delay will -be introduced as part of fault injection. 
The value must be between 0.0 and -100.0 inclusive.`, - }, - }, - }, - }, - }, - }, - }, - "request_mirror_policy": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the policy on how requests intended for the route's backends are -shadowed to a separate mirrored backend service. Loadbalancer does not wait for -responses from the shadow service. Prior to sending traffic to the shadow -service, the host / authority header is suffixed with -shadow.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "backend_service": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The BackendService resource being mirrored to.`, - }, - }, - }, - }, - "retry_policy": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the retry policy associated with this route.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "num_retries": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Specifies the allowed number retries. This number must be > 0.`, - }, - "per_try_timeout": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies a non-zero timeout per retry attempt.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "seconds": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 -inclusive.`, - }, - "nanos": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. 
Durations -less than one second are represented with a 0 'seconds' field and a positive -'nanos' field. Must be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - }, - "retry_conditions": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies one or more conditions when this retry rule applies. Valid values are: - -* 5xx: Loadbalancer will attempt a retry if the backend service responds with -any 5xx response code, or if the backend service does not respond at all, -example: disconnects, reset, read timeout, connection failure, and refused -streams. -* gateway-error: Similar to 5xx, but only applies to response codes -502, 503 or 504. -* connect-failure: Loadbalancer will retry on failures -connecting to backend services, for example due to connection timeouts. -* retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. -Currently the only retriable error supported is 409. -* refused-stream: Loadbalancer will retry if the backend service resets the stream with a -REFUSED_STREAM error code. This reset type indicates that it is safe to retry. -* cancelled: Loadbalancer will retry if the gRPC status code in the response -header is set to cancelled -* deadline-exceeded: Loadbalancer will retry if the -gRPC status code in the response header is set to deadline-exceeded -* resource-exhausted: Loadbalancer will retry if the gRPC status code in the response -header is set to resource-exhausted -* unavailable: Loadbalancer will retry if -the gRPC status code in the response header is set to unavailable`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - }, - }, - }, - "timeout": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the timeout for the selected route. Timeout is computed from the time -the request is has been fully processed (i.e. end-of-stream) up until the -response has been completely processed. 
Timeout includes all retries. If not -specified, the default value is 15 seconds.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "seconds": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 -inclusive.`, - }, - "nanos": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations -less than one second are represented with a 0 'seconds' field and a positive -'nanos' field. Must be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - }, - "url_rewrite": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The spec to modify the URL of the request, prior to forwarding the request to -the matched service`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "host_rewrite": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `Prior to forwarding the request to the selected service, the request's host -header is replaced with contents of hostRewrite. The value must be between 1 and -255 characters.`, - }, - "path_prefix_rewrite": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `Prior to forwarding the request to the selected backend service, the matching -portion of the request's path is replaced by pathPrefixRewrite. The value must -be between 1 and 1024 characters.`, - }, - }, - }, - }, - "weighted_backend_services": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of weighted backend services to send traffic to when a route match -occurs. The weights determine the fraction of traffic that flows to their -corresponding backend service. 
If all traffic needs to go to a single backend -service, there must be one weightedBackendService with weight set to a non 0 -number. Once a backendService is identified and before forwarding the request to -the backend service, advanced routing actions like Url rewrites and header -transformations are applied depending on additional settings specified in this -HttpRouteAction.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "backend_service": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The default BackendService resource. Before -forwarding the request to backendService, the loadbalancer applies any relevant -headerActions specified as part of this backendServiceWeight.`, - }, - "weight": { - Type: resource_compute_url_map_schema.TypeInt, - Required: true, - Description: `Specifies the fraction of traffic sent to backendService, computed as weight / -(sum of all weightedBackendService weights in routeAction) . The selection of a -backend service is determined only for new traffic. Once a user's request has -been directed to a backendService, subsequent requests will be sent to the same -backendService as determined by the BackendService's session affinity policy. -The value must be between 0 and 1000`, - }, - "header_action": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies changes to request and response headers that need to take effect for -the selected backendService. 
headerAction specified here take effect before -headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "request_headers_to_add": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add to a matching request prior to forwarding the request to the -backendService.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The name of the header.`, - }, - "header_value": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_url_map_schema.TypeBool, - Required: true, - Description: `If false, headerValue is appended to any values that already exist for the -header. 
If true, headerValue is set for the header, discarding any values that -were set for that header.`, - }, - }, - }, - }, - "request_headers_to_remove": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the request -prior to forwarding the request to the backendService.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "response_headers_to_add": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add the response prior to sending the response back to the client.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The name of the header.`, - }, - "header_value": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_url_map_schema.TypeBool, - Required: true, - Description: `If false, headerValue is appended to any values that already exist for the -header. 
If true, headerValue is set for the header, discarding any values that -were set for that header.`, - }, - }, - }, - }, - "response_headers_to_remove": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the response -prior to sending the response back to the client.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "service": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The backend service or backend bucket to use if any of the given paths match.`, - }, - "url_redirect": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `When a path pattern is matched, the request is redirected to a URL specified -by urlRedirect. If urlRedirect is specified, service or routeAction must not -be set.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "strip_query": { - Type: resource_compute_url_map_schema.TypeBool, - Required: true, - Description: `If set to true, any accompanying query portion of the original URL is -removed prior to redirecting the request. If set to false, the query -portion of the original URL is retained. - This field is required to ensure an empty block is not set. The normal default value is false.`, - }, - "host_redirect": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The host that will be used in the redirect response instead of the one -that was supplied in the request. The value must be between 1 and 255 -characters.`, - }, - "https_redirect": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `If set to true, the URL scheme in the redirected request is set to https. 
-If set to false, the URL scheme of the redirected request will remain the -same as that of the request. This must only be set for UrlMaps used in -TargetHttpProxys. Setting this true for TargetHttpsProxy is not -permitted. The default is set to false.`, - Default: false, - }, - "path_redirect": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The path that will be used in the redirect response instead of the one -that was supplied in the request. pathRedirect cannot be supplied -together with prefixRedirect. Supply one alone or neither. If neither is -supplied, the path of the original request will be used for the redirect. -The value must be between 1 and 1024 characters.`, - }, - "prefix_redirect": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The prefix that replaces the prefixMatch specified in the -HttpRouteRuleMatch, retaining the remaining portion of the URL before -redirecting the request. prefixRedirect cannot be supplied together with -pathRedirect. Supply one alone or neither. If neither is supplied, the -path of the original request will be used for the redirect. The value -must be between 1 and 1024 characters.`, - }, - "redirect_response_code": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_url_map_validation.StringInSlice([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}, false), - Description: `The HTTP Status code to use for this RedirectAction. Supported values are: - -* MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. - -* FOUND, which corresponds to 302. - -* SEE_OTHER which corresponds to 303. - -* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method -will be retained. - -* PERMANENT_REDIRECT, which corresponds to 308. In this case, -the request method will be retained. 
Possible values: ["FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT"]`, - }, - }, - }, - }, - }, - }, - }, - "route_rules": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The list of ordered HTTP route rules. Use this list instead of pathRules when -advanced route matching and routing actions are desired. The order of specifying -routeRules matters: the first rule that matches will cause its specified routing -action to take effect. Within a given pathMatcher, only one of pathRules or -routeRules must be set. routeRules are not supported in UrlMaps intended for -External load balancers.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "priority": { - Type: resource_compute_url_map_schema.TypeInt, - Required: true, - Description: `For routeRules within a given pathMatcher, priority determines the order -in which load balancer will interpret routeRules. RouteRules are evaluated -in order of priority, from the lowest to highest number. The priority of -a rule decreases as its number increases (1, 2, 3, N+1). The first rule -that matches the request is applied. - -You cannot configure two or more routeRules with the same priority. -Priority for each rule must be set to a number between 0 and -2147483647 inclusive. - -Priority numbers can have gaps, which enable you to add or remove rules -in the future without affecting the rest of the rules. For example, -1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which -you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the -future without any impact on existing rules.`, - }, - "header_action": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies changes to request and response headers that need to take effect for -the selected backendService. 
The headerAction specified here are applied before -the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].r -outeAction.weightedBackendService.backendServiceWeightAction[].headerAction`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "request_headers_to_add": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add to a matching request prior to forwarding the request to the -backendService.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The name of the header.`, - }, - "header_value": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_url_map_schema.TypeBool, - Required: true, - Description: `If false, headerValue is appended to any values that already exist for the -header. 
If true, headerValue is set for the header, discarding any values that -were set for that header.`, - }, - }, - }, - }, - "request_headers_to_remove": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the request -prior to forwarding the request to the backendService.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "response_headers_to_add": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add the response prior to sending the response back to the client.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The name of the header.`, - }, - "header_value": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_url_map_schema.TypeBool, - Required: true, - Description: `If false, headerValue is appended to any values that already exist for the -header. 
If true, headerValue is set for the header, discarding any values that -were set for that header.`, - }, - }, - }, - }, - "response_headers_to_remove": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the response -prior to sending the response back to the client.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - }, - }, - }, - "match_rules": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The rules for determining a match.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "full_path_match": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `For satisfying the matchRule condition, the path of the request must exactly -match the value specified in fullPathMatch after removing any query parameters -and anchor that may be part of the original URL. FullPathMatch must be between 1 -and 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must -be specified.`, - }, - "header_matches": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies a list of header match criteria, all of which must match corresponding -headers in the request.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The name of the HTTP header to match. For matching against the HTTP request's -authority, use a headerMatch with the header name ":authority". 
For matching a -request's method, use the headerName ":method".`, - }, - "exact_match": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The value should exactly match contents of exactMatch. Only one of exactMatch, -prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.`, - }, - "invert_match": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `If set to false, the headerMatch is considered a match if the match criteria -above are met. If set to true, the headerMatch is considered a match if the -match criteria above are NOT met. Defaults to false.`, - Default: false, - }, - "prefix_match": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The value of the header must start with the contents of prefixMatch. Only one of -exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch -must be set.`, - }, - "present_match": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `A header with the contents of headerName must exist. The match takes place -whether or not the request's header has a value or not. Only one of exactMatch, -prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set.`, - }, - "range_match": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The header value must be an integer and its value must be in the range specified -in rangeMatch. If the header does not contain an integer, number or is empty, -the match fails. For example for a range [-5, 0] - -3 will match. - 0 will -not match. - 0.25 will not match. - -3someString will not match. 
Only one of -exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch -must be set.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "range_end": { - Type: resource_compute_url_map_schema.TypeInt, - Required: true, - Description: `The end of the range (exclusive).`, - }, - "range_start": { - Type: resource_compute_url_map_schema.TypeInt, - Required: true, - Description: `The start of the range (inclusive).`, - }, - }, - }, - }, - "regex_match": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The value of the header must match the regular expression specified in -regexMatch. For regular expression grammar, please see: -en.cppreference.com/w/cpp/regex/ecmascript For matching against a port -specified in the HTTP request, use a headerMatch with headerName set to PORT and -a regular expression that satisfies the RFC2616 Host header's port specifier. -Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or -rangeMatch must be set.`, - }, - "suffix_match": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The value of the header must end with the contents of suffixMatch. Only one of -exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch -must be set.`, - }, - }, - }, - }, - "ignore_case": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `Specifies that prefixMatch and fullPathMatch matches are case sensitive. -Defaults to false.`, - Default: false, - }, - "metadata_filters": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Opaque filter criteria used by Loadbalancer to restrict routing configuration to -a limited set xDS compliant clients. In their xDS requests to Loadbalancer, xDS -clients present node metadata. 
If a match takes place, the relevant routing -configuration is made available to those proxies. For each metadataFilter in -this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the -filterLabels must match the corresponding label provided in the metadata. If its -filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match -with corresponding labels in the provided metadata. metadataFilters specified -here can be overrides those specified in ForwardingRule that refers to this -UrlMap. metadataFilters only applies to Loadbalancers that have their -loadBalancingScheme set to INTERNAL_SELF_MANAGED.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "filter_labels": { - Type: resource_compute_url_map_schema.TypeList, - Required: true, - Description: `The list of label value pairs that must match labels in the provided metadata -based on filterMatchCriteria This list must not be empty and can have at the -most 64 entries.`, - MinItems: 1, - MaxItems: 64, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "name": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `Name of metadata label. The name can have a maximum length of 1024 characters -and must be at least 1 character long.`, - }, - "value": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The value of the label must match the specified value. value can have a maximum -length of 1024 characters.`, - }, - }, - }, - }, - "filter_match_criteria": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - ValidateFunc: resource_compute_url_map_validation.StringInSlice([]string{"MATCH_ALL", "MATCH_ANY"}, false), - Description: `Specifies how individual filterLabel matches within the list of filterLabels -contribute towards the overall metadataFilter match. 
Supported values are: - - MATCH_ANY: At least one of the filterLabels must have a matching label in the -provided metadata. - - MATCH_ALL: All filterLabels must have matching labels in -the provided metadata. Possible values: ["MATCH_ALL", "MATCH_ANY"]`, - }, - }, - }, - }, - "prefix_match": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `For satisfying the matchRule condition, the request's path must begin with the -specified prefixMatch. prefixMatch must begin with a /. The value must be -between 1 and 1024 characters. Only one of prefixMatch, fullPathMatch or -regexMatch must be specified.`, - }, - "query_parameter_matches": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies a list of query parameter match criteria, all of which must match -corresponding query parameters in the request.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "name": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The name of the query parameter to match. The query parameter must exist in the -request, in the absence of which the request match fails.`, - }, - "exact_match": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The queryParameterMatch matches if the value of the parameter exactly matches -the contents of exactMatch. Only one of presentMatch, exactMatch and regexMatch -must be set.`, - }, - "present_match": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `Specifies that the queryParameterMatch matches if the request contains the query -parameter, irrespective of whether the parameter has a value or not. 
Only one of -presentMatch, exactMatch and regexMatch must be set.`, - }, - "regex_match": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The queryParameterMatch matches if the value of the parameter matches the -regular expression specified by regexMatch. For the regular expression grammar, -please see en.cppreference.com/w/cpp/regex/ecmascript Only one of presentMatch, -exactMatch and regexMatch must be set.`, - }, - }, - }, - }, - "regex_match": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `For satisfying the matchRule condition, the path of the request must satisfy the -regular expression specified in regexMatch after removing any query parameters -and anchor supplied with the original URL. For regular expression grammar please -see en.cppreference.com/w/cpp/regex/ecmascript Only one of prefixMatch, -fullPathMatch or regexMatch must be specified.`, - }, - }, - }, - }, - "route_action": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `In response to a matching matchRule, the load balancer performs advanced routing -actions like URL rewrites, header transformations, etc. prior to forwarding the -request to the selected backend. If routeAction specifies any -weightedBackendServices, service must not be set. Conversely if service is set, -routeAction cannot contain any weightedBackendServices. Only one of routeAction -or urlRedirect must be set.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "cors_policy": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The specification for allowing client side cross-origin requests. 
Please see W3C -Recommendation for Cross Origin Resource Sharing`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "allow_credentials": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `In response to a preflight request, setting this to true indicates that the -actual request can include user credentials. This translates to the Access- -Control-Allow-Credentials header. Defaults to false.`, - Default: false, - }, - "allow_headers": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Allow-Headers header.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "allow_methods": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Allow-Methods header.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "allow_origin_regexes": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the regular expression patterns that match allowed origins. For -regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript -An origin is allowed if it matches either allow_origins or allow_origin_regex.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "allow_origins": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the list of origins that will be allowed to do CORS requests. 
An -origin is allowed if it matches either allow_origins or allow_origin_regex.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "disabled": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `If true, specifies the CORS policy is disabled. -which indicates that the CORS policy is in effect. Defaults to false.`, - Default: false, - }, - "expose_headers": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Expose-Headers header.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "max_age": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Specifies how long the results of a preflight request can be cached. This -translates to the content for the Access-Control-Max-Age header.`, - }, - }, - }, - }, - "fault_injection_policy": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The specification for fault injection introduced into traffic to test the -resiliency of clients to backend service failure. As part of fault injection, -when clients send requests to a backend service, delays can be introduced by -Loadbalancer on a percentage of requests before sending those request to the -backend service. Similarly requests from clients can be aborted by the -Loadbalancer for a percentage of requests. 
timeout and retry_policy will be -ignored by clients that are configured with a fault_injection_policy.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "abort": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The specification for how client requests are aborted as part of fault -injection.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "http_status": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `The HTTP status code used to abort the request. The value must be between 200 -and 599 inclusive.`, - }, - "percentage": { - Type: resource_compute_url_map_schema.TypeFloat, - Optional: true, - Description: `The percentage of traffic (connections/operations/requests) which will be -aborted as part of fault injection. The value must be between 0.0 and 100.0 -inclusive.`, - }, - }, - }, - }, - "delay": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The specification for how client requests are delayed as part of fault -injection, before being sent to a backend service.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "fixed_delay": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the value of the fixed delay interval.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "seconds": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `Span of time at a resolution of a second. 
Must be from 0 to 315,576,000,000 -inclusive.`, - }, - "nanos": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations -less than one second are represented with a 0 'seconds' field and a positive -'nanos' field. Must be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - }, - "percentage": { - Type: resource_compute_url_map_schema.TypeFloat, - Optional: true, - Description: `The percentage of traffic (connections/operations/requests) on which delay will -be introduced as part of fault injection. The value must be between 0.0 and -100.0 inclusive.`, - }, - }, - }, - }, - }, - }, - }, - "request_mirror_policy": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the policy on how requests intended for the route's backends are -shadowed to a separate mirrored backend service. Loadbalancer does not wait for -responses from the shadow service. Prior to sending traffic to the shadow -service, the host / authority header is suffixed with -shadow.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "backend_service": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The BackendService resource being mirrored to.`, - }, - }, - }, - }, - "retry_policy": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the retry policy associated with this route.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "num_retries": { - Type: resource_compute_url_map_schema.TypeInt, - Required: true, - Description: `Specifies the allowed number retries. 
This number must be > 0.`, - }, - "per_try_timeout": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies a non-zero timeout per retry attempt. -If not specified, will use the timeout set in HttpRouteAction. If timeout in HttpRouteAction -is not set, will use the largest timeout among all backend services associated with the route.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "seconds": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 -inclusive.`, - }, - "nanos": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations -less than one second are represented with a 0 'seconds' field and a positive -'nanos' field. Must be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - }, - "retry_conditions": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specfies one or more conditions when this retry rule applies. Valid values are: - -* 5xx: Loadbalancer will attempt a retry if the backend service responds with - any 5xx response code, or if the backend service does not respond at all, - example: disconnects, reset, read timeout, connection failure, and refused - streams. -* gateway-error: Similar to 5xx, but only applies to response codes - 502, 503 or 504. -* connect-failure: Loadbalancer will retry on failures - connecting to backend services, for example due to connection timeouts. -* retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. - Currently the only retriable error supported is 409. -* refused-stream: Loadbalancer will retry if the backend service resets the stream with a - REFUSED_STREAM error code. This reset type indicates that it is safe to retry. 
-* cancelled: Loadbalancer will retry if the gRPC status code in the response - header is set to cancelled -* deadline-exceeded: Loadbalancer will retry if the - gRPC status code in the response header is set to deadline-exceeded -* resource-exhausted: Loadbalancer will retry if the gRPC status code in the response - header is set to resource-exhausted -* unavailable: Loadbalancer will retry if the gRPC status code in - the response header is set to unavailable`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - }, - }, - }, - "timeout": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies the timeout for the selected route. Timeout is computed from the time -the request is has been fully processed (i.e. end-of-stream) up until the -response has been completely processed. Timeout includes all retries. If not -specified, the default value is 15 seconds.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "seconds": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 -inclusive.`, - }, - "nanos": { - Type: resource_compute_url_map_schema.TypeInt, - Optional: true, - Description: `Span of time that's a fraction of a second at nanosecond resolution. Durations -less than one second are represented with a 0 'seconds' field and a positive -'nanos' field. 
Must be from 0 to 999,999,999 inclusive.`, - }, - }, - }, - }, - "url_rewrite": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The spec to modify the URL of the request, prior to forwarding the request to -the matched service`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "host_rewrite": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `Prior to forwarding the request to the selected service, the request's host -header is replaced with contents of hostRewrite. The value must be between 1 and -255 characters.`, - }, - "path_prefix_rewrite": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `Prior to forwarding the request to the selected backend service, the matching -portion of the request's path is replaced by pathPrefixRewrite. The value must -be between 1 and 1024 characters.`, - }, - }, - }, - }, - "weighted_backend_services": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of weighted backend services to send traffic to when a route match -occurs. The weights determine the fraction of traffic that flows to their -corresponding backend service. If all traffic needs to go to a single backend -service, there must be one weightedBackendService with weight set to a non 0 -number. 
Once a backendService is identified and before forwarding the request to -the backend service, advanced routing actions like Url rewrites and header -transformations are applied depending on additional settings specified in this -HttpRouteAction.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "backend_service": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The default BackendService resource. Before -forwarding the request to backendService, the loadbalancer applies any relevant -headerActions specified as part of this backendServiceWeight.`, - }, - "weight": { - Type: resource_compute_url_map_schema.TypeInt, - Required: true, - Description: `Specifies the fraction of traffic sent to backendService, computed as weight / -(sum of all weightedBackendService weights in routeAction) . The selection of a -backend service is determined only for new traffic. Once a user's request has -been directed to a backendService, subsequent requests will be sent to the same -backendService as determined by the BackendService's session affinity policy. -The value must be between 0 and 1000`, - }, - "header_action": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Specifies changes to request and response headers that need to take effect for -the selected backendService. 
headerAction specified here take effect before -headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "request_headers_to_add": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add to a matching request prior to forwarding the request to the -backendService.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The name of the header.`, - }, - "header_value": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_url_map_schema.TypeBool, - Required: true, - Description: `If false, headerValue is appended to any values that already exist for the -header. 
If true, headerValue is set for the header, discarding any values that -were set for that header.`, - }, - }, - }, - }, - "request_headers_to_remove": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the request -prior to forwarding the request to the backendService.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - "response_headers_to_add": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `Headers to add the response prior to sending the response back to the client.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "header_name": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The name of the header.`, - }, - "header_value": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_compute_url_map_schema.TypeBool, - Required: true, - Description: `If false, headerValue is appended to any values that already exist for the -header. 
If true, headerValue is set for the header, discarding any values that -were set for that header.`, - }, - }, - }, - }, - "response_headers_to_remove": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the response -prior to sending the response back to the client.`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "service": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The backend service resource to which traffic is -directed if this rule is matched. If routeAction is additionally specified, -advanced routing actions like URL Rewrites, etc. take effect prior to sending -the request to the backend. However, if service is specified, routeAction cannot -contain any weightedBackendService s. Conversely, if routeAction specifies any -weightedBackendServices, service must not be specified. Only one of urlRedirect, -service or routeAction.weightedBackendService must be set.`, - }, - "url_redirect": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `When this rule is matched, the request is redirected to a URL specified by -urlRedirect. If urlRedirect is specified, service or routeAction must not be -set.`, - MaxItems: 1, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "host_redirect": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The host that will be used in the redirect response instead of the one that was -supplied in the request. 
The value must be between 1 and 255 characters.`, - }, - "https_redirect": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `If set to true, the URL scheme in the redirected request is set to https. If set -to false, the URL scheme of the redirected request will remain the same as that -of the request. This must only be set for UrlMaps used in TargetHttpProxys. -Setting this true for TargetHttpsProxy is not permitted. Defaults to false.`, - Default: false, - }, - "path_redirect": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The path that will be used in the redirect response instead of the one that was -supplied in the request. Only one of pathRedirect or prefixRedirect must be -specified. The value must be between 1 and 1024 characters.`, - }, - "prefix_redirect": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, -retaining the remaining portion of the URL before redirecting the request.`, - }, - "redirect_response_code": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - ValidateFunc: resource_compute_url_map_validation.StringInSlice([]string{"FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT", ""}, false), - Description: `The HTTP Status code to use for this RedirectAction. Supported values are: - -* MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. - -* FOUND, which corresponds to 302. - -* SEE_OTHER which corresponds to 303. - -* TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method will be retained. - -* PERMANENT_REDIRECT, which corresponds to 308. In this case, the request method will be retained. 
Possible values: ["FOUND", "MOVED_PERMANENTLY_DEFAULT", "PERMANENT_REDIRECT", "SEE_OTHER", "TEMPORARY_REDIRECT"]`, - }, - "strip_query": { - Type: resource_compute_url_map_schema.TypeBool, - Optional: true, - Description: `If set to true, any accompanying query portion of the original URL is removed -prior to redirecting the request. If set to false, the query portion of the -original URL is retained. Defaults to false.`, - Default: false, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "test": { - Type: resource_compute_url_map_schema.TypeList, - Optional: true, - Description: `The list of expected URL mapping tests. Request to update this UrlMap will -succeed only if all of the test cases pass. You can specify a maximum of 100 -tests per UrlMap.`, - Elem: &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "host": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `Host portion of the URL.`, - }, - "path": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `Path portion of the URL.`, - }, - "service": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The backend service or backend bucket link that should be matched by this test.`, - }, - "description": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `Description of this test case.`, - }, - }, - }, - }, - "creation_timestamp": { - Type: resource_compute_url_map_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "fingerprint": { - Type: resource_compute_url_map_schema.TypeString, - Computed: true, - Description: `Fingerprint of this resource. A hash of the contents stored in this object. 
This -field is used in optimistic locking.`, - }, - "map_id": { - Type: resource_compute_url_map_schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_url_map_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func computeUrlMapHostRuleSchema() *resource_compute_url_map_schema.Resource { - return &resource_compute_url_map_schema.Resource{ - Schema: map[string]*resource_compute_url_map_schema.Schema{ - "hosts": { - Type: resource_compute_url_map_schema.TypeSet, - Required: true, - Description: `The list of host patterns to match. They must be valid hostnames, except * will -match any string of ([a-z0-9-.]*). In that case, * must be the first character -and must be followed in the pattern by either - or ..`, - Elem: &resource_compute_url_map_schema.Schema{ - Type: resource_compute_url_map_schema.TypeString, - }, - Set: resource_compute_url_map_schema.HashString, - }, - "path_matcher": { - Type: resource_compute_url_map_schema.TypeString, - Required: true, - Description: `The name of the PathMatcher to use to match the path portion of the URL if the -hostRule matches the URL's host portion.`, - }, - "description": { - Type: resource_compute_url_map_schema.TypeString, - Optional: true, - Description: `An optional description of this resource. 
Provide this property when you create -the resource.`, - }, - }, - } -} - -func resourceComputeUrlMapCreate(d *resource_compute_url_map_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - defaultServiceProp, err := expandComputeUrlMapDefaultService(d.Get("default_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_service"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(defaultServiceProp)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, defaultServiceProp)) { - obj["defaultService"] = defaultServiceProp - } - descriptionProp, err := expandComputeUrlMapDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - fingerprintProp, err := expandComputeUrlMapFingerprint(d.Get("fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(fingerprintProp)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, fingerprintProp)) { - obj["fingerprint"] = fingerprintProp - } - headerActionProp, err := expandComputeUrlMapHeaderAction(d.Get("header_action"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("header_action"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(headerActionProp)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, headerActionProp)) { - obj["headerAction"] = headerActionProp - } - hostRulesProp, err := expandComputeUrlMapHostRule(d.Get("host_rule"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("host_rule"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(hostRulesProp)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, hostRulesProp)) { - obj["hostRules"] = hostRulesProp - } - nameProp, err := expandComputeUrlMapName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(nameProp)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - pathMatchersProp, err := expandComputeUrlMapPathMatcher(d.Get("path_matcher"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("path_matcher"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(pathMatchersProp)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, pathMatchersProp)) { - obj["pathMatchers"] = pathMatchersProp - } - testsProp, err := expandComputeUrlMapTest(d.Get("test"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("test"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(testsProp)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, testsProp)) { - obj["tests"] = testsProp - } - defaultUrlRedirectProp, err := expandComputeUrlMapDefaultUrlRedirect(d.Get("default_url_redirect"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_url_redirect"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(defaultUrlRedirectProp)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, defaultUrlRedirectProp)) { - obj["defaultUrlRedirect"] = defaultUrlRedirectProp - } - defaultRouteActionProp, err := expandComputeUrlMapDefaultRouteAction(d.Get("default_route_action"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_route_action"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(defaultRouteActionProp)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, defaultRouteActionProp)) 
{ - obj["defaultRouteAction"] = defaultRouteActionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/urlMaps") - if err != nil { - return err - } - - resource_compute_url_map_log.Printf("[DEBUG] Creating new UrlMap: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_url_map_fmt.Errorf("Error fetching project for UrlMap: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_url_map_schema.TimeoutCreate)) - if err != nil { - return resource_compute_url_map_fmt.Errorf("Error creating UrlMap: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/urlMaps/{{name}}") - if err != nil { - return resource_compute_url_map_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating UrlMap", userAgent, - d.Timeout(resource_compute_url_map_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_url_map_fmt.Errorf("Error waiting to create UrlMap: %s", err) - } - - resource_compute_url_map_log.Printf("[DEBUG] Finished creating UrlMap %q: %#v", d.Id(), res) - - return resourceComputeUrlMapRead(d, meta) -} - -func resourceComputeUrlMapRead(d *resource_compute_url_map_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/urlMaps/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_url_map_fmt.Errorf("Error fetching project for UrlMap: %s", err) - } - 
billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_url_map_fmt.Sprintf("ComputeUrlMap %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_url_map_fmt.Errorf("Error reading UrlMap: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeUrlMapCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_url_map_fmt.Errorf("Error reading UrlMap: %s", err) - } - if err := d.Set("default_service", flattenComputeUrlMapDefaultService(res["defaultService"], d, config)); err != nil { - return resource_compute_url_map_fmt.Errorf("Error reading UrlMap: %s", err) - } - if err := d.Set("description", flattenComputeUrlMapDescription(res["description"], d, config)); err != nil { - return resource_compute_url_map_fmt.Errorf("Error reading UrlMap: %s", err) - } - if err := d.Set("map_id", flattenComputeUrlMapMapId(res["id"], d, config)); err != nil { - return resource_compute_url_map_fmt.Errorf("Error reading UrlMap: %s", err) - } - if err := d.Set("fingerprint", flattenComputeUrlMapFingerprint(res["fingerprint"], d, config)); err != nil { - return resource_compute_url_map_fmt.Errorf("Error reading UrlMap: %s", err) - } - if err := d.Set("header_action", flattenComputeUrlMapHeaderAction(res["headerAction"], d, config)); err != nil { - return resource_compute_url_map_fmt.Errorf("Error reading UrlMap: %s", err) - } - if err := d.Set("host_rule", flattenComputeUrlMapHostRule(res["hostRules"], d, config)); err != nil { - return resource_compute_url_map_fmt.Errorf("Error reading UrlMap: %s", err) - } - if err := d.Set("name", flattenComputeUrlMapName(res["name"], d, config)); err != nil { - return resource_compute_url_map_fmt.Errorf("Error reading UrlMap: %s", err) - } - if err := 
d.Set("path_matcher", flattenComputeUrlMapPathMatcher(res["pathMatchers"], d, config)); err != nil { - return resource_compute_url_map_fmt.Errorf("Error reading UrlMap: %s", err) - } - if err := d.Set("test", flattenComputeUrlMapTest(res["tests"], d, config)); err != nil { - return resource_compute_url_map_fmt.Errorf("Error reading UrlMap: %s", err) - } - if err := d.Set("default_url_redirect", flattenComputeUrlMapDefaultUrlRedirect(res["defaultUrlRedirect"], d, config)); err != nil { - return resource_compute_url_map_fmt.Errorf("Error reading UrlMap: %s", err) - } - if err := d.Set("default_route_action", flattenComputeUrlMapDefaultRouteAction(res["defaultRouteAction"], d, config)); err != nil { - return resource_compute_url_map_fmt.Errorf("Error reading UrlMap: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_url_map_fmt.Errorf("Error reading UrlMap: %s", err) - } - - return nil -} - -func resourceComputeUrlMapUpdate(d *resource_compute_url_map_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_url_map_fmt.Errorf("Error fetching project for UrlMap: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - defaultServiceProp, err := expandComputeUrlMapDefaultService(d.Get("default_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_service"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, defaultServiceProp)) { - obj["defaultService"] = defaultServiceProp - } - descriptionProp, err := expandComputeUrlMapDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("description"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - fingerprintProp, err := expandComputeUrlMapFingerprint(d.Get("fingerprint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fingerprint"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, fingerprintProp)) { - obj["fingerprint"] = fingerprintProp - } - headerActionProp, err := expandComputeUrlMapHeaderAction(d.Get("header_action"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("header_action"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, headerActionProp)) { - obj["headerAction"] = headerActionProp - } - hostRulesProp, err := expandComputeUrlMapHostRule(d.Get("host_rule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("host_rule"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, hostRulesProp)) { - obj["hostRules"] = hostRulesProp - } - nameProp, err := expandComputeUrlMapName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - pathMatchersProp, err := expandComputeUrlMapPathMatcher(d.Get("path_matcher"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("path_matcher"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, pathMatchersProp)) { - obj["pathMatchers"] = pathMatchersProp - } - testsProp, err := expandComputeUrlMapTest(d.Get("test"), d, config) - if err != 
nil { - return err - } else if v, ok := d.GetOkExists("test"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, testsProp)) { - obj["tests"] = testsProp - } - defaultUrlRedirectProp, err := expandComputeUrlMapDefaultUrlRedirect(d.Get("default_url_redirect"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_url_redirect"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, defaultUrlRedirectProp)) { - obj["defaultUrlRedirect"] = defaultUrlRedirectProp - } - defaultRouteActionProp, err := expandComputeUrlMapDefaultRouteAction(d.Get("default_route_action"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_route_action"); !isEmptyValue(resource_compute_url_map_reflect.ValueOf(v)) && (ok || !resource_compute_url_map_reflect.DeepEqual(v, defaultRouteActionProp)) { - obj["defaultRouteAction"] = defaultRouteActionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/urlMaps/{{name}}") - if err != nil { - return err - } - - resource_compute_url_map_log.Printf("[DEBUG] Updating UrlMap %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_compute_url_map_schema.TimeoutUpdate)) - - if err != nil { - return resource_compute_url_map_fmt.Errorf("Error updating UrlMap %q: %s", d.Id(), err) - } else { - resource_compute_url_map_log.Printf("[DEBUG] Finished updating UrlMap %q: %#v", d.Id(), res) - } - - err = computeOperationWaitTime( - config, res, project, "Updating UrlMap", userAgent, - d.Timeout(resource_compute_url_map_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceComputeUrlMapRead(d, meta) -} - -func resourceComputeUrlMapDelete(d 
*resource_compute_url_map_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_url_map_fmt.Errorf("Error fetching project for UrlMap: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/global/urlMaps/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_url_map_log.Printf("[DEBUG] Deleting UrlMap %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_url_map_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "UrlMap") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting UrlMap", userAgent, - d.Timeout(resource_compute_url_map_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_url_map_log.Printf("[DEBUG] Finished deleting UrlMap %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeUrlMapImport(d *resource_compute_url_map_schema.ResourceData, meta interface{}) ([]*resource_compute_url_map_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/global/urlMaps/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/global/urlMaps/{{name}}") - if err != nil { - return nil, resource_compute_url_map_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_url_map_schema.ResourceData{d}, nil -} - -func flattenComputeUrlMapCreationTimestamp(v interface{}, d 
*resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultService(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeUrlMapDescription(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapMapId(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapFingerprint(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapHeaderAction(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["request_headers_to_add"] = - flattenComputeUrlMapHeaderActionRequestHeadersToAdd(original["requestHeadersToAdd"], d, config) - transformed["request_headers_to_remove"] = - flattenComputeUrlMapHeaderActionRequestHeadersToRemove(original["requestHeadersToRemove"], d, config) - transformed["response_headers_to_add"] = - flattenComputeUrlMapHeaderActionResponseHeadersToAdd(original["responseHeadersToAdd"], d, config) - transformed["response_headers_to_remove"] = - flattenComputeUrlMapHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapHeaderActionRequestHeadersToAdd(v interface{}, d 
*resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeUrlMapHeaderActionRequestHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeUrlMapHeaderActionRequestHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeUrlMapHeaderActionRequestHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapHeaderActionRequestHeadersToAddReplace(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapHeaderActionRequestHeadersToRemove(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapHeaderActionResponseHeadersToAdd(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeUrlMapHeaderActionResponseHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": 
flattenComputeUrlMapHeaderActionResponseHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeUrlMapHeaderActionResponseHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapHeaderActionResponseHeadersToAddReplace(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapHeaderActionResponseHeadersToRemove(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapHostRule(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_compute_url_map_schema.NewSet(resource_compute_url_map_schema.HashResource(computeUrlMapHostRuleSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "description": flattenComputeUrlMapHostRuleDescription(original["description"], d, config), - "hosts": flattenComputeUrlMapHostRuleHosts(original["hosts"], d, config), - "path_matcher": flattenComputeUrlMapHostRulePathMatcher(original["pathMatcher"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapHostRuleDescription(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapHostRuleHosts(v interface{}, d *resource_compute_url_map_schema.ResourceData, 
config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_url_map_schema.NewSet(resource_compute_url_map_schema.HashString, v.([]interface{})) -} - -func flattenComputeUrlMapHostRulePathMatcher(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcher(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "default_service": flattenComputeUrlMapPathMatcherDefaultService(original["defaultService"], d, config), - "description": flattenComputeUrlMapPathMatcherDescription(original["description"], d, config), - "header_action": flattenComputeUrlMapPathMatcherHeaderAction(original["headerAction"], d, config), - "name": flattenComputeUrlMapPathMatcherName(original["name"], d, config), - "path_rule": flattenComputeUrlMapPathMatcherPathRule(original["pathRules"], d, config), - "route_rules": flattenComputeUrlMapPathMatcherRouteRules(original["routeRules"], d, config), - "default_url_redirect": flattenComputeUrlMapPathMatcherDefaultUrlRedirect(original["defaultUrlRedirect"], d, config), - "default_route_action": flattenComputeUrlMapPathMatcherDefaultRouteAction(original["defaultRouteAction"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherDefaultService(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeUrlMapPathMatcherDescription(v 
interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherHeaderAction(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["request_headers_to_add"] = - flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAdd(original["requestHeadersToAdd"], d, config) - transformed["request_headers_to_remove"] = - flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToRemove(original["requestHeadersToRemove"], d, config) - transformed["response_headers_to_add"] = - flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAdd(original["responseHeadersToAdd"], d, config) - transformed["response_headers_to_remove"] = - flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAdd(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func 
flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddReplace(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherHeaderActionRequestHeadersToRemove(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAdd(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddReplace(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherHeaderActionResponseHeadersToRemove(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRule(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "service": flattenComputeUrlMapPathMatcherPathRuleService(original["service"], d, config), - "paths": flattenComputeUrlMapPathMatcherPathRulePaths(original["paths"], d, config), - "route_action": flattenComputeUrlMapPathMatcherPathRuleRouteAction(original["routeAction"], d, config), - "url_redirect": flattenComputeUrlMapPathMatcherPathRuleUrlRedirect(original["urlRedirect"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherPathRuleService(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeUrlMapPathMatcherPathRulePaths(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_url_map_schema.NewSet(resource_compute_url_map_schema.HashString, v.([]interface{})) -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteAction(v interface{}, d *resource_compute_url_map_schema.ResourceData, 
config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cors_policy"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicy(original["corsPolicy"], d, config) - transformed["fault_injection_policy"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(original["faultInjectionPolicy"], d, config) - transformed["request_mirror_policy"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(original["requestMirrorPolicy"], d, config) - transformed["retry_policy"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicy(original["retryPolicy"], d, config) - transformed["timeout"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionTimeout(original["timeout"], d, config) - transformed["url_rewrite"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionUrlRewrite(original["urlRewrite"], d, config) - transformed["weighted_backend_services"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(original["weightedBackendServices"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicy(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allow_credentials"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(original["allowCredentials"], d, config) - transformed["allow_headers"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(original["allowHeaders"], d, config) - transformed["allow_methods"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(original["allowMethods"], d, config) 
- transformed["allow_origin_regexes"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(original["allowOriginRegexes"], d, config) - transformed["allow_origins"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(original["allowOrigins"], d, config) - transformed["disabled"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(original["disabled"], d, config) - transformed["expose_headers"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(original["exposeHeaders"], d, config) - transformed["max_age"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(original["maxAge"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d 
*resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["abort"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) - transformed["delay"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["http_status"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(original["httpStatus"], d, config) - transformed["percentage"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d 
*resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["fixed_delay"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixedDelay"], d, config) - transformed["percentage"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["nanos"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) - transformed["seconds"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) - return []interface{}{transformed} -} - -func 
flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["backend_service"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(original["backendService"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicy(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["num_retries"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(original["numRetries"], d, config) - transformed["per_try_timeout"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(original["perTryTimeout"], d, config) - transformed["retry_conditions"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(original["retryConditions"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["nanos"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) - transformed["seconds"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - 
return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionTimeout(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["nanos"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(original["nanos"], d, config) - transformed["seconds"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(original["seconds"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionUrlRewrite(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host_rewrite"] = - 
flattenComputeUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(original["hostRewrite"], d, config) - transformed["path_prefix_rewrite"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(original["pathPrefixRewrite"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "backend_service": flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(original["backendService"], d, config), - "header_action": flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(original["headerAction"], d, config), - "weight": flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(original["weight"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(v interface{}, d 
*resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["request_headers_to_add"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["requestHeadersToAdd"], d, config) - transformed["request_headers_to_remove"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["requestHeadersToRemove"], d, config) - transformed["response_headers_to_add"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["responseHeadersToAdd"], d, config) - transformed["response_headers_to_remove"] = - flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": 
flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": 
flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleUrlRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host_redirect"] = - flattenComputeUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(original["hostRedirect"], d, config) - 
transformed["https_redirect"] = - flattenComputeUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(original["httpsRedirect"], d, config) - transformed["path_redirect"] = - flattenComputeUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(original["pathRedirect"], d, config) - transformed["prefix_redirect"] = - flattenComputeUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(original["prefixRedirect"], d, config) - transformed["redirect_response_code"] = - flattenComputeUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(original["redirectResponseCode"], d, config) - transformed["strip_query"] = - flattenComputeUrlMapPathMatcherPathRuleUrlRedirectStripQuery(original["stripQuery"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherPathRuleUrlRedirectStripQuery(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRules(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - 
l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "priority": flattenComputeUrlMapPathMatcherRouteRulesPriority(original["priority"], d, config), - "service": flattenComputeUrlMapPathMatcherRouteRulesService(original["service"], d, config), - "header_action": flattenComputeUrlMapPathMatcherRouteRulesHeaderAction(original["headerAction"], d, config), - "match_rules": flattenComputeUrlMapPathMatcherRouteRulesMatchRules(original["matchRules"], d, config), - "route_action": flattenComputeUrlMapPathMatcherRouteRulesRouteAction(original["routeAction"], d, config), - "url_redirect": flattenComputeUrlMapPathMatcherRouteRulesUrlRedirect(original["urlRedirect"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherRouteRulesPriority(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesService(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeUrlMapPathMatcherRouteRulesHeaderAction(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["request_headers_to_add"] = - 
flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(original["requestHeadersToAdd"], d, config) - transformed["request_headers_to_remove"] = - flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(original["requestHeadersToRemove"], d, config) - transformed["response_headers_to_add"] = - flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(original["responseHeadersToAdd"], d, config) - transformed["response_headers_to_remove"] = - flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(v interface{}, d 
*resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRules(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "full_path_match": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(original["fullPathMatch"], d, config), - "header_matches": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(original["headerMatches"], d, config), - "ignore_case": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(original["ignoreCase"], d, config), - "metadata_filters": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(original["metadataFilters"], d, config), - "prefix_match": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(original["prefixMatch"], d, config), - "query_parameter_matches": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(original["queryParameterMatches"], d, config), - "regex_match": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(original["regexMatch"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - 
transformed = append(transformed, map[string]interface{}{ - "exact_match": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(original["exactMatch"], d, config), - "header_name": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(original["headerName"], d, config), - "invert_match": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(original["invertMatch"], d, config), - "prefix_match": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(original["prefixMatch"], d, config), - "present_match": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(original["presentMatch"], d, config), - "range_match": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(original["rangeMatch"], d, config), - "regex_match": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(original["regexMatch"], d, config), - "suffix_match": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(original["suffixMatch"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(v interface{}, d 
*resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["range_end"] = - flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(original["rangeEnd"], d, config) - transformed["range_start"] = - flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(original["rangeStart"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(v interface{}, d *resource_compute_url_map_schema.ResourceData, 
config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "filter_labels": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(original["filterLabels"], d, config), - "filter_match_criteria": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(original["filterMatchCriteria"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(original["name"], d, config), - "value": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(original["value"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "exact_match": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(original["exactMatch"], d, config), - "name": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(original["name"], d, config), - "present_match": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(original["presentMatch"], d, config), - "regex_match": flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(original["regexMatch"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteAction(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cors_policy"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(original["corsPolicy"], d, config) - transformed["fault_injection_policy"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(original["faultInjectionPolicy"], d, config) - transformed["request_mirror_policy"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(original["requestMirrorPolicy"], d, config) - transformed["retry_policy"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(original["retryPolicy"], d, config) - transformed["timeout"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionTimeout(original["timeout"], d, config) - transformed["url_rewrite"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(original["urlRewrite"], d, config) - transformed["weighted_backend_services"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(original["weightedBackendServices"], d, config) - return []interface{}{transformed} -} - -func 
flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allow_credentials"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(original["allowCredentials"], d, config) - transformed["allow_headers"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(original["allowHeaders"], d, config) - transformed["allow_methods"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(original["allowMethods"], d, config) - transformed["allow_origin_regexes"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(original["allowOriginRegexes"], d, config) - transformed["allow_origins"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(original["allowOrigins"], d, config) - transformed["disabled"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(original["disabled"], d, config) - transformed["expose_headers"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(original["exposeHeaders"], d, config) - transformed["max_age"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(original["maxAge"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["abort"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) - transformed["delay"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) - return 
[]interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["http_status"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(original["httpStatus"], d, config) - transformed["percentage"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["fixed_delay"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixedDelay"], d, config) - transformed["percentage"] = - 
flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["nanos"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) - transformed["seconds"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 
{ - return nil - } - transformed := make(map[string]interface{}) - transformed["backend_service"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(original["backendService"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["num_retries"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(original["numRetries"], d, config) - transformed["per_try_timeout"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(original["perTryTimeout"], d, config) - transformed["retry_conditions"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(original["retryConditions"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - 
original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["nanos"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) - transformed["seconds"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionTimeout(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["nanos"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(original["nanos"], d, config) - transformed["seconds"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(original["seconds"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(v interface{}, d 
*resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host_rewrite"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(original["hostRewrite"], d, config) - transformed["path_prefix_rewrite"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(original["pathPrefixRewrite"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = 
append(transformed, map[string]interface{}{ - "backend_service": flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(original["backendService"], d, config), - "header_action": flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(original["headerAction"], d, config), - "weight": flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(original["weight"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["request_headers_to_add"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["requestHeadersToAdd"], d, config) - transformed["request_headers_to_remove"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["requestHeadersToRemove"], d, config) - transformed["response_headers_to_add"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["responseHeadersToAdd"], d, config) - transformed["response_headers_to_remove"] = - flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) - return []interface{}{transformed} -} - -func 
flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host_redirect"] = - flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(original["hostRedirect"], d, config) - transformed["https_redirect"] = - flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(original["httpsRedirect"], d, config) - transformed["path_redirect"] = - flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(original["pathRedirect"], d, config) - transformed["prefix_redirect"] = - flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(original["prefixRedirect"], d, config) - transformed["redirect_response_code"] = - flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(original["redirectResponseCode"], d, config) - transformed["strip_query"] = - flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(original["stripQuery"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v 
-} - -func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultUrlRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host_redirect"] = - flattenComputeUrlMapPathMatcherDefaultUrlRedirectHostRedirect(original["hostRedirect"], d, config) - transformed["https_redirect"] = - flattenComputeUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(original["httpsRedirect"], d, config) - transformed["path_redirect"] = - flattenComputeUrlMapPathMatcherDefaultUrlRedirectPathRedirect(original["pathRedirect"], d, config) - transformed["prefix_redirect"] = - flattenComputeUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(original["prefixRedirect"], d, config) - transformed["redirect_response_code"] = - flattenComputeUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(original["redirectResponseCode"], d, config) - transformed["strip_query"] = - flattenComputeUrlMapPathMatcherDefaultUrlRedirectStripQuery(original["stripQuery"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherDefaultUrlRedirectHostRedirect(v 
interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultUrlRedirectPathRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultUrlRedirectStripQuery(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteAction(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["weighted_backend_services"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServices(original["weightedBackendServices"], d, config) - transformed["url_rewrite"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionUrlRewrite(original["urlRewrite"], d, config) - transformed["timeout"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionTimeout(original["timeout"], d, config) - transformed["retry_policy"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicy(original["retryPolicy"], d, config) - transformed["request_mirror_policy"] = - 
flattenComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicy(original["requestMirrorPolicy"], d, config) - transformed["cors_policy"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicy(original["corsPolicy"], d, config) - transformed["fault_injection_policy"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicy(original["faultInjectionPolicy"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServices(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "backend_service": flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesBackendService(original["backendService"], d, config), - "weight": flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesWeight(original["weight"], d, config), - "header_action": flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderAction(original["headerAction"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesBackendService(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesWeight(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := 
int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["request_headers_to_remove"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["requestHeadersToRemove"], d, config) - transformed["request_headers_to_add"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["requestHeadersToAdd"], d, config) - transformed["response_headers_to_remove"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) - transformed["response_headers_to_add"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["responseHeadersToAdd"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": 
flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": 
flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionUrlRewrite(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["path_prefix_rewrite"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionUrlRewritePathPrefixRewrite(original["pathPrefixRewrite"], d, config) - transformed["host_rewrite"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionUrlRewriteHostRewrite(original["hostRewrite"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionUrlRewritePathPrefixRewrite(v interface{}, d 
*resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionUrlRewriteHostRewrite(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionTimeout(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["seconds"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionTimeoutSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionTimeoutNanos(original["nanos"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionTimeoutSeconds(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionTimeoutNanos(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicy(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["retry_conditions"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyRetryConditions(original["retryConditions"], d, config) - transformed["num_retries"] = - 
flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyNumRetries(original["numRetries"], d, config) - transformed["per_try_timeout"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeout(original["perTryTimeout"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyRetryConditions(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyNumRetries(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeout(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["seconds"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if 
intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicy(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["backend_service"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicyBackendService(original["backendService"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicyBackendService(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicy(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allow_origins"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOrigins(original["allowOrigins"], d, config) - transformed["allow_origin_regexes"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOriginRegexes(original["allowOriginRegexes"], d, config) - transformed["allow_methods"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowMethods(original["allowMethods"], d, config) - transformed["allow_headers"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowHeaders(original["allowHeaders"], d, config) - transformed["expose_headers"] = - 
flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyExposeHeaders(original["exposeHeaders"], d, config) - transformed["max_age"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyMaxAge(original["maxAge"], d, config) - transformed["allow_credentials"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowCredentials(original["allowCredentials"], d, config) - transformed["disabled"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyDisabled(original["disabled"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOrigins(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowMethods(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowHeaders(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyExposeHeaders(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyMaxAge(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func 
flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowCredentials(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyDisabled(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicy(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["delay"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) - transformed["abort"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelay(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["fixed_delay"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixedDelay"], d, config) - transformed["percentage"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - 
transformed := make(map[string]interface{}) - transformed["seconds"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbort(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["http_status"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(original["httpStatus"], d, config) - transformed["percentage"] = - flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) - return []interface{}{transformed} -} - -func 
flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapTest(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "description": flattenComputeUrlMapTestDescription(original["description"], d, config), - "host": flattenComputeUrlMapTestHost(original["host"], d, config), - "path": flattenComputeUrlMapTestPath(original["path"], d, config), - "service": flattenComputeUrlMapTestService(original["service"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapTestDescription(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapTestHost(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapTestPath(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapTestService(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return 
ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeUrlMapDefaultUrlRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host_redirect"] = - flattenComputeUrlMapDefaultUrlRedirectHostRedirect(original["hostRedirect"], d, config) - transformed["https_redirect"] = - flattenComputeUrlMapDefaultUrlRedirectHttpsRedirect(original["httpsRedirect"], d, config) - transformed["path_redirect"] = - flattenComputeUrlMapDefaultUrlRedirectPathRedirect(original["pathRedirect"], d, config) - transformed["prefix_redirect"] = - flattenComputeUrlMapDefaultUrlRedirectPrefixRedirect(original["prefixRedirect"], d, config) - transformed["redirect_response_code"] = - flattenComputeUrlMapDefaultUrlRedirectRedirectResponseCode(original["redirectResponseCode"], d, config) - transformed["strip_query"] = - flattenComputeUrlMapDefaultUrlRedirectStripQuery(original["stripQuery"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapDefaultUrlRedirectHostRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultUrlRedirectHttpsRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultUrlRedirectPathRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultUrlRedirectPrefixRedirect(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultUrlRedirectRedirectResponseCode(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenComputeUrlMapDefaultUrlRedirectStripQuery(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteAction(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["weighted_backend_services"] = - flattenComputeUrlMapDefaultRouteActionWeightedBackendServices(original["weightedBackendServices"], d, config) - transformed["url_rewrite"] = - flattenComputeUrlMapDefaultRouteActionUrlRewrite(original["urlRewrite"], d, config) - transformed["timeout"] = - flattenComputeUrlMapDefaultRouteActionTimeout(original["timeout"], d, config) - transformed["retry_policy"] = - flattenComputeUrlMapDefaultRouteActionRetryPolicy(original["retryPolicy"], d, config) - transformed["request_mirror_policy"] = - flattenComputeUrlMapDefaultRouteActionRequestMirrorPolicy(original["requestMirrorPolicy"], d, config) - transformed["cors_policy"] = - flattenComputeUrlMapDefaultRouteActionCorsPolicy(original["corsPolicy"], d, config) - transformed["fault_injection_policy"] = - flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicy(original["faultInjectionPolicy"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServices(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "backend_service": flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesBackendService(original["backendService"], d, config), - "weight": 
flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesWeight(original["weight"], d, config), - "header_action": flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderAction(original["headerAction"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesBackendService(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesWeight(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderAction(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["request_headers_to_remove"] = - flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["requestHeadersToRemove"], d, config) - transformed["request_headers_to_add"] = - flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["requestHeadersToAdd"], d, config) - transformed["response_headers_to_remove"] = - flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["responseHeadersToRemove"], d, config) - transformed["response_headers_to_add"] = - 
flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["responseHeadersToAdd"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["headerName"], d, config), - "header_value": flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionUrlRewrite(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} 
{ - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["path_prefix_rewrite"] = - flattenComputeUrlMapDefaultRouteActionUrlRewritePathPrefixRewrite(original["pathPrefixRewrite"], d, config) - transformed["host_rewrite"] = - flattenComputeUrlMapDefaultRouteActionUrlRewriteHostRewrite(original["hostRewrite"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapDefaultRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionUrlRewriteHostRewrite(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionTimeout(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["seconds"] = - flattenComputeUrlMapDefaultRouteActionTimeoutSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenComputeUrlMapDefaultRouteActionTimeoutNanos(original["nanos"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapDefaultRouteActionTimeoutSeconds(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionTimeoutNanos(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func 
flattenComputeUrlMapDefaultRouteActionRetryPolicy(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["retry_conditions"] = - flattenComputeUrlMapDefaultRouteActionRetryPolicyRetryConditions(original["retryConditions"], d, config) - transformed["num_retries"] = - flattenComputeUrlMapDefaultRouteActionRetryPolicyNumRetries(original["numRetries"], d, config) - transformed["per_try_timeout"] = - flattenComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(original["perTryTimeout"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapDefaultRouteActionRetryPolicyRetryConditions(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionRetryPolicyNumRetries(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["seconds"] = - flattenComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) - return []interface{}{transformed} -} - -func 
flattenComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapDefaultRouteActionRequestMirrorPolicy(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["backend_service"] = - flattenComputeUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(original["backendService"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeUrlMapDefaultRouteActionCorsPolicy(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allow_origins"] = - flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowOrigins(original["allowOrigins"], d, config) - transformed["allow_origin_regexes"] = - flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowOriginRegexes(original["allowOriginRegexes"], d, config) - transformed["allow_methods"] = - 
flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowMethods(original["allowMethods"], d, config) - transformed["allow_headers"] = - flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowHeaders(original["allowHeaders"], d, config) - transformed["expose_headers"] = - flattenComputeUrlMapDefaultRouteActionCorsPolicyExposeHeaders(original["exposeHeaders"], d, config) - transformed["max_age"] = - flattenComputeUrlMapDefaultRouteActionCorsPolicyMaxAge(original["maxAge"], d, config) - transformed["allow_credentials"] = - flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowCredentials(original["allowCredentials"], d, config) - transformed["disabled"] = - flattenComputeUrlMapDefaultRouteActionCorsPolicyDisabled(original["disabled"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowOrigins(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowMethods(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowHeaders(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionCorsPolicyExposeHeaders(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionCorsPolicyMaxAge(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if 
floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapDefaultRouteActionCorsPolicyAllowCredentials(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionCorsPolicyDisabled(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicy(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["delay"] = - flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) - transformed["abort"] = - flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelay(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["fixed_delay"] = - flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixedDelay"], d, config) - transformed["percentage"] = - flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } 
- transformed := make(map[string]interface{}) - transformed["seconds"] = - flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbort(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["http_status"] = - flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(original["httpStatus"], d, config) - transformed["percentage"] = - flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) - return []interface{}{transformed} -} - -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d *resource_compute_url_map_schema.ResourceData, config 
*Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_url_map_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d *resource_compute_url_map_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandComputeUrlMapDefaultService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - if v == nil || v.(string) == "" { - - return "", nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "https://") { - - return v, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "projects/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "regions/") || resource_compute_url_map_strings.HasPrefix(v.(string), "zones/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return "", err - } - - return f.RelativeLink(), nil -} - -func expandComputeUrlMapDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapFingerprint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedRequestHeadersToAdd, err := expandComputeUrlMapHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd - } - - transformedRequestHeadersToRemove, err := expandComputeUrlMapHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove - } - - transformedResponseHeadersToAdd, err := expandComputeUrlMapHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd - } - - transformedResponseHeadersToRemove, err := expandComputeUrlMapHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove - } - - return transformed, nil -} - -func expandComputeUrlMapHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - 
transformedHeaderName, err := expandComputeUrlMapHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeUrlMapHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeUrlMapHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } 
- original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeUrlMapHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeUrlMapHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeUrlMapHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapHostRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = 
v.(*resource_compute_url_map_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDescription, err := expandComputeUrlMapHostRuleDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedHosts, err := expandComputeUrlMapHostRuleHosts(original["hosts"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHosts); val.IsValid() && !isEmptyValue(val) { - transformed["hosts"] = transformedHosts - } - - transformedPathMatcher, err := expandComputeUrlMapHostRulePathMatcher(original["path_matcher"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPathMatcher); val.IsValid() && !isEmptyValue(val) { - transformed["pathMatcher"] = transformedPathMatcher - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapHostRuleDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapHostRuleHosts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_url_map_schema.Set).List() - return v, nil -} - -func expandComputeUrlMapHostRulePathMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDefaultService, err := expandComputeUrlMapPathMatcherDefaultService(original["default_service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedDefaultService); val.IsValid() && !isEmptyValue(val) { - transformed["defaultService"] = transformedDefaultService - } - - transformedDescription, err := expandComputeUrlMapPathMatcherDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedHeaderAction, err := expandComputeUrlMapPathMatcherHeaderAction(original["header_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { - transformed["headerAction"] = transformedHeaderAction - } - - transformedName, err := expandComputeUrlMapPathMatcherName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedPathRule, err := expandComputeUrlMapPathMatcherPathRule(original["path_rule"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPathRule); val.IsValid() && !isEmptyValue(val) { - transformed["pathRules"] = transformedPathRule - } - - transformedRouteRules, err := expandComputeUrlMapPathMatcherRouteRules(original["route_rules"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_url_map_reflect.ValueOf(transformedRouteRules); val.IsValid() && !isEmptyValue(val) { - transformed["routeRules"] = transformedRouteRules - } - - transformedDefaultUrlRedirect, err := expandComputeUrlMapPathMatcherDefaultUrlRedirect(original["default_url_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedDefaultUrlRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["defaultUrlRedirect"] = transformedDefaultUrlRedirect - } - - transformedDefaultRouteAction, err := expandComputeUrlMapPathMatcherDefaultRouteAction(original["default_route_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedDefaultRouteAction); val.IsValid() && !isEmptyValue(val) { - transformed["defaultRouteAction"] = transformedDefaultRouteAction - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherDefaultService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - if v == nil || v.(string) == "" { - - return "", nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "https://") { - - return v, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "projects/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "regions/") || resource_compute_url_map_strings.HasPrefix(v.(string), "zones/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return "", err - } - - return f.RelativeLink(), nil -} - -func expandComputeUrlMapPathMatcherDescription(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequestHeadersToAdd, err := expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd - } - - transformedRequestHeadersToRemove, err := expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove - } - - transformedResponseHeadersToAdd, err := expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd - } - - transformedResponseHeadersToRemove, err := expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToRemove"] = 
transformedResponseHeadersToRemove - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToAddReplace(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddHeaderValue(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedService, err := expandComputeUrlMapPathMatcherPathRuleService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - transformedPaths, err := expandComputeUrlMapPathMatcherPathRulePaths(original["paths"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPaths); val.IsValid() && !isEmptyValue(val) { - transformed["paths"] = transformedPaths - } - - transformedRouteAction, err := expandComputeUrlMapPathMatcherPathRuleRouteAction(original["route_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRouteAction); val.IsValid() && !isEmptyValue(val) { - transformed["routeAction"] = transformedRouteAction - } - - transformedUrlRedirect, err := expandComputeUrlMapPathMatcherPathRuleUrlRedirect(original["url_redirect"], d, 
config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedUrlRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["urlRedirect"] = transformedUrlRedirect - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherPathRuleService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - if v == nil || v.(string) == "" { - - return "", nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "https://") { - - return v, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "projects/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "regions/") || resource_compute_url_map_strings.HasPrefix(v.(string), "zones/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return "", err - } - - return f.RelativeLink(), nil -} - -func expandComputeUrlMapPathMatcherPathRulePaths(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_url_map_schema.Set).List() - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCorsPolicy, err := expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicy(original["cors_policy"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_url_map_reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["corsPolicy"] = transformedCorsPolicy - } - - transformedFaultInjectionPolicy, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(original["fault_injection_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["faultInjectionPolicy"] = transformedFaultInjectionPolicy - } - - transformedRequestMirrorPolicy, err := expandComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(original["request_mirror_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["requestMirrorPolicy"] = transformedRequestMirrorPolicy - } - - transformedRetryPolicy, err := expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicy(original["retry_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["retryPolicy"] = transformedRetryPolicy - } - - transformedTimeout, err := expandComputeUrlMapPathMatcherPathRuleRouteActionTimeout(original["timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["timeout"] = transformedTimeout - } - - transformedUrlRewrite, err := expandComputeUrlMapPathMatcherPathRuleRouteActionUrlRewrite(original["url_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["urlRewrite"] = transformedUrlRewrite - } - - 
transformedWeightedBackendServices, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(original["weighted_backend_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !isEmptyValue(val) { - transformed["weightedBackendServices"] = transformedWeightedBackendServices - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowCredentials, err := expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(original["allow_credentials"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !isEmptyValue(val) { - transformed["allowCredentials"] = transformedAllowCredentials - } - - transformedAllowHeaders, err := expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(original["allow_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["allowHeaders"] = transformedAllowHeaders - } - - transformedAllowMethods, err := expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(original["allow_methods"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowMethods); val.IsValid() && !isEmptyValue(val) { - transformed["allowMethods"] = transformedAllowMethods - } - - transformedAllowOriginRegexes, err := 
expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(original["allow_origin_regexes"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !isEmptyValue(val) { - transformed["allowOriginRegexes"] = transformedAllowOriginRegexes - } - - transformedAllowOrigins, err := expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(original["allow_origins"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !isEmptyValue(val) { - transformed["allowOrigins"] = transformedAllowOrigins - } - - transformedDisabled, err := expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(original["disabled"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { - transformed["disabled"] = transformedDisabled - } - - transformedExposeHeaders, err := expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(original["expose_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["exposeHeaders"] = transformedExposeHeaders - } - - transformedMaxAge, err := expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(original["max_age"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedMaxAge); val.IsValid() && !isEmptyValue(val) { - transformed["maxAge"] = transformedMaxAge - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowMethods(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionCorsPolicyMaxAge(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAbort, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAbort); val.IsValid() && !isEmptyValue(val) { - transformed["abort"] = transformedAbort - } - - transformedDelay, err := 
expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedDelay); val.IsValid() && !isEmptyValue(val) { - transformed["delay"] = transformedDelay - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHttpStatus, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(original["http_status"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHttpStatus); val.IsValid() && !isEmptyValue(val) { - transformed["httpStatus"] = transformedHttpStatus - } - - transformedPercentage, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { - transformed["percentage"] = transformedPercentage - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := 
v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFixedDelay, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixed_delay"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedFixedDelay); val.IsValid() && !isEmptyValue(val) { - transformed["fixedDelay"] = transformedFixedDelay - } - - transformedPercentage, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { - transformed["percentage"] = transformedPercentage - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNanos, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - transformedSeconds, err := expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = 
transformedSeconds - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBackendService, err := expandComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(original["backend_service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { - transformed["backendService"] = transformedBackendService - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionRequestMirrorPolicyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - if v == nil || v.(string) == "" { - - return "", nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "https://") { - - return v, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "projects/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if 
resource_compute_url_map_strings.HasPrefix(v.(string), "regions/") || resource_compute_url_map_strings.HasPrefix(v.(string), "zones/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return "", err - } - - return f.RelativeLink(), nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNumRetries, err := expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(original["num_retries"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedNumRetries); val.IsValid() && !isEmptyValue(val) { - transformed["numRetries"] = transformedNumRetries - } - - transformedPerTryTimeout, err := expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(original["per_try_timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["perTryTimeout"] = transformedPerTryTimeout - } - - transformedRetryConditions, err := expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(original["retry_conditions"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRetryConditions); val.IsValid() && !isEmptyValue(val) { - transformed["retryConditions"] = transformedRetryConditions - } - - return transformed, nil -} - -func 
expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyNumRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNanos, err := expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - transformedSeconds, err := expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionRetryPolicyRetryConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return 
nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNanos, err := expandComputeUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - transformedSeconds, err := expandComputeUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionUrlRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHostRewrite, err := expandComputeUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(original["host_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHostRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["hostRewrite"] = transformedHostRewrite - } - - transformedPathPrefixRewrite, err := expandComputeUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(original["path_prefix_rewrite"], d, config) - if err 
!= nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["pathPrefixRewrite"] = transformedPathPrefixRewrite - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionUrlRewriteHostRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBackendService, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(original["backend_service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { - transformed["backendService"] = transformedBackendService - } - - transformedHeaderAction, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(original["header_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { - transformed["headerAction"] = transformedHeaderAction - } - - transformedWeight, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(original["weight"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_url_map_reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { - transformed["weight"] = transformedWeight - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - if v == nil || v.(string) == "" { - - return "", nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "https://") { - - return v, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "projects/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "regions/") || resource_compute_url_map_strings.HasPrefix(v.(string), "zones/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return "", err - } - - return f.RelativeLink(), nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequestHeadersToAdd, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToAdd"] = 
transformedRequestHeadersToAdd - } - - transformedRequestHeadersToRemove, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove - } - - transformedResponseHeadersToAdd, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd - } - - transformedResponseHeadersToRemove, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, 
config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleRouteActionWeightedBackendServicesWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHostRedirect, err := expandComputeUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(original["host_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["hostRedirect"] = transformedHostRedirect - } - - transformedHttpsRedirect, err := expandComputeUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(original["https_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["httpsRedirect"] = transformedHttpsRedirect - } - - transformedPathRedirect, err := expandComputeUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(original["path_redirect"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_url_map_reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["pathRedirect"] = transformedPathRedirect - } - - transformedPrefixRedirect, err := expandComputeUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["prefixRedirect"] = transformedPrefixRedirect - } - - transformedRedirectResponseCode, err := expandComputeUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { - transformed["redirectResponseCode"] = transformedRedirectResponseCode - } - - transformedStripQuery, err := expandComputeUrlMapPathMatcherPathRuleUrlRedirectStripQuery(original["strip_query"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { - transformed["stripQuery"] = transformedStripQuery - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherPathRuleUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeUrlMapPathMatcherPathRuleUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherPathRuleUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPriority, err := expandComputeUrlMapPathMatcherRouteRulesPriority(original["priority"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPriority); val.IsValid() && !isEmptyValue(val) { - transformed["priority"] = transformedPriority - } - - transformedService, err := expandComputeUrlMapPathMatcherRouteRulesService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - transformedHeaderAction, err := expandComputeUrlMapPathMatcherRouteRulesHeaderAction(original["header_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { - transformed["headerAction"] = transformedHeaderAction - } - - transformedMatchRules, err := expandComputeUrlMapPathMatcherRouteRulesMatchRules(original["match_rules"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedMatchRules); val.IsValid() && !isEmptyValue(val) { - transformed["matchRules"] = transformedMatchRules - } - - 
transformedRouteAction, err := expandComputeUrlMapPathMatcherRouteRulesRouteAction(original["route_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRouteAction); val.IsValid() && !isEmptyValue(val) { - transformed["routeAction"] = transformedRouteAction - } - - transformedUrlRedirect, err := expandComputeUrlMapPathMatcherRouteRulesUrlRedirect(original["url_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedUrlRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["urlRedirect"] = transformedUrlRedirect - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesPriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - if v == nil || v.(string) == "" { - - return "", nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "https://") { - - return v, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "projects/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "regions/") || resource_compute_url_map_strings.HasPrefix(v.(string), "zones/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return "", err - } - - return f.RelativeLink(), nil -} - -func expandComputeUrlMapPathMatcherRouteRulesHeaderAction(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequestHeadersToAdd, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd - } - - transformedRequestHeadersToRemove, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove - } - - transformedResponseHeadersToAdd, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd - } - - transformedResponseHeadersToRemove, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAdd(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeUrlMapPathMatcherRouteRulesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFullPathMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(original["full_path_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedFullPathMatch); val.IsValid() && !isEmptyValue(val) { - transformed["fullPathMatch"] = transformedFullPathMatch - } - - transformedHeaderMatches, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(original["header_matches"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderMatches); val.IsValid() && !isEmptyValue(val) { - transformed["headerMatches"] = transformedHeaderMatches - } - - transformedIgnoreCase, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(original["ignore_case"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedIgnoreCase); val.IsValid() && !isEmptyValue(val) { - transformed["ignoreCase"] = transformedIgnoreCase - } - - transformedMetadataFilters, err := 
expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(original["metadata_filters"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedMetadataFilters); val.IsValid() && !isEmptyValue(val) { - transformed["metadataFilters"] = transformedMetadataFilters - } - - transformedPrefixMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(original["prefix_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !isEmptyValue(val) { - transformed["prefixMatch"] = transformedPrefixMatch - } - - transformedQueryParameterMatches, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(original["query_parameter_matches"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedQueryParameterMatches); val.IsValid() && !isEmptyValue(val) { - transformed["queryParameterMatches"] = transformedQueryParameterMatches - } - - transformedRegexMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(original["regex_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRegexMatch); val.IsValid() && !isEmptyValue(val) { - transformed["regexMatch"] = transformedRegexMatch - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesFullPathMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatches(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - 
transformed := make(map[string]interface{}) - - transformedExactMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(original["exact_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedExactMatch); val.IsValid() && !isEmptyValue(val) { - transformed["exactMatch"] = transformedExactMatch - } - - transformedHeaderName, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedInvertMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(original["invert_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedInvertMatch); val.IsValid() && !isEmptyValue(val) { - transformed["invertMatch"] = transformedInvertMatch - } - - transformedPrefixMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(original["prefix_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !isEmptyValue(val) { - transformed["prefixMatch"] = transformedPrefixMatch - } - - transformedPresentMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(original["present_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPresentMatch); val.IsValid() && !isEmptyValue(val) { - transformed["presentMatch"] = transformedPresentMatch - } - - transformedRangeMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(original["range_match"], d, 
config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRangeMatch); val.IsValid() && !isEmptyValue(val) { - transformed["rangeMatch"] = transformedRangeMatch - } - - transformedRegexMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(original["regex_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRegexMatch); val.IsValid() && !isEmptyValue(val) { - transformed["regexMatch"] = transformedRegexMatch - } - - transformedSuffixMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(original["suffix_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedSuffixMatch); val.IsValid() && !isEmptyValue(val) { - transformed["suffixMatch"] = transformedSuffixMatch - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesExactMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesInvertMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPrefixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesPresentMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatch(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRangeEnd, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(original["range_end"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRangeEnd); val.IsValid() && !isEmptyValue(val) { - transformed["rangeEnd"] = transformedRangeEnd - } - - transformedRangeStart, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(original["range_start"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRangeStart); val.IsValid() && !isEmptyValue(val) { - transformed["rangeStart"] = transformedRangeStart - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeEnd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRangeMatchRangeStart(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesRegexMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesHeaderMatchesSuffixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesIgnoreCase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFilters(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFilterLabels, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(original["filter_labels"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedFilterLabels); val.IsValid() && !isEmptyValue(val) { - transformed["filterLabels"] = transformedFilterLabels - } - - transformedFilterMatchCriteria, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(original["filter_match_criteria"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedFilterMatchCriteria); val.IsValid() && !isEmptyValue(val) { - transformed["filterMatchCriteria"] = transformedFilterMatchCriteria - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedValue, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(original["value"], d, config) - if 
err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterLabelsValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesMetadataFiltersFilterMatchCriteria(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesPrefixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatches(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExactMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(original["exact_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedExactMatch); val.IsValid() && !isEmptyValue(val) { - transformed["exactMatch"] = transformedExactMatch - } - - transformedName, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedName); val.IsValid() && 
!isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedPresentMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(original["present_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPresentMatch); val.IsValid() && !isEmptyValue(val) { - transformed["presentMatch"] = transformedPresentMatch - } - - transformedRegexMatch, err := expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(original["regex_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRegexMatch); val.IsValid() && !isEmptyValue(val) { - transformed["regexMatch"] = transformedRegexMatch - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesExactMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesPresentMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesQueryParameterMatchesRegexMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesMatchRulesRegexMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil 
- } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCorsPolicy, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(original["cors_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["corsPolicy"] = transformedCorsPolicy - } - - transformedFaultInjectionPolicy, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(original["fault_injection_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["faultInjectionPolicy"] = transformedFaultInjectionPolicy - } - - transformedRequestMirrorPolicy, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(original["request_mirror_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["requestMirrorPolicy"] = transformedRequestMirrorPolicy - } - - transformedRetryPolicy, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(original["retry_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["retryPolicy"] = transformedRetryPolicy - } - - transformedTimeout, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionTimeout(original["timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["timeout"] = transformedTimeout - } - - transformedUrlRewrite, err := 
expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(original["url_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["urlRewrite"] = transformedUrlRewrite - } - - transformedWeightedBackendServices, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(original["weighted_backend_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !isEmptyValue(val) { - transformed["weightedBackendServices"] = transformedWeightedBackendServices - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowCredentials, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(original["allow_credentials"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !isEmptyValue(val) { - transformed["allowCredentials"] = transformedAllowCredentials - } - - transformedAllowHeaders, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(original["allow_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["allowHeaders"] = transformedAllowHeaders - } - - transformedAllowMethods, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(original["allow_methods"], 
d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowMethods); val.IsValid() && !isEmptyValue(val) { - transformed["allowMethods"] = transformedAllowMethods - } - - transformedAllowOriginRegexes, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(original["allow_origin_regexes"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !isEmptyValue(val) { - transformed["allowOriginRegexes"] = transformedAllowOriginRegexes - } - - transformedAllowOrigins, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(original["allow_origins"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !isEmptyValue(val) { - transformed["allowOrigins"] = transformedAllowOrigins - } - - transformedDisabled, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(original["disabled"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { - transformed["disabled"] = transformedDisabled - } - - transformedExposeHeaders, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(original["expose_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["exposeHeaders"] = transformedExposeHeaders - } - - transformedMaxAge, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(original["max_age"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedMaxAge); val.IsValid() && 
!isEmptyValue(val) { - transformed["maxAge"] = transformedMaxAge - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowMethods(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyAllowOrigins(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyExposeHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionCorsPolicyMaxAge(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAbort, err := 
expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAbort); val.IsValid() && !isEmptyValue(val) { - transformed["abort"] = transformedAbort - } - - transformedDelay, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedDelay); val.IsValid() && !isEmptyValue(val) { - transformed["delay"] = transformedDelay - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHttpStatus, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(original["http_status"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHttpStatus); val.IsValid() && !isEmptyValue(val) { - transformed["httpStatus"] = transformedHttpStatus - } - - transformedPercentage, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { - transformed["percentage"] = transformedPercentage - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFixedDelay, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixed_delay"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedFixedDelay); val.IsValid() && !isEmptyValue(val) { - transformed["fixedDelay"] = transformedFixedDelay - } - - transformedPercentage, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { - transformed["percentage"] = transformedPercentage - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNanos, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = 
transformedNanos - } - - transformedSeconds, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBackendService, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(original["backend_service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { - transformed["backendService"] = transformedBackendService - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionRequestMirrorPolicyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - if v == nil || v.(string) == "" { - - return "", nil - } 
else if resource_compute_url_map_strings.HasPrefix(v.(string), "https://") { - - return v, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "projects/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "regions/") || resource_compute_url_map_strings.HasPrefix(v.(string), "zones/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return "", err - } - - return f.RelativeLink(), nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNumRetries, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(original["num_retries"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedNumRetries); val.IsValid() && !isEmptyValue(val) { - transformed["numRetries"] = transformedNumRetries - } - - transformedPerTryTimeout, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(original["per_try_timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["perTryTimeout"] = transformedPerTryTimeout - } - - transformedRetryConditions, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(original["retry_conditions"], 
d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRetryConditions); val.IsValid() && !isEmptyValue(val) { - transformed["retryConditions"] = transformedRetryConditions - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyNumRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNanos, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - transformedSeconds, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionRetryPolicyRetryConditions(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNanos, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - transformedSeconds, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHostRewrite, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(original["host_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_url_map_reflect.ValueOf(transformedHostRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["hostRewrite"] = transformedHostRewrite - } - - transformedPathPrefixRewrite, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(original["path_prefix_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["pathPrefixRewrite"] = transformedPathPrefixRewrite - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewriteHostRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionUrlRewritePathPrefixRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBackendService, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(original["backend_service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { - transformed["backendService"] = transformedBackendService - } - - transformedHeaderAction, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(original["header_action"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_url_map_reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { - transformed["headerAction"] = transformedHeaderAction - } - - transformedWeight, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(original["weight"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { - transformed["weight"] = transformedWeight - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - if v == nil || v.(string) == "" { - - return "", nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "https://") { - - return v, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "projects/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "regions/") || resource_compute_url_map_strings.HasPrefix(v.(string), "zones/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return "", err - } - - return f.RelativeLink(), nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequestHeadersToAdd, err := 
expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd - } - - transformedRequestHeadersToRemove, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove - } - - transformedResponseHeadersToAdd, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd - } - - transformedResponseHeadersToRemove, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := 
v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = 
transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesRouteActionWeightedBackendServicesWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHostRedirect, err := expandComputeUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(original["host_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["hostRedirect"] = transformedHostRedirect - } - - transformedHttpsRedirect, err := expandComputeUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(original["https_redirect"], d, config) - if err != nil { - 
return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["httpsRedirect"] = transformedHttpsRedirect - } - - transformedPathRedirect, err := expandComputeUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(original["path_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["pathRedirect"] = transformedPathRedirect - } - - transformedPrefixRedirect, err := expandComputeUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["prefixRedirect"] = transformedPrefixRedirect - } - - transformedRedirectResponseCode, err := expandComputeUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { - transformed["redirectResponseCode"] = transformedRedirectResponseCode - } - - transformedStripQuery, err := expandComputeUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(original["strip_query"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { - transformed["stripQuery"] = transformedStripQuery - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherRouteRulesUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHostRedirect, err := expandComputeUrlMapPathMatcherDefaultUrlRedirectHostRedirect(original["host_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["hostRedirect"] = transformedHostRedirect - } - - transformedHttpsRedirect, err := expandComputeUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(original["https_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["httpsRedirect"] = transformedHttpsRedirect - } - - transformedPathRedirect, err := expandComputeUrlMapPathMatcherDefaultUrlRedirectPathRedirect(original["path_redirect"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_url_map_reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["pathRedirect"] = transformedPathRedirect - } - - transformedPrefixRedirect, err := expandComputeUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["prefixRedirect"] = transformedPrefixRedirect - } - - transformedRedirectResponseCode, err := expandComputeUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { - transformed["redirectResponseCode"] = transformedRedirectResponseCode - } - - transformedStripQuery, err := expandComputeUrlMapPathMatcherDefaultUrlRedirectStripQuery(original["strip_query"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { - transformed["stripQuery"] = transformedStripQuery - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherDefaultUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeUrlMapPathMatcherDefaultUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWeightedBackendServices, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServices(original["weighted_backend_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !isEmptyValue(val) { - transformed["weightedBackendServices"] = transformedWeightedBackendServices - } - - transformedUrlRewrite, err := expandComputeUrlMapPathMatcherDefaultRouteActionUrlRewrite(original["url_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["urlRewrite"] = transformedUrlRewrite - } - - transformedTimeout, err := expandComputeUrlMapPathMatcherDefaultRouteActionTimeout(original["timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["timeout"] = transformedTimeout - } - - transformedRetryPolicy, err := expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicy(original["retry_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRetryPolicy); 
val.IsValid() && !isEmptyValue(val) { - transformed["retryPolicy"] = transformedRetryPolicy - } - - transformedRequestMirrorPolicy, err := expandComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicy(original["request_mirror_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["requestMirrorPolicy"] = transformedRequestMirrorPolicy - } - - transformedCorsPolicy, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicy(original["cors_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["corsPolicy"] = transformedCorsPolicy - } - - transformedFaultInjectionPolicy, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicy(original["fault_injection_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["faultInjectionPolicy"] = transformedFaultInjectionPolicy - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBackendService, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesBackendService(original["backend_service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { - 
transformed["backendService"] = transformedBackendService - } - - transformedWeight, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesWeight(original["weight"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { - transformed["weight"] = transformedWeight - } - - transformedHeaderAction, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderAction(original["header_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { - transformed["headerAction"] = transformedHeaderAction - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - if v == nil || v.(string) == "" { - - return "", nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "https://") { - - return v, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "projects/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "regions/") || resource_compute_url_map_strings.HasPrefix(v.(string), "zones/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return "", err - } - - return f.RelativeLink(), nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesWeight(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequestHeadersToRemove, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove - } - - transformedRequestHeadersToAdd, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd - } - - transformedResponseHeadersToRemove, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove - } - - transformedResponseHeadersToAdd, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val 
:= resource_compute_url_map_reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = 
transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionUrlRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPathPrefixRewrite, err := expandComputeUrlMapPathMatcherDefaultRouteActionUrlRewritePathPrefixRewrite(original["path_prefix_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["pathPrefixRewrite"] = transformedPathPrefixRewrite - } - - 
transformedHostRewrite, err := expandComputeUrlMapPathMatcherDefaultRouteActionUrlRewriteHostRewrite(original["host_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHostRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["hostRewrite"] = transformedHostRewrite - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionUrlRewritePathPrefixRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionUrlRewriteHostRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSeconds, err := expandComputeUrlMapPathMatcherDefaultRouteActionTimeoutSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandComputeUrlMapPathMatcherDefaultRouteActionTimeoutNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionTimeoutNanos(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRetryConditions, err := expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyRetryConditions(original["retry_conditions"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRetryConditions); val.IsValid() && !isEmptyValue(val) { - transformed["retryConditions"] = transformedRetryConditions - } - - transformedNumRetries, err := expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyNumRetries(original["num_retries"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedNumRetries); val.IsValid() && !isEmptyValue(val) { - transformed["numRetries"] = transformedNumRetries - } - - transformedPerTryTimeout, err := expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeout(original["per_try_timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["perTryTimeout"] = transformedPerTryTimeout - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyRetryConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyNumRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSeconds, err := expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBackendService, err := expandComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicyBackendService(original["backend_service"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_url_map_reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { - transformed["backendService"] = transformedBackendService - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionRequestMirrorPolicyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - if v == nil || v.(string) == "" { - - return "", nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "https://") { - - return v, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "projects/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "regions/") || resource_compute_url_map_strings.HasPrefix(v.(string), "zones/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return "", err - } - - return f.RelativeLink(), nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowOrigins, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOrigins(original["allow_origins"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !isEmptyValue(val) { - transformed["allowOrigins"] = transformedAllowOrigins - } - - transformedAllowOriginRegexes, err := 
expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOriginRegexes(original["allow_origin_regexes"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !isEmptyValue(val) { - transformed["allowOriginRegexes"] = transformedAllowOriginRegexes - } - - transformedAllowMethods, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowMethods(original["allow_methods"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowMethods); val.IsValid() && !isEmptyValue(val) { - transformed["allowMethods"] = transformedAllowMethods - } - - transformedAllowHeaders, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowHeaders(original["allow_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["allowHeaders"] = transformedAllowHeaders - } - - transformedExposeHeaders, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyExposeHeaders(original["expose_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["exposeHeaders"] = transformedExposeHeaders - } - - transformedMaxAge, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyMaxAge(original["max_age"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedMaxAge); val.IsValid() && !isEmptyValue(val) { - transformed["maxAge"] = transformedMaxAge - } - - transformedAllowCredentials, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowCredentials(original["allow_credentials"], d, config) - if err != nil { - return nil, err - } else if val 
:= resource_compute_url_map_reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !isEmptyValue(val) { - transformed["allowCredentials"] = transformedAllowCredentials - } - - transformedDisabled, err := expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyDisabled(original["disabled"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { - transformed["disabled"] = transformedDisabled - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOrigins(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowMethods(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyExposeHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyMaxAge(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyAllowCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionCorsPolicyDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDelay, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedDelay); val.IsValid() && !isEmptyValue(val) { - transformed["delay"] = transformedDelay - } - - transformedAbort, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAbort); val.IsValid() && !isEmptyValue(val) { - transformed["abort"] = transformedAbort - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFixedDelay, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixed_delay"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedFixedDelay); val.IsValid() && !isEmptyValue(val) { - transformed["fixedDelay"] = transformedFixedDelay - } - - transformedPercentage, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_url_map_reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { - transformed["percentage"] = transformedPercentage - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSeconds, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbort(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHttpStatus, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(original["http_status"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHttpStatus); val.IsValid() && !isEmptyValue(val) { - transformed["httpStatus"] = transformedHttpStatus - } - - transformedPercentage, err := expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { - transformed["percentage"] = transformedPercentage - } - - return transformed, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapPathMatcherDefaultRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapTest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDescription, err := expandComputeUrlMapTestDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = 
transformedDescription - } - - transformedHost, err := expandComputeUrlMapTestHost(original["host"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { - transformed["host"] = transformedHost - } - - transformedPath, err := expandComputeUrlMapTestPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedService, err := expandComputeUrlMapTestService(original["service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedService); val.IsValid() && !isEmptyValue(val) { - transformed["service"] = transformedService - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapTestDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapTestHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapTestPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapTestService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - if v == nil || v.(string) == "" { - - return "", nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "https://") { - - return v, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "projects/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "regions/") || resource_compute_url_map_strings.HasPrefix(v.(string), "zones/") { - - 
url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return "", err - } - - return f.RelativeLink(), nil -} - -func expandComputeUrlMapDefaultUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHostRedirect, err := expandComputeUrlMapDefaultUrlRedirectHostRedirect(original["host_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["hostRedirect"] = transformedHostRedirect - } - - transformedHttpsRedirect, err := expandComputeUrlMapDefaultUrlRedirectHttpsRedirect(original["https_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["httpsRedirect"] = transformedHttpsRedirect - } - - transformedPathRedirect, err := expandComputeUrlMapDefaultUrlRedirectPathRedirect(original["path_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["pathRedirect"] = transformedPathRedirect - } - - transformedPrefixRedirect, err := expandComputeUrlMapDefaultUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["prefixRedirect"] = 
transformedPrefixRedirect - } - - transformedRedirectResponseCode, err := expandComputeUrlMapDefaultUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { - transformed["redirectResponseCode"] = transformedRedirectResponseCode - } - - transformedStripQuery, err := expandComputeUrlMapDefaultUrlRedirectStripQuery(original["strip_query"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedStripQuery); val.IsValid() && !isEmptyValue(val) { - transformed["stripQuery"] = transformedStripQuery - } - - return transformed, nil -} - -func expandComputeUrlMapDefaultUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - 
transformed := make(map[string]interface{}) - - transformedWeightedBackendServices, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServices(original["weighted_backend_services"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedWeightedBackendServices); val.IsValid() && !isEmptyValue(val) { - transformed["weightedBackendServices"] = transformedWeightedBackendServices - } - - transformedUrlRewrite, err := expandComputeUrlMapDefaultRouteActionUrlRewrite(original["url_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["urlRewrite"] = transformedUrlRewrite - } - - transformedTimeout, err := expandComputeUrlMapDefaultRouteActionTimeout(original["timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["timeout"] = transformedTimeout - } - - transformedRetryPolicy, err := expandComputeUrlMapDefaultRouteActionRetryPolicy(original["retry_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRetryPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["retryPolicy"] = transformedRetryPolicy - } - - transformedRequestMirrorPolicy, err := expandComputeUrlMapDefaultRouteActionRequestMirrorPolicy(original["request_mirror_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestMirrorPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["requestMirrorPolicy"] = transformedRequestMirrorPolicy - } - - transformedCorsPolicy, err := expandComputeUrlMapDefaultRouteActionCorsPolicy(original["cors_policy"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_compute_url_map_reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["corsPolicy"] = transformedCorsPolicy - } - - transformedFaultInjectionPolicy, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicy(original["fault_injection_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedFaultInjectionPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["faultInjectionPolicy"] = transformedFaultInjectionPolicy - } - - return transformed, nil -} - -func expandComputeUrlMapDefaultRouteActionWeightedBackendServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBackendService, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesBackendService(original["backend_service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { - transformed["backendService"] = transformedBackendService - } - - transformedWeight, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesWeight(original["weight"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { - transformed["weight"] = transformedWeight - } - - transformedHeaderAction, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderAction(original["header_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { - transformed["headerAction"] = 
transformedHeaderAction - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - if v == nil || v.(string) == "" { - - return "", nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "https://") { - - return v, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "projects/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "regions/") || resource_compute_url_map_strings.HasPrefix(v.(string), "zones/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return "", err - } - - return f.RelativeLink(), nil -} - -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequestHeadersToRemove, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(original["request_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - 
transformed["requestHeadersToRemove"] = transformedRequestHeadersToRemove - } - - transformedRequestHeadersToAdd, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(original["request_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRequestHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToAdd"] = transformedRequestHeadersToAdd - } - - transformedResponseHeadersToRemove, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(original["response_headers_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedResponseHeadersToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToRemove"] = transformedResponseHeadersToRemove - } - - transformedResponseHeadersToAdd, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(original["response_headers_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedResponseHeadersToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToAdd"] = transformedResponseHeadersToAdd - } - - return transformed, nil -} - -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedHeaderName, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionRequestHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToRemove(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddHeaderValue(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionWeightedBackendServicesHeaderActionResponseHeadersToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionUrlRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPathPrefixRewrite, err := expandComputeUrlMapDefaultRouteActionUrlRewritePathPrefixRewrite(original["path_prefix_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["pathPrefixRewrite"] = transformedPathPrefixRewrite - } - - transformedHostRewrite, err := expandComputeUrlMapDefaultRouteActionUrlRewriteHostRewrite(original["host_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHostRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["hostRewrite"] = transformedHostRewrite - } - - return transformed, nil -} - -func expandComputeUrlMapDefaultRouteActionUrlRewritePathPrefixRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionUrlRewriteHostRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedSeconds, err := expandComputeUrlMapDefaultRouteActionTimeoutSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandComputeUrlMapDefaultRouteActionTimeoutNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandComputeUrlMapDefaultRouteActionTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionRetryPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRetryConditions, err := expandComputeUrlMapDefaultRouteActionRetryPolicyRetryConditions(original["retry_conditions"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedRetryConditions); val.IsValid() && !isEmptyValue(val) { - transformed["retryConditions"] = transformedRetryConditions - } - - transformedNumRetries, err := expandComputeUrlMapDefaultRouteActionRetryPolicyNumRetries(original["num_retries"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedNumRetries); val.IsValid() && !isEmptyValue(val) { - 
transformed["numRetries"] = transformedNumRetries - } - - transformedPerTryTimeout, err := expandComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(original["per_try_timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPerTryTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["perTryTimeout"] = transformedPerTryTimeout - } - - return transformed, nil -} - -func expandComputeUrlMapDefaultRouteActionRetryPolicyRetryConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionRetryPolicyNumRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSeconds, err := expandComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeUrlMapDefaultRouteActionRetryPolicyPerTryTimeoutNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionRequestMirrorPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBackendService, err := expandComputeUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(original["backend_service"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedBackendService); val.IsValid() && !isEmptyValue(val) { - transformed["backendService"] = transformedBackendService - } - - return transformed, nil -} - -func expandComputeUrlMapDefaultRouteActionRequestMirrorPolicyBackendService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - if v == nil || v.(string) == "" { - - return "", nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "https://") { - - return v, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "projects/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return url, nil - } else if resource_compute_url_map_strings.HasPrefix(v.(string), "regions/") || resource_compute_url_map_strings.HasPrefix(v.(string), "zones/") { - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/") - if err != nil { - return nil, err - } - return url + v.(string), nil - } - - f, err := parseGlobalFieldValue("backendServices", v.(string), "project", d, config, true) - if err != nil { - return "", err - } - - return f.RelativeLink(), nil -} - -func expandComputeUrlMapDefaultRouteActionCorsPolicy(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowOrigins, err := expandComputeUrlMapDefaultRouteActionCorsPolicyAllowOrigins(original["allow_origins"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !isEmptyValue(val) { - transformed["allowOrigins"] = transformedAllowOrigins - } - - transformedAllowOriginRegexes, err := expandComputeUrlMapDefaultRouteActionCorsPolicyAllowOriginRegexes(original["allow_origin_regexes"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowOriginRegexes); val.IsValid() && !isEmptyValue(val) { - transformed["allowOriginRegexes"] = transformedAllowOriginRegexes - } - - transformedAllowMethods, err := expandComputeUrlMapDefaultRouteActionCorsPolicyAllowMethods(original["allow_methods"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowMethods); val.IsValid() && !isEmptyValue(val) { - transformed["allowMethods"] = transformedAllowMethods - } - - transformedAllowHeaders, err := expandComputeUrlMapDefaultRouteActionCorsPolicyAllowHeaders(original["allow_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["allowHeaders"] = transformedAllowHeaders - } - - transformedExposeHeaders, err := expandComputeUrlMapDefaultRouteActionCorsPolicyExposeHeaders(original["expose_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !isEmptyValue(val) { - 
transformed["exposeHeaders"] = transformedExposeHeaders - } - - transformedMaxAge, err := expandComputeUrlMapDefaultRouteActionCorsPolicyMaxAge(original["max_age"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedMaxAge); val.IsValid() && !isEmptyValue(val) { - transformed["maxAge"] = transformedMaxAge - } - - transformedAllowCredentials, err := expandComputeUrlMapDefaultRouteActionCorsPolicyAllowCredentials(original["allow_credentials"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !isEmptyValue(val) { - transformed["allowCredentials"] = transformedAllowCredentials - } - - transformedDisabled, err := expandComputeUrlMapDefaultRouteActionCorsPolicyDisabled(original["disabled"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { - transformed["disabled"] = transformedDisabled - } - - return transformed, nil -} - -func expandComputeUrlMapDefaultRouteActionCorsPolicyAllowOrigins(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionCorsPolicyAllowOriginRegexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionCorsPolicyAllowMethods(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionCorsPolicyAllowHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionCorsPolicyExposeHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandComputeUrlMapDefaultRouteActionCorsPolicyMaxAge(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionCorsPolicyAllowCredentials(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionCorsPolicyDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDelay, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelay(original["delay"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedDelay); val.IsValid() && !isEmptyValue(val) { - transformed["delay"] = transformedDelay - } - - transformedAbort, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbort(original["abort"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedAbort); val.IsValid() && !isEmptyValue(val) { - transformed["abort"] = transformedAbort - } - - return transformed, nil -} - -func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFixedDelay, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(original["fixed_delay"], d, config) - if err != nil { - return nil, err 
- } else if val := resource_compute_url_map_reflect.ValueOf(transformedFixedDelay); val.IsValid() && !isEmptyValue(val) { - transformed["fixedDelay"] = transformedFixedDelay - } - - transformedPercentage, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayPercentage(original["percentage"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { - transformed["percentage"] = transformedPercentage - } - - return transformed, nil -} - -func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSeconds, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayFixedDelayNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - 
-func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyDelayPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHttpStatus, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(original["http_status"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedHttpStatus); val.IsValid() && !isEmptyValue(val) { - transformed["httpStatus"] = transformedHttpStatus - } - - transformedPercentage, err := expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortPercentage(original["percentage"], d, config) - if err != nil { - return nil, err - } else if val := resource_compute_url_map_reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { - transformed["percentage"] = transformedPercentage - } - - return transformed, nil -} - -func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortHttpStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeUrlMapDefaultRouteActionFaultInjectionPolicyAbortPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceComputeVpnGateway() *resource_compute_vpn_gateway_schema.Resource { - return &resource_compute_vpn_gateway_schema.Resource{ - Create: resourceComputeVpnGatewayCreate, - Read: resourceComputeVpnGatewayRead, - Delete: resourceComputeVpnGatewayDelete, - - Importer: &resource_compute_vpn_gateway_schema.ResourceImporter{ - State: resourceComputeVpnGatewayImport, - }, - - 
Timeouts: &resource_compute_vpn_gateway_schema.ResourceTimeout{ - Create: resource_compute_vpn_gateway_schema.DefaultTimeout(4 * resource_compute_vpn_gateway_time.Minute), - Delete: resource_compute_vpn_gateway_schema.DefaultTimeout(4 * resource_compute_vpn_gateway_time.Minute), - }, - - Schema: map[string]*resource_compute_vpn_gateway_schema.Schema{ - "name": { - Type: resource_compute_vpn_gateway_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. Provided by the client when the resource is -created. The name must be 1-63 characters long, and comply with -RFC1035. Specifically, the name must be 1-63 characters long and -match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means -the first character must be a lowercase letter, and all following -characters must be a dash, lowercase letter, or digit, except the last -character, which cannot be a dash.`, - }, - "network": { - Type: resource_compute_vpn_gateway_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The network this VPN gateway is accepting traffic for.`, - }, - "description": { - Type: resource_compute_vpn_gateway_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "region": { - Type: resource_compute_vpn_gateway_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The region this gateway should sit in.`, - }, - "creation_timestamp": { - Type: resource_compute_vpn_gateway_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "gateway_id": { - Type: resource_compute_vpn_gateway_schema.TypeInt, - Computed: true, - Description: `The unique identifier for the resource.`, - }, - "project": { - Type: resource_compute_vpn_gateway_schema.TypeString, - Optional: true, - Computed: true, - 
ForceNew: true, - }, - "self_link": { - Type: resource_compute_vpn_gateway_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeVpnGatewayCreate(d *resource_compute_vpn_gateway_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandComputeVpnGatewayDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_compute_vpn_gateway_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_vpn_gateway_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - nameProp, err := expandComputeVpnGatewayName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_vpn_gateway_reflect.ValueOf(nameProp)) && (ok || !resource_compute_vpn_gateway_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - networkProp, err := expandComputeVpnGatewayNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_compute_vpn_gateway_reflect.ValueOf(networkProp)) && (ok || !resource_compute_vpn_gateway_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - regionProp, err := expandComputeVpnGatewayRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_vpn_gateway_reflect.ValueOf(regionProp)) && (ok || !resource_compute_vpn_gateway_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetVpnGateways") - if err != nil { - return err - } - - 
resource_compute_vpn_gateway_log.Printf("[DEBUG] Creating new VpnGateway: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_vpn_gateway_fmt.Errorf("Error fetching project for VpnGateway: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_compute_vpn_gateway_schema.TimeoutCreate)) - if err != nil { - return resource_compute_vpn_gateway_fmt.Errorf("Error creating VpnGateway: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}") - if err != nil { - return resource_compute_vpn_gateway_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating VpnGateway", userAgent, - d.Timeout(resource_compute_vpn_gateway_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_vpn_gateway_fmt.Errorf("Error waiting to create VpnGateway: %s", err) - } - - resource_compute_vpn_gateway_log.Printf("[DEBUG] Finished creating VpnGateway %q: %#v", d.Id(), res) - - return resourceComputeVpnGatewayRead(d, meta) -} - -func resourceComputeVpnGatewayRead(d *resource_compute_vpn_gateway_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_vpn_gateway_fmt.Errorf("Error fetching project for VpnGateway: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); 
err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_vpn_gateway_fmt.Sprintf("ComputeVpnGateway %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_vpn_gateway_fmt.Errorf("Error reading VpnGateway: %s", err) - } - - if err := d.Set("creation_timestamp", flattenComputeVpnGatewayCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_vpn_gateway_fmt.Errorf("Error reading VpnGateway: %s", err) - } - if err := d.Set("description", flattenComputeVpnGatewayDescription(res["description"], d, config)); err != nil { - return resource_compute_vpn_gateway_fmt.Errorf("Error reading VpnGateway: %s", err) - } - if err := d.Set("name", flattenComputeVpnGatewayName(res["name"], d, config)); err != nil { - return resource_compute_vpn_gateway_fmt.Errorf("Error reading VpnGateway: %s", err) - } - if err := d.Set("gateway_id", flattenComputeVpnGatewayGatewayId(res["id"], d, config)); err != nil { - return resource_compute_vpn_gateway_fmt.Errorf("Error reading VpnGateway: %s", err) - } - if err := d.Set("network", flattenComputeVpnGatewayNetwork(res["network"], d, config)); err != nil { - return resource_compute_vpn_gateway_fmt.Errorf("Error reading VpnGateway: %s", err) - } - if err := d.Set("region", flattenComputeVpnGatewayRegion(res["region"], d, config)); err != nil { - return resource_compute_vpn_gateway_fmt.Errorf("Error reading VpnGateway: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_vpn_gateway_fmt.Errorf("Error reading VpnGateway: %s", err) - } - - return nil -} - -func resourceComputeVpnGatewayDelete(d *resource_compute_vpn_gateway_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - 
if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_vpn_gateway_fmt.Errorf("Error fetching project for VpnGateway: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_vpn_gateway_log.Printf("[DEBUG] Deleting VpnGateway %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_vpn_gateway_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "VpnGateway") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting VpnGateway", userAgent, - d.Timeout(resource_compute_vpn_gateway_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_vpn_gateway_log.Printf("[DEBUG] Finished deleting VpnGateway %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeVpnGatewayImport(d *resource_compute_vpn_gateway_schema.ResourceData, meta interface{}) ([]*resource_compute_vpn_gateway_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/targetVpnGateways/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}") - if err != nil { - return nil, resource_compute_vpn_gateway_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_compute_vpn_gateway_schema.ResourceData{d}, nil -} - -func flattenComputeVpnGatewayCreationTimestamp(v interface{}, d 
*resource_compute_vpn_gateway_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnGatewayDescription(v interface{}, d *resource_compute_vpn_gateway_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnGatewayName(v interface{}, d *resource_compute_vpn_gateway_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnGatewayGatewayId(v interface{}, d *resource_compute_vpn_gateway_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_vpn_gateway_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeVpnGatewayNetwork(v interface{}, d *resource_compute_vpn_gateway_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeVpnGatewayRegion(v interface{}, d *resource_compute_vpn_gateway_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeVpnGatewayDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnGatewayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnGatewayNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("networks", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_vpn_gateway_fmt.Errorf("Invalid value for network: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeVpnGatewayRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := 
parseGlobalFieldValue("regions", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_vpn_gateway_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func validatePeerAddr(i interface{}, val string) ([]string, []error) { - ip := resource_compute_vpn_tunnel_net.ParseIP(i.(string)) - if ip == nil { - return nil, []error{resource_compute_vpn_tunnel_fmt.Errorf("could not parse %q to IP address", val)} - } - for _, test := range invalidPeerAddrs { - if resource_compute_vpn_tunnel_bytes.Compare(ip, test.from) >= 0 && resource_compute_vpn_tunnel_bytes.Compare(ip, test.to) <= 0 { - return nil, []error{resource_compute_vpn_tunnel_fmt.Errorf("address is invalid (is between %q and %q, conflicting with RFC5735)", test.from, test.to)} - } - } - return nil, nil -} - -var invalidPeerAddrs = []struct { - from resource_compute_vpn_tunnel_net.IP - to resource_compute_vpn_tunnel_net.IP -}{ - { - from: resource_compute_vpn_tunnel_net.ParseIP("0.0.0.0"), - to: resource_compute_vpn_tunnel_net.ParseIP("0.255.255.255"), - }, - { - from: resource_compute_vpn_tunnel_net.ParseIP("10.0.0.0"), - to: resource_compute_vpn_tunnel_net.ParseIP("10.255.255.255"), - }, - { - from: resource_compute_vpn_tunnel_net.ParseIP("127.0.0.0"), - to: resource_compute_vpn_tunnel_net.ParseIP("127.255.255.255"), - }, - { - from: resource_compute_vpn_tunnel_net.ParseIP("169.254.0.0"), - to: resource_compute_vpn_tunnel_net.ParseIP("169.254.255.255"), - }, - { - from: resource_compute_vpn_tunnel_net.ParseIP("172.16.0.0"), - to: resource_compute_vpn_tunnel_net.ParseIP("172.31.255.255"), - }, - { - from: resource_compute_vpn_tunnel_net.ParseIP("192.0.0.0"), - to: resource_compute_vpn_tunnel_net.ParseIP("192.0.0.255"), - }, - { - from: resource_compute_vpn_tunnel_net.ParseIP("192.0.2.0"), - to: resource_compute_vpn_tunnel_net.ParseIP("192.0.2.255"), - }, - { - from: resource_compute_vpn_tunnel_net.ParseIP("192.88.99.0"), - to: 
resource_compute_vpn_tunnel_net.ParseIP("192.88.99.255"), - }, - { - from: resource_compute_vpn_tunnel_net.ParseIP("192.168.0.0"), - to: resource_compute_vpn_tunnel_net.ParseIP("192.168.255.255"), - }, - { - from: resource_compute_vpn_tunnel_net.ParseIP("198.18.0.0"), - to: resource_compute_vpn_tunnel_net.ParseIP("198.19.255.255"), - }, - { - from: resource_compute_vpn_tunnel_net.ParseIP("198.51.100.0"), - to: resource_compute_vpn_tunnel_net.ParseIP("198.51.100.255"), - }, - { - from: resource_compute_vpn_tunnel_net.ParseIP("203.0.113.0"), - to: resource_compute_vpn_tunnel_net.ParseIP("203.0.113.255"), - }, - { - from: resource_compute_vpn_tunnel_net.ParseIP("224.0.0.0"), - to: resource_compute_vpn_tunnel_net.ParseIP("239.255.255.255"), - }, - { - from: resource_compute_vpn_tunnel_net.ParseIP("240.0.0.0"), - to: resource_compute_vpn_tunnel_net.ParseIP("255.255.255.255"), - }, - { - from: resource_compute_vpn_tunnel_net.ParseIP("255.255.255.255"), - to: resource_compute_vpn_tunnel_net.ParseIP("255.255.255.255"), - }, -} - -func getVpnTunnelLink(config *Config, project, region, tunnel, userAgent string) (string, error) { - if !resource_compute_vpn_tunnel_strings.Contains(tunnel, "/") { - - tunnelData, err := config.NewComputeClient(userAgent).VpnTunnels.Get( - project, region, tunnel).Do() - if err != nil { - return "", resource_compute_vpn_tunnel_fmt.Errorf("Error reading tunnel: %s", err) - } - tunnel = tunnelData.SelfLink - } - - return tunnel, nil - -} - -func resourceComputeVpnTunnel() *resource_compute_vpn_tunnel_schema.Resource { - return &resource_compute_vpn_tunnel_schema.Resource{ - Create: resourceComputeVpnTunnelCreate, - Read: resourceComputeVpnTunnelRead, - Delete: resourceComputeVpnTunnelDelete, - - Importer: &resource_compute_vpn_tunnel_schema.ResourceImporter{ - State: resourceComputeVpnTunnelImport, - }, - - Timeouts: &resource_compute_vpn_tunnel_schema.ResourceTimeout{ - Create: resource_compute_vpn_tunnel_schema.DefaultTimeout(4 * 
resource_compute_vpn_tunnel_time.Minute), - Delete: resource_compute_vpn_tunnel_schema.DefaultTimeout(4 * resource_compute_vpn_tunnel_time.Minute), - }, - - Schema: map[string]*resource_compute_vpn_tunnel_schema.Schema{ - "name": { - Type: resource_compute_vpn_tunnel_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource. The name must be 1-63 characters long, and -comply with RFC1035. Specifically, the name must be 1-63 -characters long and match the regular expression -'[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character -must be a lowercase letter, and all following characters must -be a dash, lowercase letter, or digit, -except the last character, which cannot be a dash.`, - }, - "shared_secret": { - Type: resource_compute_vpn_tunnel_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Shared secret used to set the secure session between the Cloud VPN -gateway and the peer VPN gateway.`, - Sensitive: true, - }, - "description": { - Type: resource_compute_vpn_tunnel_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An optional description of this resource.`, - }, - "ike_version": { - Type: resource_compute_vpn_tunnel_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `IKE protocol version to use when establishing the VPN tunnel with -peer VPN gateway. -Acceptable IKE versions are 1 or 2. Default version is 2.`, - Default: 2, - }, - "local_traffic_selector": { - Type: resource_compute_vpn_tunnel_schema.TypeSet, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Local traffic selector to use when establishing the VPN tunnel with -peer VPN gateway. The value should be a CIDR formatted string, -for example '192.168.0.0/16'. The ranges should be disjoint. 
-Only IPv4 is supported.`, - Elem: &resource_compute_vpn_tunnel_schema.Schema{ - Type: resource_compute_vpn_tunnel_schema.TypeString, - }, - Set: resource_compute_vpn_tunnel_schema.HashString, - }, - "peer_external_gateway": { - Type: resource_compute_vpn_tunnel_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the peer side external VPN gateway to which this VPN tunnel is connected.`, - ConflictsWith: []string{"peer_gcp_gateway"}, - }, - "peer_external_gateway_interface": { - Type: resource_compute_vpn_tunnel_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The interface ID of the external VPN gateway to which this VPN tunnel is connected.`, - }, - "peer_gcp_gateway": { - Type: resource_compute_vpn_tunnel_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. -If provided, the VPN tunnel will automatically use the same vpn_gateway_interface -ID in the peer GCP VPN gateway. -This field must reference a 'google_compute_ha_vpn_gateway' resource.`, - ConflictsWith: []string{"peer_external_gateway"}, - }, - "peer_ip": { - Type: resource_compute_vpn_tunnel_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validatePeerAddr, - Description: `IP address of the peer VPN gateway. Only IPv4 is supported.`, - }, - "region": { - Type: resource_compute_vpn_tunnel_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The region where the tunnel is located. 
If unset, is set to the region of 'target_vpn_gateway'.`, - }, - "remote_traffic_selector": { - Type: resource_compute_vpn_tunnel_schema.TypeSet, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Remote traffic selector to use when establishing the VPN tunnel with -peer VPN gateway. The value should be a CIDR formatted string, -for example '192.168.0.0/16'. The ranges should be disjoint. -Only IPv4 is supported.`, - Elem: &resource_compute_vpn_tunnel_schema.Schema{ - Type: resource_compute_vpn_tunnel_schema.TypeString, - }, - Set: resource_compute_vpn_tunnel_schema.HashString, - }, - "router": { - Type: resource_compute_vpn_tunnel_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of router resource to be used for dynamic routing.`, - }, - "target_vpn_gateway": { - Type: resource_compute_vpn_tunnel_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the Target VPN gateway with which this VPN tunnel is -associated.`, - }, - "vpn_gateway": { - Type: resource_compute_vpn_tunnel_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `URL of the VPN gateway with which this VPN tunnel is associated. -This must be used if a High Availability VPN gateway resource is created. 
-This field must reference a 'google_compute_ha_vpn_gateway' resource.`, - }, - "vpn_gateway_interface": { - Type: resource_compute_vpn_tunnel_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The interface ID of the VPN gateway with which this VPN tunnel is associated.`, - }, - "creation_timestamp": { - Type: resource_compute_vpn_tunnel_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "detailed_status": { - Type: resource_compute_vpn_tunnel_schema.TypeString, - Computed: true, - Description: `Detailed status message for the VPN tunnel.`, - }, - "shared_secret_hash": { - Type: resource_compute_vpn_tunnel_schema.TypeString, - Computed: true, - Description: `Hash of the shared secret.`, - }, - "tunnel_id": { - Type: resource_compute_vpn_tunnel_schema.TypeString, - Computed: true, - Description: `The unique identifier for the resource. This identifier is defined by the server.`, - }, - "project": { - Type: resource_compute_vpn_tunnel_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_compute_vpn_tunnel_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceComputeVpnTunnelCreate(d *resource_compute_vpn_tunnel_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandComputeVpnTunnelName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_compute_vpn_tunnel_reflect.ValueOf(nameProp)) && (ok || !resource_compute_vpn_tunnel_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandComputeVpnTunnelDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("description"); !isEmptyValue(resource_compute_vpn_tunnel_reflect.ValueOf(descriptionProp)) && (ok || !resource_compute_vpn_tunnel_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - targetVpnGatewayProp, err := expandComputeVpnTunnelTargetVpnGateway(d.Get("target_vpn_gateway"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target_vpn_gateway"); !isEmptyValue(resource_compute_vpn_tunnel_reflect.ValueOf(targetVpnGatewayProp)) && (ok || !resource_compute_vpn_tunnel_reflect.DeepEqual(v, targetVpnGatewayProp)) { - obj["targetVpnGateway"] = targetVpnGatewayProp - } - vpnGatewayProp, err := expandComputeVpnTunnelVpnGateway(d.Get("vpn_gateway"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vpn_gateway"); !isEmptyValue(resource_compute_vpn_tunnel_reflect.ValueOf(vpnGatewayProp)) && (ok || !resource_compute_vpn_tunnel_reflect.DeepEqual(v, vpnGatewayProp)) { - obj["vpnGateway"] = vpnGatewayProp - } - vpnGatewayInterfaceProp, err := expandComputeVpnTunnelVpnGatewayInterface(d.Get("vpn_gateway_interface"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vpn_gateway_interface"); ok || !resource_compute_vpn_tunnel_reflect.DeepEqual(v, vpnGatewayInterfaceProp) { - obj["vpnGatewayInterface"] = vpnGatewayInterfaceProp - } - peerExternalGatewayProp, err := expandComputeVpnTunnelPeerExternalGateway(d.Get("peer_external_gateway"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peer_external_gateway"); !isEmptyValue(resource_compute_vpn_tunnel_reflect.ValueOf(peerExternalGatewayProp)) && (ok || !resource_compute_vpn_tunnel_reflect.DeepEqual(v, peerExternalGatewayProp)) { - obj["peerExternalGateway"] = peerExternalGatewayProp - } - peerExternalGatewayInterfaceProp, err := expandComputeVpnTunnelPeerExternalGatewayInterface(d.Get("peer_external_gateway_interface"), d, config) - if err != nil { - return err - } else if 
v, ok := d.GetOkExists("peer_external_gateway_interface"); ok || !resource_compute_vpn_tunnel_reflect.DeepEqual(v, peerExternalGatewayInterfaceProp) { - obj["peerExternalGatewayInterface"] = peerExternalGatewayInterfaceProp - } - peerGcpGatewayProp, err := expandComputeVpnTunnelPeerGcpGateway(d.Get("peer_gcp_gateway"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peer_gcp_gateway"); !isEmptyValue(resource_compute_vpn_tunnel_reflect.ValueOf(peerGcpGatewayProp)) && (ok || !resource_compute_vpn_tunnel_reflect.DeepEqual(v, peerGcpGatewayProp)) { - obj["peerGcpGateway"] = peerGcpGatewayProp - } - routerProp, err := expandComputeVpnTunnelRouter(d.Get("router"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("router"); !isEmptyValue(resource_compute_vpn_tunnel_reflect.ValueOf(routerProp)) && (ok || !resource_compute_vpn_tunnel_reflect.DeepEqual(v, routerProp)) { - obj["router"] = routerProp - } - peerIpProp, err := expandComputeVpnTunnelPeerIp(d.Get("peer_ip"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peer_ip"); !isEmptyValue(resource_compute_vpn_tunnel_reflect.ValueOf(peerIpProp)) && (ok || !resource_compute_vpn_tunnel_reflect.DeepEqual(v, peerIpProp)) { - obj["peerIp"] = peerIpProp - } - sharedSecretProp, err := expandComputeVpnTunnelSharedSecret(d.Get("shared_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("shared_secret"); !isEmptyValue(resource_compute_vpn_tunnel_reflect.ValueOf(sharedSecretProp)) && (ok || !resource_compute_vpn_tunnel_reflect.DeepEqual(v, sharedSecretProp)) { - obj["sharedSecret"] = sharedSecretProp - } - ikeVersionProp, err := expandComputeVpnTunnelIkeVersion(d.Get("ike_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ike_version"); !isEmptyValue(resource_compute_vpn_tunnel_reflect.ValueOf(ikeVersionProp)) && (ok || !resource_compute_vpn_tunnel_reflect.DeepEqual(v, 
ikeVersionProp)) { - obj["ikeVersion"] = ikeVersionProp - } - localTrafficSelectorProp, err := expandComputeVpnTunnelLocalTrafficSelector(d.Get("local_traffic_selector"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("local_traffic_selector"); !isEmptyValue(resource_compute_vpn_tunnel_reflect.ValueOf(localTrafficSelectorProp)) && (ok || !resource_compute_vpn_tunnel_reflect.DeepEqual(v, localTrafficSelectorProp)) { - obj["localTrafficSelector"] = localTrafficSelectorProp - } - remoteTrafficSelectorProp, err := expandComputeVpnTunnelRemoteTrafficSelector(d.Get("remote_traffic_selector"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("remote_traffic_selector"); !isEmptyValue(resource_compute_vpn_tunnel_reflect.ValueOf(remoteTrafficSelectorProp)) && (ok || !resource_compute_vpn_tunnel_reflect.DeepEqual(v, remoteTrafficSelectorProp)) { - obj["remoteTrafficSelector"] = remoteTrafficSelectorProp - } - regionProp, err := expandComputeVpnTunnelRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_compute_vpn_tunnel_reflect.ValueOf(regionProp)) && (ok || !resource_compute_vpn_tunnel_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - - obj, err = resourceComputeVpnTunnelEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnTunnels") - if err != nil { - return err - } - - resource_compute_vpn_tunnel_log.Printf("[DEBUG] Creating new VpnTunnel: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error fetching project for VpnTunnel: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", 
billingProject, url, userAgent, obj, d.Timeout(resource_compute_vpn_tunnel_schema.TimeoutCreate)) - if err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error creating VpnTunnel: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}") - if err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = computeOperationWaitTime( - config, res, project, "Creating VpnTunnel", userAgent, - d.Timeout(resource_compute_vpn_tunnel_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_compute_vpn_tunnel_fmt.Errorf("Error waiting to create VpnTunnel: %s", err) - } - - resource_compute_vpn_tunnel_log.Printf("[DEBUG] Finished creating VpnTunnel %q: %#v", d.Id(), res) - - return resourceComputeVpnTunnelRead(d, meta) -} - -func resourceComputeVpnTunnelRead(d *resource_compute_vpn_tunnel_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error fetching project for VpnTunnel: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_compute_vpn_tunnel_fmt.Sprintf("ComputeVpnTunnel %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - - if err := d.Set("tunnel_id", flattenComputeVpnTunnelTunnelId(res["id"], d, 
config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("creation_timestamp", flattenComputeVpnTunnelCreationTimestamp(res["creationTimestamp"], d, config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("name", flattenComputeVpnTunnelName(res["name"], d, config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("description", flattenComputeVpnTunnelDescription(res["description"], d, config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("target_vpn_gateway", flattenComputeVpnTunnelTargetVpnGateway(res["targetVpnGateway"], d, config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("vpn_gateway", flattenComputeVpnTunnelVpnGateway(res["vpnGateway"], d, config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("vpn_gateway_interface", flattenComputeVpnTunnelVpnGatewayInterface(res["vpnGatewayInterface"], d, config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("peer_external_gateway", flattenComputeVpnTunnelPeerExternalGateway(res["peerExternalGateway"], d, config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("peer_external_gateway_interface", flattenComputeVpnTunnelPeerExternalGatewayInterface(res["peerExternalGatewayInterface"], d, config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("peer_gcp_gateway", flattenComputeVpnTunnelPeerGcpGateway(res["peerGcpGateway"], d, config)); err != nil { - return 
resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("router", flattenComputeVpnTunnelRouter(res["router"], d, config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("peer_ip", flattenComputeVpnTunnelPeerIp(res["peerIp"], d, config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("shared_secret_hash", flattenComputeVpnTunnelSharedSecretHash(res["sharedSecretHash"], d, config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("ike_version", flattenComputeVpnTunnelIkeVersion(res["ikeVersion"], d, config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("local_traffic_selector", flattenComputeVpnTunnelLocalTrafficSelector(res["localTrafficSelector"], d, config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("remote_traffic_selector", flattenComputeVpnTunnelRemoteTrafficSelector(res["remoteTrafficSelector"], d, config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("detailed_status", flattenComputeVpnTunnelDetailedStatus(res["detailedStatus"], d, config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("region", flattenComputeVpnTunnelRegion(res["region"], d, config)); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error reading VpnTunnel: %s", err) - } - - return nil -} - -func resourceComputeVpnTunnelDelete(d 
*resource_compute_vpn_tunnel_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_compute_vpn_tunnel_fmt.Errorf("Error fetching project for VpnTunnel: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_compute_vpn_tunnel_log.Printf("[DEBUG] Deleting VpnTunnel %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_compute_vpn_tunnel_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "VpnTunnel") - } - - err = computeOperationWaitTime( - config, res, project, "Deleting VpnTunnel", userAgent, - d.Timeout(resource_compute_vpn_tunnel_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_compute_vpn_tunnel_log.Printf("[DEBUG] Finished deleting VpnTunnel %q: %#v", d.Id(), res) - return nil -} - -func resourceComputeVpnTunnelImport(d *resource_compute_vpn_tunnel_schema.ResourceData, meta interface{}) ([]*resource_compute_vpn_tunnel_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/regions/(?P[^/]+)/vpnTunnels/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}") - if err != nil { - return nil, resource_compute_vpn_tunnel_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return 
[]*resource_compute_vpn_tunnel_schema.ResourceData{d}, nil -} - -func flattenComputeVpnTunnelTunnelId(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnTunnelCreationTimestamp(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnTunnelName(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnTunnelDescription(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnTunnelTargetVpnGateway(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeVpnTunnelVpnGateway(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeVpnTunnelVpnGatewayInterface(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_vpn_tunnel_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeVpnTunnelPeerExternalGateway(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeVpnTunnelPeerExternalGatewayInterface(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := 
resource_compute_vpn_tunnel_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeVpnTunnelPeerGcpGateway(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeVpnTunnelRouter(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenComputeVpnTunnelPeerIp(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnTunnelSharedSecretHash(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnTunnelIkeVersion(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_compute_vpn_tunnel_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenComputeVpnTunnelLocalTrafficSelector(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_vpn_tunnel_schema.NewSet(resource_compute_vpn_tunnel_schema.HashString, v.([]interface{})) -} - -func flattenComputeVpnTunnelRemoteTrafficSelector(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_compute_vpn_tunnel_schema.NewSet(resource_compute_vpn_tunnel_schema.HashString, v.([]interface{})) -} - -func flattenComputeVpnTunnelDetailedStatus(v interface{}, 
d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenComputeVpnTunnelRegion(v interface{}, d *resource_compute_vpn_tunnel_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandComputeVpnTunnelName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnTunnelDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnTunnelTargetVpnGateway(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("targetVpnGateways", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_vpn_tunnel_fmt.Errorf("Invalid value for target_vpn_gateway: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeVpnTunnelVpnGateway(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("vpnGateways", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_vpn_tunnel_fmt.Errorf("Invalid value for vpn_gateway: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeVpnTunnelVpnGatewayInterface(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnTunnelPeerExternalGateway(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("externalVpnGateways", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_compute_vpn_tunnel_fmt.Errorf("Invalid value for peer_external_gateway: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeVpnTunnelPeerExternalGatewayInterface(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnTunnelPeerGcpGateway(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("vpnGateways", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_vpn_tunnel_fmt.Errorf("Invalid value for peer_gcp_gateway: %s", err) - } - return f.RelativeLink(), nil -} - -func expandComputeVpnTunnelRouter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil || v.(string) == "" { - return "", nil - } - f, err := parseRegionalFieldValue("routers", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_compute_vpn_tunnel_fmt.Errorf("Invalid value for router: %s", err) - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+f.RelativeLink()) - if err != nil { - return nil, err - } - - return url, nil -} - -func expandComputeVpnTunnelPeerIp(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnTunnelSharedSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnTunnelIkeVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandComputeVpnTunnelLocalTrafficSelector(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_vpn_tunnel_schema.Set).List() - return v, nil -} - -func expandComputeVpnTunnelRemoteTrafficSelector(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_compute_vpn_tunnel_schema.Set).List() - return v, nil -} - -func expandComputeVpnTunnelRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, 
true) - if err != nil { - return nil, resource_compute_vpn_tunnel_fmt.Errorf("Invalid value for region: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceComputeVpnTunnelEncoder(d *resource_compute_vpn_tunnel_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - f, err := parseRegionalFieldValue("targetVpnGateways", d.Get("target_vpn_gateway").(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, err - } - if _, ok := d.GetOk("project"); !ok { - if err := d.Set("project", f.Project); err != nil { - return nil, resource_compute_vpn_tunnel_fmt.Errorf("Error setting project: %s", err) - } - } - if _, ok := d.GetOk("region"); !ok { - if err := d.Set("region", f.Region); err != nil { - return nil, resource_compute_vpn_tunnel_fmt.Errorf("Error setting region: %s", err) - } - } - return obj, nil -} - -func resourceContainerAnalysisNote() *resource_container_analysis_note_schema.Resource { - return &resource_container_analysis_note_schema.Resource{ - Create: resourceContainerAnalysisNoteCreate, - Read: resourceContainerAnalysisNoteRead, - Update: resourceContainerAnalysisNoteUpdate, - Delete: resourceContainerAnalysisNoteDelete, - - Importer: &resource_container_analysis_note_schema.ResourceImporter{ - State: resourceContainerAnalysisNoteImport, - }, - - Timeouts: &resource_container_analysis_note_schema.ResourceTimeout{ - Create: resource_container_analysis_note_schema.DefaultTimeout(4 * resource_container_analysis_note_time.Minute), - Update: resource_container_analysis_note_schema.DefaultTimeout(4 * resource_container_analysis_note_time.Minute), - Delete: resource_container_analysis_note_schema.DefaultTimeout(4 * resource_container_analysis_note_time.Minute), - }, - - Schema: map[string]*resource_container_analysis_note_schema.Schema{ - "attestation_authority": { - Type: resource_container_analysis_note_schema.TypeList, - Required: true, - 
Description: `Note kind that represents a logical attestation "role" or "authority". -For example, an organization might have one AttestationAuthority for -"QA" and one for "build". This Note is intended to act strictly as a -grouping mechanism for the attached Occurrences (Attestations). This -grouping mechanism also provides a security boundary, since IAM ACLs -gate the ability for a principle to attach an Occurrence to a given -Note. It also provides a single point of lookup to find all attached -Attestation Occurrences, even if they don't all live in the same -project.`, - MaxItems: 1, - Elem: &resource_container_analysis_note_schema.Resource{ - Schema: map[string]*resource_container_analysis_note_schema.Schema{ - "hint": { - Type: resource_container_analysis_note_schema.TypeList, - Required: true, - Description: `This submessage provides human-readable hints about the purpose of -the AttestationAuthority. Because the name of a Note acts as its -resource reference, it is important to disambiguate the canonical -name of the Note (which might be a UUID for security purposes) -from "readable" names more suitable for debug output. Note that -these hints should NOT be used to look up AttestationAuthorities -in security sensitive contexts, such as when looking up -Attestations to verify.`, - MaxItems: 1, - Elem: &resource_container_analysis_note_schema.Resource{ - Schema: map[string]*resource_container_analysis_note_schema.Schema{ - "human_readable_name": { - Type: resource_container_analysis_note_schema.TypeString, - Required: true, - Description: `The human readable name of this Attestation Authority, for -example "qa".`, - }, - }, - }, - }, - }, - }, - }, - "name": { - Type: resource_container_analysis_note_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the note.`, - }, - "expiration_time": { - Type: resource_container_analysis_note_schema.TypeString, - Optional: true, - Description: `Time of expiration for this note. 
Leave empty if note does not expire.`, - }, - "long_description": { - Type: resource_container_analysis_note_schema.TypeString, - Optional: true, - Description: `A detailed description of the note`, - }, - "related_note_names": { - Type: resource_container_analysis_note_schema.TypeSet, - Optional: true, - Description: `Names of other notes related to this note.`, - Elem: &resource_container_analysis_note_schema.Schema{ - Type: resource_container_analysis_note_schema.TypeString, - }, - Set: resource_container_analysis_note_schema.HashString, - }, - "related_url": { - Type: resource_container_analysis_note_schema.TypeSet, - Optional: true, - Description: `URLs associated with this note and related metadata.`, - Elem: containeranalysisNoteRelatedUrlSchema(), - }, - "short_description": { - Type: resource_container_analysis_note_schema.TypeString, - Optional: true, - Description: `A one sentence description of the note.`, - }, - "create_time": { - Type: resource_container_analysis_note_schema.TypeString, - Computed: true, - Description: `The time this note was created.`, - }, - "kind": { - Type: resource_container_analysis_note_schema.TypeString, - Computed: true, - Description: `The type of analysis this note describes`, - }, - "update_time": { - Type: resource_container_analysis_note_schema.TypeString, - Computed: true, - Description: `The time this note was last updated.`, - }, - "project": { - Type: resource_container_analysis_note_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func containeranalysisNoteRelatedUrlSchema() *resource_container_analysis_note_schema.Resource { - return &resource_container_analysis_note_schema.Resource{ - Schema: map[string]*resource_container_analysis_note_schema.Schema{ - "url": { - Type: resource_container_analysis_note_schema.TypeString, - Required: true, - Description: `Specific URL associated with the resource.`, - }, - "label": { - Type: 
resource_container_analysis_note_schema.TypeString, - Optional: true, - Description: `Label to describe usage of the URL`, - }, - }, - } -} - -func resourceContainerAnalysisNoteCreate(d *resource_container_analysis_note_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandContainerAnalysisNoteName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_container_analysis_note_reflect.ValueOf(nameProp)) && (ok || !resource_container_analysis_note_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - shortDescriptionProp, err := expandContainerAnalysisNoteShortDescription(d.Get("short_description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("short_description"); !isEmptyValue(resource_container_analysis_note_reflect.ValueOf(shortDescriptionProp)) && (ok || !resource_container_analysis_note_reflect.DeepEqual(v, shortDescriptionProp)) { - obj["shortDescription"] = shortDescriptionProp - } - longDescriptionProp, err := expandContainerAnalysisNoteLongDescription(d.Get("long_description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("long_description"); !isEmptyValue(resource_container_analysis_note_reflect.ValueOf(longDescriptionProp)) && (ok || !resource_container_analysis_note_reflect.DeepEqual(v, longDescriptionProp)) { - obj["longDescription"] = longDescriptionProp - } - relatedUrlProp, err := expandContainerAnalysisNoteRelatedUrl(d.Get("related_url"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("related_url"); !isEmptyValue(resource_container_analysis_note_reflect.ValueOf(relatedUrlProp)) && (ok || !resource_container_analysis_note_reflect.DeepEqual(v, relatedUrlProp)) { - obj["relatedUrl"] = relatedUrlProp 
- } - expirationTimeProp, err := expandContainerAnalysisNoteExpirationTime(d.Get("expiration_time"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("expiration_time"); !isEmptyValue(resource_container_analysis_note_reflect.ValueOf(expirationTimeProp)) && (ok || !resource_container_analysis_note_reflect.DeepEqual(v, expirationTimeProp)) { - obj["expirationTime"] = expirationTimeProp - } - relatedNoteNamesProp, err := expandContainerAnalysisNoteRelatedNoteNames(d.Get("related_note_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("related_note_names"); !isEmptyValue(resource_container_analysis_note_reflect.ValueOf(relatedNoteNamesProp)) && (ok || !resource_container_analysis_note_reflect.DeepEqual(v, relatedNoteNamesProp)) { - obj["relatedNoteNames"] = relatedNoteNamesProp - } - attestationAuthorityProp, err := expandContainerAnalysisNoteAttestationAuthority(d.Get("attestation_authority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attestation_authority"); !isEmptyValue(resource_container_analysis_note_reflect.ValueOf(attestationAuthorityProp)) && (ok || !resource_container_analysis_note_reflect.DeepEqual(v, attestationAuthorityProp)) { - obj["attestationAuthority"] = attestationAuthorityProp - } - - obj, err = resourceContainerAnalysisNoteEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "projects/{{project}}/notes/{{name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/notes?noteId={{name}}") - if err != nil { - return err - } - - resource_container_analysis_note_log.Printf("[DEBUG] Creating new Note: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_container_analysis_note_fmt.Errorf("Error fetching project for Note: 
%s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_container_analysis_note_schema.TimeoutCreate)) - if err != nil { - return resource_container_analysis_note_fmt.Errorf("Error creating Note: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/notes/{{name}}") - if err != nil { - return resource_container_analysis_note_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_container_analysis_note_log.Printf("[DEBUG] Finished creating Note %q: %#v", d.Id(), res) - - return resourceContainerAnalysisNoteRead(d, meta) -} - -func resourceContainerAnalysisNoteRead(d *resource_container_analysis_note_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/notes/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_container_analysis_note_fmt.Errorf("Error fetching project for Note: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_container_analysis_note_fmt.Sprintf("ContainerAnalysisNote %q", d.Id())) - } - - res, err = resourceContainerAnalysisNoteDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_container_analysis_note_log.Printf("[DEBUG] Removing ContainerAnalysisNote because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - 
return resource_container_analysis_note_fmt.Errorf("Error reading Note: %s", err) - } - - if err := d.Set("name", flattenContainerAnalysisNoteName(res["name"], d, config)); err != nil { - return resource_container_analysis_note_fmt.Errorf("Error reading Note: %s", err) - } - if err := d.Set("short_description", flattenContainerAnalysisNoteShortDescription(res["shortDescription"], d, config)); err != nil { - return resource_container_analysis_note_fmt.Errorf("Error reading Note: %s", err) - } - if err := d.Set("long_description", flattenContainerAnalysisNoteLongDescription(res["longDescription"], d, config)); err != nil { - return resource_container_analysis_note_fmt.Errorf("Error reading Note: %s", err) - } - if err := d.Set("kind", flattenContainerAnalysisNoteKind(res["kind"], d, config)); err != nil { - return resource_container_analysis_note_fmt.Errorf("Error reading Note: %s", err) - } - if err := d.Set("related_url", flattenContainerAnalysisNoteRelatedUrl(res["relatedUrl"], d, config)); err != nil { - return resource_container_analysis_note_fmt.Errorf("Error reading Note: %s", err) - } - if err := d.Set("expiration_time", flattenContainerAnalysisNoteExpirationTime(res["expirationTime"], d, config)); err != nil { - return resource_container_analysis_note_fmt.Errorf("Error reading Note: %s", err) - } - if err := d.Set("create_time", flattenContainerAnalysisNoteCreateTime(res["createTime"], d, config)); err != nil { - return resource_container_analysis_note_fmt.Errorf("Error reading Note: %s", err) - } - if err := d.Set("update_time", flattenContainerAnalysisNoteUpdateTime(res["updateTime"], d, config)); err != nil { - return resource_container_analysis_note_fmt.Errorf("Error reading Note: %s", err) - } - if err := d.Set("related_note_names", flattenContainerAnalysisNoteRelatedNoteNames(res["relatedNoteNames"], d, config)); err != nil { - return resource_container_analysis_note_fmt.Errorf("Error reading Note: %s", err) - } - if err := 
d.Set("attestation_authority", flattenContainerAnalysisNoteAttestationAuthority(res["attestationAuthority"], d, config)); err != nil { - return resource_container_analysis_note_fmt.Errorf("Error reading Note: %s", err) - } - - return nil -} - -func resourceContainerAnalysisNoteUpdate(d *resource_container_analysis_note_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_container_analysis_note_fmt.Errorf("Error fetching project for Note: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - shortDescriptionProp, err := expandContainerAnalysisNoteShortDescription(d.Get("short_description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("short_description"); !isEmptyValue(resource_container_analysis_note_reflect.ValueOf(v)) && (ok || !resource_container_analysis_note_reflect.DeepEqual(v, shortDescriptionProp)) { - obj["shortDescription"] = shortDescriptionProp - } - longDescriptionProp, err := expandContainerAnalysisNoteLongDescription(d.Get("long_description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("long_description"); !isEmptyValue(resource_container_analysis_note_reflect.ValueOf(v)) && (ok || !resource_container_analysis_note_reflect.DeepEqual(v, longDescriptionProp)) { - obj["longDescription"] = longDescriptionProp - } - relatedUrlProp, err := expandContainerAnalysisNoteRelatedUrl(d.Get("related_url"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("related_url"); !isEmptyValue(resource_container_analysis_note_reflect.ValueOf(v)) && (ok || !resource_container_analysis_note_reflect.DeepEqual(v, relatedUrlProp)) { - obj["relatedUrl"] = relatedUrlProp - } - expirationTimeProp, err := 
expandContainerAnalysisNoteExpirationTime(d.Get("expiration_time"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("expiration_time"); !isEmptyValue(resource_container_analysis_note_reflect.ValueOf(v)) && (ok || !resource_container_analysis_note_reflect.DeepEqual(v, expirationTimeProp)) { - obj["expirationTime"] = expirationTimeProp - } - relatedNoteNamesProp, err := expandContainerAnalysisNoteRelatedNoteNames(d.Get("related_note_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("related_note_names"); !isEmptyValue(resource_container_analysis_note_reflect.ValueOf(v)) && (ok || !resource_container_analysis_note_reflect.DeepEqual(v, relatedNoteNamesProp)) { - obj["relatedNoteNames"] = relatedNoteNamesProp - } - attestationAuthorityProp, err := expandContainerAnalysisNoteAttestationAuthority(d.Get("attestation_authority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attestation_authority"); !isEmptyValue(resource_container_analysis_note_reflect.ValueOf(v)) && (ok || !resource_container_analysis_note_reflect.DeepEqual(v, attestationAuthorityProp)) { - obj["attestationAuthority"] = attestationAuthorityProp - } - - obj, err = resourceContainerAnalysisNoteEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "projects/{{project}}/notes/{{name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/notes/{{name}}") - if err != nil { - return err - } - - resource_container_analysis_note_log.Printf("[DEBUG] Updating Note %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("short_description") { - updateMask = append(updateMask, "shortDescription") - } - - if d.HasChange("long_description") { - updateMask = append(updateMask, "longDescription") - } - - if d.HasChange("related_url") { - 
updateMask = append(updateMask, "relatedUrl") - } - - if d.HasChange("expiration_time") { - updateMask = append(updateMask, "expirationTime") - } - - if d.HasChange("related_note_names") { - updateMask = append(updateMask, "relatedNoteNames") - } - - if d.HasChange("attestation_authority") { - updateMask = append(updateMask, "attestationAuthority") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_container_analysis_note_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_container_analysis_note_schema.TimeoutUpdate)) - - if err != nil { - return resource_container_analysis_note_fmt.Errorf("Error updating Note %q: %s", d.Id(), err) - } else { - resource_container_analysis_note_log.Printf("[DEBUG] Finished updating Note %q: %#v", d.Id(), res) - } - - return resourceContainerAnalysisNoteRead(d, meta) -} - -func resourceContainerAnalysisNoteDelete(d *resource_container_analysis_note_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_container_analysis_note_fmt.Errorf("Error fetching project for Note: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "projects/{{project}}/notes/{{name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/notes/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_container_analysis_note_log.Printf("[DEBUG] Deleting Note %q", d.Id()) - - if bp, err := getBillingProject(d, 
config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_container_analysis_note_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Note") - } - - resource_container_analysis_note_log.Printf("[DEBUG] Finished deleting Note %q: %#v", d.Id(), res) - return nil -} - -func resourceContainerAnalysisNoteImport(d *resource_container_analysis_note_schema.ResourceData, meta interface{}) ([]*resource_container_analysis_note_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/notes/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/notes/{{name}}") - if err != nil { - return nil, resource_container_analysis_note_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_container_analysis_note_schema.ResourceData{d}, nil -} - -func flattenContainerAnalysisNoteName(v interface{}, d *resource_container_analysis_note_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenContainerAnalysisNoteShortDescription(v interface{}, d *resource_container_analysis_note_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisNoteLongDescription(v interface{}, d *resource_container_analysis_note_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisNoteKind(v interface{}, d *resource_container_analysis_note_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisNoteRelatedUrl(v interface{}, d *resource_container_analysis_note_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := 
resource_container_analysis_note_schema.NewSet(resource_container_analysis_note_schema.HashResource(containeranalysisNoteRelatedUrlSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "url": flattenContainerAnalysisNoteRelatedUrlUrl(original["url"], d, config), - "label": flattenContainerAnalysisNoteRelatedUrlLabel(original["label"], d, config), - }) - } - return transformed -} - -func flattenContainerAnalysisNoteRelatedUrlUrl(v interface{}, d *resource_container_analysis_note_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisNoteRelatedUrlLabel(v interface{}, d *resource_container_analysis_note_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisNoteExpirationTime(v interface{}, d *resource_container_analysis_note_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisNoteCreateTime(v interface{}, d *resource_container_analysis_note_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisNoteUpdateTime(v interface{}, d *resource_container_analysis_note_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisNoteRelatedNoteNames(v interface{}, d *resource_container_analysis_note_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_container_analysis_note_schema.NewSet(resource_container_analysis_note_schema.HashString, v.([]interface{})) -} - -func flattenContainerAnalysisNoteAttestationAuthority(v interface{}, d *resource_container_analysis_note_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hint"] = 
- flattenContainerAnalysisNoteAttestationAuthorityHint(original["hint"], d, config) - return []interface{}{transformed} -} - -func flattenContainerAnalysisNoteAttestationAuthorityHint(v interface{}, d *resource_container_analysis_note_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["human_readable_name"] = - flattenContainerAnalysisNoteAttestationAuthorityHintHumanReadableName(original["humanReadableName"], d, config) - return []interface{}{transformed} -} - -func flattenContainerAnalysisNoteAttestationAuthorityHintHumanReadableName(v interface{}, d *resource_container_analysis_note_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandContainerAnalysisNoteName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisNoteShortDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisNoteLongDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisNoteRelatedUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_container_analysis_note_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrl, err := expandContainerAnalysisNoteRelatedUrlUrl(original["url"], d, config) - if err != nil { - return nil, err - } else if val := resource_container_analysis_note_reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { - transformed["url"] = transformedUrl - } - - transformedLabel, err := 
expandContainerAnalysisNoteRelatedUrlLabel(original["label"], d, config) - if err != nil { - return nil, err - } else if val := resource_container_analysis_note_reflect.ValueOf(transformedLabel); val.IsValid() && !isEmptyValue(val) { - transformed["label"] = transformedLabel - } - - req = append(req, transformed) - } - return req, nil -} - -func expandContainerAnalysisNoteRelatedUrlUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisNoteRelatedUrlLabel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisNoteExpirationTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisNoteRelatedNoteNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_container_analysis_note_schema.Set).List() - return v, nil -} - -func expandContainerAnalysisNoteAttestationAuthority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHint, err := expandContainerAnalysisNoteAttestationAuthorityHint(original["hint"], d, config) - if err != nil { - return nil, err - } else if val := resource_container_analysis_note_reflect.ValueOf(transformedHint); val.IsValid() && !isEmptyValue(val) { - transformed["hint"] = transformedHint - } - - return transformed, nil -} - -func expandContainerAnalysisNoteAttestationAuthorityHint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - 
transformedHumanReadableName, err := expandContainerAnalysisNoteAttestationAuthorityHintHumanReadableName(original["human_readable_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_container_analysis_note_reflect.ValueOf(transformedHumanReadableName); val.IsValid() && !isEmptyValue(val) { - transformed["humanReadableName"] = transformedHumanReadableName - } - - return transformed, nil -} - -func expandContainerAnalysisNoteAttestationAuthorityHintHumanReadableName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceContainerAnalysisNoteEncoder(d *resource_container_analysis_note_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - obj["attestation"] = obj["attestationAuthority"] - delete(obj, "attestationAuthority") - - return obj, nil -} - -func resourceContainerAnalysisNoteDecoder(d *resource_container_analysis_note_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - - res["attestationAuthority"] = res["attestation"] - delete(res, "attestation") - - return res, nil -} - -func resourceContainerAnalysisOccurrence() *resource_container_analysis_occurrence_schema.Resource { - return &resource_container_analysis_occurrence_schema.Resource{ - Create: resourceContainerAnalysisOccurrenceCreate, - Read: resourceContainerAnalysisOccurrenceRead, - Update: resourceContainerAnalysisOccurrenceUpdate, - Delete: resourceContainerAnalysisOccurrenceDelete, - - Importer: &resource_container_analysis_occurrence_schema.ResourceImporter{ - State: resourceContainerAnalysisOccurrenceImport, - }, - - Timeouts: &resource_container_analysis_occurrence_schema.ResourceTimeout{ - Create: resource_container_analysis_occurrence_schema.DefaultTimeout(4 * resource_container_analysis_occurrence_time.Minute), - Update: resource_container_analysis_occurrence_schema.DefaultTimeout(4 * 
resource_container_analysis_occurrence_time.Minute), - Delete: resource_container_analysis_occurrence_schema.DefaultTimeout(4 * resource_container_analysis_occurrence_time.Minute), - }, - - Schema: map[string]*resource_container_analysis_occurrence_schema.Schema{ - "attestation": { - Type: resource_container_analysis_occurrence_schema.TypeList, - Required: true, - Description: `Occurrence that represents a single "attestation". The authenticity -of an attestation can be verified using the attached signature. -If the verifier trusts the public key of the signer, then verifying -the signature is sufficient to establish trust. In this circumstance, -the authority to which this attestation is attached is primarily -useful for lookup (how to find this attestation if you already -know the authority and artifact to be verified) and intent (for -which authority this attestation was intended to sign.`, - MaxItems: 1, - Elem: &resource_container_analysis_occurrence_schema.Resource{ - Schema: map[string]*resource_container_analysis_occurrence_schema.Schema{ - "serialized_payload": { - Type: resource_container_analysis_occurrence_schema.TypeString, - Required: true, - Description: `The serialized payload that is verified by one or -more signatures. A base64-encoded string.`, - }, - "signatures": { - Type: resource_container_analysis_occurrence_schema.TypeSet, - Required: true, - Description: `One or more signatures over serializedPayload. -Verifier implementations should consider this attestation -message verified if at least one signature verifies -serializedPayload. See Signature in common.proto for more -details on signature structure and verification.`, - Elem: containeranalysisOccurrenceAttestationSignaturesSchema(), - }, - }, - }, - }, - "note_name": { - Type: resource_container_analysis_occurrence_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The analysis note associated with this occurrence, in the form of -projects/[PROJECT]/notes/[NOTE_ID]. 
This field can be used as a -filter in list requests.`, - }, - "resource_uri": { - Type: resource_container_analysis_occurrence_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Required. Immutable. A URI that represents the resource for which -the occurrence applies. For example, -https://gcr.io/project/image@sha256:123abc for a Docker image.`, - }, - "remediation": { - Type: resource_container_analysis_occurrence_schema.TypeString, - Optional: true, - Description: `A description of actions that can be taken to remedy the note.`, - }, - "create_time": { - Type: resource_container_analysis_occurrence_schema.TypeString, - Computed: true, - Description: `The time when the repository was created.`, - }, - "kind": { - Type: resource_container_analysis_occurrence_schema.TypeString, - Computed: true, - Description: `The note kind which explicitly denotes which of the occurrence -details are specified. This field can be used as a filter in list -requests.`, - }, - "name": { - Type: resource_container_analysis_occurrence_schema.TypeString, - Computed: true, - Description: `The name of the occurrence.`, - }, - "update_time": { - Type: resource_container_analysis_occurrence_schema.TypeString, - Computed: true, - Description: `The time when the repository was last updated.`, - }, - "project": { - Type: resource_container_analysis_occurrence_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func containeranalysisOccurrenceAttestationSignaturesSchema() *resource_container_analysis_occurrence_schema.Resource { - return &resource_container_analysis_occurrence_schema.Resource{ - Schema: map[string]*resource_container_analysis_occurrence_schema.Schema{ - "public_key_id": { - Type: resource_container_analysis_occurrence_schema.TypeString, - Required: true, - Description: `The identifier for the public key that verifies this -signature. MUST be an RFC3986 conformant -URI. 
* When possible, the key id should be an -immutable reference, such as a cryptographic digest. -Examples of valid values: - -* OpenPGP V4 public key fingerprint. See https://www.iana.org/assignments/uri-schemes/prov/openpgp4fpr - for more details on this scheme. - * 'openpgp4fpr:74FAF3B861BDA0870C7B6DEF607E48D2A663AEEA' -* RFC6920 digest-named SubjectPublicKeyInfo (digest of the DER serialization): - * "ni:///sha-256;cD9o9Cq6LG3jD0iKXqEi_vdjJGecm_iXkbqVoScViaU"`, - }, - "signature": { - Type: resource_container_analysis_occurrence_schema.TypeString, - Optional: true, - Description: `The content of the signature, an opaque bytestring. -The payload that this signature verifies MUST be -unambiguously provided with the Signature during -verification. A wrapper message might provide the -payload explicitly. Alternatively, a message might -have a canonical serialization that can always be -unambiguously computed to derive the payload.`, - }, - }, - } -} - -func resourceContainerAnalysisOccurrenceCreate(d *resource_container_analysis_occurrence_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - resourceUriProp, err := expandContainerAnalysisOccurrenceResourceUri(d.Get("resource_uri"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("resource_uri"); !isEmptyValue(resource_container_analysis_occurrence_reflect.ValueOf(resourceUriProp)) && (ok || !resource_container_analysis_occurrence_reflect.DeepEqual(v, resourceUriProp)) { - obj["resourceUri"] = resourceUriProp - } - noteNameProp, err := expandContainerAnalysisOccurrenceNoteName(d.Get("note_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("note_name"); !isEmptyValue(resource_container_analysis_occurrence_reflect.ValueOf(noteNameProp)) && (ok || 
!resource_container_analysis_occurrence_reflect.DeepEqual(v, noteNameProp)) { - obj["noteName"] = noteNameProp - } - remediationProp, err := expandContainerAnalysisOccurrenceRemediation(d.Get("remediation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("remediation"); !isEmptyValue(resource_container_analysis_occurrence_reflect.ValueOf(remediationProp)) && (ok || !resource_container_analysis_occurrence_reflect.DeepEqual(v, remediationProp)) { - obj["remediation"] = remediationProp - } - attestationProp, err := expandContainerAnalysisOccurrenceAttestation(d.Get("attestation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attestation"); !isEmptyValue(resource_container_analysis_occurrence_reflect.ValueOf(attestationProp)) && (ok || !resource_container_analysis_occurrence_reflect.DeepEqual(v, attestationProp)) { - obj["attestation"] = attestationProp - } - - obj, err = resourceContainerAnalysisOccurrenceEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "{{note_name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/occurrences") - if err != nil { - return err - } - - resource_container_analysis_occurrence_log.Printf("[DEBUG] Creating new Occurrence: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_container_analysis_occurrence_fmt.Errorf("Error fetching project for Occurrence: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_container_analysis_occurrence_schema.TimeoutCreate)) - if err != nil { - return resource_container_analysis_occurrence_fmt.Errorf("Error creating 
Occurrence: %s", err) - } - if err := d.Set("name", flattenContainerAnalysisOccurrenceName(res["name"], d, config)); err != nil { - return resource_container_analysis_occurrence_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/occurrences/{{name}}") - if err != nil { - return resource_container_analysis_occurrence_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_container_analysis_occurrence_log.Printf("[DEBUG] Finished creating Occurrence %q: %#v", d.Id(), res) - - return resourceContainerAnalysisOccurrenceRead(d, meta) -} - -func resourceContainerAnalysisOccurrenceRead(d *resource_container_analysis_occurrence_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/occurrences/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_container_analysis_occurrence_fmt.Errorf("Error fetching project for Occurrence: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_container_analysis_occurrence_fmt.Sprintf("ContainerAnalysisOccurrence %q", d.Id())) - } - - res, err = resourceContainerAnalysisOccurrenceDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_container_analysis_occurrence_log.Printf("[DEBUG] Removing ContainerAnalysisOccurrence because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return 
resource_container_analysis_occurrence_fmt.Errorf("Error reading Occurrence: %s", err) - } - - if err := d.Set("name", flattenContainerAnalysisOccurrenceName(res["name"], d, config)); err != nil { - return resource_container_analysis_occurrence_fmt.Errorf("Error reading Occurrence: %s", err) - } - if err := d.Set("resource_uri", flattenContainerAnalysisOccurrenceResourceUri(res["resourceUri"], d, config)); err != nil { - return resource_container_analysis_occurrence_fmt.Errorf("Error reading Occurrence: %s", err) - } - if err := d.Set("note_name", flattenContainerAnalysisOccurrenceNoteName(res["noteName"], d, config)); err != nil { - return resource_container_analysis_occurrence_fmt.Errorf("Error reading Occurrence: %s", err) - } - if err := d.Set("kind", flattenContainerAnalysisOccurrenceKind(res["kind"], d, config)); err != nil { - return resource_container_analysis_occurrence_fmt.Errorf("Error reading Occurrence: %s", err) - } - if err := d.Set("remediation", flattenContainerAnalysisOccurrenceRemediation(res["remediation"], d, config)); err != nil { - return resource_container_analysis_occurrence_fmt.Errorf("Error reading Occurrence: %s", err) - } - if err := d.Set("create_time", flattenContainerAnalysisOccurrenceCreateTime(res["createTime"], d, config)); err != nil { - return resource_container_analysis_occurrence_fmt.Errorf("Error reading Occurrence: %s", err) - } - if err := d.Set("update_time", flattenContainerAnalysisOccurrenceUpdateTime(res["updateTime"], d, config)); err != nil { - return resource_container_analysis_occurrence_fmt.Errorf("Error reading Occurrence: %s", err) - } - if err := d.Set("attestation", flattenContainerAnalysisOccurrenceAttestation(res["attestation"], d, config)); err != nil { - return resource_container_analysis_occurrence_fmt.Errorf("Error reading Occurrence: %s", err) - } - - return nil -} - -func resourceContainerAnalysisOccurrenceUpdate(d *resource_container_analysis_occurrence_schema.ResourceData, meta interface{}) error { - 
config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_container_analysis_occurrence_fmt.Errorf("Error fetching project for Occurrence: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - remediationProp, err := expandContainerAnalysisOccurrenceRemediation(d.Get("remediation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("remediation"); !isEmptyValue(resource_container_analysis_occurrence_reflect.ValueOf(v)) && (ok || !resource_container_analysis_occurrence_reflect.DeepEqual(v, remediationProp)) { - obj["remediation"] = remediationProp - } - attestationProp, err := expandContainerAnalysisOccurrenceAttestation(d.Get("attestation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("attestation"); !isEmptyValue(resource_container_analysis_occurrence_reflect.ValueOf(v)) && (ok || !resource_container_analysis_occurrence_reflect.DeepEqual(v, attestationProp)) { - obj["attestation"] = attestationProp - } - - obj, err = resourceContainerAnalysisOccurrenceUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "{{note_name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/occurrences/{{name}}") - if err != nil { - return err - } - - resource_container_analysis_occurrence_log.Printf("[DEBUG] Updating Occurrence %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("remediation") { - updateMask = append(updateMask, "remediation") - } - - if d.HasChange("attestation") { - updateMask = append(updateMask, "attestation") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": 
resource_container_analysis_occurrence_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_container_analysis_occurrence_schema.TimeoutUpdate)) - - if err != nil { - return resource_container_analysis_occurrence_fmt.Errorf("Error updating Occurrence %q: %s", d.Id(), err) - } else { - resource_container_analysis_occurrence_log.Printf("[DEBUG] Finished updating Occurrence %q: %#v", d.Id(), res) - } - - return resourceContainerAnalysisOccurrenceRead(d, meta) -} - -func resourceContainerAnalysisOccurrenceDelete(d *resource_container_analysis_occurrence_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_container_analysis_occurrence_fmt.Errorf("Error fetching project for Occurrence: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "{{note_name}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{ContainerAnalysisBasePath}}projects/{{project}}/occurrences/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_container_analysis_occurrence_log.Printf("[DEBUG] Deleting Occurrence %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_container_analysis_occurrence_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Occurrence") - } - - resource_container_analysis_occurrence_log.Printf("[DEBUG] Finished 
deleting Occurrence %q: %#v", d.Id(), res) - return nil -} - -func resourceContainerAnalysisOccurrenceImport(d *resource_container_analysis_occurrence_schema.ResourceData, meta interface{}) ([]*resource_container_analysis_occurrence_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/occurrences/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/occurrences/{{name}}") - if err != nil { - return nil, resource_container_analysis_occurrence_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_container_analysis_occurrence_schema.ResourceData{d}, nil -} - -func flattenContainerAnalysisOccurrenceName(v interface{}, d *resource_container_analysis_occurrence_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenContainerAnalysisOccurrenceResourceUri(v interface{}, d *resource_container_analysis_occurrence_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisOccurrenceNoteName(v interface{}, d *resource_container_analysis_occurrence_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisOccurrenceKind(v interface{}, d *resource_container_analysis_occurrence_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisOccurrenceRemediation(v interface{}, d *resource_container_analysis_occurrence_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisOccurrenceCreateTime(v interface{}, d *resource_container_analysis_occurrence_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisOccurrenceUpdateTime(v interface{}, d *resource_container_analysis_occurrence_schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenContainerAnalysisOccurrenceAttestation(v interface{}, d *resource_container_analysis_occurrence_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["serialized_payload"] = - flattenContainerAnalysisOccurrenceAttestationSerializedPayload(original["serializedPayload"], d, config) - transformed["signatures"] = - flattenContainerAnalysisOccurrenceAttestationSignatures(original["signatures"], d, config) - return []interface{}{transformed} -} - -func flattenContainerAnalysisOccurrenceAttestationSerializedPayload(v interface{}, d *resource_container_analysis_occurrence_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisOccurrenceAttestationSignatures(v interface{}, d *resource_container_analysis_occurrence_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_container_analysis_occurrence_schema.NewSet(resource_container_analysis_occurrence_schema.HashResource(containeranalysisOccurrenceAttestationSignaturesSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "signature": flattenContainerAnalysisOccurrenceAttestationSignaturesSignature(original["signature"], d, config), - "public_key_id": flattenContainerAnalysisOccurrenceAttestationSignaturesPublicKeyId(original["publicKeyId"], d, config), - }) - } - return transformed -} - -func flattenContainerAnalysisOccurrenceAttestationSignaturesSignature(v interface{}, d *resource_container_analysis_occurrence_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenContainerAnalysisOccurrenceAttestationSignaturesPublicKeyId(v 
interface{}, d *resource_container_analysis_occurrence_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandContainerAnalysisOccurrenceResourceUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisOccurrenceNoteName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisOccurrenceRemediation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisOccurrenceAttestation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSerializedPayload, err := expandContainerAnalysisOccurrenceAttestationSerializedPayload(original["serialized_payload"], d, config) - if err != nil { - return nil, err - } else if val := resource_container_analysis_occurrence_reflect.ValueOf(transformedSerializedPayload); val.IsValid() && !isEmptyValue(val) { - transformed["serializedPayload"] = transformedSerializedPayload - } - - transformedSignatures, err := expandContainerAnalysisOccurrenceAttestationSignatures(original["signatures"], d, config) - if err != nil { - return nil, err - } else if val := resource_container_analysis_occurrence_reflect.ValueOf(transformedSignatures); val.IsValid() && !isEmptyValue(val) { - transformed["signatures"] = transformedSignatures - } - - return transformed, nil -} - -func expandContainerAnalysisOccurrenceAttestationSerializedPayload(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisOccurrenceAttestationSignatures(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = 
v.(*resource_container_analysis_occurrence_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSignature, err := expandContainerAnalysisOccurrenceAttestationSignaturesSignature(original["signature"], d, config) - if err != nil { - return nil, err - } else if val := resource_container_analysis_occurrence_reflect.ValueOf(transformedSignature); val.IsValid() && !isEmptyValue(val) { - transformed["signature"] = transformedSignature - } - - transformedPublicKeyId, err := expandContainerAnalysisOccurrenceAttestationSignaturesPublicKeyId(original["public_key_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_container_analysis_occurrence_reflect.ValueOf(transformedPublicKeyId); val.IsValid() && !isEmptyValue(val) { - transformed["publicKeyId"] = transformedPublicKeyId - } - - req = append(req, transformed) - } - return req, nil -} - -func expandContainerAnalysisOccurrenceAttestationSignaturesSignature(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandContainerAnalysisOccurrenceAttestationSignaturesPublicKeyId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceContainerAnalysisOccurrenceEncoder(d *resource_container_analysis_occurrence_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - return obj, nil -} - -func resourceContainerAnalysisOccurrenceUpdateEncoder(d *resource_container_analysis_occurrence_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - noteNameProp, err := expandContainerAnalysisOccurrenceNoteName(d.Get("note_name"), d, meta.(*Config)) - if err != nil { - return nil, err - } else if v, ok := 
d.GetOkExists("note_name"); !isEmptyValue(resource_container_analysis_occurrence_reflect.ValueOf(noteNameProp)) && (ok || !resource_container_analysis_occurrence_reflect.DeepEqual(v, noteNameProp)) { - obj["noteName"] = noteNameProp - } - - return resourceContainerAnalysisOccurrenceEncoder(d, meta, obj) -} - -func resourceContainerAnalysisOccurrenceDecoder(d *resource_container_analysis_occurrence_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - - return res, nil -} - -var ( - instanceGroupManagerURL = resource_container_cluster_regexp.MustCompile(resource_container_cluster_fmt.Sprintf("projects/(%s)/zones/([a-z0-9-]*)/instanceGroupManagers/([^/]*)", ProjectRegex)) - - networkConfig = &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "cidr_blocks": { - Type: resource_container_cluster_schema.TypeSet, - - Optional: true, - Elem: cidrBlockConfig, - Description: `External networks that can access the Kubernetes cluster master through HTTPS.`, - }, - }, - } - cidrBlockConfig = &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "cidr_block": { - Type: resource_container_cluster_schema.TypeString, - Required: true, - ValidateFunc: resource_container_cluster_validation.IsCIDRNetwork(0, 32), - Description: `External network that can access Kubernetes master through HTTPS. 
Must be specified in CIDR notation.`, - }, - "display_name": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Description: `Field for users to identify CIDR blocks.`, - }, - }, - } - - ipAllocationCidrBlockFields = []string{"ip_allocation_policy.0.cluster_ipv4_cidr_block", "ip_allocation_policy.0.services_ipv4_cidr_block"} - ipAllocationRangeFields = []string{"ip_allocation_policy.0.cluster_secondary_range_name", "ip_allocation_policy.0.services_secondary_range_name"} - - addonsConfigKeys = []string{ - "addons_config.0.http_load_balancing", - "addons_config.0.horizontal_pod_autoscaling", - "addons_config.0.network_policy_config", - "addons_config.0.cloudrun_config", - } - - forceNewClusterNodeConfigFields = []string{ - "workload_metadata_config", - } -) - -func clusterSchemaNodeConfig() *resource_container_cluster_schema.Schema { - nodeConfigSch := schemaNodeConfig() - schemaMap := nodeConfigSch.Elem.(*resource_container_cluster_schema.Resource).Schema - for _, k := range forceNewClusterNodeConfigFields { - if sch, ok := schemaMap[k]; ok { - changeFieldSchemaToForceNew(sch) - } - } - return nodeConfigSch -} - -func rfc5545RecurrenceDiffSuppress(k, o, n string, d *resource_container_cluster_schema.ResourceData) bool { - - if o == "FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR,SA,SU" && n == "FREQ=DAILY" { - return true - } - - return false -} - -func resourceContainerCluster() *resource_container_cluster_schema.Resource { - return &resource_container_cluster_schema.Resource{ - UseJSONNumber: true, - Create: resourceContainerClusterCreate, - Read: resourceContainerClusterRead, - Update: resourceContainerClusterUpdate, - Delete: resourceContainerClusterDelete, - - CustomizeDiff: resource_container_cluster_customdiff.All( - resourceNodeConfigEmptyGuestAccelerator, - containerClusterPrivateClusterConfigCustomDiff, - containerClusterAutopilotCustomizeDiff, - containerClusterNodeVersionRemoveDefaultCustomizeDiff, - ), - - Timeouts: 
&resource_container_cluster_schema.ResourceTimeout{ - Create: resource_container_cluster_schema.DefaultTimeout(40 * resource_container_cluster_time.Minute), - Read: resource_container_cluster_schema.DefaultTimeout(40 * resource_container_cluster_time.Minute), - Update: resource_container_cluster_schema.DefaultTimeout(60 * resource_container_cluster_time.Minute), - Delete: resource_container_cluster_schema.DefaultTimeout(40 * resource_container_cluster_time.Minute), - }, - - SchemaVersion: 1, - MigrateState: resourceContainerClusterMigrateState, - - Importer: &resource_container_cluster_schema.ResourceImporter{ - State: resourceContainerClusterStateImporter, - }, - - Schema: map[string]*resource_container_cluster_schema.Schema{ - "name": { - Type: resource_container_cluster_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the cluster, unique within the project and location.`, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if len(value) > 40 { - errors = append(errors, resource_container_cluster_fmt.Errorf( - "%q cannot be longer than 40 characters", k)) - } - if !resource_container_cluster_regexp.MustCompile("^[a-z0-9-]+$").MatchString(value) { - errors = append(errors, resource_container_cluster_fmt.Errorf( - "%q can only contain lowercase letters, numbers and hyphens", k)) - } - if !resource_container_cluster_regexp.MustCompile("^[a-z]").MatchString(value) { - errors = append(errors, resource_container_cluster_fmt.Errorf( - "%q must start with a letter", k)) - } - if !resource_container_cluster_regexp.MustCompile("[a-z0-9]$").MatchString(value) { - errors = append(errors, resource_container_cluster_fmt.Errorf( - "%q must end with a number or a letter", k)) - } - return - }, - }, - - "operation": { - Type: resource_container_cluster_schema.TypeString, - Computed: true, - }, - - "location": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Computed: 
true, - ForceNew: true, - Description: `The location (region or zone) in which the cluster master will be created, as well as the default node location. If you specify a zone (such as us-central1-a), the cluster will be a zonal cluster with a single cluster master. If you specify a region (such as us-west1), the cluster will be a regional cluster with multiple masters spread across zones in the region, and with default node locations in those zones as well.`, - }, - - "node_locations": { - Type: resource_container_cluster_schema.TypeSet, - Optional: true, - Computed: true, - Elem: &resource_container_cluster_schema.Schema{Type: resource_container_cluster_schema.TypeString}, - Description: `The list of zones in which the cluster's nodes are located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If this is specified for a zonal cluster, omit the cluster's zone.`, - }, - - "addons_config": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: `The configuration for addons supported by GKE.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "http_load_balancing": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - Computed: true, - AtLeastOneOf: addonsConfigKeys, - MaxItems: 1, - Description: `The status of the HTTP (L7) load balancing controller addon, which makes it easy to set up HTTP load balancers for services in a cluster. 
It is enabled by default; set disabled = true to disable.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "disabled": { - Type: resource_container_cluster_schema.TypeBool, - Required: true, - }, - }, - }, - }, - "horizontal_pod_autoscaling": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - Computed: true, - AtLeastOneOf: addonsConfigKeys, - MaxItems: 1, - Description: `The status of the Horizontal Pod Autoscaling addon, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. It ensures that a Heapster pod is running in the cluster, which is also used by the Cloud Monitoring service. It is enabled by default; set disabled = true to disable.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "disabled": { - Type: resource_container_cluster_schema.TypeBool, - Required: true, - }, - }, - }, - }, - "network_policy_config": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - Computed: true, - AtLeastOneOf: addonsConfigKeys, - MaxItems: 1, - Description: `Whether we should enable the network policy addon for the master. This must be enabled in order to enable network policy for the nodes. To enable this, you must also define a network_policy block, otherwise nothing will happen. It can only be disabled if the nodes already do not have network policies enabled. 
Defaults to disabled; set disabled = false to enable.`, - ConflictsWith: []string{"enable_autopilot"}, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "disabled": { - Type: resource_container_cluster_schema.TypeBool, - Required: true, - }, - }, - }, - }, - "cloudrun_config": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - Computed: true, - AtLeastOneOf: addonsConfigKeys, - MaxItems: 1, - Description: `The status of the CloudRun addon. It is disabled by default. Set disabled = false to enable.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "disabled": { - Type: resource_container_cluster_schema.TypeBool, - Required: true, - }, - "load_balancer_type": { - Type: resource_container_cluster_schema.TypeString, - ValidateFunc: resource_container_cluster_validation.StringInSlice([]string{"LOAD_BALANCER_TYPE_INTERNAL"}, false), - Optional: true, - }, - }, - }, - }, - }, - }, - }, - - "cluster_autoscaling": { - Type: resource_container_cluster_schema.TypeList, - MaxItems: 1, - - Optional: true, - Computed: true, - Description: `Per-cluster configuration of Node Auto-Provisioning with Cluster Autoscaler to automatically adjust the size of the cluster and create/delete node pools based on the current needs of the cluster's workload. See the guide to using Node Auto-Provisioning for more details.`, - ConflictsWith: []string{"enable_autopilot"}, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "enabled": { - Type: resource_container_cluster_schema.TypeBool, - Required: true, - Description: `Whether node auto-provisioning is enabled. 
Resource limits for cpu and memory must be defined to enable node auto-provisioning.`, - }, - "resource_limits": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - Description: `Global constraints for machine resources in the cluster. Configuring the cpu and memory types is required if node auto-provisioning is enabled. These limits will apply to node pool autoscaling in addition to node auto-provisioning.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "resource_type": { - Type: resource_container_cluster_schema.TypeString, - Required: true, - Description: `The type of the resource. For example, cpu and memory. See the guide to using Node Auto-Provisioning for a list of types.`, - }, - "minimum": { - Type: resource_container_cluster_schema.TypeInt, - Optional: true, - Description: `Minimum amount of the resource in the cluster.`, - }, - "maximum": { - Type: resource_container_cluster_schema.TypeInt, - Optional: true, - Description: `Maximum amount of the resource in the cluster.`, - }, - }, - }, - }, - "auto_provisioning_defaults": { - Type: resource_container_cluster_schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, - Description: `Contains defaults for a node pool created by NAP.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "oauth_scopes": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - Computed: true, - Elem: &resource_container_cluster_schema.Schema{Type: resource_container_cluster_schema.TypeString}, - DiffSuppressFunc: containerClusterAddedScopesSuppress, - Description: `Scopes that are used by NAP when creating node pools.`, - }, - "service_account": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Default: "default", - Description: `The Google Cloud Platform Service Account to be used by the node VMs.`, - }, - }, - }, 
- }, - }, - }, - }, - - "cluster_ipv4_cidr": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: orEmpty(validateRFC1918Network(8, 32)), - ConflictsWith: []string{"ip_allocation_policy"}, - Description: `The IP address range of the Kubernetes pods in this cluster in CIDR notation (e.g. 10.96.0.0/14). Leave blank to have one automatically chosen or specify a /14 block in 10.0.0.0/8. This field will only work for routes-based clusters, where ip_allocation_policy is not defined.`, - }, - - "description": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - ForceNew: true, - Description: ` Description of the cluster.`, - }, - - "enable_binary_authorization": { - Default: false, - Type: resource_container_cluster_schema.TypeBool, - Optional: true, - Description: `Enable Binary Authorization for this cluster. If enabled, all container images will be validated by Google Binary Authorization.`, - ConflictsWith: []string{"enable_autopilot"}, - }, - - "enable_kubernetes_alpha": { - Type: resource_container_cluster_schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - Description: `Whether to enable Kubernetes Alpha features for this cluster. Note that when this option is enabled, the cluster cannot be upgraded and will be automatically deleted after 30 days.`, - }, - - "enable_tpu": { - Type: resource_container_cluster_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether to enable Cloud TPU resources in this cluster.`, - }, - - "enable_legacy_abac": { - Type: resource_container_cluster_schema.TypeBool, - Optional: true, - Default: false, - Description: `Whether the ABAC authorizer is enabled for this cluster. When enabled, identities in the system, including service accounts, nodes, and controllers, will have statically granted permissions beyond those provided by the RBAC configuration or IAM. 
Defaults to false.`, - }, - - "enable_shielded_nodes": { - Type: resource_container_cluster_schema.TypeBool, - Optional: true, - Default: true, - Description: `Enable Shielded Nodes features on all nodes in this cluster. Defaults to true.`, - ConflictsWith: []string{"enable_autopilot"}, - }, - - "enable_autopilot": { - Type: resource_container_cluster_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Enable Autopilot for this cluster.`, - }, - - "authenticator_groups_config": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - MaxItems: 1, - Description: `Configuration for the Google Groups for GKE feature.`, - ConflictsWith: []string{"enable_autopilot"}, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "security_group": { - Type: resource_container_cluster_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com.`, - }, - }, - }, - }, - - "initial_node_count": { - Type: resource_container_cluster_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The number of nodes to create in this cluster's default node pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Must be set if node_pool is not set. 
If you're using google_container_node_pool objects with no default node pool, you'll need to set this to a value of at least 1, alongside setting remove_default_node_pool to true.`, - }, - - "logging_config": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: `Logging configuration for the cluster.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "enable_components": { - Type: resource_container_cluster_schema.TypeList, - Required: true, - Description: `GKE components exposing logs. Valid values include SYSTEM_COMPONENTS and WORKLOADS.`, - Elem: &resource_container_cluster_schema.Schema{ - Type: resource_container_cluster_schema.TypeString, - ValidateFunc: resource_container_cluster_validation.StringInSlice([]string{"SYSTEM_COMPONENTS", "WORKLOADS"}, false), - }, - }, - }, - }, - }, - - "logging_service": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: resource_container_cluster_validation.StringInSlice([]string{"logging.googleapis.com", "logging.googleapis.com/kubernetes", "none"}, false), - Description: `The logging service that the cluster should write logs to. Available options include logging.googleapis.com(Legacy Stackdriver), logging.googleapis.com/kubernetes(Stackdriver Kubernetes Engine Logging), and none. 
Defaults to logging.googleapis.com/kubernetes.`, - }, - - "maintenance_policy": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `The maintenance policy to use for the cluster.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "daily_maintenance_window": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - ExactlyOneOf: []string{ - "maintenance_policy.0.daily_maintenance_window", - "maintenance_policy.0.recurring_window", - }, - MaxItems: 1, - Description: `Time window specified for daily maintenance operations. Specify start_time in RFC3339 format "HH:MM”, where HH : [00-23] and MM : [00-59] GMT.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "start_time": { - Type: resource_container_cluster_schema.TypeString, - Required: true, - ValidateFunc: validateRFC3339Time, - DiffSuppressFunc: rfc3339TimeDiffSuppress, - }, - "duration": { - Type: resource_container_cluster_schema.TypeString, - Computed: true, - }, - }, - }, - }, - "recurring_window": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - MaxItems: 1, - ExactlyOneOf: []string{ - "maintenance_policy.0.daily_maintenance_window", - "maintenance_policy.0.recurring_window", - }, - Description: `Time window for recurring maintenance operations.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "start_time": { - Type: resource_container_cluster_schema.TypeString, - Required: true, - ValidateFunc: validateRFC3339Date, - }, - "end_time": { - Type: resource_container_cluster_schema.TypeString, - Required: true, - ValidateFunc: validateRFC3339Date, - }, - "recurrence": { - Type: resource_container_cluster_schema.TypeString, - Required: true, - DiffSuppressFunc: rfc5545RecurrenceDiffSuppress, - }, - }, - }, - }, - 
"maintenance_exclusion": { - Type: resource_container_cluster_schema.TypeSet, - Optional: true, - MaxItems: 3, - Description: `Exceptions to maintenance window. Non-emergency maintenance should not occur in these windows.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "exclusion_name": { - Type: resource_container_cluster_schema.TypeString, - Required: true, - }, - "start_time": { - Type: resource_container_cluster_schema.TypeString, - Required: true, - ValidateFunc: validateRFC3339Date, - }, - "end_time": { - Type: resource_container_cluster_schema.TypeString, - Required: true, - ValidateFunc: validateRFC3339Date, - }, - }, - }, - }, - }, - }, - }, - - "monitoring_config": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: `Monitoring configuration for the cluster.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "enable_components": { - Type: resource_container_cluster_schema.TypeList, - Required: true, - Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS.`, - Elem: &resource_container_cluster_schema.Schema{ - Type: resource_container_cluster_schema.TypeString, - ValidateFunc: resource_container_cluster_validation.StringInSlice([]string{"SYSTEM_COMPONENTS"}, false), - }, - }, - }, - }, - }, - - "confidential_nodes": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - MaxItems: 1, - Description: `Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. 
Warning: This configuration can't be changed (or added/removed) after cluster creation without deleting and recreating the entire cluster.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "enabled": { - Type: resource_container_cluster_schema.TypeBool, - Required: true, - ForceNew: true, - Description: `Whether Confidential Nodes feature is enabled for all nodes in this cluster.`, - }, - }, - }, - }, - - "master_auth": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - MaxItems: 1, - Computed: true, - Description: `The authentication information for accessing the Kubernetes master. Some values in this block are only returned by the API if your service account has permission to get credentials for your GKE cluster. If you see an unexpected diff unsetting your client cert, ensure you have the container.clusters.getCredentials permission.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "client_certificate_config": { - Type: resource_container_cluster_schema.TypeList, - MaxItems: 1, - Required: true, - ForceNew: true, - Description: `Whether client certificate authorization is enabled for this cluster.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "issue_client_certificate": { - Type: resource_container_cluster_schema.TypeBool, - Required: true, - ForceNew: true, - Description: `Whether client certificate authorization is enabled for this cluster.`, - }, - }, - }, - }, - - "client_certificate": { - Type: resource_container_cluster_schema.TypeString, - Computed: true, - Description: `Base64 encoded public certificate used by clients to authenticate to the cluster endpoint.`, - }, - - "client_key": { - Type: resource_container_cluster_schema.TypeString, - Computed: true, - Sensitive: true, - Description: `Base64 encoded private key 
used by clients to authenticate to the cluster endpoint.`, - }, - - "cluster_ca_certificate": { - Type: resource_container_cluster_schema.TypeString, - Computed: true, - Description: `Base64 encoded public certificate that is the root of trust for the cluster.`, - }, - }, - }, - }, - - "master_authorized_networks_config": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: networkConfig, - Description: `The desired configuration options for master authorized networks. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists).`, - }, - - "min_master_version": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Description: `The minimum version of the master. GKE will auto-update the master to new versions, so this does not guarantee the current master version--use the read-only master_version field to obtain that. If unset, the cluster's version will be set by GKE to the version of the most recent official release (which is not necessarily the latest version).`, - }, - - "monitoring_service": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: resource_container_cluster_validation.StringInSlice([]string{"monitoring.googleapis.com", "monitoring.googleapis.com/kubernetes", "none"}, false), - Description: `The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com(Legacy Stackdriver), monitoring.googleapis.com/kubernetes(Stackdriver Kubernetes Engine Monitoring), and none. 
Defaults to monitoring.googleapis.com/kubernetes.`, - }, - - "network": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Default: "default", - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name or self_link of the Google Compute Engine network to which the cluster is connected. For Shared VPC, set this to the self link of the shared network.`, - }, - - "network_policy": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: `Configuration options for the NetworkPolicy feature.`, - ConflictsWith: []string{"enable_autopilot"}, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "enabled": { - Type: resource_container_cluster_schema.TypeBool, - Required: true, - Description: `Whether network policy is enabled on the cluster.`, - }, - "provider": { - Type: resource_container_cluster_schema.TypeString, - Default: "PROVIDER_UNSPECIFIED", - Optional: true, - ValidateFunc: resource_container_cluster_validation.StringInSlice([]string{"PROVIDER_UNSPECIFIED", "CALICO"}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("PROVIDER_UNSPECIFIED"), - Description: `The selected network policy provider. Defaults to PROVIDER_UNSPECIFIED.`, - }, - }, - }, - }, - - "node_config": clusterSchemaNodeConfig(), - - "node_pool": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - Computed: true, - ForceNew: true, - Elem: &resource_container_cluster_schema.Resource{ - Schema: schemaNodePool, - }, - Description: `List of node pools associated with this cluster. See google_container_node_pool for schema. Warning: node pools defined inside a cluster can't be changed (or added/removed) after cluster creation without deleting and recreating the entire cluster. 
Unless you absolutely need the ability to say "these are the only node pools associated with this cluster", use the google_container_node_pool resource instead of this property.`, - ConflictsWith: []string{"enable_autopilot"}, - }, - - "node_version": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Computed: true, - Description: `The Kubernetes version on the nodes. Must either be unset or set to the same value as min_master_version on create. Defaults to the default version set by GKE which is not necessarily the latest version. This only affects nodes in the default node pool. While a fuzzy version can be specified, it's recommended that you specify explicit versions as Terraform will see spurious diffs when fuzzy versions are used. See the google_container_engine_versions data source's version_prefix field to approximate fuzzy versions in a Terraform-compatible way. To update nodes in other node pools, use the version attribute on the node pool.`, - }, - - "project": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, - }, - - "subnetwork": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name or self_link of the Google Compute Engine subnetwork in which the cluster's instances are launched.`, - }, - - "self_link": { - Type: resource_container_cluster_schema.TypeString, - Computed: true, - Description: `Server-defined URL for the resource.`, - }, - - "endpoint": { - Type: resource_container_cluster_schema.TypeString, - Computed: true, - Description: `The IP address of this cluster's Kubernetes master.`, - }, - - "master_version": { - Type: resource_container_cluster_schema.TypeString, - Computed: true, - Description: `The current version of the master in the cluster. This may be different than the min_master_version set in the config if the master has been updated by GKE.`, - }, - - "services_ipv4_cidr": { - Type: resource_container_cluster_schema.TypeString, - Computed: true, - Description: `The IP address range of the Kubernetes services in this cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are typically put in the last /16 from the container CIDR.`, - }, - - "ip_allocation_policy": { - Type: resource_container_cluster_schema.TypeList, - MaxItems: 1, - ForceNew: true, - Computed: true, - Optional: true, - ConflictsWith: []string{"cluster_ipv4_cidr"}, - Description: `Configuration of cluster IP allocation for VPC-native clusters. 
Adding this block enables IP aliasing, making the cluster VPC-native instead of routes-based.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - - "cluster_ipv4_cidr_block": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: ipAllocationRangeFields, - DiffSuppressFunc: cidrOrSizeDiffSuppress, - Description: `The IP address range for the cluster pod IPs. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use.`, - }, - - "services_ipv4_cidr_block": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: ipAllocationRangeFields, - DiffSuppressFunc: cidrOrSizeDiffSuppress, - Description: `The IP address range of the services IPs in this cluster. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use.`, - }, - - "cluster_secondary_range_name": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: ipAllocationCidrBlockFields, - Description: `The name of the existing secondary range in the cluster's subnetwork to use for pod IP addresses. 
Alternatively, cluster_ipv4_cidr_block can be used to automatically create a GKE-managed one.`, - }, - - "services_secondary_range_name": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: ipAllocationCidrBlockFields, - Description: `The name of the existing secondary range in the cluster's subnetwork to use for service ClusterIPs. Alternatively, services_ipv4_cidr_block can be used to automatically create a GKE-managed one.`, - }, - }, - }, - }, - - "networking_mode": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: resource_container_cluster_validation.StringInSlice([]string{"VPC_NATIVE", "ROUTES"}, false), - Description: `Determines whether alias IPs or routes will be used for pod IPs in the cluster.`, - }, - - "remove_default_node_pool": { - Type: resource_container_cluster_schema.TypeBool, - Optional: true, - Description: `If true, deletes the default node pool upon cluster creation. If you're using google_container_node_pool resources with no default node pool, this should be set to true, alongside setting initial_node_count to at least 1.`, - ConflictsWith: []string{"enable_autopilot"}, - }, - - "private_cluster_config": { - Type: resource_container_cluster_schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, - DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, - Description: `Configuration for private clusters, clusters with private nodes.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "enable_private_endpoint": { - Type: resource_container_cluster_schema.TypeBool, - Required: true, - ForceNew: true, - DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, - Description: `Enables the private cluster feature, creating a private endpoint on the cluster. 
In a private cluster, nodes only have RFC 1918 private addresses and communicate with the master's private endpoint via private networking.`, - }, - "enable_private_nodes": { - Type: resource_container_cluster_schema.TypeBool, - Optional: true, - ForceNew: true, - DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, - Description: `When true, the cluster's private endpoint is used as the cluster endpoint and access through the public endpoint is disabled. When false, either endpoint can be used. This field only applies to private clusters, when enable_private_nodes is true.`, - }, - "master_ipv4_cidr_block": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: orEmpty(resource_container_cluster_validation.IsCIDRNetwork(28, 28)), - Description: `The IP range in CIDR notation to use for the hosted master network. This range will be used for assigning private IP addresses to the cluster master(s) and the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network, and it must be a /28 subnet. See Private Cluster Limitations for more details. 
This field only applies to private clusters, when enable_private_nodes is true.`, - }, - "peering_name": { - Type: resource_container_cluster_schema.TypeString, - Computed: true, - Description: `The name of the peering between this cluster and the Google owned VPC.`, - }, - "private_endpoint": { - Type: resource_container_cluster_schema.TypeString, - Computed: true, - Description: `The internal IP address of this cluster's master endpoint.`, - }, - "public_endpoint": { - Type: resource_container_cluster_schema.TypeString, - Computed: true, - Description: `The external IP address of this cluster's master endpoint.`, - }, - "master_global_access_config": { - Type: resource_container_cluster_schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, - Description: "Controls cluster master global access settings.", - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "enabled": { - Type: resource_container_cluster_schema.TypeBool, - Required: true, - Description: `Whether the cluster master is accessible globally or not.`, - }, - }, - }, - }, - }, - }, - }, - - "resource_labels": { - Type: resource_container_cluster_schema.TypeMap, - Optional: true, - Elem: &resource_container_cluster_schema.Schema{Type: resource_container_cluster_schema.TypeString}, - Description: `The GCE resource labels (a map of key/value pairs) to be applied to the cluster.`, - }, - - "label_fingerprint": { - Type: resource_container_cluster_schema.TypeString, - Computed: true, - Description: `The fingerprint of the set of labels for this cluster.`, - }, - - "default_max_pods_per_node": { - Type: resource_container_cluster_schema.TypeInt, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The default maximum number of pods per node in this cluster. 
This doesn't work on "routes-based" clusters, clusters that don't have IP Aliasing enabled.`, - ConflictsWith: []string{"enable_autopilot"}, - }, - - "vertical_pod_autoscaling": { - Type: resource_container_cluster_schema.TypeList, - MaxItems: 1, - Optional: true, - Description: `Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "enabled": { - Type: resource_container_cluster_schema.TypeBool, - Required: true, - Description: `Enables vertical pod autoscaling.`, - }, - }, - }, - }, - "workload_identity_config": { - Type: resource_container_cluster_schema.TypeList, - MaxItems: 1, - Optional: true, - - Computed: true, - Description: `Configuration for the use of Kubernetes Service Accounts in GCP IAM policies.`, - ConflictsWith: []string{"enable_autopilot"}, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "workload_pool": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Description: "The workload pool to attach all Kubernetes service accounts to.", - }, - }, - }, - }, - - "database_encryption": { - Type: resource_container_cluster_schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, - Description: `Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". 
key_name is the name of a CloudKMS key.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "state": { - Type: resource_container_cluster_schema.TypeString, - Required: true, - ValidateFunc: resource_container_cluster_validation.StringInSlice([]string{"ENCRYPTED", "DECRYPTED"}, false), - Description: `ENCRYPTED or DECRYPTED.`, - }, - "key_name": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Description: `The key to use to encrypt/decrypt secrets.`, - }, - }, - }, - }, - - "release_channel": { - Type: resource_container_cluster_schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: `Configuration options for the Release channel feature, which provide more control over automatic upgrades of your GKE clusters. Note that removing this field from your config will not unenroll it. Instead, use the "UNSPECIFIED" channel.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "channel": { - Type: resource_container_cluster_schema.TypeString, - Required: true, - ValidateFunc: resource_container_cluster_validation.StringInSlice([]string{"UNSPECIFIED", "RAPID", "REGULAR", "STABLE"}, false), - Description: `The selected release channel. Accepted values are: -* UNSPECIFIED: Not set. -* RAPID: Weekly upgrade cadence; Early testers and developers who requires new features. -* REGULAR: Multiple per month upgrade cadence; Production users who need features not yet offered in the Stable channel. -* STABLE: Every few months upgrade cadence; Production users who need stability above all else, and for whom frequent upgrades are too risky.`, - }, - }, - }, - }, - - "tpu_ipv4_cidr_block": { - Computed: true, - Type: resource_container_cluster_schema.TypeString, - Description: `The IP address range of the Cloud TPUs in this cluster, in CIDR notation (e.g. 
1.2.3.4/29).`, - }, - - "default_snat_status": { - Type: resource_container_cluster_schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, - Description: `Whether the cluster disables default in-node sNAT rules. In-node sNAT rules will be disabled when defaultSnatStatus is disabled.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "disabled": { - Type: resource_container_cluster_schema.TypeBool, - Required: true, - Description: `When disabled is set to false, default IP masquerade rules will be applied to the nodes to prevent sNAT on cluster internal traffic.`, - }, - }, - }, - }, - - "datapath_provider": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Computed: true, - Description: `The desired datapath provider for this cluster. By default, uses the IPTables-based kube-proxy implementation.`, - ValidateFunc: resource_container_cluster_validation.StringInSlice([]string{"DATAPATH_PROVIDER_UNSPECIFIED", "LEGACY_DATAPATH", "ADVANCED_DATAPATH"}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("DATAPATH_PROVIDER_UNSPECIFIED"), - }, - - "enable_intranode_visibility": { - Type: resource_container_cluster_schema.TypeBool, - Optional: true, - Computed: true, - Description: `Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network.`, - ConflictsWith: []string{"enable_autopilot"}, - }, - "private_ipv6_google_access": { - Type: resource_container_cluster_schema.TypeString, - Optional: true, - Description: `The desired state of IPv6 connectivity to Google Services. 
By default, no private IPv6 access to or from Google Services (all access will be via IPv4).`, - Computed: true, - }, - - "resource_usage_export_config": { - Type: resource_container_cluster_schema.TypeList, - MaxItems: 1, - Optional: true, - Description: `Configuration for the ResourceUsageExportConfig feature.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "enable_network_egress_metering": { - Type: resource_container_cluster_schema.TypeBool, - Optional: true, - Default: false, - Description: `Whether to enable network egress metering for this cluster. If enabled, a daemonset will be created in the cluster to meter network egress traffic.`, - }, - "enable_resource_consumption_metering": { - Type: resource_container_cluster_schema.TypeBool, - Optional: true, - Default: true, - Description: `Whether to enable resource consumption metering on this cluster. When enabled, a table will be created in the resource export BigQuery dataset to store resource consumption data. The resulting table can be joined with the resource usage table or with BigQuery billing export. 
Defaults to true.`, - }, - "bigquery_destination": { - Type: resource_container_cluster_schema.TypeList, - MaxItems: 1, - Required: true, - Description: `Parameters for using BigQuery as the destination of resource usage export.`, - Elem: &resource_container_cluster_schema.Resource{ - Schema: map[string]*resource_container_cluster_schema.Schema{ - "dataset_id": { - Type: resource_container_cluster_schema.TypeString, - Required: true, - Description: `The ID of a BigQuery Dataset.`, - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func resourceNodeConfigEmptyGuestAccelerator(_ resource_container_cluster_context.Context, diff *resource_container_cluster_schema.ResourceDiff, meta interface{}) error { - old, new := diff.GetChange("node_config.0.guest_accelerator") - oList := old.([]interface{}) - nList := new.([]interface{}) - - if len(nList) == len(oList) || len(nList) == 0 { - return nil - } - var hasAcceleratorWithEmptyCount bool - - index := 0 - for i, item := range nList { - accel := item.(map[string]interface{}) - if accel["count"].(int) == 0 { - hasAcceleratorWithEmptyCount = true - - continue - } - if index >= len(oList) { - - return nil - } - if !resource_container_cluster_reflect.DeepEqual(nList[i], oList[index]) { - return nil - } - index += 1 - } - - if hasAcceleratorWithEmptyCount && index == len(oList) { - - err := diff.Clear("node_config.0.guest_accelerator") - if err != nil { - return err - } - } - - return nil -} - -func resourceContainerClusterCreate(d *resource_container_cluster_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - location, err := getLocation(d, config) - if err != nil { - return err - } - - clusterName := d.Get("name").(string) - - ipAllocationBlock, err := expandIPAllocationPolicy(d.Get("ip_allocation_policy"), 
d.Get("networking_mode").(string)) - if err != nil { - return err - } - - cluster := &resource_container_cluster_container.Cluster{ - Name: clusterName, - InitialNodeCount: int64(d.Get("initial_node_count").(int)), - MaintenancePolicy: expandMaintenancePolicy(d, meta), - MasterAuthorizedNetworksConfig: expandMasterAuthorizedNetworksConfig(d.Get("master_authorized_networks_config")), - InitialClusterVersion: d.Get("min_master_version").(string), - ClusterIpv4Cidr: d.Get("cluster_ipv4_cidr").(string), - Description: d.Get("description").(string), - LegacyAbac: &resource_container_cluster_container.LegacyAbac{ - Enabled: d.Get("enable_legacy_abac").(bool), - ForceSendFields: []string{"Enabled"}, - }, - LoggingService: d.Get("logging_service").(string), - MonitoringService: d.Get("monitoring_service").(string), - NetworkPolicy: expandNetworkPolicy(d.Get("network_policy")), - AddonsConfig: expandClusterAddonsConfig(d.Get("addons_config")), - EnableKubernetesAlpha: d.Get("enable_kubernetes_alpha").(bool), - IpAllocationPolicy: ipAllocationBlock, - Autoscaling: expandClusterAutoscaling(d.Get("cluster_autoscaling"), d), - BinaryAuthorization: &resource_container_cluster_container.BinaryAuthorization{ - Enabled: d.Get("enable_binary_authorization").(bool), - ForceSendFields: []string{"Enabled"}, - }, - Autopilot: &resource_container_cluster_container.Autopilot{ - Enabled: d.Get("enable_autopilot").(bool), - ForceSendFields: []string{"Enabled"}, - }, - ReleaseChannel: expandReleaseChannel(d.Get("release_channel")), - EnableTpu: d.Get("enable_tpu").(bool), - NetworkConfig: &resource_container_cluster_container.NetworkConfig{ - EnableIntraNodeVisibility: d.Get("enable_intranode_visibility").(bool), - DefaultSnatStatus: expandDefaultSnatStatus(d.Get("default_snat_status")), - DatapathProvider: d.Get("datapath_provider").(string), - PrivateIpv6GoogleAccess: d.Get("private_ipv6_google_access").(string), - }, - MasterAuth: expandMasterAuth(d.Get("master_auth")), - 
ConfidentialNodes: expandConfidentialNodes(d.Get("confidential_nodes")), - ResourceLabels: expandStringMap(d, "resource_labels"), - } - - v := d.Get("enable_shielded_nodes") - cluster.ShieldedNodes = &resource_container_cluster_container.ShieldedNodes{ - Enabled: v.(bool), - ForceSendFields: []string{"Enabled"}, - } - - if v, ok := d.GetOk("default_max_pods_per_node"); ok { - cluster.DefaultMaxPodsConstraint = expandDefaultMaxPodsConstraint(v) - } - - if v, ok := d.GetOk("node_version"); ok { - - mv := resource_container_cluster_strings.Split(cluster.InitialClusterVersion, "-")[0] - nv := resource_container_cluster_strings.Split(v.(string), "-")[0] - if mv != nv { - return resource_container_cluster_fmt.Errorf("node_version and min_master_version must be set to equivalent values on create") - } - } - - if v, ok := d.GetOk("node_locations"); ok { - locationsSet := v.(*resource_container_cluster_schema.Set) - if locationsSet.Contains(location) { - return resource_container_cluster_fmt.Errorf("when using a multi-zonal cluster, node_locations should not contain the original 'zone'") - } - - if isZone(location) { - locationsSet.Add(location) - } - cluster.Locations = convertStringSet(locationsSet) - } - - if v, ok := d.GetOk("network"); ok { - network, err := ParseNetworkFieldValue(v.(string), d, config) - if err != nil { - return err - } - cluster.Network = network.RelativeLink() - } - - if v, ok := d.GetOk("subnetwork"); ok { - subnetwork, err := parseRegionalFieldValue("subnetworks", v.(string), "project", "location", "location", d, config, true) - if err != nil { - return err - } - cluster.Subnetwork = subnetwork.RelativeLink() - } - - nodePoolsCount := d.Get("node_pool.#").(int) - if nodePoolsCount > 0 { - nodePools := make([]*resource_container_cluster_container.NodePool, 0, nodePoolsCount) - for i := 0; i < nodePoolsCount; i++ { - prefix := resource_container_cluster_fmt.Sprintf("node_pool.%d.", i) - nodePool, err := expandNodePool(d, prefix) - if err != nil { - 
return err - } - nodePools = append(nodePools, nodePool) - } - cluster.NodePools = nodePools - } else { - - cluster.NodeConfig = expandNodeConfig([]interface{}{}) - } - - if v, ok := d.GetOk("node_config"); ok { - cluster.NodeConfig = expandNodeConfig(v) - } - - if v, ok := d.GetOk("authenticator_groups_config"); ok { - cluster.AuthenticatorGroupsConfig = expandAuthenticatorGroupsConfig(v) - } - - if v, ok := d.GetOk("private_cluster_config"); ok { - cluster.PrivateClusterConfig = expandPrivateClusterConfig(v) - } - - if v, ok := d.GetOk("vertical_pod_autoscaling"); ok { - cluster.VerticalPodAutoscaling = expandVerticalPodAutoscaling(v) - } - - if v, ok := d.GetOk("database_encryption"); ok { - cluster.DatabaseEncryption = expandDatabaseEncryption(v) - } - - if v, ok := d.GetOk("workload_identity_config"); ok { - cluster.WorkloadIdentityConfig = expandWorkloadIdentityConfig(v) - } - - if v, ok := d.GetOk("resource_usage_export_config"); ok { - cluster.ResourceUsageExportConfig = expandResourceUsageExportConfig(v) - } - - if v, ok := d.GetOk("logging_config"); ok { - cluster.LoggingConfig = expandContainerClusterLoggingConfig(v) - } - - if v, ok := d.GetOk("monitoring_config"); ok { - cluster.MonitoringConfig = expandMonitoringConfig(v) - } - - req := &resource_container_cluster_container.CreateClusterRequest{ - Cluster: cluster, - } - - mutexKV.Lock(containerClusterMutexKey(project, location, clusterName)) - defer mutexKV.Unlock(containerClusterMutexKey(project, location, clusterName)) - - parent := resource_container_cluster_fmt.Sprintf("projects/%s/locations/%s", project, location) - var op *resource_container_cluster_container.Operation - err = retry(func() error { - clusterCreateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Create(parent, req) - if config.UserProjectOverride { - clusterCreateCall.Header().Add("X-Goog-User-Project", project) - } - op, err = clusterCreateCall.Do() - return err - }) - if err != nil { - return err - } - - 
d.SetId(containerClusterFullName(project, location, clusterName)) - - waitErr := containerOperationWait(config, op, project, location, "creating GKE cluster", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutCreate)) - if waitErr != nil { - - select { - case <-config.context.Done(): - resource_container_cluster_log.Printf("[DEBUG] Persisting %s so this operation can be resumed \n", op.Name) - if err := d.Set("operation", op.Name); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting operation: %s", err) - } - return nil - default: - - } - - clusterGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Get(containerClusterFullName(project, location, clusterName)) - if config.UserProjectOverride { - clusterGetCall.Header().Add("X-Goog-User-Project", project) - } - _, getErr := clusterGetCall.Do() - if getErr != nil { - resource_container_cluster_log.Printf("[WARN] Cluster %s was created in an error state and not found", clusterName) - d.SetId("") - } - - if deleteErr := cleanFailedContainerCluster(d, meta); deleteErr != nil { - resource_container_cluster_log.Printf("[WARN] Unable to clean up cluster from failed creation: %s", deleteErr) - - } else { - resource_container_cluster_log.Printf("[WARN] Verified failed creation of cluster %s was cleaned up", d.Id()) - d.SetId("") - } - - return waitErr - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s has been created", clusterName) - - if d.Get("remove_default_node_pool").(bool) { - parent := resource_container_cluster_fmt.Sprintf("%s/nodePools/%s", containerClusterFullName(project, location, clusterName), "default-pool") - err = retry(func() error { - clusterNodePoolDeleteCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Delete(parent) - if config.UserProjectOverride { - clusterNodePoolDeleteCall.Header().Add("X-Goog-User-Project", project) - } - op, err = clusterNodePoolDeleteCall.Do() - return err - }) - if err != 
nil { - return resource_container_cluster_errwrap.Wrapf("Error deleting default node pool: {{err}}", err) - } - err = containerOperationWait(config, op, project, location, "removing default node pool", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutCreate)) - if err != nil { - return resource_container_cluster_errwrap.Wrapf("Error while waiting to delete default node pool: {{err}}", err) - } - } - - if err := resourceContainerClusterRead(d, meta); err != nil { - return err - } - - state, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(resource_container_cluster_schema.TimeoutCreate)) - if err != nil { - return err - } - - if containerClusterRestingStates[state] == ErrorState { - return resource_container_cluster_fmt.Errorf("Cluster %s was created in the error state %q", clusterName, state) - } - - return nil -} - -func resourceContainerClusterRead(d *resource_container_cluster_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - location, err := getLocation(d, config) - if err != nil { - return err - } - - operation := d.Get("operation").(string) - if operation != "" { - resource_container_cluster_log.Printf("[DEBUG] in progress operation detected at %v, attempting to resume", operation) - op := &resource_container_cluster_container.Operation{ - Name: operation, - } - if err := d.Set("operation", ""); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting operation: %s", err) - } - waitErr := containerOperationWait(config, op, project, location, "resuming GKE cluster", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutRead)) - if waitErr != nil { - return waitErr - } - } - - clusterName := d.Get("name").(string) - name := containerClusterFullName(project, location, 
clusterName) - clusterGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Get(name) - if config.UserProjectOverride { - clusterGetCall.Header().Add("X-Goog-User-Project", project) - } - - cluster, err := clusterGetCall.Do() - if err != nil { - return handleNotFoundError(err, d, resource_container_cluster_fmt.Sprintf("Container Cluster %q", d.Get("name").(string))) - } - - if err := d.Set("name", cluster.Name); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("network_policy", flattenNetworkPolicy(cluster.NetworkPolicy)); err != nil { - return err - } - - if err := d.Set("location", cluster.Location); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting location: %s", err) - } - - locations := resource_container_cluster_schema.NewSet(resource_container_cluster_schema.HashString, convertStringArrToInterface(cluster.Locations)) - locations.Remove(cluster.Zone) - if err := d.Set("node_locations", locations); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting node_locations: %s", err) - } - - if err := d.Set("endpoint", cluster.Endpoint); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting endpoint: %s", err) - } - if err := d.Set("self_link", cluster.SelfLink); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting self link: %s", err) - } - if err := d.Set("maintenance_policy", flattenMaintenancePolicy(cluster.MaintenancePolicy)); err != nil { - return err - } - if err := d.Set("master_auth", flattenMasterAuth(cluster.MasterAuth)); err != nil { - return err - } - if err := d.Set("master_authorized_networks_config", flattenMasterAuthorizedNetworksConfig(cluster.MasterAuthorizedNetworksConfig)); err != nil { - return err - } - if err := d.Set("initial_node_count", cluster.InitialNodeCount); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting initial_node_count: %s", err) - 
} - if err := d.Set("master_version", cluster.CurrentMasterVersion); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting master_version: %s", err) - } - if err := d.Set("node_version", cluster.CurrentNodeVersion); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting node_version: %s", err) - } - if err := d.Set("cluster_ipv4_cidr", cluster.ClusterIpv4Cidr); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting cluster_ipv4_cidr: %s", err) - } - if err := d.Set("services_ipv4_cidr", cluster.ServicesIpv4Cidr); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting services_ipv4_cidr: %s", err) - } - if err := d.Set("description", cluster.Description); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("enable_kubernetes_alpha", cluster.EnableKubernetesAlpha); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting enable_kubernetes_alpha: %s", err) - } - if err := d.Set("enable_legacy_abac", cluster.LegacyAbac.Enabled); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting enable_legacy_abac: %s", err) - } - if err := d.Set("logging_service", cluster.LoggingService); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting logging_service: %s", err) - } - if err := d.Set("monitoring_service", cluster.MonitoringService); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting monitoring_service: %s", err) - } - if err := d.Set("network", cluster.NetworkConfig.Network); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting network: %s", err) - } - if err := d.Set("subnetwork", cluster.NetworkConfig.Subnetwork); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting subnetwork: %s", err) - } - if err := d.Set("cluster_autoscaling", flattenClusterAutoscaling(cluster.Autoscaling)); err != nil { - return 
err - } - if err := d.Set("enable_binary_authorization", cluster.BinaryAuthorization != nil && cluster.BinaryAuthorization.Enabled); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting enable_binary_authorization: %s", err) - } - if cluster.Autopilot != nil { - if err := d.Set("enable_autopilot", cluster.Autopilot.Enabled); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting enable_autopilot: %s", err) - } - } - if cluster.ShieldedNodes != nil { - if err := d.Set("enable_shielded_nodes", cluster.ShieldedNodes.Enabled); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting enable_shielded_nodes: %s", err) - } - } - if err := d.Set("release_channel", flattenReleaseChannel(cluster.ReleaseChannel)); err != nil { - return err - } - if err := d.Set("confidential_nodes", flattenConfidentialNodes(cluster.ConfidentialNodes)); err != nil { - return err - } - if err := d.Set("enable_tpu", cluster.EnableTpu); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting enable_tpu: %s", err) - } - if err := d.Set("tpu_ipv4_cidr_block", cluster.TpuIpv4CidrBlock); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting tpu_ipv4_cidr_block: %s", err) - } - if err := d.Set("datapath_provider", cluster.NetworkConfig.DatapathProvider); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting datapath_provider: %s", err) - } - if err := d.Set("default_snat_status", flattenDefaultSnatStatus(cluster.NetworkConfig.DefaultSnatStatus)); err != nil { - return err - } - if err := d.Set("enable_intranode_visibility", cluster.NetworkConfig.EnableIntraNodeVisibility); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting enable_intranode_visibility: %s", err) - } - if err := d.Set("private_ipv6_google_access", cluster.NetworkConfig.PrivateIpv6GoogleAccess); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting private_ipv6_google_access: 
%s", err) - } - if err := d.Set("authenticator_groups_config", flattenAuthenticatorGroupsConfig(cluster.AuthenticatorGroupsConfig)); err != nil { - return err - } - if cluster.DefaultMaxPodsConstraint != nil { - if err := d.Set("default_max_pods_per_node", cluster.DefaultMaxPodsConstraint.MaxPodsPerNode); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting default_max_pods_per_node: %s", err) - } - } - if err := d.Set("node_config", flattenNodeConfig(cluster.NodeConfig)); err != nil { - return err - } - if err := d.Set("project", project); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("addons_config", flattenClusterAddonsConfig(cluster.AddonsConfig)); err != nil { - return err - } - nps, err := flattenClusterNodePools(d, config, cluster.NodePools) - if err != nil { - return err - } - if err := d.Set("node_pool", nps); err != nil { - return err - } - - ipAllocPolicy, err := flattenIPAllocationPolicy(cluster, d, config) - if err != nil { - return err - } - if err := d.Set("ip_allocation_policy", ipAllocPolicy); err != nil { - return err - } - - if err := d.Set("private_cluster_config", flattenPrivateClusterConfig(cluster.PrivateClusterConfig)); err != nil { - return err - } - - if err := d.Set("vertical_pod_autoscaling", flattenVerticalPodAutoscaling(cluster.VerticalPodAutoscaling)); err != nil { - return err - } - - if err := d.Set("workload_identity_config", flattenWorkloadIdentityConfig(cluster.WorkloadIdentityConfig, d, config)); err != nil { - return err - } - - if err := d.Set("database_encryption", flattenDatabaseEncryption(cluster.DatabaseEncryption)); err != nil { - return err - } - - if err := d.Set("resource_labels", cluster.ResourceLabels); err != nil { - return resource_container_cluster_fmt.Errorf("Error setting resource_labels: %s", err) - } - if err := d.Set("label_fingerprint", cluster.LabelFingerprint); err != nil { - return 
resource_container_cluster_fmt.Errorf("Error setting label_fingerprint: %s", err) - } - - if err := d.Set("resource_usage_export_config", flattenResourceUsageExportConfig(cluster.ResourceUsageExportConfig)); err != nil { - return err - } - - if err := d.Set("logging_config", flattenContainerClusterLoggingConfig(cluster.LoggingConfig)); err != nil { - return err - } - - if err := d.Set("monitoring_config", flattenMonitoringConfig(cluster.MonitoringConfig)); err != nil { - return err - } - - return nil -} - -func resourceContainerClusterUpdate(d *resource_container_cluster_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - location, err := getLocation(d, config) - if err != nil { - return err - } - - clusterName := d.Get("name").(string) - - if _, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(resource_container_cluster_schema.TimeoutUpdate)); err != nil { - return err - } - - d.Partial(true) - - lockKey := containerClusterMutexKey(project, location, clusterName) - - updateFunc := func(req *resource_container_cluster_container.UpdateClusterRequest, updateDescription string) func() error { - return func() error { - name := containerClusterFullName(project, location, clusterName) - clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) - if config.UserProjectOverride { - clusterUpdateCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterUpdateCall.Do() - if err != nil { - return err - } - - return containerOperationWait(config, op, project, location, updateDescription, userAgent, d.Timeout(resource_container_cluster_schema.TimeoutUpdate)) - } - } - - if d.HasChange("master_authorized_networks_config") { - c := 
d.Get("master_authorized_networks_config") - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredMasterAuthorizedNetworksConfig: expandMasterAuthorizedNetworksConfig(c), - }, - } - - updateF := updateFunc(req, "updating GKE cluster master authorized networks") - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - resource_container_cluster_log.Printf("[INFO] GKE cluster %s master authorized networks config has been updated", d.Id()) - } - - if d.HasChange("addons_config") { - if ac, ok := d.GetOk("addons_config"); ok { - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredAddonsConfig: expandClusterAddonsConfig(ac), - }, - } - - updateF := updateFunc(req, "updating GKE cluster addons") - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s addons have been updated", d.Id()) - } - } - - if d.HasChange("cluster_autoscaling") { - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredClusterAutoscaling: expandClusterAutoscaling(d.Get("cluster_autoscaling"), d), - }} - - updateF := updateFunc(req, "updating GKE cluster autoscaling") - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s's cluster-wide autoscaling has been updated", d.Id()) - } - - if d.HasChange("enable_binary_authorization") { - enabled := d.Get("enable_binary_authorization").(bool) - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredBinaryAuthorization: &resource_container_cluster_container.BinaryAuthorization{ - Enabled: enabled, - ForceSendFields: []string{"Enabled"}, - }, 
- }, - } - - updateF := updateFunc(req, "updating GKE binary authorization") - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s's binary authorization has been updated to %v", d.Id(), enabled) - } - - if d.HasChange("enable_shielded_nodes") { - enabled := d.Get("enable_shielded_nodes").(bool) - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredShieldedNodes: &resource_container_cluster_container.ShieldedNodes{ - Enabled: enabled, - ForceSendFields: []string{"Enabled"}, - }, - }, - } - - updateF := updateFunc(req, "updating GKE shielded nodes") - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s's shielded nodes has been updated to %v", d.Id(), enabled) - } - - if d.HasChange("release_channel") { - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredReleaseChannel: expandReleaseChannel(d.Get("release_channel")), - }, - } - updateF := func() error { - resource_container_cluster_log.Println("[DEBUG] updating release_channel") - name := containerClusterFullName(project, location, clusterName) - clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) - if config.UserProjectOverride { - clusterUpdateCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterUpdateCall.Do() - if err != nil { - return err - } - - err = containerOperationWait(config, op, project, location, "updating Release Channel", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutUpdate)) - resource_container_cluster_log.Println("[DEBUG] done updating release_channel") - return err - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - 
resource_container_cluster_log.Printf("[INFO] GKE cluster %s Release Channel has been updated to %#v", d.Id(), req.Update.DesiredReleaseChannel) - } - - if d.HasChange("enable_intranode_visibility") { - enabled := d.Get("enable_intranode_visibility").(bool) - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredIntraNodeVisibilityConfig: &resource_container_cluster_container.IntraNodeVisibilityConfig{ - Enabled: enabled, - ForceSendFields: []string{"Enabled"}, - }, - }, - } - updateF := func() error { - resource_container_cluster_log.Println("[DEBUG] updating enable_intranode_visibility") - name := containerClusterFullName(project, location, clusterName) - clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) - if config.UserProjectOverride { - clusterUpdateCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterUpdateCall.Do() - if err != nil { - return err - } - - err = containerOperationWait(config, op, project, location, "updating GKE Intra Node Visibility", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutUpdate)) - resource_container_cluster_log.Println("[DEBUG] done updating enable_intranode_visibility") - return err - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s Intra Node Visibility has been updated to %v", d.Id(), enabled) - } - - if d.HasChange("private_ipv6_google_access") { - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredPrivateIpv6GoogleAccess: d.Get("private_ipv6_google_access").(string), - }, - } - updateF := func() error { - resource_container_cluster_log.Println("[DEBUG] updating private_ipv6_google_access") - name := containerClusterFullName(project, location, clusterName) - clusterUpdateCall := 
config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) - if config.UserProjectOverride { - clusterUpdateCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterUpdateCall.Do() - if err != nil { - return err - } - - err = containerOperationWait(config, op, project, location, "updating GKE Private IPv6 Google Access", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutUpdate)) - resource_container_cluster_log.Println("[DEBUG] done updating private_ipv6_google_access") - return err - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s Private IPv6 Google Access has been updated", d.Id()) - } - - if d.HasChange("default_snat_status") { - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredDefaultSnatStatus: expandDefaultSnatStatus(d.Get("default_snat_status")), - }, - } - updateF := func() error { - resource_container_cluster_log.Println("[DEBUG] updating default_snat_status") - name := containerClusterFullName(project, location, clusterName) - clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) - if config.UserProjectOverride { - clusterUpdateCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterUpdateCall.Do() - if err != nil { - return err - } - - err = containerOperationWait(config, op, project, location, "updating GKE Default SNAT status", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutUpdate)) - resource_container_cluster_log.Println("[DEBUG] done updating default_snat_status") - return err - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s Default SNAT status has been updated", d.Id()) - } - - if d.HasChange("maintenance_policy") { - req := 
&resource_container_cluster_container.SetMaintenancePolicyRequest{ - MaintenancePolicy: expandMaintenancePolicy(d, meta), - } - - updateF := func() error { - name := containerClusterFullName(project, location, clusterName) - clusterSetMaintenancePolicyCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.SetMaintenancePolicy(name, req) - if config.UserProjectOverride { - clusterSetMaintenancePolicyCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterSetMaintenancePolicyCall.Do() - - if err != nil { - return err - } - - return containerOperationWait(config, op, project, location, "updating GKE cluster maintenance policy", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutUpdate)) - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s maintenance policy has been updated", d.Id()) - } - - if d.HasChange("node_locations") { - azSetOldI, azSetNewI := d.GetChange("node_locations") - azSetNew := azSetNewI.(*resource_container_cluster_schema.Set) - azSetOld := azSetOldI.(*resource_container_cluster_schema.Set) - if azSetNew.Contains(location) { - return resource_container_cluster_fmt.Errorf("for multi-zonal clusters, node_locations should not contain the primary 'zone'") - } - - azSet := azSetOld.Union(azSetNew) - - if isZone(location) { - azSet.Add(location) - } - - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredLocations: convertStringSet(azSet), - }, - } - - updateF := updateFunc(req, "updating GKE cluster node locations") - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - if isZone(location) { - azSetNew.Add(location) - } - if !azSet.Equal(azSetNew) { - req = &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredLocations: 
convertStringSet(azSetNew), - }, - } - - updateF := updateFunc(req, "updating GKE cluster node locations") - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s node locations have been updated to %v", d.Id(), azSet.List()) - } - - if d.HasChange("enable_legacy_abac") { - enabled := d.Get("enable_legacy_abac").(bool) - req := &resource_container_cluster_container.SetLegacyAbacRequest{ - Enabled: enabled, - ForceSendFields: []string{"Enabled"}, - } - - updateF := func() error { - resource_container_cluster_log.Println("[DEBUG] updating enable_legacy_abac") - name := containerClusterFullName(project, location, clusterName) - clusterSetLegacyAbacCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.SetLegacyAbac(name, req) - if config.UserProjectOverride { - clusterSetLegacyAbacCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterSetLegacyAbacCall.Do() - if err != nil { - return err - } - - err = containerOperationWait(config, op, project, location, "updating GKE legacy ABAC", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutUpdate)) - resource_container_cluster_log.Println("[DEBUG] done updating enable_legacy_abac") - return err - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s legacy ABAC has been updated to %v", d.Id(), enabled) - } - - if d.HasChange("monitoring_service") || d.HasChange("logging_service") { - logging := d.Get("logging_service").(string) - monitoring := d.Get("monitoring_service").(string) - - updateF := func() error { - name := containerClusterFullName(project, location, clusterName) - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredMonitoringService: monitoring, - DesiredLoggingService: logging, - }, - } - clusterUpdateCall := 
config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) - if config.UserProjectOverride { - clusterUpdateCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterUpdateCall.Do() - if err != nil { - return err - } - - return containerOperationWait(config, op, project, location, "updating GKE logging+monitoring service", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutUpdate)) - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s: logging service has been updated to %s, monitoring service has been updated to %s", d.Id(), logging, monitoring) - } - - if d.HasChange("network_policy") { - np := d.Get("network_policy") - req := &resource_container_cluster_container.SetNetworkPolicyRequest{ - NetworkPolicy: expandNetworkPolicy(np), - } - - updateF := func() error { - resource_container_cluster_log.Println("[DEBUG] updating network_policy") - name := containerClusterFullName(project, location, clusterName) - clusterSetNetworkPolicyCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.SetNetworkPolicy(name, req) - if config.UserProjectOverride { - clusterSetNetworkPolicyCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterSetNetworkPolicyCall.Do() - if err != nil { - return err - } - - err = containerOperationWait(config, op, project, location, "updating GKE cluster network policy", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutUpdate)) - resource_container_cluster_log.Println("[DEBUG] done updating network_policy") - return err - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_cluster_log.Printf("[INFO] Network policy for GKE cluster %s has been updated", d.Id()) - - } - - if n, ok := d.GetOk("node_pool.#"); ok { - for i := 0; i < n.(int); i++ { - nodePoolInfo, err := extractNodePoolInformationFromCluster(d, config, 
clusterName) - if err != nil { - return err - } - - if err := nodePoolUpdate(d, meta, nodePoolInfo, resource_container_cluster_fmt.Sprintf("node_pool.%d.", i), d.Timeout(resource_container_cluster_schema.TimeoutUpdate)); err != nil { - return err - } - } - } - - if ver := d.Get("min_master_version").(string); d.HasChange("min_master_version") && ver != "" { - des, err := resource_container_cluster_version.NewVersion(ver) - if err != nil { - return err - } - - currentMasterVersion := d.Get("master_version").(string) - cur, err := resource_container_cluster_version.NewVersion(currentMasterVersion) - if err != nil { - return err - } - - if cur.LessThan(des) { - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredMasterVersion: ver, - }, - } - - updateF := updateFunc(req, "updating GKE master version") - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - resource_container_cluster_log.Printf("[INFO] GKE cluster %s: master has been updated to %s", d.Id(), ver) - } - } - - if d.HasChange("node_version") { - foundDefault := false - if n, ok := d.GetOk("node_pool.#"); ok { - for i := 0; i < n.(int); i++ { - key := resource_container_cluster_fmt.Sprintf("node_pool.%d.", i) - if d.Get(key+"name").(string) == "default-pool" { - desiredNodeVersion := d.Get("node_version").(string) - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredNodeVersion: desiredNodeVersion, - DesiredNodePoolId: "default-pool", - }, - } - updateF := updateFunc(req, "updating GKE default node pool node version") - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - resource_container_cluster_log.Printf("[INFO] GKE cluster %s: default node pool has been updated to %s", d.Id(), - desiredNodeVersion) - foundDefault = true - } - } - } - - if !foundDefault { - return 
resource_container_cluster_fmt.Errorf("node_version was updated but default-pool was not found. To update the version for a non-default pool, use the version attribute on that pool.") - } - } - - if d.HasChange("node_config") { - if d.HasChange("node_config.0.image_type") { - it := d.Get("node_config.0.image_type").(string) - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredImageType: it, - }, - } - - updateF := func() error { - name := containerClusterFullName(project, location, clusterName) - clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) - if config.UserProjectOverride { - clusterUpdateCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterUpdateCall.Do() - if err != nil { - return err - } - - return containerOperationWait(config, op, project, location, "updating GKE image type", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutUpdate)) - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s: image type has been updated to %s", d.Id(), it) - } - } - - if d.HasChange("vertical_pod_autoscaling") { - if ac, ok := d.GetOk("vertical_pod_autoscaling"); ok { - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredVerticalPodAutoscaling: expandVerticalPodAutoscaling(ac), - }, - } - - updateF := updateFunc(req, "updating GKE cluster vertical pod autoscaling") - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s vertical pod autoscaling has been updated", d.Id()) - } - } - - if d.HasChange("database_encryption") { - c := d.Get("database_encryption") - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: 
&resource_container_cluster_container.ClusterUpdate{ - DesiredDatabaseEncryption: expandDatabaseEncryption(c), - }, - } - - updateF := func() error { - name := containerClusterFullName(project, location, clusterName) - clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) - if config.UserProjectOverride { - clusterUpdateCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterUpdateCall.Do() - if err != nil { - return err - } - - return containerOperationWait(config, op, project, location, "updating GKE cluster database encryption config", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutUpdate)) - } - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - resource_container_cluster_log.Printf("[INFO] GKE cluster %s database encryption config has been updated", d.Id()) - } - - if d.HasChange("workload_identity_config") { - - req := &resource_container_cluster_container.UpdateClusterRequest{} - if v, ok := d.GetOk("workload_identity_config"); !ok { - req.Update = &resource_container_cluster_container.ClusterUpdate{ - DesiredWorkloadIdentityConfig: &resource_container_cluster_container.WorkloadIdentityConfig{ - WorkloadPool: "", - ForceSendFields: []string{"WorkloadPool"}, - }, - } - } else { - req.Update = &resource_container_cluster_container.ClusterUpdate{ - DesiredWorkloadIdentityConfig: expandWorkloadIdentityConfig(v), - } - } - - updateF := updateFunc(req, "updating GKE cluster workload identity config") - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s workload identity config has been updated", d.Id()) - } - - if d.HasChange("logging_config") { - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredLoggingConfig: expandContainerClusterLoggingConfig(d.Get("logging_config")), - }, - } - updateF 
:= updateFunc(req, "updating GKE cluster logging config") - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s logging config has been updated", d.Id()) - } - - if d.HasChange("monitoring_config") { - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredMonitoringConfig: expandMonitoringConfig(d.Get("monitoring_config")), - }, - } - updateF := updateFunc(req, "updating GKE cluster monitoring config") - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s monitoring config has been updated", d.Id()) - } - - if d.HasChange("resource_labels") { - resourceLabels := d.Get("resource_labels").(map[string]interface{}) - labelFingerprint := d.Get("label_fingerprint").(string) - req := &resource_container_cluster_container.SetLabelsRequest{ - ResourceLabels: convertStringMap(resourceLabels), - LabelFingerprint: labelFingerprint, - } - updateF := func() error { - name := containerClusterFullName(project, location, clusterName) - clusterSetResourceLabelsCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.SetResourceLabels(name, req) - if config.UserProjectOverride { - clusterSetResourceLabelsCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterSetResourceLabelsCall.Do() - if err != nil { - return err - } - - return containerOperationWait(config, op, project, location, "updating GKE resource labels", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutUpdate)) - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - } - - if d.HasChange("remove_default_node_pool") && d.Get("remove_default_node_pool").(bool) { - name := resource_container_cluster_fmt.Sprintf("%s/nodePools/%s", containerClusterFullName(project, location, clusterName), "default-pool") - 
clusterNodePoolDeleteCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Delete(name) - if config.UserProjectOverride { - clusterNodePoolDeleteCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterNodePoolDeleteCall.Do() - if err != nil { - if !isGoogleApiErrorWithCode(err, 404) { - return resource_container_cluster_errwrap.Wrapf("Error deleting default node pool: {{err}}", err) - } - resource_container_cluster_log.Printf("[WARN] Container cluster %q default node pool already removed, no change", d.Id()) - } else { - err = containerOperationWait(config, op, project, location, "removing default node pool", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutUpdate)) - if err != nil { - return resource_container_cluster_errwrap.Wrapf("Error deleting default node pool: {{err}}", err) - } - } - } - - if d.HasChange("resource_usage_export_config") { - c := d.Get("resource_usage_export_config") - req := &resource_container_cluster_container.UpdateClusterRequest{ - Update: &resource_container_cluster_container.ClusterUpdate{ - DesiredResourceUsageExportConfig: expandResourceUsageExportConfig(c), - }, - } - - updateF := func() error { - name := containerClusterFullName(project, location, clusterName) - clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) - if config.UserProjectOverride { - clusterUpdateCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterUpdateCall.Do() - if err != nil { - return err - } - - return containerOperationWait(config, op, project, location, "updating GKE cluster resource usage export config", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutUpdate)) - } - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - resource_container_cluster_log.Printf("[INFO] GKE cluster %s resource usage export config has been updated", d.Id()) - } - - d.Partial(false) - - if _, err := 
containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(resource_container_cluster_schema.TimeoutUpdate)); err != nil { - return err - } - - return resourceContainerClusterRead(d, meta) -} - -func resourceContainerClusterDelete(d *resource_container_cluster_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - location, err := getLocation(d, config) - if err != nil { - return err - } - - clusterName := d.Get("name").(string) - - if _, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(resource_container_cluster_schema.TimeoutDelete)); err != nil { - if isGoogleApiErrorWithCode(err, 404) { - resource_container_cluster_log.Printf("[INFO] GKE cluster %s doesn't exist to delete", d.Id()) - return nil - } - return err - } - - resource_container_cluster_log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string)) - mutexKV.Lock(containerClusterMutexKey(project, location, clusterName)) - defer mutexKV.Unlock(containerClusterMutexKey(project, location, clusterName)) - - var op *resource_container_cluster_container.Operation - var count = 0 - err = resource_container_cluster_resource.Retry(30*resource_container_cluster_time.Second, func() *resource_container_cluster_resource.RetryError { - count++ - - name := containerClusterFullName(project, location, clusterName) - clusterDeleteCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Delete(name) - if config.UserProjectOverride { - clusterDeleteCall.Header().Add("X-Goog-User-Project", project) - } - op, err = clusterDeleteCall.Do() - - if err != nil { - resource_container_cluster_log.Printf("[WARNING] Cluster is still not ready to delete, retrying %s", clusterName) - return 
resource_container_cluster_resource.RetryableError(err) - } - - if count == 15 { - return resource_container_cluster_resource.NonRetryableError(resource_container_cluster_fmt.Errorf("Error retrying to delete cluster %s", clusterName)) - } - return nil - }) - - if err != nil { - return resource_container_cluster_fmt.Errorf("Error deleting Cluster: %s", err) - } - - waitErr := containerOperationWait(config, op, project, location, "deleting GKE cluster", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutDelete)) - if waitErr != nil { - return waitErr - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s has been deleted", d.Id()) - - d.SetId("") - - return nil -} - -func cleanFailedContainerCluster(d *resource_container_cluster_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - - location, err := getLocation(d, config) - if err != nil { - return err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - clusterName := d.Get("name").(string) - fullName := containerClusterFullName(project, location, clusterName) - - resource_container_cluster_log.Printf("[DEBUG] Cleaning up failed GKE cluster %s", d.Get("name").(string)) - clusterDeleteCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Delete(fullName) - if config.UserProjectOverride { - clusterDeleteCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterDeleteCall.Do() - if err != nil { - return handleNotFoundError(err, d, resource_container_cluster_fmt.Sprintf("Container Cluster %q", d.Get("name").(string))) - } - - waitErr := containerOperationWait(config, op, project, location, "deleting GKE cluster", userAgent, d.Timeout(resource_container_cluster_schema.TimeoutDelete)) - if waitErr != nil { - return waitErr - } - - resource_container_cluster_log.Printf("[INFO] GKE cluster %s has been deleted", 
d.Id()) - d.SetId("") - return nil -} - -var containerClusterRestingStates = RestingStates{ - "RUNNING": ReadyState, - "DEGRADED": ErrorState, - "ERROR": ErrorState, -} - -func containerClusterAwaitRestingState(config *Config, project, location, clusterName, userAgent string, timeout resource_container_cluster_time.Duration) (state string, err error) { - err = resource_container_cluster_resource.Retry(timeout, func() *resource_container_cluster_resource.RetryError { - name := containerClusterFullName(project, location, clusterName) - clusterGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Get(name) - if config.UserProjectOverride { - clusterGetCall.Header().Add("X-Goog-User-Project", project) - } - cluster, gErr := clusterGetCall.Do() - if gErr != nil { - return resource_container_cluster_resource.NonRetryableError(gErr) - } - - state = cluster.Status - - switch stateType := containerClusterRestingStates[cluster.Status]; stateType { - case ReadyState: - resource_container_cluster_log.Printf("[DEBUG] Cluster %q has status %q with message %q.", clusterName, state, cluster.StatusMessage) - return nil - case ErrorState: - resource_container_cluster_log.Printf("[DEBUG] Cluster %q has error state %q with message %q.", clusterName, state, cluster.StatusMessage) - return nil - default: - return resource_container_cluster_resource.RetryableError(resource_container_cluster_fmt.Errorf("Cluster %q has state %q with message %q", clusterName, state, cluster.StatusMessage)) - } - }) - - return state, err -} - -func expandClusterAddonsConfig(configured interface{}) *resource_container_cluster_container.AddonsConfig { - l := configured.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil - } - - config := l[0].(map[string]interface{}) - ac := &resource_container_cluster_container.AddonsConfig{} - - if v, ok := config["http_load_balancing"]; ok && len(v.([]interface{})) > 0 { - addon := v.([]interface{})[0].(map[string]interface{}) - 
ac.HttpLoadBalancing = &resource_container_cluster_container.HttpLoadBalancing{ - Disabled: addon["disabled"].(bool), - ForceSendFields: []string{"Disabled"}, - } - } - - if v, ok := config["horizontal_pod_autoscaling"]; ok && len(v.([]interface{})) > 0 { - addon := v.([]interface{})[0].(map[string]interface{}) - ac.HorizontalPodAutoscaling = &resource_container_cluster_container.HorizontalPodAutoscaling{ - Disabled: addon["disabled"].(bool), - ForceSendFields: []string{"Disabled"}, - } - } - - if v, ok := config["network_policy_config"]; ok && len(v.([]interface{})) > 0 { - addon := v.([]interface{})[0].(map[string]interface{}) - ac.NetworkPolicyConfig = &resource_container_cluster_container.NetworkPolicyConfig{ - Disabled: addon["disabled"].(bool), - ForceSendFields: []string{"Disabled"}, - } - } - - if v, ok := config["cloudrun_config"]; ok && len(v.([]interface{})) > 0 { - addon := v.([]interface{})[0].(map[string]interface{}) - ac.CloudRunConfig = &resource_container_cluster_container.CloudRunConfig{ - Disabled: addon["disabled"].(bool), - ForceSendFields: []string{"Disabled"}, - } - if addon["load_balancer_type"] != "" { - ac.CloudRunConfig.LoadBalancerType = addon["load_balancer_type"].(string) - } - } - - return ac -} - -func expandIPAllocationPolicy(configured interface{}, networkingMode string) (*resource_container_cluster_container.IPAllocationPolicy, error) { - l := configured.([]interface{}) - if len(l) == 0 || l[0] == nil { - if networkingMode == "VPC_NATIVE" { - return nil, resource_container_cluster_fmt.Errorf("`ip_allocation_policy` block is required for VPC_NATIVE clusters.") - } - return &resource_container_cluster_container.IPAllocationPolicy{ - UseIpAliases: false, - ForceSendFields: []string{"UseIpAliases"}, - }, nil - } - - config := l[0].(map[string]interface{}) - return &resource_container_cluster_container.IPAllocationPolicy{ - UseIpAliases: networkingMode == "VPC_NATIVE" || networkingMode == "", - ClusterIpv4CidrBlock: 
config["cluster_ipv4_cidr_block"].(string), - ServicesIpv4CidrBlock: config["services_ipv4_cidr_block"].(string), - - ClusterSecondaryRangeName: config["cluster_secondary_range_name"].(string), - ServicesSecondaryRangeName: config["services_secondary_range_name"].(string), - ForceSendFields: []string{"UseIpAliases"}, - UseRoutes: networkingMode == "ROUTES", - }, nil -} - -func expandMaintenancePolicy(d *resource_container_cluster_schema.ResourceData, meta interface{}) *resource_container_cluster_container.MaintenancePolicy { - config := meta.(*Config) - - project, _ := getProject(d, config) - location, _ := getLocation(d, config) - clusterName := d.Get("name").(string) - name := containerClusterFullName(project, location, clusterName) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil - } - clusterGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Get(name) - if config.UserProjectOverride { - clusterGetCall.Header().Add("X-Goog-User-Project", project) - } - cluster, _ := clusterGetCall.Do() - resourceVersion := "" - exclusions := make(map[string]resource_container_cluster_container.TimeWindow) - if cluster != nil && cluster.MaintenancePolicy != nil { - - resourceVersion = cluster.MaintenancePolicy.ResourceVersion - - if cluster.MaintenancePolicy.Window != nil && cluster.MaintenancePolicy.Window.MaintenanceExclusions != nil { - exclusions = cluster.MaintenancePolicy.Window.MaintenanceExclusions - } - } - - configured := d.Get("maintenance_policy") - l := configured.([]interface{}) - if len(l) == 0 || l[0] == nil { - return &resource_container_cluster_container.MaintenancePolicy{ - ResourceVersion: resourceVersion, - Window: &resource_container_cluster_container.MaintenanceWindow{ - MaintenanceExclusions: exclusions, - }, - } - } - maintenancePolicy := l[0].(map[string]interface{}) - - if maintenanceExclusions, ok := maintenancePolicy["maintenance_exclusion"]; ok { - for k := range exclusions { - 
delete(exclusions, k) - } - for _, me := range maintenanceExclusions.(*resource_container_cluster_schema.Set).List() { - exclusion := me.(map[string]interface{}) - exclusions[exclusion["exclusion_name"].(string)] = resource_container_cluster_container.TimeWindow{ - StartTime: exclusion["start_time"].(string), - EndTime: exclusion["end_time"].(string), - } - } - } - - if dailyMaintenanceWindow, ok := maintenancePolicy["daily_maintenance_window"]; ok && len(dailyMaintenanceWindow.([]interface{})) > 0 { - dmw := dailyMaintenanceWindow.([]interface{})[0].(map[string]interface{}) - startTime := dmw["start_time"].(string) - return &resource_container_cluster_container.MaintenancePolicy{ - Window: &resource_container_cluster_container.MaintenanceWindow{ - MaintenanceExclusions: exclusions, - DailyMaintenanceWindow: &resource_container_cluster_container.DailyMaintenanceWindow{ - StartTime: startTime, - }, - }, - ResourceVersion: resourceVersion, - } - } - if recurringWindow, ok := maintenancePolicy["recurring_window"]; ok && len(recurringWindow.([]interface{})) > 0 { - rw := recurringWindow.([]interface{})[0].(map[string]interface{}) - return &resource_container_cluster_container.MaintenancePolicy{ - Window: &resource_container_cluster_container.MaintenanceWindow{ - MaintenanceExclusions: exclusions, - RecurringWindow: &resource_container_cluster_container.RecurringTimeWindow{ - Window: &resource_container_cluster_container.TimeWindow{ - StartTime: rw["start_time"].(string), - EndTime: rw["end_time"].(string), - }, - Recurrence: rw["recurrence"].(string), - }, - }, - ResourceVersion: resourceVersion, - } - } - return nil -} - -func expandClusterAutoscaling(configured interface{}, d *resource_container_cluster_schema.ResourceData) *resource_container_cluster_container.ClusterAutoscaling { - l, ok := configured.([]interface{}) - if !ok || l == nil || len(l) == 0 || l[0] == nil { - if v, ok := d.GetOk("enable_autopilot"); ok && v == true { - return nil - } - return 
&resource_container_cluster_container.ClusterAutoscaling{ - EnableNodeAutoprovisioning: false, - ForceSendFields: []string{"EnableNodeAutoprovisioning"}, - } - } - - config := l[0].(map[string]interface{}) - - var resourceLimits []*resource_container_cluster_container.ResourceLimit - if limits, ok := config["resource_limits"]; ok { - resourceLimits = make([]*resource_container_cluster_container.ResourceLimit, 0) - if lmts, ok := limits.([]interface{}); ok { - for _, v := range lmts { - limit := v.(map[string]interface{}) - resourceLimits = append(resourceLimits, - &resource_container_cluster_container.ResourceLimit{ - ResourceType: limit["resource_type"].(string), - - Minimum: int64(limit["minimum"].(int)), - Maximum: int64(limit["maximum"].(int)), - }) - } - } - } - return &resource_container_cluster_container.ClusterAutoscaling{ - EnableNodeAutoprovisioning: config["enabled"].(bool), - ResourceLimits: resourceLimits, - AutoprovisioningNodePoolDefaults: expandAutoProvisioningDefaults(config["auto_provisioning_defaults"], d), - } -} - -func expandAutoProvisioningDefaults(configured interface{}, d *resource_container_cluster_schema.ResourceData) *resource_container_cluster_container.AutoprovisioningNodePoolDefaults { - l, ok := configured.([]interface{}) - if !ok || l == nil || len(l) == 0 || l[0] == nil { - return &resource_container_cluster_container.AutoprovisioningNodePoolDefaults{} - } - config := l[0].(map[string]interface{}) - - npd := &resource_container_cluster_container.AutoprovisioningNodePoolDefaults{ - OauthScopes: convertStringArr(config["oauth_scopes"].([]interface{})), - ServiceAccount: config["service_account"].(string), - } - - return npd -} - -func expandAuthenticatorGroupsConfig(configured interface{}) *resource_container_cluster_container.AuthenticatorGroupsConfig { - l := configured.([]interface{}) - if len(l) == 0 { - return nil - } - result := &resource_container_cluster_container.AuthenticatorGroupsConfig{} - config := 
l[0].(map[string]interface{}) - if securityGroup, ok := config["security_group"]; ok { - result.Enabled = true - result.SecurityGroup = securityGroup.(string) - } - return result -} - -func expandConfidentialNodes(configured interface{}) *resource_container_cluster_container.ConfidentialNodes { - l := configured.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil - } - config := l[0].(map[string]interface{}) - return &resource_container_cluster_container.ConfidentialNodes{ - Enabled: config["enabled"].(bool), - } -} - -func expandMasterAuth(configured interface{}) *resource_container_cluster_container.MasterAuth { - l := configured.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil - } - - masterAuth := l[0].(map[string]interface{}) - result := &resource_container_cluster_container.MasterAuth{} - - if v, ok := masterAuth["client_certificate_config"]; ok { - if len(v.([]interface{})) > 0 { - clientCertificateConfig := masterAuth["client_certificate_config"].([]interface{})[0].(map[string]interface{}) - - result.ClientCertificateConfig = &resource_container_cluster_container.ClientCertificateConfig{ - IssueClientCertificate: clientCertificateConfig["issue_client_certificate"].(bool), - } - } - } - - return result -} - -func expandMasterAuthorizedNetworksConfig(configured interface{}) *resource_container_cluster_container.MasterAuthorizedNetworksConfig { - l := configured.([]interface{}) - if len(l) == 0 { - return &resource_container_cluster_container.MasterAuthorizedNetworksConfig{ - Enabled: false, - } - } - result := &resource_container_cluster_container.MasterAuthorizedNetworksConfig{ - Enabled: true, - } - if config, ok := l[0].(map[string]interface{}); ok { - if _, ok := config["cidr_blocks"]; ok { - cidrBlocks := config["cidr_blocks"].(*resource_container_cluster_schema.Set).List() - result.CidrBlocks = make([]*resource_container_cluster_container.CidrBlock, 0) - for _, v := range cidrBlocks { - cidrBlock := v.(map[string]interface{}) - 
result.CidrBlocks = append(result.CidrBlocks, &resource_container_cluster_container.CidrBlock{ - CidrBlock: cidrBlock["cidr_block"].(string), - DisplayName: cidrBlock["display_name"].(string), - }) - } - } - } - return result -} - -func expandNetworkPolicy(configured interface{}) *resource_container_cluster_container.NetworkPolicy { - l := configured.([]interface{}) - if len(l) == 0 { - return nil - } - result := &resource_container_cluster_container.NetworkPolicy{} - config := l[0].(map[string]interface{}) - if enabled, ok := config["enabled"]; ok && enabled.(bool) { - result.Enabled = true - if provider, ok := config["provider"]; ok { - result.Provider = provider.(string) - } - } - return result -} - -func expandPrivateClusterConfig(configured interface{}) *resource_container_cluster_container.PrivateClusterConfig { - l := configured.([]interface{}) - if len(l) == 0 { - return nil - } - config := l[0].(map[string]interface{}) - return &resource_container_cluster_container.PrivateClusterConfig{ - EnablePrivateEndpoint: config["enable_private_endpoint"].(bool), - EnablePrivateNodes: config["enable_private_nodes"].(bool), - MasterIpv4CidrBlock: config["master_ipv4_cidr_block"].(string), - MasterGlobalAccessConfig: expandPrivateClusterConfigMasterGlobalAccessConfig(config["master_global_access_config"]), - ForceSendFields: []string{"EnablePrivateEndpoint", "EnablePrivateNodes", "MasterIpv4CidrBlock", "MasterGlobalAccessConfig"}, - } -} - -func expandPrivateClusterConfigMasterGlobalAccessConfig(configured interface{}) *resource_container_cluster_container.PrivateClusterMasterGlobalAccessConfig { - l := configured.([]interface{}) - if len(l) == 0 { - return nil - } - config := l[0].(map[string]interface{}) - return &resource_container_cluster_container.PrivateClusterMasterGlobalAccessConfig{ - Enabled: config["enabled"].(bool), - ForceSendFields: []string{"Enabled"}, - } -} - -func expandVerticalPodAutoscaling(configured interface{}) 
*resource_container_cluster_container.VerticalPodAutoscaling { - l := configured.([]interface{}) - if len(l) == 0 { - return nil - } - config := l[0].(map[string]interface{}) - return &resource_container_cluster_container.VerticalPodAutoscaling{ - Enabled: config["enabled"].(bool), - } -} - -func expandDatabaseEncryption(configured interface{}) *resource_container_cluster_container.DatabaseEncryption { - l := configured.([]interface{}) - if len(l) == 0 { - return nil - } - config := l[0].(map[string]interface{}) - return &resource_container_cluster_container.DatabaseEncryption{ - State: config["state"].(string), - KeyName: config["key_name"].(string), - } -} - -func expandReleaseChannel(configured interface{}) *resource_container_cluster_container.ReleaseChannel { - l := configured.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil - } - config := l[0].(map[string]interface{}) - return &resource_container_cluster_container.ReleaseChannel{ - Channel: config["channel"].(string), - } -} - -func expandDefaultSnatStatus(configured interface{}) *resource_container_cluster_container.DefaultSnatStatus { - l := configured.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil - } - config := l[0].(map[string]interface{}) - return &resource_container_cluster_container.DefaultSnatStatus{ - Disabled: config["disabled"].(bool), - ForceSendFields: []string{"Disabled"}, - } - -} - -func expandWorkloadIdentityConfig(configured interface{}) *resource_container_cluster_container.WorkloadIdentityConfig { - l := configured.([]interface{}) - v := &resource_container_cluster_container.WorkloadIdentityConfig{} - - if len(l) == 0 || l[0] == nil { - return v - } - - config := l[0].(map[string]interface{}) - v.WorkloadPool = config["workload_pool"].(string) - - return v -} - -func expandDefaultMaxPodsConstraint(v interface{}) *resource_container_cluster_container.MaxPodsConstraint { - if v == nil { - return nil - } - - return 
&resource_container_cluster_container.MaxPodsConstraint{ - MaxPodsPerNode: int64(v.(int)), - } -} - -func expandResourceUsageExportConfig(configured interface{}) *resource_container_cluster_container.ResourceUsageExportConfig { - l := configured.([]interface{}) - if len(l) == 0 || l[0] == nil { - return &resource_container_cluster_container.ResourceUsageExportConfig{} - } - - resourceUsageConfig := l[0].(map[string]interface{}) - - result := &resource_container_cluster_container.ResourceUsageExportConfig{ - EnableNetworkEgressMetering: resourceUsageConfig["enable_network_egress_metering"].(bool), - ConsumptionMeteringConfig: &resource_container_cluster_container.ConsumptionMeteringConfig{ - Enabled: resourceUsageConfig["enable_resource_consumption_metering"].(bool), - ForceSendFields: []string{"Enabled"}, - }, - ForceSendFields: []string{"EnableNetworkEgressMetering"}, - } - if _, ok := resourceUsageConfig["bigquery_destination"]; ok { - destinationArr := resourceUsageConfig["bigquery_destination"].([]interface{}) - if len(destinationArr) > 0 && destinationArr[0] != nil { - bigqueryDestination := destinationArr[0].(map[string]interface{}) - if _, ok := bigqueryDestination["dataset_id"]; ok { - result.BigqueryDestination = &resource_container_cluster_container.BigQueryDestination{ - DatasetId: bigqueryDestination["dataset_id"].(string), - } - } - } - } - return result -} - -func expandContainerClusterLoggingConfig(configured interface{}) *resource_container_cluster_container.LoggingConfig { - l := configured.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil - } - - config := l[0].(map[string]interface{}) - return &resource_container_cluster_container.LoggingConfig{ - ComponentConfig: &resource_container_cluster_container.LoggingComponentConfig{ - EnableComponents: convertStringArr(config["enable_components"].([]interface{})), - }, - } -} - -func expandMonitoringConfig(configured interface{}) *resource_container_cluster_container.MonitoringConfig { - l 
:= configured.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil - } - - config := l[0].(map[string]interface{}) - return &resource_container_cluster_container.MonitoringConfig{ - ComponentConfig: &resource_container_cluster_container.MonitoringComponentConfig{ - EnableComponents: convertStringArr(config["enable_components"].([]interface{})), - }, - } -} - -func flattenConfidentialNodes(c *resource_container_cluster_container.ConfidentialNodes) []map[string]interface{} { - result := []map[string]interface{}{} - if c != nil { - result = append(result, map[string]interface{}{ - "enabled": c.Enabled, - }) - } - return result -} - -func flattenNetworkPolicy(c *resource_container_cluster_container.NetworkPolicy) []map[string]interface{} { - result := []map[string]interface{}{} - if c != nil { - result = append(result, map[string]interface{}{ - "enabled": c.Enabled, - "provider": c.Provider, - }) - } else { - - result = append(result, map[string]interface{}{ - "enabled": false, - "provider": "PROVIDER_UNSPECIFIED", - }) - } - return result -} - -func flattenClusterAddonsConfig(c *resource_container_cluster_container.AddonsConfig) []map[string]interface{} { - result := make(map[string]interface{}) - if c == nil { - return nil - } - if c.HorizontalPodAutoscaling != nil { - result["horizontal_pod_autoscaling"] = []map[string]interface{}{ - { - "disabled": c.HorizontalPodAutoscaling.Disabled, - }, - } - } - if c.HttpLoadBalancing != nil { - result["http_load_balancing"] = []map[string]interface{}{ - { - "disabled": c.HttpLoadBalancing.Disabled, - }, - } - } - if c.NetworkPolicyConfig != nil { - result["network_policy_config"] = []map[string]interface{}{ - { - "disabled": c.NetworkPolicyConfig.Disabled, - }, - } - } - - if c.CloudRunConfig != nil { - cloudRunConfig := map[string]interface{}{ - "disabled": c.CloudRunConfig.Disabled, - } - if c.CloudRunConfig.LoadBalancerType == "LOAD_BALANCER_TYPE_INTERNAL" { - - cloudRunConfig["load_balancer_type"] = 
"LOAD_BALANCER_TYPE_INTERNAL" - } - result["cloudrun_config"] = []map[string]interface{}{cloudRunConfig} - } - - return []map[string]interface{}{result} -} - -func flattenClusterNodePools(d *resource_container_cluster_schema.ResourceData, config *Config, c []*resource_container_cluster_container.NodePool) ([]map[string]interface{}, error) { - nodePools := make([]map[string]interface{}, 0, len(c)) - - for i, np := range c { - nodePool, err := flattenNodePool(d, config, np, resource_container_cluster_fmt.Sprintf("node_pool.%d.", i)) - if err != nil { - return nil, err - } - nodePools = append(nodePools, nodePool) - } - - return nodePools, nil -} - -func flattenAuthenticatorGroupsConfig(c *resource_container_cluster_container.AuthenticatorGroupsConfig) []map[string]interface{} { - if c == nil { - return nil - } - return []map[string]interface{}{ - { - "security_group": c.SecurityGroup, - }, - } -} - -func flattenPrivateClusterConfig(c *resource_container_cluster_container.PrivateClusterConfig) []map[string]interface{} { - if c == nil { - return nil - } - return []map[string]interface{}{ - { - "enable_private_endpoint": c.EnablePrivateEndpoint, - "enable_private_nodes": c.EnablePrivateNodes, - "master_ipv4_cidr_block": c.MasterIpv4CidrBlock, - "master_global_access_config": flattenPrivateClusterConfigMasterGlobalAccessConfig(c.MasterGlobalAccessConfig), - "peering_name": c.PeeringName, - "private_endpoint": c.PrivateEndpoint, - "public_endpoint": c.PublicEndpoint, - }, - } -} - -func flattenPrivateClusterConfigMasterGlobalAccessConfig(c *resource_container_cluster_container.PrivateClusterMasterGlobalAccessConfig) []map[string]interface{} { - return []map[string]interface{}{ - { - "enabled": c != nil && c.Enabled, - }, - } -} - -func flattenVerticalPodAutoscaling(c *resource_container_cluster_container.VerticalPodAutoscaling) []map[string]interface{} { - if c == nil { - return nil - } - return []map[string]interface{}{ - { - "enabled": c.Enabled, - }, - } -} - -func 
flattenReleaseChannel(c *resource_container_cluster_container.ReleaseChannel) []map[string]interface{} { - result := []map[string]interface{}{} - if c != nil && c.Channel != "" { - result = append(result, map[string]interface{}{ - "channel": c.Channel, - }) - } else { - - result = append(result, map[string]interface{}{ - "channel": "UNSPECIFIED", - }) - } - return result -} - -func flattenDefaultSnatStatus(c *resource_container_cluster_container.DefaultSnatStatus) []map[string]interface{} { - result := []map[string]interface{}{} - if c != nil { - result = append(result, map[string]interface{}{ - "disabled": c.Disabled, - }) - } - return result -} - -func flattenWorkloadIdentityConfig(c *resource_container_cluster_container.WorkloadIdentityConfig, d *resource_container_cluster_schema.ResourceData, config *Config) []map[string]interface{} { - if c == nil { - return nil - } - - return []map[string]interface{}{ - { - "workload_pool": c.WorkloadPool, - }, - } -} - -func flattenIPAllocationPolicy(c *resource_container_cluster_container.Cluster, d *resource_container_cluster_schema.ResourceData, config *Config) ([]map[string]interface{}, error) { - - if c == nil || c.IpAllocationPolicy == nil || !c.IpAllocationPolicy.UseIpAliases { - if err := d.Set("networking_mode", "ROUTES"); err != nil { - return nil, resource_container_cluster_fmt.Errorf("Error setting networking_mode: %s", err) - } - return nil, nil - } - if err := d.Set("networking_mode", "VPC_NATIVE"); err != nil { - return nil, resource_container_cluster_fmt.Errorf("Error setting networking_mode: %s", err) - } - - p := c.IpAllocationPolicy - return []map[string]interface{}{ - { - "cluster_ipv4_cidr_block": p.ClusterIpv4CidrBlock, - "services_ipv4_cidr_block": p.ServicesIpv4CidrBlock, - "cluster_secondary_range_name": p.ClusterSecondaryRangeName, - "services_secondary_range_name": p.ServicesSecondaryRangeName, - }, - }, nil -} - -func flattenMaintenancePolicy(mp 
*resource_container_cluster_container.MaintenancePolicy) []map[string]interface{} { - if mp == nil || mp.Window == nil { - return nil - } - - exclusions := []map[string]interface{}{} - if mp.Window.MaintenanceExclusions != nil { - for wName, window := range mp.Window.MaintenanceExclusions { - exclusions = append(exclusions, map[string]interface{}{ - "start_time": window.StartTime, - "end_time": window.EndTime, - "exclusion_name": wName, - }) - } - } - - if mp.Window.DailyMaintenanceWindow != nil { - return []map[string]interface{}{ - { - "daily_maintenance_window": []map[string]interface{}{ - { - "start_time": mp.Window.DailyMaintenanceWindow.StartTime, - "duration": mp.Window.DailyMaintenanceWindow.Duration, - }, - }, - "maintenance_exclusion": exclusions, - }, - } - } - if mp.Window.RecurringWindow != nil { - return []map[string]interface{}{ - { - "recurring_window": []map[string]interface{}{ - { - "start_time": mp.Window.RecurringWindow.Window.StartTime, - "end_time": mp.Window.RecurringWindow.Window.EndTime, - "recurrence": mp.Window.RecurringWindow.Recurrence, - }, - }, - "maintenance_exclusion": exclusions, - }, - } - } - return nil -} - -func flattenMasterAuth(ma *resource_container_cluster_container.MasterAuth) []map[string]interface{} { - if ma == nil { - return nil - } - masterAuth := []map[string]interface{}{ - { - "client_certificate": ma.ClientCertificate, - "client_key": ma.ClientKey, - "cluster_ca_certificate": ma.ClusterCaCertificate, - }, - } - - masterAuth[0]["client_certificate_config"] = []map[string]interface{}{ - { - "issue_client_certificate": len(ma.ClientCertificate) != 0, - }, - } - - return masterAuth -} - -func flattenClusterAutoscaling(a *resource_container_cluster_container.ClusterAutoscaling) []map[string]interface{} { - r := make(map[string]interface{}) - if a == nil { - r["enabled"] = false - return []map[string]interface{}{r} - } - - if a.EnableNodeAutoprovisioning { - resourceLimits := make([]interface{}, 0, len(a.ResourceLimits)) 
- for _, rl := range a.ResourceLimits { - resourceLimits = append(resourceLimits, map[string]interface{}{ - "resource_type": rl.ResourceType, - "minimum": rl.Minimum, - "maximum": rl.Maximum, - }) - } - r["resource_limits"] = resourceLimits - r["enabled"] = true - r["auto_provisioning_defaults"] = flattenAutoProvisioningDefaults(a.AutoprovisioningNodePoolDefaults) - } else { - r["enabled"] = false - } - - return []map[string]interface{}{r} -} - -func flattenAutoProvisioningDefaults(a *resource_container_cluster_container.AutoprovisioningNodePoolDefaults) []map[string]interface{} { - r := make(map[string]interface{}) - r["oauth_scopes"] = a.OauthScopes - r["service_account"] = a.ServiceAccount - - return []map[string]interface{}{r} -} - -func flattenMasterAuthorizedNetworksConfig(c *resource_container_cluster_container.MasterAuthorizedNetworksConfig) []map[string]interface{} { - if c == nil || !c.Enabled { - return nil - } - result := make(map[string]interface{}) - if c.Enabled { - cidrBlocks := make([]interface{}, 0, len(c.CidrBlocks)) - for _, v := range c.CidrBlocks { - cidrBlocks = append(cidrBlocks, map[string]interface{}{ - "cidr_block": v.CidrBlock, - "display_name": v.DisplayName, - }) - } - result["cidr_blocks"] = resource_container_cluster_schema.NewSet(resource_container_cluster_schema.HashResource(cidrBlockConfig), cidrBlocks) - } - return []map[string]interface{}{result} -} - -func flattenResourceUsageExportConfig(c *resource_container_cluster_container.ResourceUsageExportConfig) []map[string]interface{} { - if c == nil { - return nil - } - - enableResourceConsumptionMetering := false - if c.ConsumptionMeteringConfig != nil && c.ConsumptionMeteringConfig.Enabled == true { - enableResourceConsumptionMetering = true - } - - return []map[string]interface{}{ - { - "enable_network_egress_metering": c.EnableNetworkEgressMetering, - "enable_resource_consumption_metering": enableResourceConsumptionMetering, - "bigquery_destination": []map[string]interface{}{ - 
{"dataset_id": c.BigqueryDestination.DatasetId}, - }, - }, - } -} - -func flattenDatabaseEncryption(c *resource_container_cluster_container.DatabaseEncryption) []map[string]interface{} { - if c == nil { - return nil - } - return []map[string]interface{}{ - { - "state": c.State, - "key_name": c.KeyName, - }, - } -} - -func flattenContainerClusterLoggingConfig(c *resource_container_cluster_container.LoggingConfig) []map[string]interface{} { - if c == nil { - return nil - } - - return []map[string]interface{}{ - { - "enable_components": c.ComponentConfig.EnableComponents, - }, - } -} - -func flattenMonitoringConfig(c *resource_container_cluster_container.MonitoringConfig) []map[string]interface{} { - if c == nil { - return nil - } - - return []map[string]interface{}{ - { - "enable_components": c.ComponentConfig.EnableComponents, - }, - } -} - -func resourceContainerClusterStateImporter(d *resource_container_cluster_schema.ResourceData, meta interface{}) ([]*resource_container_cluster_schema.ResourceData, error) { - config := meta.(*Config) - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - if err := parseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/clusters/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config); err != nil { - return nil, err - } - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - location, err := getLocation(d, config) - if err != nil { - return nil, err - } - - clusterName := d.Get("name").(string) - - if err := d.Set("location", location); err != nil { - return nil, resource_container_cluster_fmt.Errorf("Error setting location: %s", err) - } - if _, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(resource_container_cluster_schema.TimeoutCreate)); err != nil { - return nil, err - } - - d.SetId(containerClusterFullName(project, location, clusterName)) - - return 
[]*resource_container_cluster_schema.ResourceData{d}, nil -} - -func containerClusterMutexKey(project, location, clusterName string) string { - return resource_container_cluster_fmt.Sprintf("google-container-cluster/%s/%s/%s", project, location, clusterName) -} - -func containerClusterFullName(project, location, cluster string) string { - return resource_container_cluster_fmt.Sprintf("projects/%s/locations/%s/clusters/%s", project, location, cluster) -} - -func extractNodePoolInformationFromCluster(d *resource_container_cluster_schema.ResourceData, config *Config, clusterName string) (*NodePoolInformation, error) { - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - location, err := getLocation(d, config) - if err != nil { - return nil, err - } - - return &NodePoolInformation{ - project: project, - location: location, - cluster: d.Get("name").(string), - }, nil -} - -func cidrOrSizeDiffSuppress(k, old, new string, d *resource_container_cluster_schema.ResourceData) bool { - - return resource_container_cluster_strings.HasPrefix(new, "/") && resource_container_cluster_strings.HasSuffix(old, new) -} - -func containerClusterAddedScopesSuppress(k, old, new string, d *resource_container_cluster_schema.ResourceData) bool { - o, n := d.GetChange("cluster_autoscaling.0.auto_provisioning_defaults.0.oauth_scopes") - if o == nil || n == nil { - return false - } - - addedScopes := []string{ - "https://www.googleapis.com/auth/monitoring.write", - } - - m := golangSetFromStringSlice(append(addedScopes, convertStringArr(n.([]interface{}))...)) - combined := stringSliceFromGolangSet(m) - - if len(combined) != len(convertStringArr(o.([]interface{}))) { - return false - } - - for _, i := range combined { - if stringInSlice(convertStringArr(o.([]interface{})), i) { - continue - } - - return false - } - - return true -} - -func containerClusterPrivateClusterConfigSuppress(k, old, new string, d *resource_container_cluster_schema.ResourceData) bool { - o, n 
:= d.GetChange("private_cluster_config.0.enable_private_endpoint") - suppressEndpoint := !o.(bool) && !n.(bool) - - o, n = d.GetChange("private_cluster_config.0.enable_private_nodes") - suppressNodes := !o.(bool) && !n.(bool) - - if k == "private_cluster_config.0.enable_private_endpoint" { - return suppressEndpoint - } else if k == "private_cluster_config.0.enable_private_nodes" { - return suppressNodes - } else if k == "private_cluster_config.#" { - return suppressEndpoint && suppressNodes - } - return false -} - -func containerClusterPrivateClusterConfigCustomDiff(_ resource_container_cluster_context.Context, d *resource_container_cluster_schema.ResourceDiff, meta interface{}) error { - pcc, ok := d.GetOk("private_cluster_config") - if !ok { - return nil - } - pccList := pcc.([]interface{}) - if len(pccList) == 0 { - return nil - } - config := pccList[0].(map[string]interface{}) - if config["enable_private_nodes"].(bool) { - block := config["master_ipv4_cidr_block"] - - blockValueKnown := d.NewValueKnown("private_cluster_config.0.master_ipv4_cidr_block") - - if blockValueKnown && (block == nil || block == "") { - return resource_container_cluster_fmt.Errorf("master_ipv4_cidr_block must be set if enable_private_nodes is true") - } - } else { - block := config["master_ipv4_cidr_block"] - if block != nil && block != "" { - return resource_container_cluster_fmt.Errorf("master_ipv4_cidr_block can only be set if enable_private_nodes is true") - } - } - return nil -} - -func containerClusterAutopilotCustomizeDiff(_ resource_container_cluster_context.Context, d *resource_container_cluster_schema.ResourceDiff, meta interface{}) error { - if d.HasChange("enable_autopilot") && d.Get("enable_autopilot").(bool) { - if err := d.SetNew("enable_intranode_visibility", true); err != nil { - return err - } - } - return nil -} - -func containerClusterNodeVersionRemoveDefaultCustomizeDiff(_ resource_container_cluster_context.Context, d *resource_container_cluster_schema.ResourceDiff, 
meta interface{}) error { - - o, _ := d.GetChange("name") - if o != "" { - return nil - } - if d.Get("node_version").(string) != "" && d.Get("remove_default_node_pool").(bool) { - return resource_container_cluster_fmt.Errorf("node_version can only be specified if remove_default_node_pool is not true") - } - return nil -} - -func resourceContainerClusterMigrateState( - v int, is *resource_container_cluster_migrate_terraform.InstanceState, meta interface{}) (*resource_container_cluster_migrate_terraform.InstanceState, error) { - if is.Empty() { - resource_container_cluster_migrate_log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - switch v { - case 0: - resource_container_cluster_migrate_log.Println("[INFO] Found Container Cluster State v0; migrating to v1") - return migrateClusterStateV0toV1(is) - default: - return is, resource_container_cluster_migrate_fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateClusterStateV0toV1(is *resource_container_cluster_migrate_terraform.InstanceState) (*resource_container_cluster_migrate_terraform.InstanceState, error) { - resource_container_cluster_migrate_log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - newZones := []string{} - - for k, v := range is.Attributes { - if !resource_container_cluster_migrate_strings.HasPrefix(k, "additional_zones.") { - continue - } - - if k == "additional_zones.#" { - continue - } - - kParts := resource_container_cluster_migrate_strings.Split(k, ".") - - badFormat := false - if len(kParts) != 2 { - badFormat = true - } else if _, err := resource_container_cluster_migrate_strconv.Atoi(kParts[1]); err != nil { - badFormat = true - } - - if badFormat { - return is, resource_container_cluster_migrate_fmt.Errorf("migration error: found additional_zones key in unexpected format: %s", k) - } - - newZones = append(newZones, v) - delete(is.Attributes, k) - } - - for _, v := range newZones { - hash := 
resource_container_cluster_migrate_schema.HashString(v) - newKey := resource_container_cluster_migrate_fmt.Sprintf("additional_zones.%d", hash) - is.Attributes[newKey] = v - } - - resource_container_cluster_migrate_log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} - -var clusterIdRegex = resource_container_node_pool_regexp.MustCompile("projects/(?P[^/]+)/locations/(?P[^/]+)/clusters/(?P[^/]+)") - -func resourceContainerNodePool() *resource_container_node_pool_schema.Resource { - return &resource_container_node_pool_schema.Resource{ - Create: resourceContainerNodePoolCreate, - Read: resourceContainerNodePoolRead, - Update: resourceContainerNodePoolUpdate, - Delete: resourceContainerNodePoolDelete, - Exists: resourceContainerNodePoolExists, - - Timeouts: &resource_container_node_pool_schema.ResourceTimeout{ - Create: resource_container_node_pool_schema.DefaultTimeout(30 * resource_container_node_pool_time.Minute), - Update: resource_container_node_pool_schema.DefaultTimeout(30 * resource_container_node_pool_time.Minute), - Delete: resource_container_node_pool_schema.DefaultTimeout(30 * resource_container_node_pool_time.Minute), - }, - - SchemaVersion: 1, - MigrateState: resourceContainerNodePoolMigrateState, - - Importer: &resource_container_node_pool_schema.ResourceImporter{ - State: resourceContainerNodePoolStateImporter, - }, - - CustomizeDiff: resource_container_node_pool_customdiff.All( - resourceNodeConfigEmptyGuestAccelerator, - ), - - UseJSONNumber: true, - - Schema: mergeSchemas( - schemaNodePool, - map[string]*resource_container_node_pool_schema.Schema{ - "project": { - Type: resource_container_node_pool_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which to create the node pool. 
If blank, the provider-configured project will be used.`, - }, - "cluster": { - Type: resource_container_node_pool_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The cluster to create the node pool for. Cluster must be present in location provided for zonal clusters.`, - }, - "location": { - Type: resource_container_node_pool_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The location (region or zone) of the cluster.`, - }, - "operation": { - Type: resource_container_node_pool_schema.TypeString, - Computed: true, - }, - }), - } -} - -var schemaNodePool = map[string]*resource_container_node_pool_schema.Schema{ - "autoscaling": { - Type: resource_container_node_pool_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage.`, - Elem: &resource_container_node_pool_schema.Resource{ - Schema: map[string]*resource_container_node_pool_schema.Schema{ - "min_node_count": { - Type: resource_container_node_pool_schema.TypeInt, - Required: true, - ValidateFunc: resource_container_node_pool_validation.IntAtLeast(0), - Description: `Minimum number of nodes in the NodePool. Must be >=0 and <= max_node_count.`, - }, - - "max_node_count": { - Type: resource_container_node_pool_schema.TypeInt, - Required: true, - ValidateFunc: resource_container_node_pool_validation.IntAtLeast(1), - Description: `Maximum number of nodes in the NodePool. Must be >= min_node_count.`, - }, - }, - }, - }, - - "max_pods_per_node": { - Type: resource_container_node_pool_schema.TypeInt, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The maximum number of pods per node in this node pool. 
Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled.`, - }, - - "node_locations": { - Type: resource_container_node_pool_schema.TypeSet, - Optional: true, - Computed: true, - Elem: &resource_container_node_pool_schema.Schema{Type: resource_container_node_pool_schema.TypeString}, - Description: `The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.`, - }, - - "upgrade_settings": { - Type: resource_container_node_pool_schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: `Specify node upgrade settings to change how many nodes GKE attempts to upgrade at once. The number of nodes upgraded simultaneously is the sum of max_surge and max_unavailable. The maximum number of nodes upgraded simultaneously is limited to 20.`, - Elem: &resource_container_node_pool_schema.Resource{ - Schema: map[string]*resource_container_node_pool_schema.Schema{ - "max_surge": { - Type: resource_container_node_pool_schema.TypeInt, - Required: true, - ValidateFunc: resource_container_node_pool_validation.IntAtLeast(0), - Description: `The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.`, - }, - - "max_unavailable": { - Type: resource_container_node_pool_schema.TypeInt, - Required: true, - ValidateFunc: resource_container_node_pool_validation.IntAtLeast(0), - Description: `The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. 
Can be set to 0 or greater.`, - }, - }, - }, - }, - - "initial_node_count": { - Type: resource_container_node_pool_schema.TypeInt, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource.`, - }, - - "instance_group_urls": { - Type: resource_container_node_pool_schema.TypeList, - Computed: true, - Elem: &resource_container_node_pool_schema.Schema{Type: resource_container_node_pool_schema.TypeString}, - Description: `The resource URLs of the managed instance groups associated with this node pool.`, - }, - - "managed_instance_group_urls": { - Type: resource_container_node_pool_schema.TypeList, - Computed: true, - Elem: &resource_container_node_pool_schema.Schema{Type: resource_container_node_pool_schema.TypeString}, - Description: `List of instance group URLs which have been assigned to this node pool.`, - }, - - "management": { - Type: resource_container_node_pool_schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: `Node management configuration, wherein auto-repair and auto-upgrade is configured.`, - Elem: &resource_container_node_pool_schema.Resource{ - Schema: map[string]*resource_container_node_pool_schema.Schema{ - "auto_repair": { - Type: resource_container_node_pool_schema.TypeBool, - Optional: true, - Default: false, - Description: `Whether the nodes will be automatically repaired.`, - }, - - "auto_upgrade": { - Type: resource_container_node_pool_schema.TypeBool, - Optional: true, - Default: false, - Description: `Whether the nodes will be automatically upgraded.`, - }, - }, - }, - }, - - "name": { - Type: resource_container_node_pool_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The name of the node pool. 
If left blank, Terraform will auto-generate a unique name.`, - }, - - "name_prefix": { - Type: resource_container_node_pool_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.`, - }, - - "node_config": schemaNodeConfig(), - - "node_count": { - Type: resource_container_node_pool_schema.TypeInt, - Optional: true, - Computed: true, - ValidateFunc: resource_container_node_pool_validation.IntAtLeast(0), - Description: `The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.`, - }, - - "version": { - Type: resource_container_node_pool_schema.TypeString, - Optional: true, - Computed: true, - Description: `The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as Terraform will see spurious diffs when fuzzy versions are used. 
See the google_container_engine_versions data source's version_prefix field to approximate fuzzy versions in a Terraform-compatible way.`, - }, -} - -type NodePoolInformation struct { - project string - location string - cluster string -} - -func (nodePoolInformation *NodePoolInformation) fullyQualifiedName(nodeName string) string { - return resource_container_node_pool_fmt.Sprintf( - "projects/%s/locations/%s/clusters/%s/nodePools/%s", - nodePoolInformation.project, - nodePoolInformation.location, - nodePoolInformation.cluster, - nodeName, - ) -} - -func (nodePoolInformation *NodePoolInformation) parent() string { - return resource_container_node_pool_fmt.Sprintf( - "projects/%s/locations/%s/clusters/%s", - nodePoolInformation.project, - nodePoolInformation.location, - nodePoolInformation.cluster, - ) -} - -func (nodePoolInformation *NodePoolInformation) lockKey() string { - return containerClusterMutexKey(nodePoolInformation.project, - nodePoolInformation.location, nodePoolInformation.cluster) -} - -func extractNodePoolInformation(d *resource_container_node_pool_schema.ResourceData, config *Config) (*NodePoolInformation, error) { - cluster := d.Get("cluster").(string) - - if fieldValues := clusterIdRegex.FindStringSubmatch(cluster); fieldValues != nil { - resource_container_node_pool_log.Printf("[DEBUG] matching parent cluster %s to regex %s", cluster, clusterIdRegex.String()) - return &NodePoolInformation{ - project: fieldValues[1], - location: fieldValues[2], - cluster: fieldValues[3], - }, nil - } - resource_container_node_pool_log.Printf("[DEBUG] parent cluster %s does not match regex %s", cluster, clusterIdRegex.String()) - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - location, err := getLocation(d, config) - if err != nil { - return nil, err - } - - return &NodePoolInformation{ - project: project, - location: location, - cluster: cluster, - }, nil -} - -func resourceContainerNodePoolCreate(d 
*resource_container_node_pool_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - nodePoolInfo, err := extractNodePoolInformation(d, config) - if err != nil { - return err - } - - nodePool, err := expandNodePool(d, "") - if err != nil { - return err - } - - mutexKV.Lock(nodePoolInfo.lockKey()) - defer mutexKV.Unlock(nodePoolInfo.lockKey()) - - req := &resource_container_node_pool_container.CreateNodePoolRequest{ - NodePool: nodePool, - } - - timeout := d.Timeout(resource_container_node_pool_schema.TimeoutCreate) - startTime := resource_container_node_pool_time.Now() - - var id = resource_container_node_pool_fmt.Sprintf("projects/%s/locations/%s/clusters/%s/nodePools/%s", nodePoolInfo.project, nodePoolInfo.location, nodePoolInfo.cluster, nodePool.Name) - name := getNodePoolName(id) - clusterNodePoolsGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Get(nodePoolInfo.fullyQualifiedName(name)) - if config.UserProjectOverride { - clusterNodePoolsGetCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - _, err = clusterNodePoolsGetCall.Do() - if err != nil && isGoogleApiErrorWithCode(err, 404) { - - d.SetId(id) - } else if err == nil { - return resource_container_node_pool_fmt.Errorf("resource - %s - already exists", id) - } - - var operation *resource_container_node_pool_container.Operation - err = resource_container_node_pool_resource.Retry(timeout, func() *resource_container_node_pool_resource.RetryError { - clusterNodePoolsCreateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Create(nodePoolInfo.parent(), req) - if config.UserProjectOverride { - clusterNodePoolsCreateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - operation, err = clusterNodePoolsCreateCall.Do() - - if err != nil { - if isFailedPreconditionError(err) { - - return 
resource_container_node_pool_resource.RetryableError(err) - } - return resource_container_node_pool_resource.NonRetryableError(err) - } - return nil - }) - if err != nil { - return resource_container_node_pool_fmt.Errorf("error creating NodePool: %s", err) - } - timeout -= resource_container_node_pool_time.Since(startTime) - - waitErr := containerOperationWait(config, - operation, nodePoolInfo.project, - nodePoolInfo.location, "creating GKE NodePool", userAgent, timeout) - - if waitErr != nil { - - select { - case <-config.context.Done(): - resource_container_node_pool_log.Printf("[DEBUG] Persisting %s so this operation can be resumed \n", operation.Name) - if err := d.Set("operation", operation.Name); err != nil { - return resource_container_node_pool_fmt.Errorf("Error setting operation: %s", err) - } - return nil - default: - - } - - _, err = clusterNodePoolsGetCall.Do() - if err != nil { - d.SetId("") - return waitErr - } - } - - resource_container_node_pool_log.Printf("[INFO] GKE NodePool %s has been created", nodePool.Name) - - if err = resourceContainerNodePoolRead(d, meta); err != nil { - return err - } - - _, err = containerClusterAwaitRestingState(config, nodePoolInfo.project, nodePoolInfo.location, nodePoolInfo.cluster, userAgent, d.Timeout(resource_container_node_pool_schema.TimeoutCreate)) - if err != nil { - return err - } - - state, err := containerNodePoolAwaitRestingState(config, d.Id(), nodePoolInfo.project, userAgent, d.Timeout(resource_container_node_pool_schema.TimeoutCreate)) - if err != nil { - return err - } - - if containerNodePoolRestingStates[state] == ErrorState { - return resource_container_node_pool_fmt.Errorf("NodePool %s was created in the error state %q", nodePool.Name, state) - } - - return nil -} - -func resourceContainerNodePoolRead(d *resource_container_node_pool_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err 
- } - - nodePoolInfo, err := extractNodePoolInformation(d, config) - if err != nil { - return err - } - - operation := d.Get("operation").(string) - if operation != "" { - resource_container_node_pool_log.Printf("[DEBUG] in progress operation detected at %v, attempting to resume", operation) - op := &resource_container_node_pool_container.Operation{ - Name: operation, - } - if err := d.Set("operation", ""); err != nil { - return resource_container_node_pool_fmt.Errorf("Error setting operation: %s", err) - } - waitErr := containerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "resuming GKE node pool", userAgent, d.Timeout(resource_container_node_pool_schema.TimeoutRead)) - if waitErr != nil { - return waitErr - } - } - - name := getNodePoolName(d.Id()) - - clusterNodePoolsGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Get(nodePoolInfo.fullyQualifiedName(name)) - if config.UserProjectOverride { - clusterNodePoolsGetCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - nodePool, err := clusterNodePoolsGetCall.Do() - if err != nil { - return handleNotFoundError(err, d, resource_container_node_pool_fmt.Sprintf("NodePool %q from cluster %q", name, nodePoolInfo.cluster)) - } - - npMap, err := flattenNodePool(d, config, nodePool, "") - if err != nil { - return err - } - - for k, v := range npMap { - if err := d.Set(k, v); err != nil { - return resource_container_node_pool_fmt.Errorf("Error setting %s: %s", k, err) - } - } - - if err := d.Set("location", nodePoolInfo.location); err != nil { - return resource_container_node_pool_fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("project", nodePoolInfo.project); err != nil { - return resource_container_node_pool_fmt.Errorf("Error setting project: %s", err) - } - - return nil -} - -func resourceContainerNodePoolUpdate(d *resource_container_node_pool_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - 
userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - nodePoolInfo, err := extractNodePoolInformation(d, config) - if err != nil { - return err - } - name := getNodePoolName(d.Id()) - - _, err = containerClusterAwaitRestingState(config, nodePoolInfo.project, nodePoolInfo.location, nodePoolInfo.cluster, userAgent, d.Timeout(resource_container_node_pool_schema.TimeoutCreate)) - if err != nil { - return err - } - - _, err = containerNodePoolAwaitRestingState(config, nodePoolInfo.fullyQualifiedName(name), nodePoolInfo.project, userAgent, d.Timeout(resource_container_node_pool_schema.TimeoutUpdate)) - if err != nil { - return err - } - - d.Partial(true) - if err := nodePoolUpdate(d, meta, nodePoolInfo, "", d.Timeout(resource_container_node_pool_schema.TimeoutUpdate)); err != nil { - return err - } - d.Partial(false) - - _, err = containerClusterAwaitRestingState(config, nodePoolInfo.project, nodePoolInfo.location, nodePoolInfo.cluster, userAgent, d.Timeout(resource_container_node_pool_schema.TimeoutCreate)) - if err != nil { - return err - } - _, err = containerNodePoolAwaitRestingState(config, nodePoolInfo.fullyQualifiedName(name), nodePoolInfo.project, userAgent, d.Timeout(resource_container_node_pool_schema.TimeoutUpdate)) - if err != nil { - return err - } - - return resourceContainerNodePoolRead(d, meta) -} - -func resourceContainerNodePoolDelete(d *resource_container_node_pool_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - nodePoolInfo, err := extractNodePoolInformation(d, config) - if err != nil { - return err - } - - name := getNodePoolName(d.Id()) - - _, err = containerClusterAwaitRestingState(config, nodePoolInfo.project, nodePoolInfo.location, nodePoolInfo.cluster, userAgent, d.Timeout(resource_container_node_pool_schema.TimeoutCreate)) - if err != nil { - if 
isGoogleApiErrorWithCode(err, 404) { - resource_container_node_pool_log.Printf("[INFO] GKE cluster %s doesn't exist, skipping node pool %s deletion", nodePoolInfo.cluster, d.Id()) - return nil - } - return err - } - - _, err = containerNodePoolAwaitRestingState(config, nodePoolInfo.fullyQualifiedName(name), nodePoolInfo.project, userAgent, d.Timeout(resource_container_node_pool_schema.TimeoutDelete)) - if err != nil { - - if isGoogleApiErrorWithCode(err, 404) { - resource_container_node_pool_log.Printf("node pool %q not found, doesn't need to be cleaned up", name) - return nil - } else { - return err - } - } - - mutexKV.Lock(nodePoolInfo.lockKey()) - defer mutexKV.Unlock(nodePoolInfo.lockKey()) - - timeout := d.Timeout(resource_container_node_pool_schema.TimeoutDelete) - startTime := resource_container_node_pool_time.Now() - - var operation *resource_container_node_pool_container.Operation - err = resource_container_node_pool_resource.Retry(timeout, func() *resource_container_node_pool_resource.RetryError { - clusterNodePoolsDeleteCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Delete(nodePoolInfo.fullyQualifiedName(name)) - if config.UserProjectOverride { - clusterNodePoolsDeleteCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - operation, err = clusterNodePoolsDeleteCall.Do() - - if err != nil { - if isFailedPreconditionError(err) { - - return resource_container_node_pool_resource.RetryableError(err) - } - return resource_container_node_pool_resource.NonRetryableError(err) - } - - return nil - }) - - if err != nil { - return resource_container_node_pool_fmt.Errorf("Error deleting NodePool: %s", err) - } - - timeout -= resource_container_node_pool_time.Since(startTime) - - waitErr := containerOperationWait(config, operation, nodePoolInfo.project, nodePoolInfo.location, "deleting GKE NodePool", userAgent, timeout) - if waitErr != nil { - return waitErr - } - - resource_container_node_pool_log.Printf("[INFO] GKE 
NodePool %s has been deleted", d.Id()) - - d.SetId("") - - return nil -} - -func resourceContainerNodePoolExists(d *resource_container_node_pool_schema.ResourceData, meta interface{}) (bool, error) { - config := meta.(*Config) - nodePoolInfo, err := extractNodePoolInformation(d, config) - if err != nil { - return false, err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return false, err - } - - name := getNodePoolName(d.Id()) - clusterNodePoolsGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Get(nodePoolInfo.fullyQualifiedName(name)) - if config.UserProjectOverride { - clusterNodePoolsGetCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - _, err = clusterNodePoolsGetCall.Do() - - if err != nil { - if err = handleNotFoundError(err, d, resource_container_node_pool_fmt.Sprintf("Container NodePool %s", name)); err == nil { - return false, nil - } - - return true, err - } - return true, nil -} - -func resourceContainerNodePoolStateImporter(d *resource_container_node_pool_schema.ResourceData, meta interface{}) ([]*resource_container_node_pool_schema.ResourceData, error) { - config := meta.(*Config) - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - if err := parseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/clusters/(?P[^/]+)/nodePools/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)"}, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/nodePools/{{name}}") - if err != nil { - return nil, err - } - - d.SetId(id) - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - nodePoolInfo, err := extractNodePoolInformation(d, config) - if err != nil { - return nil, err - } - - _, err = containerClusterAwaitRestingState(config, 
nodePoolInfo.project, nodePoolInfo.location, nodePoolInfo.cluster, userAgent, d.Timeout(resource_container_node_pool_schema.TimeoutCreate)) - if err != nil { - return nil, err - } - - if _, err := containerNodePoolAwaitRestingState(config, d.Id(), project, userAgent, d.Timeout(resource_container_node_pool_schema.TimeoutCreate)); err != nil { - return nil, err - } - - return []*resource_container_node_pool_schema.ResourceData{d}, nil -} - -func expandNodePool(d *resource_container_node_pool_schema.ResourceData, prefix string) (*resource_container_node_pool_container.NodePool, error) { - var name string - if v, ok := d.GetOk(prefix + "name"); ok { - if _, ok := d.GetOk(prefix + "name_prefix"); ok { - return nil, resource_container_node_pool_fmt.Errorf("Cannot specify both name and name_prefix for a node_pool") - } - name = v.(string) - } else if v, ok := d.GetOk(prefix + "name_prefix"); ok { - name = resource_container_node_pool_resource.PrefixedUniqueId(v.(string)) - } else { - name = resource_container_node_pool_resource.UniqueId() - } - - nodeCount := 0 - if initialNodeCount, ok := d.GetOk(prefix + "initial_node_count"); ok { - nodeCount = initialNodeCount.(int) - } - if nc, ok := d.GetOk(prefix + "node_count"); ok { - if nodeCount != 0 { - return nil, resource_container_node_pool_fmt.Errorf("Cannot set both initial_node_count and node_count on node pool %s", name) - } - nodeCount = nc.(int) - } - - var locations []string - if v, ok := d.GetOk("node_locations"); ok && v.(*resource_container_node_pool_schema.Set).Len() > 0 { - locations = convertStringSet(v.(*resource_container_node_pool_schema.Set)) - } - - np := &resource_container_node_pool_container.NodePool{ - Name: name, - InitialNodeCount: int64(nodeCount), - Config: expandNodeConfig(d.Get(prefix + "node_config")), - Locations: locations, - Version: d.Get(prefix + "version").(string), - } - - if v, ok := d.GetOk(prefix + "autoscaling"); ok { - autoscaling := v.([]interface{})[0].(map[string]interface{}) - 
np.Autoscaling = &resource_container_node_pool_container.NodePoolAutoscaling{ - Enabled: true, - MinNodeCount: int64(autoscaling["min_node_count"].(int)), - MaxNodeCount: int64(autoscaling["max_node_count"].(int)), - ForceSendFields: []string{"MinNodeCount"}, - } - } - - if v, ok := d.GetOk(prefix + "max_pods_per_node"); ok { - np.MaxPodsConstraint = &resource_container_node_pool_container.MaxPodsConstraint{ - MaxPodsPerNode: int64(v.(int)), - } - } - - if v, ok := d.GetOk(prefix + "management"); ok { - managementConfig := v.([]interface{})[0].(map[string]interface{}) - np.Management = &resource_container_node_pool_container.NodeManagement{} - - if v, ok := managementConfig["auto_repair"]; ok { - np.Management.AutoRepair = v.(bool) - } - - if v, ok := managementConfig["auto_upgrade"]; ok { - np.Management.AutoUpgrade = v.(bool) - } - } - - if v, ok := d.GetOk(prefix + "upgrade_settings"); ok { - upgradeSettingsConfig := v.([]interface{})[0].(map[string]interface{}) - np.UpgradeSettings = &resource_container_node_pool_container.UpgradeSettings{} - - if v, ok := upgradeSettingsConfig["max_surge"]; ok { - np.UpgradeSettings.MaxSurge = int64(v.(int)) - } - - if v, ok := upgradeSettingsConfig["max_unavailable"]; ok { - np.UpgradeSettings.MaxUnavailable = int64(v.(int)) - } - } - - return np, nil -} - -func flattenNodePool(d *resource_container_node_pool_schema.ResourceData, config *Config, np *resource_container_node_pool_container.NodePool, prefix string) (map[string]interface{}, error) { - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - size := 0 - igmUrls := []string{} - managedIgmUrls := []string{} - for _, url := range np.InstanceGroupUrls { - - matches := instanceGroupManagerURL.FindStringSubmatch(url) - if len(matches) < 4 { - return nil, resource_container_node_pool_fmt.Errorf("Error reading instance group manage URL '%q'", url) - } - igm, err := 
config.NewComputeClient(userAgent).InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do() - if isGoogleApiErrorWithCode(err, 404) { - - continue - } - if err != nil { - return nil, resource_container_node_pool_fmt.Errorf("Error reading instance group manager returned as an instance group URL: %q", err) - } - size += int(igm.TargetSize) - igmUrls = append(igmUrls, url) - managedIgmUrls = append(managedIgmUrls, igm.InstanceGroup) - } - nodeCount := 0 - if len(igmUrls) > 0 { - nodeCount = size / len(igmUrls) - } - nodePool := map[string]interface{}{ - "name": np.Name, - "name_prefix": d.Get(prefix + "name_prefix"), - "initial_node_count": np.InitialNodeCount, - "node_locations": resource_container_node_pool_schema.NewSet(resource_container_node_pool_schema.HashString, convertStringArrToInterface(np.Locations)), - "node_count": nodeCount, - "node_config": flattenNodeConfig(np.Config), - "instance_group_urls": igmUrls, - "managed_instance_group_urls": managedIgmUrls, - "version": np.Version, - } - - if np.Autoscaling != nil { - if np.Autoscaling.Enabled { - nodePool["autoscaling"] = []map[string]interface{}{ - { - "min_node_count": np.Autoscaling.MinNodeCount, - "max_node_count": np.Autoscaling.MaxNodeCount, - }, - } - } else { - nodePool["autoscaling"] = []map[string]interface{}{} - } - } - - if np.MaxPodsConstraint != nil { - nodePool["max_pods_per_node"] = np.MaxPodsConstraint.MaxPodsPerNode - } - - nodePool["management"] = []map[string]interface{}{ - { - "auto_repair": np.Management.AutoRepair, - "auto_upgrade": np.Management.AutoUpgrade, - }, - } - - if np.UpgradeSettings != nil { - nodePool["upgrade_settings"] = []map[string]interface{}{ - { - "max_surge": np.UpgradeSettings.MaxSurge, - "max_unavailable": np.UpgradeSettings.MaxUnavailable, - }, - } - } else { - delete(nodePool, "upgrade_settings") - } - - return nodePool, nil -} - -func nodePoolUpdate(d *resource_container_node_pool_schema.ResourceData, meta interface{}, nodePoolInfo 
*NodePoolInformation, prefix string, timeout resource_container_node_pool_time.Duration) error { - config := meta.(*Config) - name := d.Get(prefix + "name").(string) - - lockKey := nodePoolInfo.lockKey() - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - if d.HasChange(prefix + "autoscaling") { - update := &resource_container_node_pool_container.ClusterUpdate{ - DesiredNodePoolId: name, - } - if v, ok := d.GetOk(prefix + "autoscaling"); ok { - autoscaling := v.([]interface{})[0].(map[string]interface{}) - update.DesiredNodePoolAutoscaling = &resource_container_node_pool_container.NodePoolAutoscaling{ - Enabled: true, - MinNodeCount: int64(autoscaling["min_node_count"].(int)), - MaxNodeCount: int64(autoscaling["max_node_count"].(int)), - ForceSendFields: []string{"MinNodeCount"}, - } - } else { - update.DesiredNodePoolAutoscaling = &resource_container_node_pool_container.NodePoolAutoscaling{ - Enabled: false, - } - } - - req := &resource_container_node_pool_container.UpdateClusterRequest{ - Update: update, - } - - updateF := func() error { - clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(nodePoolInfo.parent(), req) - if config.UserProjectOverride { - clusterUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterUpdateCall.Do() - if err != nil { - return err - } - - return containerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, "updating GKE node pool", userAgent, - timeout) - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_node_pool_log.Printf("[INFO] Updated autoscaling in Node Pool %s", d.Id()) - } - - if d.HasChange(prefix + "node_config") { - if d.HasChange(prefix + "node_config.0.image_type") { - req := &resource_container_node_pool_container.UpdateClusterRequest{ - Update: &resource_container_node_pool_container.ClusterUpdate{ - 
DesiredNodePoolId: name, - DesiredImageType: d.Get(prefix + "node_config.0.image_type").(string), - }, - } - - updateF := func() error { - clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(nodePoolInfo.parent(), req) - if config.UserProjectOverride { - clusterUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterUpdateCall.Do() - if err != nil { - return err - } - - return containerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, "updating GKE node pool", userAgent, - timeout) - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_node_pool_log.Printf("[INFO] Updated image type in Node Pool %s", d.Id()) - } - - if d.HasChange(prefix + "node_config.0.workload_metadata_config") { - req := &resource_container_node_pool_container.UpdateNodePoolRequest{ - NodePoolId: name, - WorkloadMetadataConfig: expandWorkloadMetadataConfig( - d.Get(prefix + "node_config.0.workload_metadata_config")), - } - if req.WorkloadMetadataConfig == nil { - req.ForceSendFields = []string{"WorkloadMetadataConfig"} - } - updateF := func() error { - clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - - if err != nil { - return err - } - - return containerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, - "updating GKE node pool workload_metadata_config", userAgent, - timeout) - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_node_pool_log.Printf("[INFO] Updated workload_metadata_config for node pool %s", name) - } - - } - - if d.HasChange(prefix + "node_count") { - newSize := int64(d.Get(prefix + 
"node_count").(int)) - req := &resource_container_node_pool_container.SetNodePoolSizeRequest{ - NodeCount: newSize, - } - updateF := func() error { - clusterNodePoolsSetSizeCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.SetSize(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsSetSizeCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsSetSizeCall.Do() - - if err != nil { - return err - } - - return containerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, "updating GKE node pool size", userAgent, - timeout) - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_node_pool_log.Printf("[INFO] GKE node pool %s size has been updated to %d", name, newSize) - } - - if d.HasChange(prefix + "management") { - management := &resource_container_node_pool_container.NodeManagement{} - if v, ok := d.GetOk(prefix + "management"); ok { - managementConfig := v.([]interface{})[0].(map[string]interface{}) - management.AutoRepair = managementConfig["auto_repair"].(bool) - management.AutoUpgrade = managementConfig["auto_upgrade"].(bool) - management.ForceSendFields = []string{"AutoRepair", "AutoUpgrade"} - } - req := &resource_container_node_pool_container.SetNodePoolManagementRequest{ - Management: management, - } - - updateF := func() error { - clusterNodePoolsSetManagementCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.SetManagement(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsSetManagementCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsSetManagementCall.Do() - - if err != nil { - return err - } - - return containerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, "updating GKE node pool management", userAgent, timeout) - } - - if 
err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_node_pool_log.Printf("[INFO] Updated management in Node Pool %s", name) - } - - if d.HasChange(prefix + "version") { - req := &resource_container_node_pool_container.UpdateNodePoolRequest{ - NodePoolId: name, - NodeVersion: d.Get(prefix + "version").(string), - } - updateF := func() error { - clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - - if err != nil { - return err - } - - return containerOperationWait(config, op, - nodePoolInfo.project, - nodePoolInfo.location, "updating GKE node pool version", userAgent, timeout) - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_node_pool_log.Printf("[INFO] Updated version in Node Pool %s", name) - } - - if d.HasChange(prefix + "node_locations") { - req := &resource_container_node_pool_container.UpdateNodePoolRequest{ - Locations: convertStringSet(d.Get(prefix + "node_locations").(*resource_container_node_pool_schema.Set)), - } - updateF := func() error { - clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - - if err != nil { - return err - } - - return containerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool node locations", userAgent, timeout) - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_node_pool_log.Printf("[INFO] Updated node locations 
in Node Pool %s", name) - } - - if d.HasChange(prefix + "upgrade_settings") { - upgradeSettings := &resource_container_node_pool_container.UpgradeSettings{} - if v, ok := d.GetOk(prefix + "upgrade_settings"); ok { - upgradeSettingsConfig := v.([]interface{})[0].(map[string]interface{}) - upgradeSettings.MaxSurge = int64(upgradeSettingsConfig["max_surge"].(int)) - upgradeSettings.MaxUnavailable = int64(upgradeSettingsConfig["max_unavailable"].(int)) - } - req := &resource_container_node_pool_container.UpdateNodePoolRequest{ - UpgradeSettings: upgradeSettings, - } - updateF := func() error { - clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - - if err != nil { - return err - } - - return containerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool upgrade settings", userAgent, timeout) - } - - if err := lockedCall(lockKey, updateF); err != nil { - return err - } - - resource_container_node_pool_log.Printf("[INFO] Updated upgrade settings in Node Pool %s", name) - } - - return nil -} - -func getNodePoolName(id string) string { - - splits := resource_container_node_pool_strings.Split(id, "/") - return splits[len(splits)-1] -} - -var containerNodePoolRestingStates = RestingStates{ - "RUNNING": ReadyState, - "RUNNING_WITH_ERROR": ErrorState, - "ERROR": ErrorState, -} - -func containerNodePoolAwaitRestingState(config *Config, name, project, userAgent string, timeout resource_container_node_pool_time.Duration) (state string, err error) { - err = resource_container_node_pool_resource.Retry(timeout, func() *resource_container_node_pool_resource.RetryError { - clusterNodePoolsGetCall := 
config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Get(name) - if config.UserProjectOverride { - clusterNodePoolsGetCall.Header().Add("X-Goog-User-Project", project) - } - nodePool, gErr := clusterNodePoolsGetCall.Do() - if gErr != nil { - return resource_container_node_pool_resource.NonRetryableError(gErr) - } - - state = nodePool.Status - switch stateType := containerNodePoolRestingStates[state]; stateType { - case ReadyState: - resource_container_node_pool_log.Printf("[DEBUG] NodePool %q has status %q with message %q.", name, state, nodePool.StatusMessage) - return nil - case ErrorState: - resource_container_node_pool_log.Printf("[DEBUG] NodePool %q has error state %q with message %q.", name, state, nodePool.StatusMessage) - return nil - default: - return resource_container_node_pool_resource.RetryableError(resource_container_node_pool_fmt.Errorf("NodePool %q has state %q with message %q", name, state, nodePool.StatusMessage)) - } - }) - - return state, err -} - -func resourceContainerNodePoolMigrateState(v int, is *resource_container_node_pool_migrate_terraform.InstanceState, meta interface{}) (*resource_container_node_pool_migrate_terraform.InstanceState, error) { - if is.Empty() { - resource_container_node_pool_migrate_log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - switch v { - case 0: - resource_container_node_pool_migrate_log.Println("[INFO] Found Container Node Pool State v0; migrating to v1") - return migrateNodePoolStateV0toV1(is) - default: - return is, resource_container_node_pool_migrate_fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateNodePoolStateV0toV1(is *resource_container_node_pool_migrate_terraform.InstanceState) (*resource_container_node_pool_migrate_terraform.InstanceState, error) { - resource_container_node_pool_migrate_log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - resource_container_node_pool_migrate_log.Printf("[DEBUG] ID before 
migration: %s", is.ID) - - is.ID = resource_container_node_pool_migrate_fmt.Sprintf("%s/%s/%s", is.Attributes["zone"], is.Attributes["cluster"], is.Attributes["name"]) - - resource_container_node_pool_migrate_log.Printf("[DEBUG] ID after migration: %s", is.ID) - return is, nil -} - -func resourceContainerRegistry() *resource_container_registry_schema.Resource { - return &resource_container_registry_schema.Resource{ - Create: resourceContainerRegistryCreate, - Read: resourceContainerRegistryRead, - Delete: resourceContainerRegistryDelete, - - Schema: map[string]*resource_container_registry_schema.Schema{ - "location": { - Type: resource_container_registry_schema.TypeString, - Optional: true, - ForceNew: true, - StateFunc: func(s interface{}) string { - return resource_container_registry_strings.ToUpper(s.(string)) - }, - Description: `The location of the registry. One of ASIA, EU, US or not specified. See the official documentation for more information on registry locations.`, - }, - - "project": { - Type: resource_container_registry_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, - }, - - "bucket_self_link": { - Type: resource_container_registry_schema.TypeString, - Computed: true, - Description: `The URI of the created resource.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceContainerRegistryCreate(d *resource_container_registry_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - resource_container_registry_log.Printf("[DEBUG] Project: %s", project) - - location := d.Get("location").(string) - resource_container_registry_log.Printf("[DEBUG] location: %s", location) - urlBase := "https://gcr.io/v2/token" - if location != "" { - urlBase = resource_container_registry_fmt.Sprintf("https://%s.gcr.io/v2/token", resource_container_registry_strings.ToLower(location)) - } - - url, err := replaceVars(d, config, resource_container_registry_fmt.Sprintf("%s?service=gcr.io&scope=repository:{{project}}/my-repo:push,pull", urlBase)) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(config, "GET", project, url, userAgent, nil, d.Timeout(resource_container_registry_schema.TimeoutCreate)) - - if err != nil { - return err - } - return resourceContainerRegistryRead(d, meta) -} - -func resourceContainerRegistryRead(d *resource_container_registry_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - location := d.Get("location").(string) - project, err := getProject(d, config) - if err != nil { - return err - } - name := "" - if location != "" { - name = resource_container_registry_fmt.Sprintf("%s.artifacts.%s.appspot.com", resource_container_registry_strings.ToLower(location), project) - } else { - name = 
resource_container_registry_fmt.Sprintf("artifacts.%s.appspot.com", project) - } - - res, err := config.NewStorageClient(userAgent).Buckets.Get(name).Do() - if err != nil { - return handleNotFoundError(err, d, resource_container_registry_fmt.Sprintf("Container Registry Storage Bucket %q", name)) - } - resource_container_registry_log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink) - - if err := d.Set("bucket_self_link", res.SelfLink); err != nil { - return resource_container_registry_fmt.Errorf("Error setting bucket_self_link: %s", err) - } - - d.SetId(res.Id) - return nil -} - -func resourceContainerRegistryDelete(d *resource_container_registry_schema.ResourceData, meta interface{}) error { - - return nil -} - -func resourceDataCatalogEntry() *resource_data_catalog_entry_schema.Resource { - return &resource_data_catalog_entry_schema.Resource{ - Create: resourceDataCatalogEntryCreate, - Read: resourceDataCatalogEntryRead, - Update: resourceDataCatalogEntryUpdate, - Delete: resourceDataCatalogEntryDelete, - - Importer: &resource_data_catalog_entry_schema.ResourceImporter{ - State: resourceDataCatalogEntryImport, - }, - - Timeouts: &resource_data_catalog_entry_schema.ResourceTimeout{ - Create: resource_data_catalog_entry_schema.DefaultTimeout(4 * resource_data_catalog_entry_time.Minute), - Update: resource_data_catalog_entry_schema.DefaultTimeout(4 * resource_data_catalog_entry_time.Minute), - Delete: resource_data_catalog_entry_schema.DefaultTimeout(4 * resource_data_catalog_entry_time.Minute), - }, - - Schema: map[string]*resource_data_catalog_entry_schema.Schema{ - "entry_group": { - Type: resource_data_catalog_entry_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the entry group this entry is in.`, - }, - "entry_id": { - Type: resource_data_catalog_entry_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The id of the entry to create.`, - }, - "description": { - Type: 
resource_data_catalog_entry_schema.TypeString, - Optional: true, - Description: `Entry description, which can consist of several sentences or paragraphs that describe entry contents.`, - }, - "display_name": { - Type: resource_data_catalog_entry_schema.TypeString, - Optional: true, - Description: `Display information such as title and description. A short name to identify the entry, -for example, "Analytics Data - Jan 2011".`, - }, - "gcs_fileset_spec": { - Type: resource_data_catalog_entry_schema.TypeList, - Optional: true, - Description: `Specification that applies to a Cloud Storage fileset. This is only valid on entries of type FILESET.`, - MaxItems: 1, - Elem: &resource_data_catalog_entry_schema.Resource{ - Schema: map[string]*resource_data_catalog_entry_schema.Schema{ - "file_patterns": { - Type: resource_data_catalog_entry_schema.TypeList, - Required: true, - Description: `Patterns to identify a set of files in Google Cloud Storage. -See [Cloud Storage documentation](https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames) -for more information. Note that bucket wildcards are currently not supported. Examples of valid filePatterns: - -* gs://bucket_name/dir/*: matches all files within bucket_name/dir directory. -* gs://bucket_name/dir/**: matches all files in bucket_name/dir spanning all subdirectories. -* gs://bucket_name/file*: matches files prefixed by file in bucket_name -* gs://bucket_name/??.txt: matches files with two characters followed by .txt in bucket_name -* gs://bucket_name/[aeiou].txt: matches files that contain a single vowel character followed by .txt in bucket_name -* gs://bucket_name/[a-m].txt: matches files that contain a, b, ... 
or m followed by .txt in bucket_name -* gs://bucket_name/a/*/b: matches all files in bucket_name that match a/*/b pattern, such as a/c/b, a/d/b -* gs://another_bucket/a.txt: matches gs://another_bucket/a.txt`, - Elem: &resource_data_catalog_entry_schema.Schema{ - Type: resource_data_catalog_entry_schema.TypeString, - }, - }, - "sample_gcs_file_specs": { - Type: resource_data_catalog_entry_schema.TypeList, - Computed: true, - Description: `Sample files contained in this fileset, not all files contained in this fileset are represented here.`, - Elem: &resource_data_catalog_entry_schema.Resource{ - Schema: map[string]*resource_data_catalog_entry_schema.Schema{ - "file_path": { - Type: resource_data_catalog_entry_schema.TypeString, - Computed: true, - Description: `The full file path`, - }, - "size_bytes": { - Type: resource_data_catalog_entry_schema.TypeInt, - Computed: true, - Description: `The size of the file, in bytes.`, - }, - }, - }, - }, - }, - }, - }, - "linked_resource": { - Type: resource_data_catalog_entry_schema.TypeString, - Computed: true, - Optional: true, - Description: `The resource this metadata entry refers to. -For Google Cloud Platform resources, linkedResource is the full name of the resource. -For example, the linkedResource for a table resource from BigQuery is: -//bigquery.googleapis.com/projects/projectId/datasets/datasetId/tables/tableId -Output only when Entry is of type in the EntryType enum. For entries with userSpecifiedType, -this field is optional and defaults to an empty string.`, - }, - "schema": { - Type: resource_data_catalog_entry_schema.TypeString, - Optional: true, - ValidateFunc: resource_data_catalog_entry_validation.StringIsJSON, - StateFunc: func(v interface{}) string { - s, _ := resource_data_catalog_entry_structure.NormalizeJsonString(v) - return s - }, - Description: `Schema of the entry (e.g. BigQuery, GoogleSQL, Avro schema), as a json string. An entry might not have any schema -attached to it. 
See -https://cloud.google.com/data-catalog/docs/reference/rest/v1/projects.locations.entryGroups.entries#schema -for what fields this schema can contain.`, - }, - "type": { - Type: resource_data_catalog_entry_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_data_catalog_entry_validation.StringInSlice([]string{"FILESET", ""}, false), - Description: `The type of the entry. Only used for Entries with types in the EntryType enum. -Currently, only FILESET enum value is allowed. All other entries created through Data Catalog must use userSpecifiedType. Possible values: ["FILESET"]`, - ExactlyOneOf: []string{"type", "user_specified_type"}, - }, - "user_specified_system": { - Type: resource_data_catalog_entry_schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(`^[A-z_][A-z0-9_]{0,63}$`), - Description: `This field indicates the entry's source system that Data Catalog does not integrate with. -userSpecifiedSystem strings must begin with a letter or underscore and can only contain letters, numbers, -and underscores; are case insensitive; must be at least 1 character and at most 64 characters long.`, - }, - "user_specified_type": { - Type: resource_data_catalog_entry_schema.TypeString, - Optional: true, - ValidateFunc: validateRegexp(`^[A-z_][A-z0-9_]{0,63}$`), - Description: `Entry type if it does not fit any of the input-allowed values listed in EntryType enum above. -When creating an entry, users should check the enum values first, if nothing matches the entry -to be created, then provide a custom value, for example "my_special_type". 
-userSpecifiedType strings must begin with a letter or underscore and can only contain letters, -numbers, and underscores; are case insensitive; must be at least 1 character and at most 64 characters long.`, - ExactlyOneOf: []string{"type", "user_specified_type"}, - }, - "bigquery_date_sharded_spec": { - Type: resource_data_catalog_entry_schema.TypeList, - Computed: true, - Description: `Specification for a group of BigQuery tables with name pattern [prefix]YYYYMMDD. -Context: https://cloud.google.com/bigquery/docs/partitioned-tables#partitioning_versus_sharding.`, - Elem: &resource_data_catalog_entry_schema.Resource{ - Schema: map[string]*resource_data_catalog_entry_schema.Schema{ - "dataset": { - Type: resource_data_catalog_entry_schema.TypeString, - Computed: true, - Description: `The Data Catalog resource name of the dataset entry the current table belongs to, for example, -projects/{project_id}/locations/{location}/entrygroups/{entryGroupId}/entries/{entryId}`, - }, - "shard_count": { - Type: resource_data_catalog_entry_schema.TypeInt, - Computed: true, - Description: `Total number of shards.`, - }, - "table_prefix": { - Type: resource_data_catalog_entry_schema.TypeString, - Computed: true, - Description: `The table name prefix of the shards. The name of any given shard is [tablePrefix]YYYYMMDD, -for example, for shard MyTable20180101, the tablePrefix is MyTable.`, - }, - }, - }, - }, - "bigquery_table_spec": { - Type: resource_data_catalog_entry_schema.TypeList, - Computed: true, - Description: `Specification that applies to a BigQuery table. 
This is only valid on entries of type TABLE.`, - Elem: &resource_data_catalog_entry_schema.Resource{ - Schema: map[string]*resource_data_catalog_entry_schema.Schema{ - "table_source_type": { - Type: resource_data_catalog_entry_schema.TypeString, - Computed: true, - Description: `The table source type.`, - }, - "table_spec": { - Type: resource_data_catalog_entry_schema.TypeList, - Computed: true, - Description: `Spec of a BigQuery table. This field should only be populated if tableSourceType is BIGQUERY_TABLE.`, - Elem: &resource_data_catalog_entry_schema.Resource{ - Schema: map[string]*resource_data_catalog_entry_schema.Schema{ - "grouped_entry": { - Type: resource_data_catalog_entry_schema.TypeString, - Computed: true, - Description: `If the table is a dated shard, i.e., with name pattern [prefix]YYYYMMDD, groupedEntry is the -Data Catalog resource name of the date sharded grouped entry, for example, -projects/{project_id}/locations/{location}/entrygroups/{entryGroupId}/entries/{entryId}. -Otherwise, groupedEntry is empty.`, - }, - }, - }, - }, - "view_spec": { - Type: resource_data_catalog_entry_schema.TypeList, - Computed: true, - Description: `Table view specification. This field should only be populated if tableSourceType is BIGQUERY_VIEW.`, - Elem: &resource_data_catalog_entry_schema.Resource{ - Schema: map[string]*resource_data_catalog_entry_schema.Schema{ - "view_query": { - Type: resource_data_catalog_entry_schema.TypeString, - Computed: true, - Description: `The query that defines the table view.`, - }, - }, - }, - }, - }, - }, - }, - "integrated_system": { - Type: resource_data_catalog_entry_schema.TypeString, - Computed: true, - Description: `This field indicates the entry's source system that Data Catalog integrates with, such as BigQuery or Pub/Sub.`, - }, - "name": { - Type: resource_data_catalog_entry_schema.TypeString, - Computed: true, - Description: `The Data Catalog resource name of the entry in URL format. 
-Example: projects/{project_id}/locations/{location}/entryGroups/{entryGroupId}/entries/{entryId}. -Note that this Entry and its child resources may not actually be stored in the location in this name.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataCatalogEntryCreate(d *resource_data_catalog_entry_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - linkedResourceProp, err := expandDataCatalogEntryLinkedResource(d.Get("linked_resource"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("linked_resource"); !isEmptyValue(resource_data_catalog_entry_reflect.ValueOf(linkedResourceProp)) && (ok || !resource_data_catalog_entry_reflect.DeepEqual(v, linkedResourceProp)) { - obj["linkedResource"] = linkedResourceProp - } - displayNameProp, err := expandDataCatalogEntryDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_data_catalog_entry_reflect.ValueOf(displayNameProp)) && (ok || !resource_data_catalog_entry_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDataCatalogEntryDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_data_catalog_entry_reflect.ValueOf(descriptionProp)) && (ok || !resource_data_catalog_entry_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - schemaProp, err := expandDataCatalogEntrySchema(d.Get("schema"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("schema"); !isEmptyValue(resource_data_catalog_entry_reflect.ValueOf(schemaProp)) && (ok || !resource_data_catalog_entry_reflect.DeepEqual(v, schemaProp)) { - obj["schema"] = 
schemaProp - } - typeProp, err := expandDataCatalogEntryType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(resource_data_catalog_entry_reflect.ValueOf(typeProp)) && (ok || !resource_data_catalog_entry_reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - userSpecifiedTypeProp, err := expandDataCatalogEntryUserSpecifiedType(d.Get("user_specified_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_specified_type"); !isEmptyValue(resource_data_catalog_entry_reflect.ValueOf(userSpecifiedTypeProp)) && (ok || !resource_data_catalog_entry_reflect.DeepEqual(v, userSpecifiedTypeProp)) { - obj["userSpecifiedType"] = userSpecifiedTypeProp - } - userSpecifiedSystemProp, err := expandDataCatalogEntryUserSpecifiedSystem(d.Get("user_specified_system"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_specified_system"); !isEmptyValue(resource_data_catalog_entry_reflect.ValueOf(userSpecifiedSystemProp)) && (ok || !resource_data_catalog_entry_reflect.DeepEqual(v, userSpecifiedSystemProp)) { - obj["userSpecifiedSystem"] = userSpecifiedSystemProp - } - gcsFilesetSpecProp, err := expandDataCatalogEntryGcsFilesetSpec(d.Get("gcs_fileset_spec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("gcs_fileset_spec"); !isEmptyValue(resource_data_catalog_entry_reflect.ValueOf(gcsFilesetSpecProp)) && (ok || !resource_data_catalog_entry_reflect.DeepEqual(v, gcsFilesetSpecProp)) { - obj["gcsFilesetSpec"] = gcsFilesetSpecProp - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{entry_group}}/entries?entryId={{entry_id}}") - if err != nil { - return err - } - - resource_data_catalog_entry_log.Printf("[DEBUG] Creating new Entry: %#v", obj) - billingProject := "" - - if parts := resource_data_catalog_entry_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = 
parts[1] - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_data_catalog_entry_schema.TimeoutCreate)) - if err != nil { - return resource_data_catalog_entry_fmt.Errorf("Error creating Entry: %s", err) - } - if err := d.Set("name", flattenDataCatalogEntryName(res["name"], d, config)); err != nil { - return resource_data_catalog_entry_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_data_catalog_entry_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_data_catalog_entry_log.Printf("[DEBUG] Finished creating Entry %q: %#v", d.Id(), res) - - return resourceDataCatalogEntryRead(d, meta) -} - -func resourceDataCatalogEntryRead(d *resource_data_catalog_entry_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if parts := resource_data_catalog_entry_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_data_catalog_entry_fmt.Sprintf("DataCatalogEntry %q", d.Id())) - } - - if err := d.Set("name", flattenDataCatalogEntryName(res["name"], d, config)); err != nil { - return resource_data_catalog_entry_fmt.Errorf("Error reading Entry: %s", err) - } - if err := d.Set("linked_resource", flattenDataCatalogEntryLinkedResource(res["linkedResource"], d, config)); 
err != nil { - return resource_data_catalog_entry_fmt.Errorf("Error reading Entry: %s", err) - } - if err := d.Set("display_name", flattenDataCatalogEntryDisplayName(res["displayName"], d, config)); err != nil { - return resource_data_catalog_entry_fmt.Errorf("Error reading Entry: %s", err) - } - if err := d.Set("description", flattenDataCatalogEntryDescription(res["description"], d, config)); err != nil { - return resource_data_catalog_entry_fmt.Errorf("Error reading Entry: %s", err) - } - if err := d.Set("schema", flattenDataCatalogEntrySchema(res["schema"], d, config)); err != nil { - return resource_data_catalog_entry_fmt.Errorf("Error reading Entry: %s", err) - } - if err := d.Set("type", flattenDataCatalogEntryType(res["type"], d, config)); err != nil { - return resource_data_catalog_entry_fmt.Errorf("Error reading Entry: %s", err) - } - if err := d.Set("user_specified_type", flattenDataCatalogEntryUserSpecifiedType(res["userSpecifiedType"], d, config)); err != nil { - return resource_data_catalog_entry_fmt.Errorf("Error reading Entry: %s", err) - } - if err := d.Set("integrated_system", flattenDataCatalogEntryIntegratedSystem(res["integratedSystem"], d, config)); err != nil { - return resource_data_catalog_entry_fmt.Errorf("Error reading Entry: %s", err) - } - if err := d.Set("user_specified_system", flattenDataCatalogEntryUserSpecifiedSystem(res["userSpecifiedSystem"], d, config)); err != nil { - return resource_data_catalog_entry_fmt.Errorf("Error reading Entry: %s", err) - } - if err := d.Set("gcs_fileset_spec", flattenDataCatalogEntryGcsFilesetSpec(res["gcsFilesetSpec"], d, config)); err != nil { - return resource_data_catalog_entry_fmt.Errorf("Error reading Entry: %s", err) - } - if err := d.Set("bigquery_table_spec", flattenDataCatalogEntryBigqueryTableSpec(res["bigqueryTableSpec"], d, config)); err != nil { - return resource_data_catalog_entry_fmt.Errorf("Error reading Entry: %s", err) - } - if err := d.Set("bigquery_date_sharded_spec", 
flattenDataCatalogEntryBigqueryDateShardedSpec(res["bigqueryDateShardedSpec"], d, config)); err != nil { - return resource_data_catalog_entry_fmt.Errorf("Error reading Entry: %s", err) - } - - return nil -} - -func resourceDataCatalogEntryUpdate(d *resource_data_catalog_entry_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - linkedResourceProp, err := expandDataCatalogEntryLinkedResource(d.Get("linked_resource"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("linked_resource"); !isEmptyValue(resource_data_catalog_entry_reflect.ValueOf(v)) && (ok || !resource_data_catalog_entry_reflect.DeepEqual(v, linkedResourceProp)) { - obj["linkedResource"] = linkedResourceProp - } - displayNameProp, err := expandDataCatalogEntryDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_data_catalog_entry_reflect.ValueOf(v)) && (ok || !resource_data_catalog_entry_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDataCatalogEntryDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_data_catalog_entry_reflect.ValueOf(v)) && (ok || !resource_data_catalog_entry_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - schemaProp, err := expandDataCatalogEntrySchema(d.Get("schema"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("schema"); !isEmptyValue(resource_data_catalog_entry_reflect.ValueOf(v)) && (ok || !resource_data_catalog_entry_reflect.DeepEqual(v, schemaProp)) { - obj["schema"] = schemaProp - } - userSpecifiedTypeProp, err := 
expandDataCatalogEntryUserSpecifiedType(d.Get("user_specified_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_specified_type"); !isEmptyValue(resource_data_catalog_entry_reflect.ValueOf(v)) && (ok || !resource_data_catalog_entry_reflect.DeepEqual(v, userSpecifiedTypeProp)) { - obj["userSpecifiedType"] = userSpecifiedTypeProp - } - userSpecifiedSystemProp, err := expandDataCatalogEntryUserSpecifiedSystem(d.Get("user_specified_system"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_specified_system"); !isEmptyValue(resource_data_catalog_entry_reflect.ValueOf(v)) && (ok || !resource_data_catalog_entry_reflect.DeepEqual(v, userSpecifiedSystemProp)) { - obj["userSpecifiedSystem"] = userSpecifiedSystemProp - } - gcsFilesetSpecProp, err := expandDataCatalogEntryGcsFilesetSpec(d.Get("gcs_fileset_spec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("gcs_fileset_spec"); !isEmptyValue(resource_data_catalog_entry_reflect.ValueOf(v)) && (ok || !resource_data_catalog_entry_reflect.DeepEqual(v, gcsFilesetSpecProp)) { - obj["gcsFilesetSpec"] = gcsFilesetSpecProp - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - resource_data_catalog_entry_log.Printf("[DEBUG] Updating Entry %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("linked_resource") { - updateMask = append(updateMask, "linkedResource") - } - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("schema") { - updateMask = append(updateMask, "schema") - } - - if d.HasChange("user_specified_type") { - updateMask = append(updateMask, "userSpecifiedType") - } - - if d.HasChange("user_specified_system") { - updateMask = append(updateMask, "userSpecifiedSystem") - } - - if 
d.HasChange("gcs_fileset_spec") { - updateMask = append(updateMask, "gcsFilesetSpec") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_data_catalog_entry_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - if parts := resource_data_catalog_entry_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_data_catalog_entry_schema.TimeoutUpdate)) - - if err != nil { - return resource_data_catalog_entry_fmt.Errorf("Error updating Entry %q: %s", d.Id(), err) - } else { - resource_data_catalog_entry_log.Printf("[DEBUG] Finished updating Entry %q: %#v", d.Id(), res) - } - - return resourceDataCatalogEntryRead(d, meta) -} - -func resourceDataCatalogEntryDelete(d *resource_data_catalog_entry_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - if parts := resource_data_catalog_entry_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - resource_data_catalog_entry_log.Printf("[DEBUG] Deleting Entry %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_data_catalog_entry_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Entry") - } - - resource_data_catalog_entry_log.Printf("[DEBUG] Finished deleting Entry %q: %#v", d.Id(), res) - return nil 
-} - -func resourceDataCatalogEntryImport(d *resource_data_catalog_entry_schema.ResourceData, meta interface{}) ([]*resource_data_catalog_entry_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - name := d.Get("name").(string) - egRegex := resource_data_catalog_entry_regexp.MustCompile("(projects/.+/locations/.+/entryGroups/.+)/entries/(.+)") - - parts := egRegex.FindStringSubmatch(name) - if len(parts) != 3 { - return nil, resource_data_catalog_entry_fmt.Errorf("entry name does not fit the format %s", egRegex) - } - if err := d.Set("entry_group", parts[1]); err != nil { - return nil, resource_data_catalog_entry_fmt.Errorf("Error setting entry_group: %s", err) - } - if err := d.Set("entry_id", parts[2]); err != nil { - return nil, resource_data_catalog_entry_fmt.Errorf("Error setting entry_id: %s", err) - } - return []*resource_data_catalog_entry_schema.ResourceData{d}, nil -} - -func flattenDataCatalogEntryName(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryLinkedResource(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryDisplayName(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryDescription(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntrySchema(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - b, err := resource_data_catalog_entry_json.Marshal(v) - if err != nil { - - resource_data_catalog_entry_log.Printf("[ERROR] failed to marshal schema to JSON: %v", err) - } - return string(b) -} - -func 
flattenDataCatalogEntryType(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryUserSpecifiedType(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryIntegratedSystem(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryUserSpecifiedSystem(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryGcsFilesetSpec(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["file_patterns"] = - flattenDataCatalogEntryGcsFilesetSpecFilePatterns(original["filePatterns"], d, config) - transformed["sample_gcs_file_specs"] = - flattenDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecs(original["sampleGcsFileSpecs"], d, config) - return []interface{}{transformed} -} - -func flattenDataCatalogEntryGcsFilesetSpecFilePatterns(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecs(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "file_path": flattenDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsFilePath(original["filePath"], d, config), - "size_bytes": 
flattenDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsSizeBytes(original["sizeBytes"], d, config), - }) - } - return transformed -} - -func flattenDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsFilePath(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsSizeBytes(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_catalog_entry_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataCatalogEntryBigqueryTableSpec(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["table_source_type"] = - flattenDataCatalogEntryBigqueryTableSpecTableSourceType(original["tableSourceType"], d, config) - transformed["view_spec"] = - flattenDataCatalogEntryBigqueryTableSpecViewSpec(original["viewSpec"], d, config) - transformed["table_spec"] = - flattenDataCatalogEntryBigqueryTableSpecTableSpec(original["tableSpec"], d, config) - return []interface{}{transformed} -} - -func flattenDataCatalogEntryBigqueryTableSpecTableSourceType(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryBigqueryTableSpecViewSpec(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["view_query"] = - 
flattenDataCatalogEntryBigqueryTableSpecViewSpecViewQuery(original["viewQuery"], d, config) - return []interface{}{transformed} -} - -func flattenDataCatalogEntryBigqueryTableSpecViewSpecViewQuery(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryBigqueryTableSpecTableSpec(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["grouped_entry"] = - flattenDataCatalogEntryBigqueryTableSpecTableSpecGroupedEntry(original["groupedEntry"], d, config) - return []interface{}{transformed} -} - -func flattenDataCatalogEntryBigqueryTableSpecTableSpecGroupedEntry(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryBigqueryDateShardedSpec(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset"] = - flattenDataCatalogEntryBigqueryDateShardedSpecDataset(original["dataset"], d, config) - transformed["table_prefix"] = - flattenDataCatalogEntryBigqueryDateShardedSpecTablePrefix(original["tablePrefix"], d, config) - transformed["shard_count"] = - flattenDataCatalogEntryBigqueryDateShardedSpecShardCount(original["shardCount"], d, config) - return []interface{}{transformed} -} - -func flattenDataCatalogEntryBigqueryDateShardedSpecDataset(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryBigqueryDateShardedSpecTablePrefix(v interface{}, d 
*resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryBigqueryDateShardedSpecShardCount(v interface{}, d *resource_data_catalog_entry_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_catalog_entry_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func expandDataCatalogEntryLinkedResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogEntryDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogEntryDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogEntrySchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - b := []byte(v.(string)) - if len(b) == 0 { - return nil, nil - } - m := make(map[string]interface{}) - if err := resource_data_catalog_entry_json.Unmarshal(b, &m); err != nil { - return nil, err - } - return m, nil -} - -func expandDataCatalogEntryType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogEntryUserSpecifiedType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogEntryUserSpecifiedSystem(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogEntryGcsFilesetSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedFilePatterns, err := expandDataCatalogEntryGcsFilesetSpecFilePatterns(original["file_patterns"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_entry_reflect.ValueOf(transformedFilePatterns); val.IsValid() && !isEmptyValue(val) { - transformed["filePatterns"] = transformedFilePatterns - } - - transformedSampleGcsFileSpecs, err := expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecs(original["sample_gcs_file_specs"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_entry_reflect.ValueOf(transformedSampleGcsFileSpecs); val.IsValid() && !isEmptyValue(val) { - transformed["sampleGcsFileSpecs"] = transformedSampleGcsFileSpecs - } - - return transformed, nil -} - -func expandDataCatalogEntryGcsFilesetSpecFilePatterns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFilePath, err := expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsFilePath(original["file_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_entry_reflect.ValueOf(transformedFilePath); val.IsValid() && !isEmptyValue(val) { - transformed["filePath"] = transformedFilePath - } - - transformedSizeBytes, err := expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsSizeBytes(original["size_bytes"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_entry_reflect.ValueOf(transformedSizeBytes); val.IsValid() && !isEmptyValue(val) { - transformed["sizeBytes"] = transformedSizeBytes - } - - req = 
append(req, transformed) - } - return req, nil -} - -func expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsFilePath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogEntryGcsFilesetSpecSampleGcsFileSpecsSizeBytes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDataCatalogEntryGroup() *resource_data_catalog_entry_group_schema.Resource { - return &resource_data_catalog_entry_group_schema.Resource{ - Create: resourceDataCatalogEntryGroupCreate, - Read: resourceDataCatalogEntryGroupRead, - Update: resourceDataCatalogEntryGroupUpdate, - Delete: resourceDataCatalogEntryGroupDelete, - - Importer: &resource_data_catalog_entry_group_schema.ResourceImporter{ - State: resourceDataCatalogEntryGroupImport, - }, - - Timeouts: &resource_data_catalog_entry_group_schema.ResourceTimeout{ - Create: resource_data_catalog_entry_group_schema.DefaultTimeout(4 * resource_data_catalog_entry_group_time.Minute), - Update: resource_data_catalog_entry_group_schema.DefaultTimeout(4 * resource_data_catalog_entry_group_time.Minute), - Delete: resource_data_catalog_entry_group_schema.DefaultTimeout(4 * resource_data_catalog_entry_group_time.Minute), - }, - - Schema: map[string]*resource_data_catalog_entry_group_schema.Schema{ - "entry_group_id": { - Type: resource_data_catalog_entry_group_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[A-z_][A-z0-9_]{0,63}$`), - Description: `The id of the entry group to create. 
The id must begin with a letter or underscore, -contain only English letters, numbers and underscores, and be at most 64 characters.`, - }, - "description": { - Type: resource_data_catalog_entry_group_schema.TypeString, - Optional: true, - Description: `Entry group description, which can consist of several sentences or paragraphs that describe entry group contents.`, - }, - "display_name": { - Type: resource_data_catalog_entry_group_schema.TypeString, - Optional: true, - Description: `A short name to identify the entry group, for example, "analytics data - jan 2011".`, - }, - "region": { - Type: resource_data_catalog_entry_group_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `EntryGroup location region.`, - }, - "name": { - Type: resource_data_catalog_entry_group_schema.TypeString, - Computed: true, - Description: `The resource name of the entry group in URL format. Example: projects/{project}/locations/{location}/entryGroups/{entryGroupId}`, - }, - "project": { - Type: resource_data_catalog_entry_group_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataCatalogEntryGroupCreate(d *resource_data_catalog_entry_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDataCatalogEntryGroupDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_data_catalog_entry_group_reflect.ValueOf(displayNameProp)) && (ok || !resource_data_catalog_entry_group_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDataCatalogEntryGroupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else 
if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_data_catalog_entry_group_reflect.ValueOf(descriptionProp)) && (ok || !resource_data_catalog_entry_group_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}projects/{{project}}/locations/{{region}}/entryGroups?entryGroupId={{entry_group_id}}") - if err != nil { - return err - } - - resource_data_catalog_entry_group_log.Printf("[DEBUG] Creating new EntryGroup: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_data_catalog_entry_group_fmt.Errorf("Error fetching project for EntryGroup: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_data_catalog_entry_group_schema.TimeoutCreate)) - if err != nil { - return resource_data_catalog_entry_group_fmt.Errorf("Error creating EntryGroup: %s", err) - } - if err := d.Set("name", flattenDataCatalogEntryGroupName(res["name"], d, config)); err != nil { - return resource_data_catalog_entry_group_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_data_catalog_entry_group_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_data_catalog_entry_group_log.Printf("[DEBUG] Finished creating EntryGroup %q: %#v", d.Id(), res) - - return resourceDataCatalogEntryGroupRead(d, meta) -} - -func resourceDataCatalogEntryGroupRead(d *resource_data_catalog_entry_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if 
err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_data_catalog_entry_group_fmt.Errorf("Error fetching project for EntryGroup: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_data_catalog_entry_group_fmt.Sprintf("DataCatalogEntryGroup %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_data_catalog_entry_group_fmt.Errorf("Error reading EntryGroup: %s", err) - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - if err := d.Set("region", region); err != nil { - return resource_data_catalog_entry_group_fmt.Errorf("Error reading EntryGroup: %s", err) - } - - if err := d.Set("name", flattenDataCatalogEntryGroupName(res["name"], d, config)); err != nil { - return resource_data_catalog_entry_group_fmt.Errorf("Error reading EntryGroup: %s", err) - } - if err := d.Set("display_name", flattenDataCatalogEntryGroupDisplayName(res["displayName"], d, config)); err != nil { - return resource_data_catalog_entry_group_fmt.Errorf("Error reading EntryGroup: %s", err) - } - if err := d.Set("description", flattenDataCatalogEntryGroupDescription(res["description"], d, config)); err != nil { - return resource_data_catalog_entry_group_fmt.Errorf("Error reading EntryGroup: %s", err) - } - - return nil -} - -func resourceDataCatalogEntryGroupUpdate(d *resource_data_catalog_entry_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_data_catalog_entry_group_fmt.Errorf("Error fetching project for 
EntryGroup: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandDataCatalogEntryGroupDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_data_catalog_entry_group_reflect.ValueOf(v)) && (ok || !resource_data_catalog_entry_group_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDataCatalogEntryGroupDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_data_catalog_entry_group_reflect.ValueOf(v)) && (ok || !resource_data_catalog_entry_group_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - resource_data_catalog_entry_group_log.Printf("[DEBUG] Updating EntryGroup %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_data_catalog_entry_group_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_data_catalog_entry_group_schema.TimeoutUpdate)) - - if err != nil { - return resource_data_catalog_entry_group_fmt.Errorf("Error updating EntryGroup %q: %s", d.Id(), err) - } else { - resource_data_catalog_entry_group_log.Printf("[DEBUG] Finished updating EntryGroup %q: %#v", d.Id(), res) - } - - return resourceDataCatalogEntryGroupRead(d, meta) -} - -func 
resourceDataCatalogEntryGroupDelete(d *resource_data_catalog_entry_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_data_catalog_entry_group_fmt.Errorf("Error fetching project for EntryGroup: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_data_catalog_entry_group_log.Printf("[DEBUG] Deleting EntryGroup %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_data_catalog_entry_group_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "EntryGroup") - } - - resource_data_catalog_entry_group_log.Printf("[DEBUG] Finished deleting EntryGroup %q: %#v", d.Id(), res) - return nil -} - -func resourceDataCatalogEntryGroupImport(d *resource_data_catalog_entry_group_schema.ResourceData, meta interface{}) ([]*resource_data_catalog_entry_group_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - name := d.Get("name").(string) - egRegex := resource_data_catalog_entry_group_regexp.MustCompile("projects/(.+)/locations/(.+)/entryGroups/(.+)") - - parts := egRegex.FindStringSubmatch(name) - if len(parts) != 4 { - return nil, resource_data_catalog_entry_group_fmt.Errorf("entry group name does not fit the format %s", egRegex) - } - if err := d.Set("project", parts[1]); err != nil { - return nil, resource_data_catalog_entry_group_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", parts[2]); err != nil { - 
return nil, resource_data_catalog_entry_group_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("entry_group_id", parts[3]); err != nil { - return nil, resource_data_catalog_entry_group_fmt.Errorf("Error setting entry_group_id: %s", err) - } - return []*resource_data_catalog_entry_group_schema.ResourceData{d}, nil -} - -func flattenDataCatalogEntryGroupName(v interface{}, d *resource_data_catalog_entry_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryGroupDisplayName(v interface{}, d *resource_data_catalog_entry_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogEntryGroupDescription(v interface{}, d *resource_data_catalog_entry_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDataCatalogEntryGroupDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogEntryGroupDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDataCatalogTag() *resource_data_catalog_tag_schema.Resource { - return &resource_data_catalog_tag_schema.Resource{ - Create: resourceDataCatalogTagCreate, - Read: resourceDataCatalogTagRead, - Update: resourceDataCatalogTagUpdate, - Delete: resourceDataCatalogTagDelete, - - Importer: &resource_data_catalog_tag_schema.ResourceImporter{ - State: resourceDataCatalogTagImport, - }, - - Timeouts: &resource_data_catalog_tag_schema.ResourceTimeout{ - Create: resource_data_catalog_tag_schema.DefaultTimeout(4 * resource_data_catalog_tag_time.Minute), - Update: resource_data_catalog_tag_schema.DefaultTimeout(4 * resource_data_catalog_tag_time.Minute), - Delete: resource_data_catalog_tag_schema.DefaultTimeout(4 * resource_data_catalog_tag_time.Minute), - }, - - Schema: map[string]*resource_data_catalog_tag_schema.Schema{ - "fields": { - Type: 
resource_data_catalog_tag_schema.TypeSet, - Required: true, - Description: `This maps the ID of a tag field to the value of and additional information about that field. -Valid field IDs are defined by the tag's template. A tag must have at least 1 field and at most 500 fields.`, - Elem: &resource_data_catalog_tag_schema.Resource{ - Schema: map[string]*resource_data_catalog_tag_schema.Schema{ - "field_name": { - Type: resource_data_catalog_tag_schema.TypeString, - Required: true, - }, - "bool_value": { - Type: resource_data_catalog_tag_schema.TypeBool, - Optional: true, - Description: `Holds the value for a tag field with boolean type.`, - }, - "double_value": { - Type: resource_data_catalog_tag_schema.TypeFloat, - Optional: true, - Description: `Holds the value for a tag field with double type.`, - }, - "enum_value": { - Type: resource_data_catalog_tag_schema.TypeString, - Optional: true, - Description: `The display name of the enum value.`, - }, - - "string_value": { - Type: resource_data_catalog_tag_schema.TypeString, - Optional: true, - Description: `Holds the value for a tag field with string type.`, - }, - "timestamp_value": { - Type: resource_data_catalog_tag_schema.TypeString, - Optional: true, - Description: `Holds the value for a tag field with timestamp type.`, - }, - "display_name": { - Type: resource_data_catalog_tag_schema.TypeString, - Computed: true, - Description: `The display name of this field`, - }, - "order": { - Type: resource_data_catalog_tag_schema.TypeInt, - Computed: true, - Description: `The order of this field with respect to other fields in this tag. For example, a higher value can indicate -a more important field. The value can be negative. 
Multiple fields can have the same order, and field orders -within a tag do not have to be sequential.`, - }, - }, - }, - }, - "template": { - Type: resource_data_catalog_tag_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name of the tag template that this tag uses. Example: -projects/{project_id}/locations/{location}/tagTemplates/{tagTemplateId} -This field cannot be modified after creation.`, - }, - "column": { - Type: resource_data_catalog_tag_schema.TypeString, - Optional: true, - Description: `Resources like Entry can have schemas associated with them. This scope allows users to attach tags to an -individual column based on that schema. - -For attaching a tag to a nested column, use '.' to separate the column names. Example: -'outer_column.inner_column'`, - }, - "parent": { - Type: resource_data_catalog_tag_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The name of the parent this tag is attached to. This can be the name of an entry or an entry group. If an entry group, the tag will be attached to -all entries in that group.`, - }, - "name": { - Type: resource_data_catalog_tag_schema.TypeString, - Computed: true, - Description: `The resource name of the tag in URL format. Example: -projects/{project_id}/locations/{location}/entrygroups/{entryGroupId}/entries/{entryId}/tags/{tag_id} or -projects/{project_id}/locations/{location}/entrygroups/{entryGroupId}/tags/{tag_id} -where tag_id is a system-generated identifier. 
Note that this Tag may not actually be stored in the location in this name.`, - }, - "template_displayname": { - Type: resource_data_catalog_tag_schema.TypeString, - Computed: true, - Description: `The display name of the tag template.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataCatalogTagCreate(d *resource_data_catalog_tag_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - templateProp, err := expandNestedDataCatalogTagTemplate(d.Get("template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("template"); !isEmptyValue(resource_data_catalog_tag_reflect.ValueOf(templateProp)) && (ok || !resource_data_catalog_tag_reflect.DeepEqual(v, templateProp)) { - obj["template"] = templateProp - } - fieldsProp, err := expandNestedDataCatalogTagFields(d.Get("fields"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fields"); !isEmptyValue(resource_data_catalog_tag_reflect.ValueOf(fieldsProp)) && (ok || !resource_data_catalog_tag_reflect.DeepEqual(v, fieldsProp)) { - obj["fields"] = fieldsProp - } - columnProp, err := expandNestedDataCatalogTagColumn(d.Get("column"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("column"); !isEmptyValue(resource_data_catalog_tag_reflect.ValueOf(columnProp)) && (ok || !resource_data_catalog_tag_reflect.DeepEqual(v, columnProp)) { - obj["column"] = columnProp - } - - obj, err = resourceDataCatalogTagEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{parent}}/tags") - if err != nil { - return err - } - - resource_data_catalog_tag_log.Printf("[DEBUG] Creating new Tag: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_data_catalog_tag_schema.TimeoutCreate)) - if err != nil { - return resource_data_catalog_tag_fmt.Errorf("Error creating Tag: %s", err) - } - if err := d.Set("name", flattenNestedDataCatalogTagName(res["name"], d, config)); err != nil { - return resource_data_catalog_tag_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_data_catalog_tag_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_data_catalog_tag_log.Printf("[DEBUG] Finished creating Tag %q: %#v", d.Id(), res) - - return resourceDataCatalogTagRead(d, meta) -} - -func resourceDataCatalogTagRead(d *resource_data_catalog_tag_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{parent}}/tags") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_data_catalog_tag_fmt.Sprintf("DataCatalogTag %q", d.Id())) - } - - res, err = flattenNestedDataCatalogTag(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_data_catalog_tag_log.Printf("[DEBUG] Removing DataCatalogTag because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("name", flattenNestedDataCatalogTagName(res["name"], d, config)); err != nil { - return resource_data_catalog_tag_fmt.Errorf("Error reading Tag: %s", err) - } - if err := d.Set("template", flattenNestedDataCatalogTagTemplate(res["template"], d, config)); err != nil { - return 
resource_data_catalog_tag_fmt.Errorf("Error reading Tag: %s", err) - } - if err := d.Set("template_displayname", flattenNestedDataCatalogTagTemplateDisplayname(res["templateDisplayName"], d, config)); err != nil { - return resource_data_catalog_tag_fmt.Errorf("Error reading Tag: %s", err) - } - if err := d.Set("fields", flattenNestedDataCatalogTagFields(res["fields"], d, config)); err != nil { - return resource_data_catalog_tag_fmt.Errorf("Error reading Tag: %s", err) - } - if err := d.Set("column", flattenNestedDataCatalogTagColumn(res["column"], d, config)); err != nil { - return resource_data_catalog_tag_fmt.Errorf("Error reading Tag: %s", err) - } - - return nil -} - -func resourceDataCatalogTagUpdate(d *resource_data_catalog_tag_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - fieldsProp, err := expandNestedDataCatalogTagFields(d.Get("fields"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fields"); !isEmptyValue(resource_data_catalog_tag_reflect.ValueOf(v)) && (ok || !resource_data_catalog_tag_reflect.DeepEqual(v, fieldsProp)) { - obj["fields"] = fieldsProp - } - columnProp, err := expandNestedDataCatalogTagColumn(d.Get("column"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("column"); !isEmptyValue(resource_data_catalog_tag_reflect.ValueOf(v)) && (ok || !resource_data_catalog_tag_reflect.DeepEqual(v, columnProp)) { - obj["column"] = columnProp - } - - obj, err = resourceDataCatalogTagEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - resource_data_catalog_tag_log.Printf("[DEBUG] Updating Tag %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("fields") { - updateMask = 
append(updateMask, "fields") - } - - if d.HasChange("column") { - updateMask = append(updateMask, "column") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_data_catalog_tag_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_data_catalog_tag_schema.TimeoutUpdate)) - - if err != nil { - return resource_data_catalog_tag_fmt.Errorf("Error updating Tag %q: %s", d.Id(), err) - } else { - resource_data_catalog_tag_log.Printf("[DEBUG] Finished updating Tag %q: %#v", d.Id(), res) - } - - return resourceDataCatalogTagRead(d, meta) -} - -func resourceDataCatalogTagDelete(d *resource_data_catalog_tag_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_data_catalog_tag_log.Printf("[DEBUG] Deleting Tag %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_data_catalog_tag_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Tag") - } - - resource_data_catalog_tag_log.Printf("[DEBUG] Finished deleting Tag %q: %#v", d.Id(), res) - return nil -} - -func resourceDataCatalogTagImport(d *resource_data_catalog_tag_schema.ResourceData, meta interface{}) ([]*resource_data_catalog_tag_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - name := d.Get("name").(string) 
- egRegex := resource_data_catalog_tag_regexp.MustCompile("(.+)/tags") - - parts := egRegex.FindStringSubmatch(name) - if len(parts) != 2 { - return nil, resource_data_catalog_tag_fmt.Errorf("entry name does not fit the format %s", egRegex) - } - - if err := d.Set("parent", parts[1]); err != nil { - return nil, resource_data_catalog_tag_fmt.Errorf("Error setting parent: %s", err) - } - return []*resource_data_catalog_tag_schema.ResourceData{d}, nil -} - -func flattenNestedDataCatalogTagName(v interface{}, d *resource_data_catalog_tag_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedDataCatalogTagTemplate(v interface{}, d *resource_data_catalog_tag_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedDataCatalogTagTemplateDisplayname(v interface{}, d *resource_data_catalog_tag_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedDataCatalogTagFields(v interface{}, d *resource_data_catalog_tag_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.(map[string]interface{}) - transformed := make([]interface{}, 0, len(l)) - for k, raw := range l { - original := raw.(map[string]interface{}) - transformed = append(transformed, map[string]interface{}{ - "field_name": k, - "display_name": flattenNestedDataCatalogTagFieldsDisplayName(original["display_name"], d, config), - "order": flattenNestedDataCatalogTagFieldsOrder(original["order"], d, config), - "double_value": flattenNestedDataCatalogTagFieldsDoubleValue(original["doubleValue"], d, config), - "string_value": flattenNestedDataCatalogTagFieldsStringValue(original["stringValue"], d, config), - "bool_value": flattenNestedDataCatalogTagFieldsBoolValue(original["boolValue"], d, config), - "timestamp_value": flattenNestedDataCatalogTagFieldsTimestampValue(original["timestampValue"], d, config), - "enum_value": flattenNestedDataCatalogTagFieldsEnumValue(original["enumValue"], d, config), 
- }) - } - return transformed -} - -func flattenNestedDataCatalogTagFieldsDisplayName(v interface{}, d *resource_data_catalog_tag_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedDataCatalogTagFieldsOrder(v interface{}, d *resource_data_catalog_tag_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_catalog_tag_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenNestedDataCatalogTagFieldsDoubleValue(v interface{}, d *resource_data_catalog_tag_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedDataCatalogTagFieldsStringValue(v interface{}, d *resource_data_catalog_tag_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedDataCatalogTagFieldsBoolValue(v interface{}, d *resource_data_catalog_tag_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedDataCatalogTagFieldsTimestampValue(v interface{}, d *resource_data_catalog_tag_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedDataCatalogTagFieldsEnumValue(v interface{}, d *resource_data_catalog_tag_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - - return v.(map[string]interface{})["displayName"] -} - -func flattenNestedDataCatalogTagColumn(v interface{}, d *resource_data_catalog_tag_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedDataCatalogTagTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedDataCatalogTagFields(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - m := make(map[string]interface{}) - 
for _, raw := range v.(*resource_data_catalog_tag_schema.Set).List() { - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDisplayName, err := expandNestedDataCatalogTagFieldsDisplayName(original["display_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_tag_reflect.ValueOf(transformedDisplayName); val.IsValid() && !isEmptyValue(val) { - transformed["display_name"] = transformedDisplayName - } - - transformedOrder, err := expandNestedDataCatalogTagFieldsOrder(original["order"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_tag_reflect.ValueOf(transformedOrder); val.IsValid() && !isEmptyValue(val) { - transformed["order"] = transformedOrder - } - - transformedDoubleValue, err := expandNestedDataCatalogTagFieldsDoubleValue(original["double_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_tag_reflect.ValueOf(transformedDoubleValue); val.IsValid() && !isEmptyValue(val) { - transformed["doubleValue"] = transformedDoubleValue - } - - transformedStringValue, err := expandNestedDataCatalogTagFieldsStringValue(original["string_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_tag_reflect.ValueOf(transformedStringValue); val.IsValid() && !isEmptyValue(val) { - transformed["stringValue"] = transformedStringValue - } - - transformedBoolValue, err := expandNestedDataCatalogTagFieldsBoolValue(original["bool_value"], d, config) - if err != nil { - return nil, err - } else { - transformed["boolValue"] = transformedBoolValue - } - - transformedTimestampValue, err := expandNestedDataCatalogTagFieldsTimestampValue(original["timestamp_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_tag_reflect.ValueOf(transformedTimestampValue); val.IsValid() && !isEmptyValue(val) { - transformed["timestampValue"] = 
transformedTimestampValue - } - - transformedEnumValue, err := expandNestedDataCatalogTagFieldsEnumValue(original["enum_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_tag_reflect.ValueOf(transformedEnumValue); val.IsValid() && !isEmptyValue(val) { - transformed["enumValue"] = transformedEnumValue - } - - transformedFieldName, err := expandString(original["field_name"], d, config) - if err != nil { - return nil, err - } - m[transformedFieldName] = transformed - } - return m, nil -} - -func expandNestedDataCatalogTagFieldsDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedDataCatalogTagFieldsOrder(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedDataCatalogTagFieldsDoubleValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedDataCatalogTagFieldsStringValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedDataCatalogTagFieldsBoolValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedDataCatalogTagFieldsTimestampValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedDataCatalogTagFieldsEnumValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - transformed := make(map[string]interface{}) - if val := resource_data_catalog_tag_reflect.ValueOf(v); val.IsValid() && !isEmptyValue(val) { - transformed["displayName"] = v - } - - return transformed, nil -} - -func expandNestedDataCatalogTagColumn(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDataCatalogTagEncoder(d *resource_data_catalog_tag_schema.ResourceData, 
meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - if obj["fields"] != nil { - - fields := obj["fields"].(map[string]interface{}) - for _, elements := range fields { - values := elements.(map[string]interface{}) - if len(values) > 1 { - for val := range values { - if val == "boolValue" { - delete(values, "boolValue") - } - } - } - } - } - return obj, nil -} - -func flattenNestedDataCatalogTag(d *resource_data_catalog_tag_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["tags"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_data_catalog_tag_fmt.Errorf("expected list or map for value tags. Actual value: %v", v) - } - - _, item, err := resourceDataCatalogTagFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceDataCatalogTagFindNestedObjectInList(d *resource_data_catalog_tag_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName := d.Get("name") - expectedFlattenedName := flattenNestedDataCatalogTagName(expectedName, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemName := flattenNestedDataCatalogTagName(item["name"], d, meta.(*Config)) - - if !(isEmptyValue(resource_data_catalog_tag_reflect.ValueOf(itemName)) && isEmptyValue(resource_data_catalog_tag_reflect.ValueOf(expectedFlattenedName))) && !resource_data_catalog_tag_reflect.DeepEqual(itemName, expectedFlattenedName) { - resource_data_catalog_tag_log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - 
resource_data_catalog_tag_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func resourceDataCatalogTagTemplate() *resource_data_catalog_tag_template_schema.Resource { - return &resource_data_catalog_tag_template_schema.Resource{ - Create: resourceDataCatalogTagTemplateCreate, - Read: resourceDataCatalogTagTemplateRead, - Update: resourceDataCatalogTagTemplateUpdate, - Delete: resourceDataCatalogTagTemplateDelete, - - Importer: &resource_data_catalog_tag_template_schema.ResourceImporter{ - State: resourceDataCatalogTagTemplateImport, - }, - - Timeouts: &resource_data_catalog_tag_template_schema.ResourceTimeout{ - Create: resource_data_catalog_tag_template_schema.DefaultTimeout(4 * resource_data_catalog_tag_template_time.Minute), - Update: resource_data_catalog_tag_template_schema.DefaultTimeout(4 * resource_data_catalog_tag_template_time.Minute), - Delete: resource_data_catalog_tag_template_schema.DefaultTimeout(4 * resource_data_catalog_tag_template_time.Minute), - }, - - Schema: map[string]*resource_data_catalog_tag_template_schema.Schema{ - "fields": { - Type: resource_data_catalog_tag_template_schema.TypeSet, - Required: true, - ForceNew: true, - Description: `Set of tag template field IDs and the settings for the field. This set is an exhaustive list of the allowed fields. 
This set must contain at least one field and at most 500 fields.`, - Elem: &resource_data_catalog_tag_template_schema.Resource{ - Schema: map[string]*resource_data_catalog_tag_template_schema.Schema{ - "field_id": { - Type: resource_data_catalog_tag_template_schema.TypeString, - Required: true, - ForceNew: true, - }, - "type": { - Type: resource_data_catalog_tag_template_schema.TypeList, - Required: true, - Description: `The type of value this tag field can contain.`, - MaxItems: 1, - Elem: &resource_data_catalog_tag_template_schema.Resource{ - Schema: map[string]*resource_data_catalog_tag_template_schema.Schema{ - "enum_type": { - Type: resource_data_catalog_tag_template_schema.TypeList, - Optional: true, - Description: `Represents an enum type. - Exactly one of 'primitive_type' or 'enum_type' must be set`, - MaxItems: 1, - Elem: &resource_data_catalog_tag_template_schema.Resource{ - Schema: map[string]*resource_data_catalog_tag_template_schema.Schema{ - "allowed_values": { - Type: resource_data_catalog_tag_template_schema.TypeSet, - Required: true, - Description: `The set of allowed values for this enum. The display names of the -values must be case-insensitively unique within this set. Currently, -enum values can only be added to the list of allowed values. Deletion -and renaming of enum values are not supported. -Can have up to 500 allowed values.`, - Elem: datacatalogTagTemplateFieldsFieldsTypeEnumTypeAllowedValuesSchema(), - }, - }, - }, - }, - "primitive_type": { - Type: resource_data_catalog_tag_template_schema.TypeString, - Optional: true, - ValidateFunc: resource_data_catalog_tag_template_validation.StringInSlice([]string{"DOUBLE", "STRING", "BOOL", "TIMESTAMP", ""}, false), - Description: `Represents primitive types - string, bool etc. 
- Exactly one of 'primitive_type' or 'enum_type' must be set Possible values: ["DOUBLE", "STRING", "BOOL", "TIMESTAMP"]`, - }, - }, - }, - }, - "description": { - Type: resource_data_catalog_tag_template_schema.TypeString, - Optional: true, - Description: `A description for this field.`, - }, - "display_name": { - Type: resource_data_catalog_tag_template_schema.TypeString, - Optional: true, - Description: `The display name for this field.`, - }, - "is_required": { - Type: resource_data_catalog_tag_template_schema.TypeBool, - Optional: true, - Description: `Whether this is a required field. Defaults to false.`, - }, - "order": { - Type: resource_data_catalog_tag_template_schema.TypeInt, - Optional: true, - Description: `The order of this field with respect to other fields in this tag template. -A higher value indicates a more important field. The value can be negative. -Multiple fields can have the same order, and field orders within a tag do not have to be sequential.`, - }, - "name": { - Type: resource_data_catalog_tag_template_schema.TypeString, - Computed: true, - Description: `The resource name of the tag template field in URL format. Example: projects/{project_id}/locations/{location}/tagTemplates/{tagTemplateId}/fields/{field}`, - }, - }, - }, - }, - "tag_template_id": { - Type: resource_data_catalog_tag_template_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[a-z_][a-z0-9_]{0,63}$`), - Description: `The id of the tag template to create.`, - }, - "display_name": { - Type: resource_data_catalog_tag_template_schema.TypeString, - Optional: true, - Description: `The display name for this template.`, - }, - "force_delete": { - Type: resource_data_catalog_tag_template_schema.TypeBool, - Optional: true, - Description: `This confirms the deletion of any possible tags using this template. 
Must be set to true in order to delete the tag template.`, - Default: false, - }, - "region": { - Type: resource_data_catalog_tag_template_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Template location region.`, - }, - "name": { - Type: resource_data_catalog_tag_template_schema.TypeString, - Computed: true, - Description: `The resource name of the tag template in URL format. Example: projects/{project_id}/locations/{location}/tagTemplates/{tagTemplateId}`, - }, - "project": { - Type: resource_data_catalog_tag_template_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func datacatalogTagTemplateFieldsFieldsTypeEnumTypeAllowedValuesSchema() *resource_data_catalog_tag_template_schema.Resource { - return &resource_data_catalog_tag_template_schema.Resource{ - Schema: map[string]*resource_data_catalog_tag_template_schema.Schema{ - "display_name": { - Type: resource_data_catalog_tag_template_schema.TypeString, - Required: true, - Description: `The display name of the enum value.`, - }, - }, - } -} - -func resourceDataCatalogTagTemplateCreate(d *resource_data_catalog_tag_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDataCatalogTagTemplateDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_data_catalog_tag_template_reflect.ValueOf(displayNameProp)) && (ok || !resource_data_catalog_tag_template_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - fieldsProp, err := expandDataCatalogTagTemplateFields(d.Get("fields"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fields"); 
!isEmptyValue(resource_data_catalog_tag_template_reflect.ValueOf(fieldsProp)) && (ok || !resource_data_catalog_tag_template_reflect.DeepEqual(v, fieldsProp)) { - obj["fields"] = fieldsProp - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}projects/{{project}}/locations/{{region}}/tagTemplates?tagTemplateId={{tag_template_id}}") - if err != nil { - return err - } - - resource_data_catalog_tag_template_log.Printf("[DEBUG] Creating new TagTemplate: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_data_catalog_tag_template_fmt.Errorf("Error fetching project for TagTemplate: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_data_catalog_tag_template_schema.TimeoutCreate)) - if err != nil { - return resource_data_catalog_tag_template_fmt.Errorf("Error creating TagTemplate: %s", err) - } - if err := d.Set("name", flattenDataCatalogTagTemplateName(res["name"], d, config)); err != nil { - return resource_data_catalog_tag_template_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_data_catalog_tag_template_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_data_catalog_tag_template_log.Printf("[DEBUG] Finished creating TagTemplate %q: %#v", d.Id(), res) - - return resourceDataCatalogTagTemplateRead(d, meta) -} - -func resourceDataCatalogTagTemplateRead(d *resource_data_catalog_tag_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - 
billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_data_catalog_tag_template_fmt.Errorf("Error fetching project for TagTemplate: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_data_catalog_tag_template_fmt.Sprintf("DataCatalogTagTemplate %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_data_catalog_tag_template_fmt.Errorf("Error reading TagTemplate: %s", err) - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - if err := d.Set("region", region); err != nil { - return resource_data_catalog_tag_template_fmt.Errorf("Error reading TagTemplate: %s", err) - } - - if err := d.Set("name", flattenDataCatalogTagTemplateName(res["name"], d, config)); err != nil { - return resource_data_catalog_tag_template_fmt.Errorf("Error reading TagTemplate: %s", err) - } - if err := d.Set("display_name", flattenDataCatalogTagTemplateDisplayName(res["displayName"], d, config)); err != nil { - return resource_data_catalog_tag_template_fmt.Errorf("Error reading TagTemplate: %s", err) - } - if err := d.Set("fields", flattenDataCatalogTagTemplateFields(res["fields"], d, config)); err != nil { - return resource_data_catalog_tag_template_fmt.Errorf("Error reading TagTemplate: %s", err) - } - - return nil -} - -func resourceDataCatalogTagTemplateUpdate(d *resource_data_catalog_tag_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_data_catalog_tag_template_fmt.Errorf("Error fetching project for TagTemplate: %s", err) - } - 
billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandDataCatalogTagTemplateDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_data_catalog_tag_template_reflect.ValueOf(v)) && (ok || !resource_data_catalog_tag_template_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - - url, err := replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}") - if err != nil { - return err - } - - resource_data_catalog_tag_template_log.Printf("[DEBUG] Updating TagTemplate %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_data_catalog_tag_template_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_data_catalog_tag_template_schema.TimeoutUpdate)) - - if err != nil { - return resource_data_catalog_tag_template_fmt.Errorf("Error updating TagTemplate %q: %s", d.Id(), err) - } else { - resource_data_catalog_tag_template_log.Printf("[DEBUG] Finished updating TagTemplate %q: %#v", d.Id(), res) - } - - return resourceDataCatalogTagTemplateRead(d, meta) -} - -func resourceDataCatalogTagTemplateDelete(d *resource_data_catalog_tag_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_data_catalog_tag_template_fmt.Errorf("Error fetching project for TagTemplate: %s", err) - } - billingProject = project - - url, err := 
replaceVars(d, config, "{{DataCatalogBasePath}}{{name}}?force={{force_delete}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_data_catalog_tag_template_log.Printf("[DEBUG] Deleting TagTemplate %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_data_catalog_tag_template_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TagTemplate") - } - - resource_data_catalog_tag_template_log.Printf("[DEBUG] Finished deleting TagTemplate %q: %#v", d.Id(), res) - return nil -} - -func resourceDataCatalogTagTemplateImport(d *resource_data_catalog_tag_template_schema.ResourceData, meta interface{}) ([]*resource_data_catalog_tag_template_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - name := d.Get("name").(string) - egRegex := resource_data_catalog_tag_template_regexp.MustCompile("projects/(.+)/locations/(.+)/tagTemplates/(.+)") - - parts := egRegex.FindStringSubmatch(name) - if len(parts) != 4 { - return nil, resource_data_catalog_tag_template_fmt.Errorf("tag template name does not fit the format %s", egRegex) - } - if err := d.Set("project", parts[1]); err != nil { - return nil, resource_data_catalog_tag_template_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", parts[2]); err != nil { - return nil, resource_data_catalog_tag_template_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("tag_template_id", parts[3]); err != nil { - return nil, resource_data_catalog_tag_template_fmt.Errorf("Error setting tag_template_id: %s", err) - } - return []*resource_data_catalog_tag_template_schema.ResourceData{d}, nil -} - -func flattenDataCatalogTagTemplateName(v interface{}, d 
*resource_data_catalog_tag_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTagTemplateDisplayName(v interface{}, d *resource_data_catalog_tag_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTagTemplateFields(v interface{}, d *resource_data_catalog_tag_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.(map[string]interface{}) - transformed := make([]interface{}, 0, len(l)) - for k, raw := range l { - original := raw.(map[string]interface{}) - transformed = append(transformed, map[string]interface{}{ - "field_id": k, - "name": flattenDataCatalogTagTemplateFieldsName(original["name"], d, config), - "display_name": flattenDataCatalogTagTemplateFieldsDisplayName(original["displayName"], d, config), - "description": flattenDataCatalogTagTemplateFieldsDescription(original["description"], d, config), - "type": flattenDataCatalogTagTemplateFieldsType(original["type"], d, config), - "is_required": flattenDataCatalogTagTemplateFieldsIsRequired(original["isRequired"], d, config), - "order": flattenDataCatalogTagTemplateFieldsOrder(original["order"], d, config), - }) - } - return transformed -} - -func flattenDataCatalogTagTemplateFieldsName(v interface{}, d *resource_data_catalog_tag_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTagTemplateFieldsDisplayName(v interface{}, d *resource_data_catalog_tag_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTagTemplateFieldsDescription(v interface{}, d *resource_data_catalog_tag_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTagTemplateFieldsType(v interface{}, d *resource_data_catalog_tag_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if 
len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["primitive_type"] = - flattenDataCatalogTagTemplateFieldsTypePrimitiveType(original["primitiveType"], d, config) - transformed["enum_type"] = - flattenDataCatalogTagTemplateFieldsTypeEnumType(original["enumType"], d, config) - return []interface{}{transformed} -} - -func flattenDataCatalogTagTemplateFieldsTypePrimitiveType(v interface{}, d *resource_data_catalog_tag_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTagTemplateFieldsTypeEnumType(v interface{}, d *resource_data_catalog_tag_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allowed_values"] = - flattenDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValues(original["allowedValues"], d, config) - return []interface{}{transformed} -} - -func flattenDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValues(v interface{}, d *resource_data_catalog_tag_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_data_catalog_tag_template_schema.NewSet(resource_data_catalog_tag_template_schema.HashResource(datacatalogTagTemplateFieldsFieldsTypeEnumTypeAllowedValuesSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "display_name": flattenDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValuesDisplayName(original["displayName"], d, config), - }) - } - return transformed -} - -func flattenDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValuesDisplayName(v interface{}, d *resource_data_catalog_tag_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDataCatalogTagTemplateFieldsIsRequired(v interface{}, d *resource_data_catalog_tag_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataCatalogTagTemplateFieldsOrder(v interface{}, d *resource_data_catalog_tag_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_catalog_tag_template_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func expandDataCatalogTagTemplateDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogTagTemplateFields(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - m := make(map[string]interface{}) - for _, raw := range v.(*resource_data_catalog_tag_template_schema.Set).List() { - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataCatalogTagTemplateFieldsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_tag_template_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedDisplayName, err := expandDataCatalogTagTemplateFieldsDisplayName(original["display_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_tag_template_reflect.ValueOf(transformedDisplayName); val.IsValid() && !isEmptyValue(val) { - transformed["displayName"] = transformedDisplayName - } - - transformedDescription, err := expandDataCatalogTagTemplateFieldsDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_data_catalog_tag_template_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedType, err := expandDataCatalogTagTemplateFieldsType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_tag_template_reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - transformedIsRequired, err := expandDataCatalogTagTemplateFieldsIsRequired(original["is_required"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_tag_template_reflect.ValueOf(transformedIsRequired); val.IsValid() && !isEmptyValue(val) { - transformed["isRequired"] = transformedIsRequired - } - - transformedOrder, err := expandDataCatalogTagTemplateFieldsOrder(original["order"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_tag_template_reflect.ValueOf(transformedOrder); val.IsValid() && !isEmptyValue(val) { - transformed["order"] = transformedOrder - } - - transformedFieldId, err := expandString(original["field_id"], d, config) - if err != nil { - return nil, err - } - m[transformedFieldId] = transformed - } - return m, nil -} - -func expandDataCatalogTagTemplateFieldsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogTagTemplateFieldsDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogTagTemplateFieldsDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogTagTemplateFieldsType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPrimitiveType, err := expandDataCatalogTagTemplateFieldsTypePrimitiveType(original["primitive_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_tag_template_reflect.ValueOf(transformedPrimitiveType); val.IsValid() && !isEmptyValue(val) { - transformed["primitiveType"] = transformedPrimitiveType - } - - transformedEnumType, err := expandDataCatalogTagTemplateFieldsTypeEnumType(original["enum_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_tag_template_reflect.ValueOf(transformedEnumType); val.IsValid() && !isEmptyValue(val) { - transformed["enumType"] = transformedEnumType - } - - return transformed, nil -} - -func expandDataCatalogTagTemplateFieldsTypePrimitiveType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogTagTemplateFieldsTypeEnumType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowedValues, err := expandDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValues(original["allowed_values"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_tag_template_reflect.ValueOf(transformedAllowedValues); val.IsValid() && !isEmptyValue(val) { - transformed["allowedValues"] = transformedAllowedValues - } - - return transformed, nil -} - -func expandDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValues(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_data_catalog_tag_template_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - 
continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDisplayName, err := expandDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValuesDisplayName(original["display_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_catalog_tag_template_reflect.ValueOf(transformedDisplayName); val.IsValid() && !isEmptyValue(val) { - transformed["displayName"] = transformedDisplayName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataCatalogTagTemplateFieldsTypeEnumTypeAllowedValuesDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogTagTemplateFieldsIsRequired(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataCatalogTagTemplateFieldsOrder(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDataLossPreventionDeidentifyTemplate() *resource_data_loss_prevention_deidentify_template_schema.Resource { - return &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Create: resourceDataLossPreventionDeidentifyTemplateCreate, - Read: resourceDataLossPreventionDeidentifyTemplateRead, - Update: resourceDataLossPreventionDeidentifyTemplateUpdate, - Delete: resourceDataLossPreventionDeidentifyTemplateDelete, - - Importer: &resource_data_loss_prevention_deidentify_template_schema.ResourceImporter{ - State: resourceDataLossPreventionDeidentifyTemplateImport, - }, - - Timeouts: &resource_data_loss_prevention_deidentify_template_schema.ResourceTimeout{ - Create: resource_data_loss_prevention_deidentify_template_schema.DefaultTimeout(4 * resource_data_loss_prevention_deidentify_template_time.Minute), - Update: resource_data_loss_prevention_deidentify_template_schema.DefaultTimeout(4 * resource_data_loss_prevention_deidentify_template_time.Minute), - 
Delete: resource_data_loss_prevention_deidentify_template_schema.DefaultTimeout(4 * resource_data_loss_prevention_deidentify_template_time.Minute), - }, - - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "deidentify_config": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Required: true, - Description: `Configuration of the deidentify template`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "info_type_transformations": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Required: true, - Description: `Specifies free-text based transformations to be applied to the dataset.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "transformations": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Required: true, - Description: `Transformation for each infoType. Cannot specify more than one for a given infoType.`, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "primitive_transformation": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Required: true, - Description: `Primitive transformation to apply to the infoType.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "character_mask_config": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `Partially mask a string by replacing a given number of characters with a fixed character. 
-Masking can start from the beginning or end of the string.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "characters_to_ignore": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `Characters to skip when doing de-identification of a value. These will be left alone and skipped.`, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "character_to_skip": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Optional: true, - Description: `Characters to not transform when masking.`, - }, - "common_characters_to_ignore": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Optional: true, - ValidateFunc: resource_data_loss_prevention_deidentify_template_validation.StringInSlice([]string{"NUMERIC", "ALPHA_UPPER_CASE", "ALPHA_LOWER_CASE", "PUNCTUATION", "WHITESPACE", ""}, false), - Description: `Common characters to not transform when masking. Useful to avoid removing punctuation. Possible values: ["NUMERIC", "ALPHA_UPPER_CASE", "ALPHA_LOWER_CASE", "PUNCTUATION", "WHITESPACE"]`, - }, - }, - }, - }, - "masking_character": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Optional: true, - Description: `Character to use to mask the sensitive values—for example, * for an alphabetic string such as a name, or 0 for a numeric string -such as ZIP code or credit card number. This string must have a length of 1. If not supplied, this value defaults to * for -strings, and 0 for digits.`, - }, - "number_to_mask": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeInt, - Optional: true, - Description: `Number of characters to mask. 
If not set, all matching chars will be masked. Skipped characters do not count towards this tally.`, - }, - "reverse_order": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeBool, - Optional: true, - Description: `Mask characters in reverse order. For example, if masking_character is 0, number_to_mask is 14, and reverse_order is 'false', then the -input string '1234-5678-9012-3456' is masked as '00000000000000-3456'.`, - }, - }, - }, - }, - "crypto_deterministic_config": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `Pseudonymization method that generates deterministic encryption for the given input. Outputs a base64 encoded representation of the encrypted output. Uses AES-SIV based on the RFC [https://tools.ietf.org/html/rfc5297](https://tools.ietf.org/html/rfc5297).`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "context": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `A context may be used for higher security and maintaining referential integrity such that the same identifier in two different contexts will be given a distinct surrogate. The context is appended to plaintext value being encrypted. On decryption the provided context is validated against the value used during encryption. If a context was provided during encryption, same context must be provided during decryption as well. - -If the context is not set, plaintext would be used as is for encryption. If the context is set but: - -1. there is no record present when transforming a given value or -2. the field is not present when transforming a given value, - -plaintext would be used as is for encryption. 
- -Note that case (1) is expected when an 'InfoTypeTransformation' is applied to both structured and non-structured 'ContentItem's.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "name": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Optional: true, - Description: `Name describing the field.`, - }, - }, - }, - }, - "crypto_key": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `The key used by the encryption function.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "kms_wrapped": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `Kms wrapped key`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "crypto_key_name": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Required: true, - Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, - }, - "wrapped_key": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Required: true, - Description: `The wrapped data crypto key. 
- -A base64-encoded string.`, - }, - }, - }, - }, - "transient": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `Transient crypto key`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "name": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Required: true, - Description: `Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, - }, - }, - }, - }, - "unwrapped": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `Unwrapped crypto key`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "key": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Required: true, - Description: `A 128/192/256 bit key. - -A base64-encoded string.`, - }, - }, - }, - }, - }, - }, - }, - "surrogate_info_type": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `The custom info type to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom info type followed by the number of characters comprising the surrogate. 
The following scheme defines the format: {info type name}({surrogate character count}):{surrogate} - -For example, if the name of custom info type is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' - -This annotation identifies the surrogate when inspecting content using the custom info type 'Surrogate'. This facilitates reversal of the surrogate when it occurs in free text. - -Note: For record transformations where the entire cell in a table is being transformed, surrogates are not mandatory. Surrogates are used to denote the location of the token and are necessary for re-identification in free form text. - -In order for inspection to work properly, the name of this info type must not occur naturally anywhere in your data; otherwise, inspection may either - -* reverse a surrogate that does not correspond to an actual identifier -* be unable to parse the surrogate and result in an error - -Therefore, choose your custom info type name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "name": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Optional: true, - Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. 
When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern '[A-Za-z0-9$-_]{1,64}'.`, - }, - }, - }, - }, - }, - }, - }, - "crypto_replace_ffx_fpe_config": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `Replaces an identifier with a surrogate using Format Preserving Encryption (FPE) with the FFX mode of operation; however when used in the 'content.reidentify' API method, it serves the opposite function by reversing the surrogate back into the original identifier. The identifier must be encoded as ASCII. For a given crypto key and context, the same identifier will be replaced with the same surrogate. Identifiers must be at least two characters long. In the case that the identifier is the empty string, it will be skipped. See [https://cloud.google.com/dlp/docs/pseudonymization](https://cloud.google.com/dlp/docs/pseudonymization) to learn more. - -Note: We recommend using CryptoDeterministicConfig for all use cases which do not require preserving the input alphabet space and size, plus warrant referential integrity.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "common_alphabet": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Optional: true, - ValidateFunc: resource_data_loss_prevention_deidentify_template_validation.StringInSlice([]string{"FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED", "NUMERIC", "HEXADECIMAL", "UPPER_CASE_ALPHA_NUMERIC", "ALPHA_NUMERIC", ""}, false), - Description: `Common alphabets. 
Possible values: ["FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED", "NUMERIC", "HEXADECIMAL", "UPPER_CASE_ALPHA_NUMERIC", "ALPHA_NUMERIC"]`, - }, - "context": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `The 'tweak', a context may be used for higher security since the same identifier in two different contexts won't be given the same surrogate. If the context is not set, a default tweak will be used. - -If the context is set but: - -1. there is no record present when transforming a given value or -2. the field is not present when transforming a given value, - -a default tweak will be used. - -Note that case (1) is expected when an 'InfoTypeTransformation' is applied to both structured and non-structured 'ContentItem's. Currently, the referenced field may be of value type integer or string. - -The tweak is constructed as a sequence of bytes in big endian byte order such that: - -* a 64 bit integer is encoded followed by a single byte of value 1 -* a string is encoded in UTF-8 format followed by a single byte of value 2`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "name": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Optional: true, - Description: `Name describing the field.`, - }, - }, - }, - }, - "crypto_key": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `The key used by the encryption algorithm.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "kms_wrapped": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `Kms wrapped key`, - MaxItems: 1, - Elem: 
&resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "crypto_key_name": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Required: true, - Description: `The resource name of the KMS CryptoKey to use for unwrapping.`, - }, - "wrapped_key": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Required: true, - Description: `The wrapped data crypto key. - -A base64-encoded string.`, - }, - }, - }, - }, - "transient": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `Transient crypto key`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "name": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Required: true, - Description: `Name of the key. This is an arbitrary string used to differentiate different keys. A unique key is generated per name: two separate 'TransientCryptoKey' protos share the same generated key if their names are the same. When the data crypto key is generated, this name is not used in any way (repeating the api call will result in a different key being generated).`, - }, - }, - }, - }, - "unwrapped": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `Unwrapped crypto key`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "key": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Required: true, - Description: `A 128/192/256 bit key. 
- -A base64-encoded string.`, - }, - }, - }, - }, - }, - }, - }, - "custom_alphabet": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Optional: true, - Description: `This is supported by mapping these to the alphanumeric characters that the FFX mode natively supports. This happens before/after encryption/decryption. Each character listed must appear only once. Number of characters must be in the range \[2, 95\]. This must be encoded as ASCII. The order of characters does not matter. The full list of allowed characters is: - -''0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz ~'!@#$%^&*()_-+={[}]|:;"'<,>.?/''`, - }, - "radix": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeInt, - Optional: true, - Description: `The native way to select the alphabet. Must be in the range \[2, 95\].`, - }, - "surrogate_info_type": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `The custom infoType to annotate the surrogate with. This annotation will be applied to the surrogate by prefixing it with the name of the custom infoType followed by the number of characters comprising the surrogate. The following scheme defines the format: info\_type\_name(surrogate\_character\_count):surrogate - -For example, if the name of custom infoType is 'MY\_TOKEN\_INFO\_TYPE' and the surrogate is 'abc', the full replacement value will be: 'MY\_TOKEN\_INFO\_TYPE(3):abc' - -This annotation identifies the surrogate when inspecting content using the custom infoType ['SurrogateType'](https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#surrogatetype). This facilitates reversal of the surrogate when it occurs in free text. - -In order for inspection to work properly, the name of this infoType must not occur naturally anywhere in your data; otherwise, inspection may find a surrogate that does not correspond to an actual identifier. 
Therefore, choose your custom infoType name carefully after considering what your data looks like. One way to select a name that has a high chance of yielding reliable detection is to include one or more unicode characters that are highly improbable to exist in your data. For example, assuming your data is entered from a regular ASCII keyboard, the symbol with the hex code point 29DD might be used like so: ⧝MY\_TOKEN\_TYPE`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "name": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Optional: true, - Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern '[A-Za-z0-9$-_]{1,64}'.`, - }, - }, - }, - }, - }, - }, - }, - "replace_config": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `Replace each input value with a given value.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "new_value": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Required: true, - Description: `Replace each input value with a given value.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "boolean_value": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeBool, - Optional: true, - Description: `A boolean 
value.`, - }, - "date_value": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `Represents a whole or partial calendar date.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "day": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeInt, - Optional: true, - Description: `Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a -year by itself or a year and month where the day is not significant.`, - }, - "month": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeInt, - Optional: true, - Description: `Month of year. Must be from 1 to 12, or 0 if specifying a year without a month and day.`, - }, - "year": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeInt, - Optional: true, - Description: `Year of date. Must be from 1 to 9999, or 0 if specifying a date without a year.`, - }, - }, - }, - }, - "day_of_week_value": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Optional: true, - ValidateFunc: resource_data_loss_prevention_deidentify_template_validation.StringInSlice([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY", ""}, false), - Description: `Represents a day of the week. 
Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, - }, - "float_value": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeFloat, - Optional: true, - Description: `A float value.`, - }, - "integer_value": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeInt, - Optional: true, - Description: `An integer value.`, - }, - "string_value": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Optional: true, - Description: `A string value.`, - }, - "time_value": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `Represents a time of day.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "hours": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeInt, - Optional: true, - Description: `Hours of day in 24 hour format. Should be from 0 to 23.`, - }, - "minutes": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeInt, - Optional: true, - Description: `Minutes of hour of day. Must be from 0 to 59.`, - }, - "nanos": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeInt, - Optional: true, - Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, - }, - "seconds": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeInt, - Optional: true, - Description: `Seconds of minutes of the time. Must normally be from 0 to 59.`, - }, - }, - }, - }, - "timestamp_value": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Optional: true, - Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
-Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - }, - }, - }, - }, - "replace_with_info_type_config": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeBool, - Optional: true, - Description: `Replace each matching finding with the name of the info type.`, - }, - }, - }, - }, - "info_types": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeList, - Optional: true, - Description: `InfoTypes to apply the transformation to. Leaving this empty will apply the transformation to apply to -all findings that correspond to infoTypes that were requested in InspectConfig.`, - Elem: &resource_data_loss_prevention_deidentify_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_deidentify_template_schema.Schema{ - "name": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Required: true, - Description: `Name of the information type.`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "parent": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The parent of the template in any of the following formats: - -* 'projects/{{project}}' -* 'projects/{{project}}/locations/{{location}}' -* 'organizations/{{organization_id}}' -* 'organizations/{{organization_id}}/locations/{{location}}'`, - }, - "description": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Optional: true, - Description: `A description of the template.`, - }, - "display_name": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Optional: true, - Description: `User set display name of the template.`, - }, - "name": { - Type: resource_data_loss_prevention_deidentify_template_schema.TypeString, - Computed: true, - Description: `The resource name of the template. 
Set by the server.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataLossPreventionDeidentifyTemplateCreate(d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandDataLossPreventionDeidentifyTemplateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_data_loss_prevention_deidentify_template_reflect.ValueOf(descriptionProp)) && (ok || !resource_data_loss_prevention_deidentify_template_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandDataLossPreventionDeidentifyTemplateDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_data_loss_prevention_deidentify_template_reflect.ValueOf(displayNameProp)) && (ok || !resource_data_loss_prevention_deidentify_template_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - deidentifyConfigProp, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfig(d.Get("deidentify_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deidentify_config"); !isEmptyValue(resource_data_loss_prevention_deidentify_template_reflect.ValueOf(deidentifyConfigProp)) && (ok || !resource_data_loss_prevention_deidentify_template_reflect.DeepEqual(v, deidentifyConfigProp)) { - obj["deidentifyConfig"] = deidentifyConfigProp - } - - obj, err = resourceDataLossPreventionDeidentifyTemplateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/deidentifyTemplates") - if err != nil { - return err - } 
- - resource_data_loss_prevention_deidentify_template_log.Printf("[DEBUG] Creating new DeidentifyTemplate: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_data_loss_prevention_deidentify_template_schema.TimeoutCreate)) - if err != nil { - return resource_data_loss_prevention_deidentify_template_fmt.Errorf("Error creating DeidentifyTemplate: %s", err) - } - if err := d.Set("name", flattenDataLossPreventionDeidentifyTemplateName(res["name"], d, config)); err != nil { - return resource_data_loss_prevention_deidentify_template_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{parent}}/deidentifyTemplates/{{name}}") - if err != nil { - return resource_data_loss_prevention_deidentify_template_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_data_loss_prevention_deidentify_template_log.Printf("[DEBUG] Finished creating DeidentifyTemplate %q: %#v", d.Id(), res) - - return resourceDataLossPreventionDeidentifyTemplateRead(d, meta) -} - -func resourceDataLossPreventionDeidentifyTemplateRead(d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/deidentifyTemplates/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_data_loss_prevention_deidentify_template_fmt.Sprintf("DataLossPreventionDeidentifyTemplate %q", 
d.Id())) - } - - if err := d.Set("name", flattenDataLossPreventionDeidentifyTemplateName(res["name"], d, config)); err != nil { - return resource_data_loss_prevention_deidentify_template_fmt.Errorf("Error reading DeidentifyTemplate: %s", err) - } - if err := d.Set("description", flattenDataLossPreventionDeidentifyTemplateDescription(res["description"], d, config)); err != nil { - return resource_data_loss_prevention_deidentify_template_fmt.Errorf("Error reading DeidentifyTemplate: %s", err) - } - if err := d.Set("display_name", flattenDataLossPreventionDeidentifyTemplateDisplayName(res["displayName"], d, config)); err != nil { - return resource_data_loss_prevention_deidentify_template_fmt.Errorf("Error reading DeidentifyTemplate: %s", err) - } - if err := d.Set("deidentify_config", flattenDataLossPreventionDeidentifyTemplateDeidentifyConfig(res["deidentifyConfig"], d, config)); err != nil { - return resource_data_loss_prevention_deidentify_template_fmt.Errorf("Error reading DeidentifyTemplate: %s", err) - } - - return nil -} - -func resourceDataLossPreventionDeidentifyTemplateUpdate(d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandDataLossPreventionDeidentifyTemplateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_data_loss_prevention_deidentify_template_reflect.ValueOf(v)) && (ok || !resource_data_loss_prevention_deidentify_template_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandDataLossPreventionDeidentifyTemplateDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("display_name"); !isEmptyValue(resource_data_loss_prevention_deidentify_template_reflect.ValueOf(v)) && (ok || !resource_data_loss_prevention_deidentify_template_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - deidentifyConfigProp, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfig(d.Get("deidentify_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("deidentify_config"); !isEmptyValue(resource_data_loss_prevention_deidentify_template_reflect.ValueOf(v)) && (ok || !resource_data_loss_prevention_deidentify_template_reflect.DeepEqual(v, deidentifyConfigProp)) { - obj["deidentifyConfig"] = deidentifyConfigProp - } - - obj, err = resourceDataLossPreventionDeidentifyTemplateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/deidentifyTemplates/{{name}}") - if err != nil { - return err - } - - resource_data_loss_prevention_deidentify_template_log.Printf("[DEBUG] Updating DeidentifyTemplate %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("deidentify_config") { - updateMask = append(updateMask, "deidentifyConfig") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_data_loss_prevention_deidentify_template_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_data_loss_prevention_deidentify_template_schema.TimeoutUpdate)) - - if err != nil { - return resource_data_loss_prevention_deidentify_template_fmt.Errorf("Error updating DeidentifyTemplate %q: %s", 
d.Id(), err) - } else { - resource_data_loss_prevention_deidentify_template_log.Printf("[DEBUG] Finished updating DeidentifyTemplate %q: %#v", d.Id(), res) - } - - return resourceDataLossPreventionDeidentifyTemplateRead(d, meta) -} - -func resourceDataLossPreventionDeidentifyTemplateDelete(d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/deidentifyTemplates/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_data_loss_prevention_deidentify_template_log.Printf("[DEBUG] Deleting DeidentifyTemplate %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_data_loss_prevention_deidentify_template_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DeidentifyTemplate") - } - - resource_data_loss_prevention_deidentify_template_log.Printf("[DEBUG] Finished deleting DeidentifyTemplate %q: %#v", d.Id(), res) - return nil -} - -func resourceDataLossPreventionDeidentifyTemplateImport(d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, meta interface{}) ([]*resource_data_loss_prevention_deidentify_template_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - parts := resource_data_loss_prevention_deidentify_template_strings.Split(d.Get("name").(string), "/") - if len(parts) == 6 { - if err := d.Set("name", parts[5]); err != nil { - return nil, resource_data_loss_prevention_deidentify_template_fmt.Errorf("Error setting name: %s", err) - } - } else if len(parts) 
== 4 { - if err := d.Set("name", parts[3]); err != nil { - return nil, resource_data_loss_prevention_deidentify_template_fmt.Errorf("Error setting name: %s", err) - } - } else { - return nil, resource_data_loss_prevention_deidentify_template_fmt.Errorf("Unexpected import id: %s, expected form {{parent}}/deidentifyTemplate/{{name}}", d.Get("name").(string)) - } - - parts = parts[:len(parts)-2] - if err := d.Set("parent", resource_data_loss_prevention_deidentify_template_strings.Join(parts, "/")); err != nil { - return nil, resource_data_loss_prevention_deidentify_template_fmt.Errorf("Error setting parent: %s", err) - } - - id, err := replaceVars(d, config, "{{parent}}/deidentifyTemplates/{{name}}") - if err != nil { - return nil, resource_data_loss_prevention_deidentify_template_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_data_loss_prevention_deidentify_template_schema.ResourceData{d}, nil -} - -func flattenDataLossPreventionDeidentifyTemplateName(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDataLossPreventionDeidentifyTemplateDescription(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDisplayName(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfig(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["info_type_transformations"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformations(original["infoTypeTransformations"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformations(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["transformations"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformations(original["transformations"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformations(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "info_types": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypes(original["infoTypes"], d, config), - "primitive_transformation": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformation(original["primitiveTransformation"], d, config), - }) - } - return transformed -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypes(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw 
:= range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesName(original["name"], d, config), - }) - } - return transformed -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesName(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformation(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["replace_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig(original["replaceConfig"], d, config) - transformed["replace_with_info_type_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig(original["replaceWithInfoTypeConfig"], d, config) - transformed["character_mask_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig(original["characterMaskConfig"], d, config) - transformed["crypto_deterministic_config"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig(original["cryptoDeterministicConfig"], d, config) - transformed["crypto_replace_ffx_fpe_config"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(original["cryptoReplaceFfxFpeConfig"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["new_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue(original["newValue"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["integer_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(original["integerValue"], d, config) - transformed["float_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(original["floatValue"], d, config) - transformed["string_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(original["stringValue"], d, 
config) - transformed["boolean_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(original["booleanValue"], d, config) - transformed["timestamp_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(original["timestampValue"], d, config) - transformed["time_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(original["timeValue"], d, config) - transformed["date_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(original["dateValue"], d, config) - transformed["day_of_week_value"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(original["dayOfWeekValue"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_deidentify_template_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hours"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(original["hours"], d, config) - transformed["minutes"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(original["minutes"], d, config) - transformed["seconds"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(original["nanos"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_deidentify_template_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_deidentify_template_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_deidentify_template_strconv.ParseInt(strVal, 10, 
64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_deidentify_template_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["year"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(original["year"], d, config) - transformed["month"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(original["month"], d, config) - transformed["day"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(original["day"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(v interface{}, d 
*resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_deidentify_template_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_deidentify_template_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_deidentify_template_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig(v interface{}, d 
*resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v != nil -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["masking_character"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(original["maskingCharacter"], d, config) - transformed["number_to_mask"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(original["numberToMask"], d, config) - transformed["reverse_order"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(original["reverseOrder"], d, config) - transformed["characters_to_ignore"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(original["charactersToIgnore"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(v interface{}, d 
*resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_deidentify_template_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "character_to_skip": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharacterToSkip(original["characterToSkip"], d, config), - "common_characters_to_ignore": flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(original["commonCharactersToIgnore"], d, config), - }) - } - return transformed -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharacterToSkip(v interface{}, d 
*resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["crypto_key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(original["cryptoKey"], d, config) - transformed["surrogate_info_type"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(original["surrogateInfoType"], d, config) - transformed["context"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(original["context"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - 
transformed := make(map[string]interface{}) - transformed["transient"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(original["transient"], d, config) - transformed["unwrapped"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) - transformed["kms_wrapped"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(v interface{}, d 
*resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(original["key"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["wrapped_key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) - transformed["crypto_key_name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) - return []interface{}{transformed} -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := 
v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["crypto_key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(original["cryptoKey"], d, config) - transformed["context"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(original["context"], d, config) - transformed["surrogate_info_type"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(original["surrogateInfoType"], d, config) - transformed["common_alphabet"] = - 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(original["commonAlphabet"], d, config) - transformed["custom_alphabet"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(original["customAlphabet"], d, config) - transformed["radix"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(original["radix"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["transient"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(original["transient"], d, config) - transformed["unwrapped"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) - transformed["kms_wrapped"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(original["kmsWrapped"], d, config) - return []interface{}{transformed} -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(original["key"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, 
config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["wrapped_key"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(original["wrappedKey"], d, config) - transformed["crypto_key_name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(original["cryptoKeyName"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := 
v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(v interface{}, d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_deidentify_template_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func expandDataLossPreventionDeidentifyTemplateDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInfoTypeTransformations, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformations(original["info_type_transformations"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedInfoTypeTransformations); val.IsValid() && !isEmptyValue(val) { - transformed["infoTypeTransformations"] = transformedInfoTypeTransformations - } - - return transformed, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTransformations, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformations(original["transformations"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedTransformations); val.IsValid() && !isEmptyValue(val) { - transformed["transformations"] = transformedTransformations - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInfoTypes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypes(original["info_types"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedInfoTypes); val.IsValid() && !isEmptyValue(val) { - transformed["infoTypes"] = transformedInfoTypes - } - - transformedPrimitiveTransformation, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformation(original["primitive_transformation"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedPrimitiveTransformation); 
val.IsValid() && !isEmptyValue(val) { - transformed["primitiveTransformation"] = transformedPrimitiveTransformation - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsInfoTypesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedReplaceConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig(original["replace_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedReplaceConfig); 
val.IsValid() && !isEmptyValue(val) { - transformed["replaceConfig"] = transformedReplaceConfig - } - - transformedReplaceWithInfoTypeConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig(original["replace_with_info_type_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedReplaceWithInfoTypeConfig); val.IsValid() && !isEmptyValue(val) { - transformed["replaceWithInfoTypeConfig"] = transformedReplaceWithInfoTypeConfig - } - - transformedCharacterMaskConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig(original["character_mask_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedCharacterMaskConfig); val.IsValid() && !isEmptyValue(val) { - transformed["characterMaskConfig"] = transformedCharacterMaskConfig - } - - transformedCryptoDeterministicConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig(original["crypto_deterministic_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedCryptoDeterministicConfig); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoDeterministicConfig"] = transformedCryptoDeterministicConfig - } - - transformedCryptoReplaceFfxFpeConfig, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(original["crypto_replace_ffx_fpe_config"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedCryptoReplaceFfxFpeConfig); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoReplaceFfxFpeConfig"] = transformedCryptoReplaceFfxFpeConfig - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNewValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue(original["new_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedNewValue); val.IsValid() && !isEmptyValue(val) { - transformed["newValue"] = transformedNewValue - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIntegerValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(original["integer_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedIntegerValue); val.IsValid() && !isEmptyValue(val) { - transformed["integerValue"] = 
transformedIntegerValue - } - - transformedFloatValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(original["float_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedFloatValue); val.IsValid() && !isEmptyValue(val) { - transformed["floatValue"] = transformedFloatValue - } - - transformedStringValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(original["string_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedStringValue); val.IsValid() && !isEmptyValue(val) { - transformed["stringValue"] = transformedStringValue - } - - transformedBooleanValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(original["boolean_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedBooleanValue); val.IsValid() && !isEmptyValue(val) { - transformed["booleanValue"] = transformedBooleanValue - } - - transformedTimestampValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(original["timestamp_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedTimestampValue); val.IsValid() && !isEmptyValue(val) { - transformed["timestampValue"] = transformedTimestampValue - } - - transformedTimeValue, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(original["time_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedTimeValue); val.IsValid() && !isEmptyValue(val) { - transformed["timeValue"] = transformedTimeValue - } - - transformedDateValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(original["date_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedDateValue); val.IsValid() && !isEmptyValue(val) { - transformed["dateValue"] = transformedDateValue - } - - transformedDayOfWeekValue, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(original["day_of_week_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedDayOfWeekValue); val.IsValid() && !isEmptyValue(val) { - transformed["dayOfWeekValue"] = transformedDayOfWeekValue - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueIntegerValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueFloatValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueStringValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueBooleanValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimestampValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHours, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(original["hours"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedHours); val.IsValid() && !isEmptyValue(val) { - transformed["hours"] = transformedHours - } - - transformedMinutes, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(original["minutes"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedMinutes); 
val.IsValid() && !isEmptyValue(val) { - transformed["minutes"] = transformedMinutes - } - - transformedSeconds, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(original["nanos"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueHours(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueMinutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueTimeValueNanos(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedYear, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(original["year"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedYear); val.IsValid() && !isEmptyValue(val) { - transformed["year"] = transformedYear - } - - transformedMonth, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(original["month"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedMonth); val.IsValid() && !isEmptyValue(val) { - transformed["month"] = transformedMonth - } - - transformedDay, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(original["day"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedDay); val.IsValid() && !isEmptyValue(val) { - transformed["day"] = transformedDay - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueYear(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDateValueDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceConfigNewValueDayOfWeekValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationReplaceWithInfoTypeConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil || !v.(bool) { - return nil, nil - } - - return struct{}{}, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaskingCharacter, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(original["masking_character"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedMaskingCharacter); val.IsValid() && 
!isEmptyValue(val) { - transformed["maskingCharacter"] = transformedMaskingCharacter - } - - transformedNumberToMask, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(original["number_to_mask"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedNumberToMask); val.IsValid() && !isEmptyValue(val) { - transformed["numberToMask"] = transformedNumberToMask - } - - transformedReverseOrder, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(original["reverse_order"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedReverseOrder); val.IsValid() && !isEmptyValue(val) { - transformed["reverseOrder"] = transformedReverseOrder - } - - transformedCharactersToIgnore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(original["characters_to_ignore"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedCharactersToIgnore); val.IsValid() && !isEmptyValue(val) { - transformed["charactersToIgnore"] = transformedCharactersToIgnore - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigMaskingCharacter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigNumberToMask(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigReverseOrder(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnore(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCharacterToSkip, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharacterToSkip(original["character_to_skip"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedCharacterToSkip); val.IsValid() && !isEmptyValue(val) { - transformed["characterToSkip"] = transformedCharacterToSkip - } - - transformedCommonCharactersToIgnore, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(original["common_characters_to_ignore"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedCommonCharactersToIgnore); val.IsValid() && !isEmptyValue(val) { - transformed["commonCharactersToIgnore"] = transformedCommonCharactersToIgnore - } - - req = append(req, transformed) - } - return req, nil -} - -func 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCharacterToSkip(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCharacterMaskConfigCharactersToIgnoreCommonCharactersToIgnore(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(original["crypto_key"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedCryptoKey); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoKey"] = transformedCryptoKey - } - - transformedSurrogateInfoType, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(original["surrogate_info_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedSurrogateInfoType); val.IsValid() && !isEmptyValue(val) { - transformed["surrogateInfoType"] = transformedSurrogateInfoType - } - - transformedContext, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(original["context"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedContext); val.IsValid() && !isEmptyValue(val) { - transformed["context"] = transformedContext - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(original["transient"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedTransient); val.IsValid() && !isEmptyValue(val) { - transformed["transient"] = transformedTransient - } - - transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedUnwrapped); val.IsValid() && !isEmptyValue(val) { - transformed["unwrapped"] = transformedUnwrapped - } - - transformedKmsWrapped, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !isEmptyValue(val) { - transformed["kmsWrapped"] = transformedKmsWrapped - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransient(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrapped(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - 
original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyUnwrappedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrapped(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedWrappedKey); val.IsValid() && !isEmptyValue(val) { - transformed["wrappedKey"] = transformedWrappedKey - } - - transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, 
config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoKeyName"] = transformedCryptoKeyName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeName(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContext(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoDeterministicConfigContextName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCryptoKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(original["crypto_key"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedCryptoKey); val.IsValid() && 
!isEmptyValue(val) { - transformed["cryptoKey"] = transformedCryptoKey - } - - transformedContext, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(original["context"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedContext); val.IsValid() && !isEmptyValue(val) { - transformed["context"] = transformedContext - } - - transformedSurrogateInfoType, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(original["surrogate_info_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedSurrogateInfoType); val.IsValid() && !isEmptyValue(val) { - transformed["surrogateInfoType"] = transformedSurrogateInfoType - } - - transformedCommonAlphabet, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(original["common_alphabet"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedCommonAlphabet); val.IsValid() && !isEmptyValue(val) { - transformed["commonAlphabet"] = transformedCommonAlphabet - } - - transformedCustomAlphabet, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(original["custom_alphabet"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedCustomAlphabet); val.IsValid() && !isEmptyValue(val) { - transformed["customAlphabet"] = transformedCustomAlphabet 
- } - - transformedRadix, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(original["radix"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedRadix); val.IsValid() && !isEmptyValue(val) { - transformed["radix"] = transformedRadix - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTransient, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(original["transient"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedTransient); val.IsValid() && !isEmptyValue(val) { - transformed["transient"] = transformedTransient - } - - transformedUnwrapped, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(original["unwrapped"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedUnwrapped); val.IsValid() && !isEmptyValue(val) { - transformed["unwrapped"] = transformedUnwrapped - } - - transformedKmsWrapped, err := 
expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(original["kms_wrapped"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedKmsWrapped); val.IsValid() && !isEmptyValue(val) { - transformed["kmsWrapped"] = transformedKmsWrapped - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransient(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyTransientName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrapped(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - 
original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyUnwrappedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrapped(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWrappedKey, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(original["wrapped_key"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedWrappedKey); val.IsValid() && !isEmptyValue(val) { - transformed["wrappedKey"] = transformedWrappedKey - } - - transformedCryptoKeyName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(original["crypto_key_name"], d, 
config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedCryptoKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["cryptoKeyName"] = transformedCryptoKeyName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedWrappedKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCryptoKeyKmsWrappedCryptoKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContext(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigContextName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} 
- -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_deidentify_template_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigSurrogateInfoTypeName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCommonAlphabet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigCustomAlphabet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationsPrimitiveTransformationCryptoReplaceFfxFpeConfigRadix(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
resourceDataLossPreventionDeidentifyTemplateEncoder(d *resource_data_loss_prevention_deidentify_template_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - newObj := make(map[string]interface{}) - newObj["deidentifyTemplate"] = obj - return newObj, nil -} - -func resourceDataLossPreventionInspectTemplate() *resource_data_loss_prevention_inspect_template_schema.Resource { - return &resource_data_loss_prevention_inspect_template_schema.Resource{ - Create: resourceDataLossPreventionInspectTemplateCreate, - Read: resourceDataLossPreventionInspectTemplateRead, - Update: resourceDataLossPreventionInspectTemplateUpdate, - Delete: resourceDataLossPreventionInspectTemplateDelete, - - Importer: &resource_data_loss_prevention_inspect_template_schema.ResourceImporter{ - State: resourceDataLossPreventionInspectTemplateImport, - }, - - Timeouts: &resource_data_loss_prevention_inspect_template_schema.ResourceTimeout{ - Create: resource_data_loss_prevention_inspect_template_schema.DefaultTimeout(4 * resource_data_loss_prevention_inspect_template_time.Minute), - Update: resource_data_loss_prevention_inspect_template_schema.DefaultTimeout(4 * resource_data_loss_prevention_inspect_template_time.Minute), - Delete: resource_data_loss_prevention_inspect_template_schema.DefaultTimeout(4 * resource_data_loss_prevention_inspect_template_time.Minute), - }, - - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "parent": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The parent of the inspect template in any of the following formats: - -* 'projects/{{project}}' -* 'projects/{{project}}/locations/{{location}}' -* 'organizations/{{organization_id}}' -* 'organizations/{{organization_id}}/locations/{{location}}'`, - }, - "description": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Optional: true, - 
Description: `A description of the inspect template.`, - }, - "display_name": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Optional: true, - Description: `User set display name of the inspect template.`, - }, - "inspect_config": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `The core content of the template.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "content_options": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `List of options defining data content to scan. If empty, text, images, and other content will be included. Possible values: ["CONTENT_TEXT", "CONTENT_IMAGE"]`, - Elem: &resource_data_loss_prevention_inspect_template_schema.Schema{ - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - ValidateFunc: resource_data_loss_prevention_inspect_template_validation.StringInSlice([]string{"CONTENT_TEXT", "CONTENT_IMAGE"}, false), - }, - }, - "custom_info_types": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `Custom info types to be used. See https://cloud.google.com/dlp/docs/creating-custom-infotypes to learn more.`, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "info_type": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Required: true, - Description: `CustomInfoType can either be a new infoType, or an extension of built-in infoType, when the name matches one of existing -infoTypes and that infoType is specified in 'info_types' field. Specifying the latter adds findings to the -one detected by the system. 
If built-in info type is not specified in 'info_types' list then the name is -treated as a custom info type.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "name": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Required: true, - Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names -listed at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, - }, - }, - }, - }, - "dictionary": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Dictionary which defines the rule.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "cloud_storage_path": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `Newline-delimited file of words in Cloud Storage. Only a single file is accepted.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "path": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Required: true, - Description: `A url representing a file or path (no wildcards) in Cloud Storage. 
Example: 'gs://[BUCKET_NAME]/dictionary.txt'`, - }, - }, - }, - }, - "word_list": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `List of words or phrases to search for.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "words": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Required: true, - Description: `Words or phrases defining the dictionary. The dictionary must contain at least one -phrase and every phrase must contain at least 2 characters that are letters or digits.`, - Elem: &resource_data_loss_prevention_inspect_template_schema.Schema{ - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "exclusion_type": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Optional: true, - ValidateFunc: resource_data_loss_prevention_inspect_template_validation.StringInSlice([]string{"EXCLUSION_TYPE_EXCLUDE", ""}, false), - Description: `If set to EXCLUSION_TYPE_EXCLUDE this infoType will not cause a finding to be returned. It still can be used for rules matching. Possible values: ["EXCLUSION_TYPE_EXCLUDE"]`, - }, - "likelihood": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Optional: true, - ValidateFunc: resource_data_loss_prevention_inspect_template_validation.StringInSlice([]string{"VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY", ""}, false), - Description: `Likelihood to return for this CustomInfoType. This base value can be altered by a detection rule if the finding meets the criteria -specified by the rule. 
Default value: "VERY_LIKELY" Possible values: ["VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY"]`, - Default: "VERY_LIKELY", - }, - "regex": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Regular expression which defines the rule.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "pattern": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Required: true, - Description: `Pattern defining the regular expression. -Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, - }, - "group_indexes": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `The index of the submatch to extract as findings. When not specified, the entire match is returned. 
No more than 3 may be included.`, - Elem: &resource_data_loss_prevention_inspect_template_schema.Schema{ - Type: resource_data_loss_prevention_inspect_template_schema.TypeInt, - }, - }, - }, - }, - }, - "stored_type": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A reference to a StoredInfoType to use with scanning.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "name": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Required: true, - Description: `Resource name of the requested StoredInfoType, for example 'organizations/433245324/storedInfoTypes/432452342' -or 'projects/project-id/storedInfoTypes/432452342'.`, - }, - }, - }, - }, - }, - }, - }, - "exclude_info_types": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeBool, - Optional: true, - Description: `When true, excludes type information of the findings.`, - }, - "include_quote": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeBool, - Optional: true, - Description: `When true, a contextual quote from the data that triggered a finding is included in the response.`, - }, - "info_types": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `Restricts what infoTypes to look for. The values must correspond to InfoType values returned by infoTypes.list -or listed at https://cloud.google.com/dlp/docs/infotypes-reference. - -When no InfoTypes or CustomInfoTypes are specified in a request, the system may automatically choose what detectors to run. 
-By default this may be all types, but may change over time as detectors are updated.`, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "name": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Required: true, - Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed -at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, - }, - }, - }, - }, - "limits": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `Configuration to control the number of findings returned.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "max_findings_per_item": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeInt, - Required: true, - Description: `Max number of findings that will be returned for each item scanned. The maximum returned is 2000.`, - }, - "max_findings_per_request": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeInt, - Required: true, - Description: `Max number of findings that will be returned per request/job. The maximum returned is 2000.`, - }, - "max_findings_per_info_type": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `Configuration of findings limit given for specified infoTypes.`, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "info_type": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Required: true, - Description: `Type of information the findings limit applies to. 
Only one limit per infoType should be provided. If InfoTypeLimit does -not have an infoType, the DLP API applies the limit against all infoTypes that are found but not -specified in another InfoTypeLimit.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "name": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Required: true, - Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed -at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, - }, - }, - }, - }, - "max_findings": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeInt, - Required: true, - Description: `Max findings limit for the given infoType.`, - }, - }, - }, - }, - }, - }, - }, - "min_likelihood": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Optional: true, - ValidateFunc: resource_data_loss_prevention_inspect_template_validation.StringInSlice([]string{"VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY", ""}, false), - Description: `Only returns findings equal or above this threshold. See https://cloud.google.com/dlp/docs/likelihood for more info Default value: "POSSIBLE" Possible values: ["VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY"]`, - Default: "POSSIBLE", - }, - "rule_set": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `Set of rules to apply to the findings for this InspectConfig. 
Exclusion rules, contained in the set are executed in the end, -other rules are executed in the order they are specified for each info type.`, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "info_types": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Required: true, - Description: `List of infoTypes this rule set is applied to.`, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "name": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Required: true, - Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed -at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, - }, - }, - }, - }, - "rules": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Required: true, - Description: `Set of rules to be applied to infoTypes. 
The rules are applied in order.`, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "exclusion_rule": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `The rule that specifies conditions when findings of infoTypes specified in InspectionRuleSet are removed from results.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "matching_type": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Required: true, - ValidateFunc: resource_data_loss_prevention_inspect_template_validation.StringInSlice([]string{"MATCHING_TYPE_FULL_MATCH", "MATCHING_TYPE_PARTIAL_MATCH", "MATCHING_TYPE_INVERSE_MATCH"}, false), - Description: `How the rule is applied. See the documentation for more information: https://cloud.google.com/dlp/docs/reference/rest/v2/InspectConfig#MatchingType Possible values: ["MATCHING_TYPE_FULL_MATCH", "MATCHING_TYPE_PARTIAL_MATCH", "MATCHING_TYPE_INVERSE_MATCH"]`, - }, - "dictionary": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `Dictionary which defines the rule.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "cloud_storage_path": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `Newline-delimited file of words in Cloud Storage. 
Only a single file is accepted.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "path": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Required: true, - Description: `A url representing a file or path (no wildcards) in Cloud Storage. Example: 'gs://[BUCKET_NAME]/dictionary.txt'`, - }, - }, - }, - }, - "word_list": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `List of words or phrases to search for.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "words": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Required: true, - Description: `Words or phrases defining the dictionary. The dictionary must contain at least one -phrase and every phrase must contain at least 2 characters that are letters or digits.`, - Elem: &resource_data_loss_prevention_inspect_template_schema.Schema{ - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "exclude_info_types": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `Set of infoTypes for which findings would affect this rule.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "info_types": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Required: true, - Description: `If a finding is matched by any of the infoType detectors listed here, the finding will be excluded from the scan results.`, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: 
map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "name": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Required: true, - Description: `Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed -at https://cloud.google.com/dlp/docs/infotypes-reference when specifying a built-in type.`, - }, - }, - }, - }, - }, - }, - }, - "regex": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `Regular expression which defines the rule.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "pattern": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Required: true, - Description: `Pattern defining the regular expression. -Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, - }, - "group_indexes": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `The index of the submatch to extract as findings. When not specified, the entire match is returned. 
No more than 3 may be included.`, - Elem: &resource_data_loss_prevention_inspect_template_schema.Schema{ - Type: resource_data_loss_prevention_inspect_template_schema.TypeInt, - }, - }, - }, - }, - }, - }, - }, - }, - "hotword_rule": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `Hotword-based detection rule.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "hotword_regex": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Required: true, - Description: `Regular expression pattern defining what qualifies as a hotword.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "pattern": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Required: true, - Description: `Pattern defining the regular expression. Its syntax -(https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, - }, - "group_indexes": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Optional: true, - Description: `The index of the submatch to extract as findings. When not specified, -the entire match is returned. 
No more than 3 may be included.`, - Elem: &resource_data_loss_prevention_inspect_template_schema.Schema{ - Type: resource_data_loss_prevention_inspect_template_schema.TypeInt, - }, - }, - }, - }, - }, - "likelihood_adjustment": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Required: true, - Description: `Likelihood adjustment to apply to all matching findings.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "fixed_likelihood": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Optional: true, - ValidateFunc: resource_data_loss_prevention_inspect_template_validation.StringInSlice([]string{"VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY", ""}, false), - Description: `Set the likelihood of a finding to a fixed value. Either this or relative_likelihood can be set. Possible values: ["VERY_UNLIKELY", "UNLIKELY", "POSSIBLE", "LIKELY", "VERY_LIKELY"]`, - }, - "relative_likelihood": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeInt, - Optional: true, - Description: `Increase or decrease the likelihood by the specified number of levels. For example, -if a finding would be POSSIBLE without the detection rule and relativeLikelihood is 1, -then it is upgraded to LIKELY, while a value of -1 would downgrade it to UNLIKELY. -Likelihood may never drop below VERY_UNLIKELY or exceed VERY_LIKELY, so applying an -adjustment of 1 followed by an adjustment of -1 when base likelihood is VERY_LIKELY -will result in a final likelihood of LIKELY. Either this or fixed_likelihood can be set.`, - }, - }, - }, - }, - "proximity": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeList, - Required: true, - Description: `Proximity of the finding within which the entire hotword must reside. The total length of the window cannot -exceed 1000 characters. 
Note that the finding itself will be included in the window, so that hotwords may be -used to match substrings of the finding itself. For example, the certainty of a phone number regex -'(\d{3}) \d{3}-\d{4}' could be adjusted upwards if the area code is known to be the local area code of a company -office using the hotword regex '(xxx)', where 'xxx' is the area code in question.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_inspect_template_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_inspect_template_schema.Schema{ - "window_after": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeInt, - Optional: true, - Description: `Number of characters after the finding to consider. Either this or window_before must be specified`, - }, - "window_before": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeInt, - Optional: true, - Description: `Number of characters before the finding to consider. Either this or window_after must be specified`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "name": { - Type: resource_data_loss_prevention_inspect_template_schema.TypeString, - Computed: true, - Description: `The resource name of the inspect template. 
Set by the server.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataLossPreventionInspectTemplateCreate(d *resource_data_loss_prevention_inspect_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandDataLossPreventionInspectTemplateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_data_loss_prevention_inspect_template_reflect.ValueOf(descriptionProp)) && (ok || !resource_data_loss_prevention_inspect_template_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandDataLossPreventionInspectTemplateDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_data_loss_prevention_inspect_template_reflect.ValueOf(displayNameProp)) && (ok || !resource_data_loss_prevention_inspect_template_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - inspectConfigProp, err := expandDataLossPreventionInspectTemplateInspectConfig(d.Get("inspect_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("inspect_config"); !isEmptyValue(resource_data_loss_prevention_inspect_template_reflect.ValueOf(inspectConfigProp)) && (ok || !resource_data_loss_prevention_inspect_template_reflect.DeepEqual(v, inspectConfigProp)) { - obj["inspectConfig"] = inspectConfigProp - } - - obj, err = resourceDataLossPreventionInspectTemplateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/inspectTemplates") - if err != nil { - return err - } - - 
resource_data_loss_prevention_inspect_template_log.Printf("[DEBUG] Creating new InspectTemplate: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_data_loss_prevention_inspect_template_schema.TimeoutCreate)) - if err != nil { - return resource_data_loss_prevention_inspect_template_fmt.Errorf("Error creating InspectTemplate: %s", err) - } - if err := d.Set("name", flattenDataLossPreventionInspectTemplateName(res["name"], d, config)); err != nil { - return resource_data_loss_prevention_inspect_template_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{parent}}/inspectTemplates/{{name}}") - if err != nil { - return resource_data_loss_prevention_inspect_template_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_data_loss_prevention_inspect_template_log.Printf("[DEBUG] Finished creating InspectTemplate %q: %#v", d.Id(), res) - - return resourceDataLossPreventionInspectTemplateRead(d, meta) -} - -func resourceDataLossPreventionInspectTemplateRead(d *resource_data_loss_prevention_inspect_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/inspectTemplates/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_data_loss_prevention_inspect_template_fmt.Sprintf("DataLossPreventionInspectTemplate %q", d.Id())) - } - - if err := d.Set("name", 
flattenDataLossPreventionInspectTemplateName(res["name"], d, config)); err != nil { - return resource_data_loss_prevention_inspect_template_fmt.Errorf("Error reading InspectTemplate: %s", err) - } - if err := d.Set("description", flattenDataLossPreventionInspectTemplateDescription(res["description"], d, config)); err != nil { - return resource_data_loss_prevention_inspect_template_fmt.Errorf("Error reading InspectTemplate: %s", err) - } - if err := d.Set("display_name", flattenDataLossPreventionInspectTemplateDisplayName(res["displayName"], d, config)); err != nil { - return resource_data_loss_prevention_inspect_template_fmt.Errorf("Error reading InspectTemplate: %s", err) - } - if err := d.Set("inspect_config", flattenDataLossPreventionInspectTemplateInspectConfig(res["inspectConfig"], d, config)); err != nil { - return resource_data_loss_prevention_inspect_template_fmt.Errorf("Error reading InspectTemplate: %s", err) - } - - return nil -} - -func resourceDataLossPreventionInspectTemplateUpdate(d *resource_data_loss_prevention_inspect_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandDataLossPreventionInspectTemplateDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_data_loss_prevention_inspect_template_reflect.ValueOf(v)) && (ok || !resource_data_loss_prevention_inspect_template_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandDataLossPreventionInspectTemplateDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_data_loss_prevention_inspect_template_reflect.ValueOf(v)) && (ok || 
!resource_data_loss_prevention_inspect_template_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - inspectConfigProp, err := expandDataLossPreventionInspectTemplateInspectConfig(d.Get("inspect_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("inspect_config"); !isEmptyValue(resource_data_loss_prevention_inspect_template_reflect.ValueOf(v)) && (ok || !resource_data_loss_prevention_inspect_template_reflect.DeepEqual(v, inspectConfigProp)) { - obj["inspectConfig"] = inspectConfigProp - } - - obj, err = resourceDataLossPreventionInspectTemplateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/inspectTemplates/{{name}}") - if err != nil { - return err - } - - resource_data_loss_prevention_inspect_template_log.Printf("[DEBUG] Updating InspectTemplate %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("inspect_config") { - updateMask = append(updateMask, "inspectConfig") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_data_loss_prevention_inspect_template_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_data_loss_prevention_inspect_template_schema.TimeoutUpdate)) - - if err != nil { - return resource_data_loss_prevention_inspect_template_fmt.Errorf("Error updating InspectTemplate %q: %s", d.Id(), err) - } else { - resource_data_loss_prevention_inspect_template_log.Printf("[DEBUG] Finished updating InspectTemplate %q: %#v", d.Id(), res) - } - - return 
resourceDataLossPreventionInspectTemplateRead(d, meta) -} - -func resourceDataLossPreventionInspectTemplateDelete(d *resource_data_loss_prevention_inspect_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/inspectTemplates/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_data_loss_prevention_inspect_template_log.Printf("[DEBUG] Deleting InspectTemplate %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_data_loss_prevention_inspect_template_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "InspectTemplate") - } - - resource_data_loss_prevention_inspect_template_log.Printf("[DEBUG] Finished deleting InspectTemplate %q: %#v", d.Id(), res) - return nil -} - -func resourceDataLossPreventionInspectTemplateImport(d *resource_data_loss_prevention_inspect_template_schema.ResourceData, meta interface{}) ([]*resource_data_loss_prevention_inspect_template_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - parts := resource_data_loss_prevention_inspect_template_strings.Split(d.Get("name").(string), "/") - if len(parts) == 6 { - if err := d.Set("name", parts[5]); err != nil { - return nil, resource_data_loss_prevention_inspect_template_fmt.Errorf("Error setting name: %s", err) - } - } else if len(parts) == 4 { - if err := d.Set("name", parts[3]); err != nil { - return nil, resource_data_loss_prevention_inspect_template_fmt.Errorf("Error setting name: %s", err) - } - } else { - return nil, 
resource_data_loss_prevention_inspect_template_fmt.Errorf("Unexpected import id: %s, expected form {{parent}}/inspectTemplate/{{name}}", d.Get("name").(string)) - } - - parts = parts[:len(parts)-2] - if err := d.Set("parent", resource_data_loss_prevention_inspect_template_strings.Join(parts, "/")); err != nil { - return nil, resource_data_loss_prevention_inspect_template_fmt.Errorf("Error setting parent: %s", err) - } - - id, err := replaceVars(d, config, "{{parent}}/inspectTemplates/{{name}}") - if err != nil { - return nil, resource_data_loss_prevention_inspect_template_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_data_loss_prevention_inspect_template_schema.ResourceData{d}, nil -} - -func flattenDataLossPreventionInspectTemplateName(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDataLossPreventionInspectTemplateDescription(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateDisplayName(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfig(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["exclude_info_types"] = - flattenDataLossPreventionInspectTemplateInspectConfigExcludeInfoTypes(original["excludeInfoTypes"], d, config) - transformed["include_quote"] = - flattenDataLossPreventionInspectTemplateInspectConfigIncludeQuote(original["includeQuote"], d, config) - 
transformed["min_likelihood"] = - flattenDataLossPreventionInspectTemplateInspectConfigMinLikelihood(original["minLikelihood"], d, config) - transformed["limits"] = - flattenDataLossPreventionInspectTemplateInspectConfigLimits(original["limits"], d, config) - transformed["info_types"] = - flattenDataLossPreventionInspectTemplateInspectConfigInfoTypes(original["infoTypes"], d, config) - transformed["content_options"] = - flattenDataLossPreventionInspectTemplateInspectConfigContentOptions(original["contentOptions"], d, config) - transformed["rule_set"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSet(original["ruleSet"], d, config) - transformed["custom_info_types"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypes(original["customInfoTypes"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigExcludeInfoTypes(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigIncludeQuote(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigMinLikelihood(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigLimits(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["max_findings_per_item"] = - flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerItem(original["maxFindingsPerItem"], d, config) - 
transformed["max_findings_per_request"] = - flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerRequest(original["maxFindingsPerRequest"], d, config) - transformed["max_findings_per_info_type"] = - flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoType(original["maxFindingsPerInfoType"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerItem(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_inspect_template_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerRequest(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_inspect_template_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoType(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "info_type": flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(original["infoType"], d, config), - "max_findings": 
flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeMaxFindings(original["maxFindings"], d, config), - }) - } - return transformed -} - -func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeName(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeMaxFindings(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_inspect_template_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigInfoTypes(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": 
flattenDataLossPreventionInspectTemplateInspectConfigInfoTypesName(original["name"], d, config), - }) - } - return transformed -} - -func flattenDataLossPreventionInspectTemplateInspectConfigInfoTypesName(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigContentOptions(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSet(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "info_types": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypes(original["infoTypes"], d, config), - "rules": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRules(original["rules"], d, config), - }) - } - return transformed -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypes(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesName(original["name"], d, config), - }) - } - return transformed -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesName(v interface{}, d 
*resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRules(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "hotword_rule": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRule(original["hotwordRule"], d, config), - "exclusion_rule": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRule(original["exclusionRule"], d, config), - }) - } - return transformed -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRule(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hotword_regex"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegex(original["hotwordRegex"], d, config) - transformed["proximity"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximity(original["proximity"], d, config) - transformed["likelihood_adjustment"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(original["likelihoodAdjustment"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegex(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if 
v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pattern"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexPattern(original["pattern"], d, config) - transformed["group_indexes"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexGroupIndexes(original["groupIndexes"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexPattern(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexGroupIndexes(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximity(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["window_before"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowBefore(original["windowBefore"], d, config) - transformed["window_after"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowAfter(original["windowAfter"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowBefore(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := 
v.(string); ok { - if intVal, err := resource_data_loss_prevention_inspect_template_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowAfter(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_inspect_template_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["fixed_likelihood"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihood(original["fixedLikelihood"], d, config) - transformed["relative_likelihood"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentRelativeLikelihood(original["relativeLikelihood"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihood(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentRelativeLikelihood(v interface{}, d 
*resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_inspect_template_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRule(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["matching_type"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleMatchingType(original["matchingType"], d, config) - transformed["dictionary"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionary(original["dictionary"], d, config) - transformed["regex"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegex(original["regex"], d, config) - transformed["exclude_info_types"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(original["excludeInfoTypes"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleMatchingType(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionary(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["word_list"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(original["wordList"], d, config) - transformed["cloud_storage_path"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(original["cloudStoragePath"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["words"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordListWords(original["words"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordListWords(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["path"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathPath(original["path"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathPath(v interface{}, d 
*resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegex(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pattern"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexPattern(original["pattern"], d, config) - transformed["group_indexes"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexGroupIndexes(original["groupIndexes"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexPattern(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexGroupIndexes(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["info_types"] = - flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(original["infoTypes"], d, config) - return []interface{}{transformed} -} - -func 
flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesName(original["name"], d, config), - }) - } - return transformed -} - -func flattenDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesName(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypes(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "info_type": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoType(original["infoType"], d, config), - "likelihood": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesLikelihood(original["likelihood"], d, config), - "exclusion_type": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesExclusionType(original["exclusionType"], d, config), - "regex": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegex(original["regex"], d, config), - "dictionary": 
flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionary(original["dictionary"], d, config), - "stored_type": flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredType(original["storedType"], d, config), - }) - } - return transformed -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoType(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeName(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesLikelihood(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesExclusionType(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegex(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pattern"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexPattern(original["pattern"], d, config) - 
transformed["group_indexes"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexGroupIndexes(original["groupIndexes"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexPattern(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexGroupIndexes(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionary(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["word_list"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordList(original["wordList"], d, config) - transformed["cloud_storage_path"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePath(original["cloudStoragePath"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordList(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["words"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordListWords(original["words"], d, config) - return []interface{}{transformed} -} - -func 
flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordListWords(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePath(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["path"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePathPath(original["path"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePathPath(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredType(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredTypeName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredTypeName(v interface{}, d *resource_data_loss_prevention_inspect_template_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDataLossPreventionInspectTemplateDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionInspectTemplateDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExcludeInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigExcludeInfoTypes(original["exclude_info_types"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedExcludeInfoTypes); val.IsValid() && !isEmptyValue(val) { - transformed["excludeInfoTypes"] = transformedExcludeInfoTypes - } - - transformedIncludeQuote, err := expandDataLossPreventionInspectTemplateInspectConfigIncludeQuote(original["include_quote"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedIncludeQuote); val.IsValid() && !isEmptyValue(val) { - transformed["includeQuote"] = transformedIncludeQuote - } - - transformedMinLikelihood, err := expandDataLossPreventionInspectTemplateInspectConfigMinLikelihood(original["min_likelihood"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedMinLikelihood); val.IsValid() && !isEmptyValue(val) { - transformed["minLikelihood"] = transformedMinLikelihood - } - - transformedLimits, err := expandDataLossPreventionInspectTemplateInspectConfigLimits(original["limits"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedLimits); val.IsValid() && !isEmptyValue(val) { - transformed["limits"] = transformedLimits - } - - 
transformedInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigInfoTypes(original["info_types"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedInfoTypes); val.IsValid() && !isEmptyValue(val) { - transformed["infoTypes"] = transformedInfoTypes - } - - transformedContentOptions, err := expandDataLossPreventionInspectTemplateInspectConfigContentOptions(original["content_options"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedContentOptions); val.IsValid() && !isEmptyValue(val) { - transformed["contentOptions"] = transformedContentOptions - } - - transformedRuleSet, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSet(original["rule_set"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedRuleSet); val.IsValid() && !isEmptyValue(val) { - transformed["ruleSet"] = transformedRuleSet - } - - transformedCustomInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypes(original["custom_info_types"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedCustomInfoTypes); val.IsValid() && !isEmptyValue(val) { - transformed["customInfoTypes"] = transformedCustomInfoTypes - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigExcludeInfoTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigIncludeQuote(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigMinLikelihood(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigLimits(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaxFindingsPerItem, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerItem(original["max_findings_per_item"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedMaxFindingsPerItem); val.IsValid() && !isEmptyValue(val) { - transformed["maxFindingsPerItem"] = transformedMaxFindingsPerItem - } - - transformedMaxFindingsPerRequest, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerRequest(original["max_findings_per_request"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedMaxFindingsPerRequest); val.IsValid() && !isEmptyValue(val) { - transformed["maxFindingsPerRequest"] = transformedMaxFindingsPerRequest - } - - transformedMaxFindingsPerInfoType, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoType(original["max_findings_per_info_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedMaxFindingsPerInfoType); val.IsValid() && !isEmptyValue(val) { - transformed["maxFindingsPerInfoType"] = transformedMaxFindingsPerInfoType - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerItem(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInfoType, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(original["info_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedInfoType); val.IsValid() && !isEmptyValue(val) { - transformed["infoType"] = transformedInfoType - } - - transformedMaxFindings, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeMaxFindings(original["max_findings"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedMaxFindings); val.IsValid() && !isEmptyValue(val) { - transformed["maxFindings"] = transformedMaxFindings - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeInfoTypeName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigLimitsMaxFindingsPerInfoTypeMaxFindings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigInfoTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigInfoTypesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigInfoTypesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigContentOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypes(original["info_types"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedInfoTypes); val.IsValid() && !isEmptyValue(val) { - transformed["infoTypes"] = transformedInfoTypes - } - - transformedRules, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRules(original["rules"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedRules); val.IsValid() && !isEmptyValue(val) { - transformed["rules"] = transformedRules - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetInfoTypesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := 
make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHotwordRule, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRule(original["hotword_rule"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedHotwordRule); val.IsValid() && !isEmptyValue(val) { - transformed["hotwordRule"] = transformedHotwordRule - } - - transformedExclusionRule, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRule(original["exclusion_rule"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedExclusionRule); val.IsValid() && !isEmptyValue(val) { - transformed["exclusionRule"] = transformedExclusionRule - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHotwordRegex, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegex(original["hotword_regex"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedHotwordRegex); val.IsValid() && !isEmptyValue(val) { - transformed["hotwordRegex"] = transformedHotwordRegex - } - - transformedProximity, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximity(original["proximity"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedProximity); val.IsValid() && !isEmptyValue(val) { - transformed["proximity"] = transformedProximity - } - - transformedLikelihoodAdjustment, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(original["likelihood_adjustment"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedLikelihoodAdjustment); val.IsValid() && !isEmptyValue(val) { - transformed["likelihoodAdjustment"] = transformedLikelihoodAdjustment - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPattern, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexPattern(original["pattern"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedPattern); val.IsValid() && !isEmptyValue(val) { - transformed["pattern"] = transformedPattern - } - - transformedGroupIndexes, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexGroupIndexes(original["group_indexes"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedGroupIndexes); val.IsValid() && !isEmptyValue(val) { - transformed["groupIndexes"] = transformedGroupIndexes - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexPattern(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleHotwordRegexGroupIndexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWindowBefore, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowBefore(original["window_before"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedWindowBefore); val.IsValid() && !isEmptyValue(val) { - transformed["windowBefore"] = transformedWindowBefore - } - - transformedWindowAfter, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowAfter(original["window_after"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedWindowAfter); val.IsValid() && !isEmptyValue(val) { - transformed["windowAfter"] = transformedWindowAfter - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowBefore(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleProximityWindowAfter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustment(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFixedLikelihood, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihood(original["fixed_likelihood"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedFixedLikelihood); val.IsValid() && !isEmptyValue(val) { - transformed["fixedLikelihood"] = transformedFixedLikelihood - } - - transformedRelativeLikelihood, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentRelativeLikelihood(original["relative_likelihood"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedRelativeLikelihood); val.IsValid() && !isEmptyValue(val) { - transformed["relativeLikelihood"] = transformedRelativeLikelihood - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentFixedLikelihood(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesHotwordRuleLikelihoodAdjustmentRelativeLikelihood(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMatchingType, err 
:= expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleMatchingType(original["matching_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedMatchingType); val.IsValid() && !isEmptyValue(val) { - transformed["matchingType"] = transformedMatchingType - } - - transformedDictionary, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionary(original["dictionary"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedDictionary); val.IsValid() && !isEmptyValue(val) { - transformed["dictionary"] = transformedDictionary - } - - transformedRegex, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegex(original["regex"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedRegex); val.IsValid() && !isEmptyValue(val) { - transformed["regex"] = transformedRegex - } - - transformedExcludeInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(original["exclude_info_types"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedExcludeInfoTypes); val.IsValid() && !isEmptyValue(val) { - transformed["excludeInfoTypes"] = transformedExcludeInfoTypes - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleMatchingType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionary(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) 
== 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWordList, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(original["word_list"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedWordList); val.IsValid() && !isEmptyValue(val) { - transformed["wordList"] = transformedWordList - } - - transformedCloudStoragePath, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(original["cloud_storage_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedCloudStoragePath); val.IsValid() && !isEmptyValue(val) { - transformed["cloudStoragePath"] = transformedCloudStoragePath - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordList(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWords, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordListWords(original["words"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedWords); val.IsValid() && !isEmptyValue(val) { - transformed["words"] = transformedWords - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryWordListWords(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - 
-func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleDictionaryCloudStoragePathPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPattern, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexPattern(original["pattern"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedPattern); val.IsValid() && !isEmptyValue(val) { - transformed["pattern"] = transformedPattern - } - - transformedGroupIndexes, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexGroupIndexes(original["group_indexes"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedGroupIndexes); val.IsValid() && !isEmptyValue(val) { - transformed["groupIndexes"] = transformedGroupIndexes - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexPattern(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleRegexGroupIndexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInfoTypes, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(original["info_types"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedInfoTypes); val.IsValid() && !isEmptyValue(val) { - transformed["infoTypes"] = transformedInfoTypes - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesName(original["name"], d, config) - if err != nil { 
- return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigRuleSetRulesExclusionRuleExcludeInfoTypesInfoTypesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInfoType, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoType(original["info_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedInfoType); val.IsValid() && !isEmptyValue(val) { - transformed["infoType"] = transformedInfoType - } - - transformedLikelihood, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesLikelihood(original["likelihood"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedLikelihood); val.IsValid() && !isEmptyValue(val) { - transformed["likelihood"] = transformedLikelihood - } - - transformedExclusionType, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesExclusionType(original["exclusion_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedExclusionType); val.IsValid() && !isEmptyValue(val) { - transformed["exclusionType"] = transformedExclusionType - } - 
- transformedRegex, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegex(original["regex"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedRegex); val.IsValid() && !isEmptyValue(val) { - transformed["regex"] = transformedRegex - } - - transformedDictionary, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionary(original["dictionary"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedDictionary); val.IsValid() && !isEmptyValue(val) { - transformed["dictionary"] = transformedDictionary - } - - transformedStoredType, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredType(original["stored_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedStoredType); val.IsValid() && !isEmptyValue(val) { - transformed["storedType"] = transformedStoredType - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func 
expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesInfoTypeName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesLikelihood(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesExclusionType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPattern, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexPattern(original["pattern"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedPattern); val.IsValid() && !isEmptyValue(val) { - transformed["pattern"] = transformedPattern - } - - transformedGroupIndexes, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexGroupIndexes(original["group_indexes"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedGroupIndexes); val.IsValid() && !isEmptyValue(val) { - transformed["groupIndexes"] = transformedGroupIndexes - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexPattern(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesRegexGroupIndexes(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionary(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWordList, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordList(original["word_list"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedWordList); val.IsValid() && !isEmptyValue(val) { - transformed["wordList"] = transformedWordList - } - - transformedCloudStoragePath, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePath(original["cloud_storage_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedCloudStoragePath); val.IsValid() && !isEmptyValue(val) { - transformed["cloudStoragePath"] = transformedCloudStoragePath - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordList(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWords, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordListWords(original["words"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedWords); val.IsValid() && !isEmptyValue(val) { - transformed["words"] = transformedWords - 
} - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryWordListWords(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePathPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - return transformed, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesDictionaryCloudStoragePathPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredTypeName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_inspect_template_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func 
expandDataLossPreventionInspectTemplateInspectConfigCustomInfoTypesStoredTypeName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDataLossPreventionInspectTemplateEncoder(d *resource_data_loss_prevention_inspect_template_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - newObj := make(map[string]interface{}) - newObj["inspectTemplate"] = obj - return newObj, nil -} - -func resourceDataLossPreventionJobTrigger() *resource_data_loss_prevention_job_trigger_schema.Resource { - return &resource_data_loss_prevention_job_trigger_schema.Resource{ - Create: resourceDataLossPreventionJobTriggerCreate, - Read: resourceDataLossPreventionJobTriggerRead, - Update: resourceDataLossPreventionJobTriggerUpdate, - Delete: resourceDataLossPreventionJobTriggerDelete, - - Importer: &resource_data_loss_prevention_job_trigger_schema.ResourceImporter{ - State: resourceDataLossPreventionJobTriggerImport, - }, - - Timeouts: &resource_data_loss_prevention_job_trigger_schema.ResourceTimeout{ - Create: resource_data_loss_prevention_job_trigger_schema.DefaultTimeout(4 * resource_data_loss_prevention_job_trigger_time.Minute), - Update: resource_data_loss_prevention_job_trigger_schema.DefaultTimeout(4 * resource_data_loss_prevention_job_trigger_time.Minute), - Delete: resource_data_loss_prevention_job_trigger_schema.DefaultTimeout(4 * resource_data_loss_prevention_job_trigger_time.Minute), - }, - - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "parent": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The parent of the trigger, either in the format 'projects/{{project}}' -or 'projects/{{project}}/locations/{{location}}'`, - }, - "triggers": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Required: true, - Description: `What event needs to occur for a 
new job to be started.`, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "schedule": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Optional: true, - Description: `Schedule for triggered jobs`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "recurrence_period_duration": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Optional: true, - Description: `With this option a job is started a regular periodic basis. For example: every day (86400 seconds). - -A scheduled start time will be skipped if the previous execution has not ended when its scheduled time occurs. - -This value must be set to a time duration greater than or equal to 1 day and can be no longer than 60 days. - -A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, - }, - }, - }, - }, - }, - }, - }, - "description": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Optional: true, - Description: `A description of the job trigger.`, - }, - "display_name": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Optional: true, - Description: `User set display name of the job trigger.`, - }, - "inspect_job": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Optional: true, - Description: `Controls what and how to inspect for findings.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "actions": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Required: true, - Description: `A task to execute on the completion of a job.`, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "save_findings": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Required: true, - Description: `Schedule for triggered jobs`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "output_config": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Required: true, - Description: `Information on where to store output`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "table": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Required: true, - Description: `Information on the location of the target BigQuery Table.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: 
map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "dataset_id": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Required: true, - Description: `Dataset ID of the table.`, - }, - "project_id": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Required: true, - Description: `The Google Cloud Platform project ID of the project containing the table.`, - }, - "table_id": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Optional: true, - Description: `Name of the table. If is not set a new one will be generated for you with the following format: -'dlp_googleapis_yyyy_mm_dd_[dlp_job_id]'. Pacific timezone will be used for generating the date details.`, - }, - }, - }, - }, - "output_schema": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Optional: true, - ValidateFunc: resource_data_loss_prevention_job_trigger_validation.StringInSlice([]string{"BASIC_COLUMNS", "GCS_COLUMNS", "DATASTORE_COLUMNS", "BIG_QUERY_COLUMNS", "ALL_COLUMNS", ""}, false), - Description: `Schema used for writing the findings for Inspect jobs. This field is only used for -Inspect and must be unspecified for Risk jobs. Columns are derived from the Finding -object. If appending to an existing table, any columns from the predefined schema -that are missing will be added. No columns in the existing table will be deleted. - -If unspecified, then all available columns will be used for a new table or an (existing) -table with no schema, and no changes will be made to an existing table that has a schema. -Only for use with external storage. 
Possible values: ["BASIC_COLUMNS", "GCS_COLUMNS", "DATASTORE_COLUMNS", "BIG_QUERY_COLUMNS", "ALL_COLUMNS"]`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "inspect_template_name": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Required: true, - Description: `The name of the template to run when this job is triggered.`, - }, - "storage_config": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Required: true, - Description: `Information on where to inspect`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "big_query_options": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Optional: true, - Description: `Options defining BigQuery table and row identifiers.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "table_reference": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Required: true, - Description: `Set of files to scan.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "dataset_id": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Required: true, - Description: `The dataset ID of the table.`, - }, - "project_id": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Required: true, - Description: `The Google Cloud Platform project ID of the project containing the table.`, - }, - "table_id": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Required: true, - Description: `The name of the table.`, - }, - }, - }, - }, - }, - }, - }, - "cloud_storage_options": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Optional: true, - Description: 
`Options defining a file or a set of files within a Google Cloud Storage bucket.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "file_set": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Required: true, - Description: `Set of files to scan.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "regex_file_set": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Optional: true, - Description: `The regex-filtered set of files to scan.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "bucket_name": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Required: true, - Description: `The name of a Cloud Storage bucket.`, - }, - "exclude_regex": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Optional: true, - Description: `A list of regular expressions matching file paths to exclude. All files in the bucket that match at -least one of these regular expressions will be excluded from the scan.`, - Elem: &resource_data_loss_prevention_job_trigger_schema.Schema{ - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - }, - }, - "include_regex": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Optional: true, - Description: `A list of regular expressions matching file paths to include. All files in the bucket -that match at least one of these regular expressions will be included in the set of files, -except for those that also match an item in excludeRegex. 
Leaving this field empty will -match all files by default (this is equivalent to including .* in the list)`, - Elem: &resource_data_loss_prevention_job_trigger_schema.Schema{ - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - }, - }, - }, - }, - ExactlyOneOf: []string{"inspect_job.0.storage_config.0.cloud_storage_options.0.file_set.0.url", "inspect_job.0.storage_config.0.cloud_storage_options.0.file_set.0.regex_file_set"}, - }, - "url": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Optional: true, - Description: `The Cloud Storage url of the file(s) to scan, in the format 'gs:///'. Trailing wildcard -in the path is allowed. - -If the url ends in a trailing slash, the bucket or directory represented by the url will be scanned -non-recursively (content in sub-directories will not be scanned). This means that 'gs://mybucket/' is -equivalent to 'gs://mybucket/*', and 'gs://mybucket/directory/' is equivalent to 'gs://mybucket/directory/*'.`, - ExactlyOneOf: []string{"inspect_job.0.storage_config.0.cloud_storage_options.0.file_set.0.url", "inspect_job.0.storage_config.0.cloud_storage_options.0.file_set.0.regex_file_set"}, - }, - }, - }, - }, - "bytes_limit_per_file": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeInt, - Optional: true, - Description: `Max number of bytes to scan from a file. If a scanned file's size is bigger than this value -then the rest of the bytes are omitted.`, - }, - "bytes_limit_per_file_percent": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeInt, - Optional: true, - Description: `Max percentage of bytes to scan from a file. The rest are omitted. The number of bytes scanned is rounded down. -Must be between 0 and 100, inclusively. Both 0 and 100 means no limit.`, - }, - "file_types": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Optional: true, - Description: `List of file type groups to include in the scan. 
If empty, all files are scanned and available data -format processors are applied. In addition, the binary content of the selected files is always scanned as well. -Images are scanned only as binary if the specified region does not support image inspection and no fileTypes were specified. Possible values: ["BINARY_FILE", "TEXT_FILE", "IMAGE", "WORD", "PDF", "AVRO", "CSV", "TSV"]`, - Elem: &resource_data_loss_prevention_job_trigger_schema.Schema{ - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - ValidateFunc: resource_data_loss_prevention_job_trigger_validation.StringInSlice([]string{"BINARY_FILE", "TEXT_FILE", "IMAGE", "WORD", "PDF", "AVRO", "CSV", "TSV"}, false), - }, - }, - "files_limit_percent": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeInt, - Optional: true, - Description: `Limits the number of files to scan to this percentage of the input FileSet. Number of files scanned is rounded down. -Must be between 0 and 100, inclusively. Both 0 and 100 means no limit.`, - }, - "sample_method": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Optional: true, - ValidateFunc: resource_data_loss_prevention_job_trigger_validation.StringInSlice([]string{"TOP", "RANDOM_START", ""}, false), - Description: `How to sample bytes if not all bytes are scanned. Meaningful only when used in conjunction with bytesLimitPerFile. -If not specified, scanning would start from the top. 
Possible values: ["TOP", "RANDOM_START"]`, - }, - }, - }, - }, - "datastore_options": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Optional: true, - Description: `Options defining a data set within Google Cloud Datastore.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "kind": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Required: true, - Description: `A representation of a Datastore kind.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "name": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Required: true, - Description: `The name of the Datastore kind.`, - }, - }, - }, - }, - "partition_id": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Required: true, - Description: `Datastore partition ID. A partition ID identifies a grouping of entities. 
The grouping -is always by project and namespace, however the namespace ID may be empty.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "project_id": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Required: true, - Description: `The ID of the project to which the entities belong.`, - }, - "namespace_id": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Optional: true, - Description: `If not empty, the ID of the namespace to which the entities belong.`, - }, - }, - }, - }, - }, - }, - }, - "timespan_config": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Optional: true, - Description: `Information on where to inspect`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "timestamp_field": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeList, - Required: true, - Description: `Information on where to inspect`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_job_trigger_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_job_trigger_schema.Schema{ - "name": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Required: true, - Description: `Specification of the field containing the timestamp of scanned items. Used for data sources like Datastore and BigQuery. - -For BigQuery: Required to filter out rows based on the given start and end times. If not specified and the table was -modified between the given start and end times, the entire table will be scanned. The valid data types of the timestamp -field are: INTEGER, DATE, TIMESTAMP, or DATETIME BigQuery column. - -For Datastore. Valid data types of the timestamp field are: TIMESTAMP. 
Datastore entity will be scanned if the -timestamp property does not exist or its value is empty or invalid.`, - }, - }, - }, - }, - "enable_auto_population_of_timespan_config": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeBool, - Optional: true, - Description: `When the job is started by a JobTrigger we will automatically figure out a valid startTime to avoid -scanning files that have not been modified since the last time the JobTrigger executed. This will -be based on the time of the execution of the last run of the JobTrigger.`, - }, - "end_time": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Optional: true, - Description: `Exclude files or rows newer than this value. If set to zero, no upper time limit is applied.`, - AtLeastOneOf: []string{"inspect_job.0.storage_config.0.timespan_config.0.start_time", "inspect_job.0.storage_config.0.timespan_config.0.end_time"}, - }, - "start_time": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Optional: true, - Description: `Exclude files or rows older than this value.`, - AtLeastOneOf: []string{"inspect_job.0.storage_config.0.timespan_config.0.start_time", "inspect_job.0.storage_config.0.timespan_config.0.end_time"}, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "status": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Optional: true, - ValidateFunc: resource_data_loss_prevention_job_trigger_validation.StringInSlice([]string{"PAUSED", "HEALTHY", "CANCELLED", ""}, false), - Description: `Whether the trigger is currently active. 
Default value: "HEALTHY" Possible values: ["PAUSED", "HEALTHY", "CANCELLED"]`, - Default: "HEALTHY", - }, - "last_run_time": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Computed: true, - Description: `The timestamp of the last time this trigger executed.`, - }, - "name": { - Type: resource_data_loss_prevention_job_trigger_schema.TypeString, - Computed: true, - Description: `The resource name of the job trigger. Set by the server.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataLossPreventionJobTriggerCreate(d *resource_data_loss_prevention_job_trigger_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandDataLossPreventionJobTriggerDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_data_loss_prevention_job_trigger_reflect.ValueOf(descriptionProp)) && (ok || !resource_data_loss_prevention_job_trigger_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandDataLossPreventionJobTriggerDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_data_loss_prevention_job_trigger_reflect.ValueOf(displayNameProp)) && (ok || !resource_data_loss_prevention_job_trigger_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - statusProp, err := expandDataLossPreventionJobTriggerStatus(d.Get("status"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("status"); !isEmptyValue(resource_data_loss_prevention_job_trigger_reflect.ValueOf(statusProp)) && (ok || !resource_data_loss_prevention_job_trigger_reflect.DeepEqual(v, statusProp)) { - 
obj["status"] = statusProp - } - triggersProp, err := expandDataLossPreventionJobTriggerTriggers(d.Get("triggers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("triggers"); !isEmptyValue(resource_data_loss_prevention_job_trigger_reflect.ValueOf(triggersProp)) && (ok || !resource_data_loss_prevention_job_trigger_reflect.DeepEqual(v, triggersProp)) { - obj["triggers"] = triggersProp - } - inspectJobProp, err := expandDataLossPreventionJobTriggerInspectJob(d.Get("inspect_job"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("inspect_job"); !isEmptyValue(resource_data_loss_prevention_job_trigger_reflect.ValueOf(inspectJobProp)) && (ok || !resource_data_loss_prevention_job_trigger_reflect.DeepEqual(v, inspectJobProp)) { - obj["inspectJob"] = inspectJobProp - } - - obj, err = resourceDataLossPreventionJobTriggerEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/jobTriggers") - if err != nil { - return err - } - - resource_data_loss_prevention_job_trigger_log.Printf("[DEBUG] Creating new JobTrigger: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_data_loss_prevention_job_trigger_schema.TimeoutCreate)) - if err != nil { - return resource_data_loss_prevention_job_trigger_fmt.Errorf("Error creating JobTrigger: %s", err) - } - if err := d.Set("name", flattenDataLossPreventionJobTriggerName(res["name"], d, config)); err != nil { - return resource_data_loss_prevention_job_trigger_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{parent}}/jobTriggers/{{name}}") - if err != nil { - return resource_data_loss_prevention_job_trigger_fmt.Errorf("Error constructing id: %s", err) - } - 
d.SetId(id) - - resource_data_loss_prevention_job_trigger_log.Printf("[DEBUG] Finished creating JobTrigger %q: %#v", d.Id(), res) - - return resourceDataLossPreventionJobTriggerRead(d, meta) -} - -func resourceDataLossPreventionJobTriggerRead(d *resource_data_loss_prevention_job_trigger_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/jobTriggers/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_data_loss_prevention_job_trigger_fmt.Sprintf("DataLossPreventionJobTrigger %q", d.Id())) - } - - if err := d.Set("name", flattenDataLossPreventionJobTriggerName(res["name"], d, config)); err != nil { - return resource_data_loss_prevention_job_trigger_fmt.Errorf("Error reading JobTrigger: %s", err) - } - if err := d.Set("description", flattenDataLossPreventionJobTriggerDescription(res["description"], d, config)); err != nil { - return resource_data_loss_prevention_job_trigger_fmt.Errorf("Error reading JobTrigger: %s", err) - } - if err := d.Set("display_name", flattenDataLossPreventionJobTriggerDisplayName(res["displayName"], d, config)); err != nil { - return resource_data_loss_prevention_job_trigger_fmt.Errorf("Error reading JobTrigger: %s", err) - } - if err := d.Set("last_run_time", flattenDataLossPreventionJobTriggerLastRunTime(res["lastRunTime"], d, config)); err != nil { - return resource_data_loss_prevention_job_trigger_fmt.Errorf("Error reading JobTrigger: %s", err) - } - if err := d.Set("status", flattenDataLossPreventionJobTriggerStatus(res["status"], d, config)); err != nil { - return 
resource_data_loss_prevention_job_trigger_fmt.Errorf("Error reading JobTrigger: %s", err) - } - if err := d.Set("triggers", flattenDataLossPreventionJobTriggerTriggers(res["triggers"], d, config)); err != nil { - return resource_data_loss_prevention_job_trigger_fmt.Errorf("Error reading JobTrigger: %s", err) - } - if err := d.Set("inspect_job", flattenDataLossPreventionJobTriggerInspectJob(res["inspectJob"], d, config)); err != nil { - return resource_data_loss_prevention_job_trigger_fmt.Errorf("Error reading JobTrigger: %s", err) - } - - return nil -} - -func resourceDataLossPreventionJobTriggerUpdate(d *resource_data_loss_prevention_job_trigger_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandDataLossPreventionJobTriggerDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_data_loss_prevention_job_trigger_reflect.ValueOf(v)) && (ok || !resource_data_loss_prevention_job_trigger_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandDataLossPreventionJobTriggerDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_data_loss_prevention_job_trigger_reflect.ValueOf(v)) && (ok || !resource_data_loss_prevention_job_trigger_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - statusProp, err := expandDataLossPreventionJobTriggerStatus(d.Get("status"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("status"); !isEmptyValue(resource_data_loss_prevention_job_trigger_reflect.ValueOf(v)) && (ok || 
!resource_data_loss_prevention_job_trigger_reflect.DeepEqual(v, statusProp)) { - obj["status"] = statusProp - } - triggersProp, err := expandDataLossPreventionJobTriggerTriggers(d.Get("triggers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("triggers"); !isEmptyValue(resource_data_loss_prevention_job_trigger_reflect.ValueOf(v)) && (ok || !resource_data_loss_prevention_job_trigger_reflect.DeepEqual(v, triggersProp)) { - obj["triggers"] = triggersProp - } - inspectJobProp, err := expandDataLossPreventionJobTriggerInspectJob(d.Get("inspect_job"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("inspect_job"); !isEmptyValue(resource_data_loss_prevention_job_trigger_reflect.ValueOf(v)) && (ok || !resource_data_loss_prevention_job_trigger_reflect.DeepEqual(v, inspectJobProp)) { - obj["inspectJob"] = inspectJobProp - } - - obj, err = resourceDataLossPreventionJobTriggerEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/jobTriggers/{{name}}") - if err != nil { - return err - } - - resource_data_loss_prevention_job_trigger_log.Printf("[DEBUG] Updating JobTrigger %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("status") { - updateMask = append(updateMask, "status") - } - - if d.HasChange("triggers") { - updateMask = append(updateMask, "triggers") - } - - if d.HasChange("inspect_job") { - updateMask = append(updateMask, "inspectJob") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_data_loss_prevention_job_trigger_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_data_loss_prevention_job_trigger_schema.TimeoutUpdate)) - - if err != nil { - return resource_data_loss_prevention_job_trigger_fmt.Errorf("Error updating JobTrigger %q: %s", d.Id(), err) - } else { - resource_data_loss_prevention_job_trigger_log.Printf("[DEBUG] Finished updating JobTrigger %q: %#v", d.Id(), res) - } - - return resourceDataLossPreventionJobTriggerRead(d, meta) -} - -func resourceDataLossPreventionJobTriggerDelete(d *resource_data_loss_prevention_job_trigger_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/jobTriggers/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_data_loss_prevention_job_trigger_log.Printf("[DEBUG] Deleting JobTrigger %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_data_loss_prevention_job_trigger_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "JobTrigger") - } - - resource_data_loss_prevention_job_trigger_log.Printf("[DEBUG] Finished deleting JobTrigger %q: %#v", d.Id(), res) - return nil -} - -func resourceDataLossPreventionJobTriggerImport(d *resource_data_loss_prevention_job_trigger_schema.ResourceData, meta interface{}) ([]*resource_data_loss_prevention_job_trigger_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - parts := resource_data_loss_prevention_job_trigger_strings.Split(d.Get("name").(string), "/") - if len(parts) == 6 { - if err := d.Set("name", parts[5]); err 
!= nil { - return nil, resource_data_loss_prevention_job_trigger_fmt.Errorf("Error setting name: %s", err) - } - } else if len(parts) == 4 { - if err := d.Set("name", parts[3]); err != nil { - return nil, resource_data_loss_prevention_job_trigger_fmt.Errorf("Error setting name: %s", err) - } - } else { - return nil, resource_data_loss_prevention_job_trigger_fmt.Errorf("Unexpected import id: %s, expected form {{parent}}/jobTrigger/{{name}}", d.Get("name").(string)) - } - - parts = parts[:len(parts)-2] - if err := d.Set("parent", resource_data_loss_prevention_job_trigger_strings.Join(parts, "/")); err != nil { - return nil, resource_data_loss_prevention_job_trigger_fmt.Errorf("Error setting parent: %s", err) - } - - id, err := replaceVars(d, config, "{{parent}}/jobTriggers/{{name}}") - if err != nil { - return nil, resource_data_loss_prevention_job_trigger_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_data_loss_prevention_job_trigger_schema.ResourceData{d}, nil -} - -func flattenDataLossPreventionJobTriggerName(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDataLossPreventionJobTriggerDescription(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerDisplayName(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerLastRunTime(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerStatus(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDataLossPreventionJobTriggerTriggers(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "schedule": flattenDataLossPreventionJobTriggerTriggersSchedule(original["schedule"], d, config), - }) - } - return transformed -} - -func flattenDataLossPreventionJobTriggerTriggersSchedule(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["recurrence_period_duration"] = - flattenDataLossPreventionJobTriggerTriggersScheduleRecurrencePeriodDuration(original["recurrencePeriodDuration"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionJobTriggerTriggersScheduleRecurrencePeriodDuration(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJob(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["inspect_template_name"] = - flattenDataLossPreventionJobTriggerInspectJobInspectTemplateName(original["inspectTemplateName"], d, config) - transformed["storage_config"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfig(original["storageConfig"], d, config) - transformed["actions"] = - 
flattenDataLossPreventionJobTriggerInspectJobActions(original["actions"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionJobTriggerInspectJobInspectTemplateName(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfig(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["timespan_config"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfig(original["timespanConfig"], d, config) - transformed["datastore_options"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptions(original["datastoreOptions"], d, config) - transformed["cloud_storage_options"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptions(original["cloudStorageOptions"], d, config) - transformed["big_query_options"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptions(original["bigQueryOptions"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfig(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["start_time"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigStartTime(original["startTime"], d, config) - transformed["end_time"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEndTime(original["endTime"], d, config) - 
transformed["enable_auto_population_of_timespan_config"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEnableAutoPopulationOfTimespanConfig(original["enableAutoPopulationOfTimespanConfig"], d, config) - transformed["timestamp_field"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampField(original["timestampField"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigStartTime(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEndTime(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEnableAutoPopulationOfTimespanConfig(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampField(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampFieldName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampFieldName(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptions(v interface{}, d 
*resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["partition_id"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId(original["partitionId"], d, config) - transformed["kind"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKind(original["kind"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdProjectId(original["projectId"], d, config) - transformed["namespace_id"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdNamespaceId(original["namespaceId"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdProjectId(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdNamespaceId(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKind(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - 
return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKindName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKindName(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptions(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["file_set"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet(original["fileSet"], d, config) - transformed["bytes_limit_per_file"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFile(original["bytesLimitPerFile"], d, config) - transformed["bytes_limit_per_file_percent"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFilePercent(original["bytesLimitPerFilePercent"], d, config) - transformed["files_limit_percent"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFilesLimitPercent(original["filesLimitPercent"], d, config) - transformed["file_types"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypes(original["fileTypes"], d, config) - transformed["sample_method"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethod(original["sampleMethod"], d, config) - return []interface{}{transformed} -} - -func 
flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["url"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetUrl(original["url"], d, config) - transformed["regex_file_set"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet(original["regexFileSet"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetUrl(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["bucket_name"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetBucketName(original["bucketName"], d, config) - transformed["include_regex"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetIncludeRegex(original["includeRegex"], d, config) - transformed["exclude_regex"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetExcludeRegex(original["excludeRegex"], d, config) - return []interface{}{transformed} -} - -func 
flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetBucketName(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetIncludeRegex(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetExcludeRegex(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFile(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_job_trigger_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFilePercent(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_data_loss_prevention_job_trigger_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFilesLimitPercent(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := 
resource_data_loss_prevention_job_trigger_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypes(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethod(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptions(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["table_reference"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference(original["tableReference"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceProjectId(original["projectId"], d, config) - transformed["dataset_id"] = - flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceDatasetId(original["datasetId"], d, config) - transformed["table_id"] = - 
flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceTableId(original["tableId"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceProjectId(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceDatasetId(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceTableId(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobActions(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "save_findings": flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindings(original["saveFindings"], d, config), - }) - } - return transformed -} - -func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindings(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["output_config"] = - flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfig(original["outputConfig"], d, config) - return []interface{}{transformed} -} - -func 
flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfig(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["table"] = - flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTable(original["table"], d, config) - transformed["output_schema"] = - flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchema(original["outputSchema"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTable(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = - flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableProjectId(original["projectId"], d, config) - transformed["dataset_id"] = - flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableDatasetId(original["datasetId"], d, config) - transformed["table_id"] = - flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableTableId(original["tableId"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableProjectId(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableDatasetId(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableTableId(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchema(v interface{}, d *resource_data_loss_prevention_job_trigger_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDataLossPreventionJobTriggerDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerStatus(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerTriggers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSchedule, err := expandDataLossPreventionJobTriggerTriggersSchedule(original["schedule"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedSchedule); val.IsValid() && !isEmptyValue(val) { - transformed["schedule"] = transformedSchedule - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionJobTriggerTriggersSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - 
transformedRecurrencePeriodDuration, err := expandDataLossPreventionJobTriggerTriggersScheduleRecurrencePeriodDuration(original["recurrence_period_duration"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedRecurrencePeriodDuration); val.IsValid() && !isEmptyValue(val) { - transformed["recurrencePeriodDuration"] = transformedRecurrencePeriodDuration - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerTriggersScheduleRecurrencePeriodDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJob(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInspectTemplateName, err := expandDataLossPreventionJobTriggerInspectJobInspectTemplateName(original["inspect_template_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedInspectTemplateName); val.IsValid() && !isEmptyValue(val) { - transformed["inspectTemplateName"] = transformedInspectTemplateName - } - - transformedStorageConfig, err := expandDataLossPreventionJobTriggerInspectJobStorageConfig(original["storage_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedStorageConfig); val.IsValid() && !isEmptyValue(val) { - transformed["storageConfig"] = transformedStorageConfig - } - - transformedActions, err := expandDataLossPreventionJobTriggerInspectJobActions(original["actions"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedActions); val.IsValid() && !isEmptyValue(val) { - transformed["actions"] = transformedActions - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobInspectTemplateName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTimespanConfig, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfig(original["timespan_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedTimespanConfig); val.IsValid() && !isEmptyValue(val) { - transformed["timespanConfig"] = transformedTimespanConfig - } - - transformedDatastoreOptions, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptions(original["datastore_options"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedDatastoreOptions); val.IsValid() && !isEmptyValue(val) { - transformed["datastoreOptions"] = transformedDatastoreOptions - } - - transformedCloudStorageOptions, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptions(original["cloud_storage_options"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedCloudStorageOptions); val.IsValid() && !isEmptyValue(val) { - transformed["cloudStorageOptions"] = transformedCloudStorageOptions - } - - transformedBigQueryOptions, err := 
expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptions(original["big_query_options"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedBigQueryOptions); val.IsValid() && !isEmptyValue(val) { - transformed["bigQueryOptions"] = transformedBigQueryOptions - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedStartTime, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigStartTime(original["start_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { - transformed["startTime"] = transformedStartTime - } - - transformedEndTime, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEndTime(original["end_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedEndTime); val.IsValid() && !isEmptyValue(val) { - transformed["endTime"] = transformedEndTime - } - - transformedEnableAutoPopulationOfTimespanConfig, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEnableAutoPopulationOfTimespanConfig(original["enable_auto_population_of_timespan_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedEnableAutoPopulationOfTimespanConfig); val.IsValid() && !isEmptyValue(val) { - transformed["enableAutoPopulationOfTimespanConfig"] = 
transformedEnableAutoPopulationOfTimespanConfig - } - - transformedTimestampField, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampField(original["timestamp_field"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedTimestampField); val.IsValid() && !isEmptyValue(val) { - transformed["timestampField"] = transformedTimestampField - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEndTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigEnableAutoPopulationOfTimespanConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampField(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampFieldName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigTimespanConfigTimestampFieldName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) 
{ - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPartitionId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId(original["partition_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedPartitionId); val.IsValid() && !isEmptyValue(val) { - transformed["partitionId"] = transformedPartitionId - } - - transformedKind, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKind(original["kind"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedKind); val.IsValid() && !isEmptyValue(val) { - transformed["kind"] = transformedKind - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedNamespaceId, err := 
expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdNamespaceId(original["namespace_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedNamespaceId); val.IsValid() && !isEmptyValue(val) { - transformed["namespaceId"] = transformedNamespaceId - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsPartitionIdNamespaceId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKindName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigDatastoreOptionsKindName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFileSet, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet(original["file_set"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedFileSet); val.IsValid() && !isEmptyValue(val) { - transformed["fileSet"] = transformedFileSet - } - - transformedBytesLimitPerFile, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFile(original["bytes_limit_per_file"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedBytesLimitPerFile); val.IsValid() && !isEmptyValue(val) { - transformed["bytesLimitPerFile"] = transformedBytesLimitPerFile - } - - transformedBytesLimitPerFilePercent, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFilePercent(original["bytes_limit_per_file_percent"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedBytesLimitPerFilePercent); val.IsValid() && !isEmptyValue(val) { - transformed["bytesLimitPerFilePercent"] = transformedBytesLimitPerFilePercent - } - - transformedFilesLimitPercent, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFilesLimitPercent(original["files_limit_percent"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedFilesLimitPercent); val.IsValid() && !isEmptyValue(val) { - transformed["filesLimitPercent"] = transformedFilesLimitPercent - } - - transformedFileTypes, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypes(original["file_types"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedFileTypes); val.IsValid() && !isEmptyValue(val) { - transformed["fileTypes"] = transformedFileTypes - } - - transformedSampleMethod, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethod(original["sample_method"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedSampleMethod); val.IsValid() && !isEmptyValue(val) { - transformed["sampleMethod"] = transformedSampleMethod - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrl, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetUrl(original["url"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { - transformed["url"] = transformedUrl - } - - transformedRegexFileSet, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet(original["regex_file_set"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedRegexFileSet); val.IsValid() && !isEmptyValue(val) { - transformed["regexFileSet"] = transformedRegexFileSet - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBucketName, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetBucketName(original["bucket_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedBucketName); val.IsValid() && !isEmptyValue(val) { - transformed["bucketName"] = transformedBucketName - } - - transformedIncludeRegex, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetIncludeRegex(original["include_regex"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedIncludeRegex); val.IsValid() && !isEmptyValue(val) { - transformed["includeRegex"] = transformedIncludeRegex - } - - transformedExcludeRegex, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetExcludeRegex(original["exclude_regex"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedExcludeRegex); val.IsValid() && !isEmptyValue(val) { - transformed["excludeRegex"] = transformedExcludeRegex - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetBucketName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetIncludeRegex(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileSetRegexFileSetExcludeRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFile(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsBytesLimitPerFilePercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFilesLimitPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsFileTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigCloudStorageOptionsSampleMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTableReference, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference(original["table_reference"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedTableReference); val.IsValid() && !isEmptyValue(val) { - 
transformed["tableReference"] = transformedTableReference - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedDatasetId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceDatasetId(original["dataset_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedTableId, err := expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceTableId(original["table_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { - transformed["tableId"] = transformedTableId - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceDatasetId(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobStorageConfigBigQueryOptionsTableReferenceTableId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSaveFindings, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindings(original["save_findings"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedSaveFindings); val.IsValid() && !isEmptyValue(val) { - transformed["saveFindings"] = transformedSaveFindings - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedOutputConfig, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfig(original["output_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedOutputConfig); val.IsValid() && !isEmptyValue(val) { - transformed["outputConfig"] = transformedOutputConfig - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || 
l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTable, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTable(original["table"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { - transformed["table"] = transformedTable - } - - transformedOutputSchema, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchema(original["output_schema"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedOutputSchema); val.IsValid() && !isEmptyValue(val) { - transformed["outputSchema"] = transformedOutputSchema - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedDatasetId, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableDatasetId(original["dataset_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - 
transformed["datasetId"] = transformedDatasetId - } - - transformedTableId, err := expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableTableId(original["table_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_job_trigger_reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { - transformed["tableId"] = transformedTableId - } - - return transformed, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigTableTableId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionJobTriggerInspectJobActionsSaveFindingsOutputConfigOutputSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDataLossPreventionJobTriggerEncoder(d *resource_data_loss_prevention_job_trigger_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - newObj := make(map[string]interface{}) - newObj["jobTrigger"] = obj - return newObj, nil -} - -func resourceDataLossPreventionStoredInfoType() *resource_data_loss_prevention_stored_info_type_schema.Resource { - return &resource_data_loss_prevention_stored_info_type_schema.Resource{ - Create: resourceDataLossPreventionStoredInfoTypeCreate, - Read: resourceDataLossPreventionStoredInfoTypeRead, - Update: resourceDataLossPreventionStoredInfoTypeUpdate, - Delete: resourceDataLossPreventionStoredInfoTypeDelete, - - Importer: 
&resource_data_loss_prevention_stored_info_type_schema.ResourceImporter{ - State: resourceDataLossPreventionStoredInfoTypeImport, - }, - - Timeouts: &resource_data_loss_prevention_stored_info_type_schema.ResourceTimeout{ - Create: resource_data_loss_prevention_stored_info_type_schema.DefaultTimeout(4 * resource_data_loss_prevention_stored_info_type_time.Minute), - Update: resource_data_loss_prevention_stored_info_type_schema.DefaultTimeout(4 * resource_data_loss_prevention_stored_info_type_time.Minute), - Delete: resource_data_loss_prevention_stored_info_type_schema.DefaultTimeout(4 * resource_data_loss_prevention_stored_info_type_time.Minute), - }, - - Schema: map[string]*resource_data_loss_prevention_stored_info_type_schema.Schema{ - "parent": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The parent of the info type in any of the following formats: - -* 'projects/{{project}}' -* 'projects/{{project}}/locations/{{location}}' -* 'organizations/{{organization_id}}' -* 'organizations/{{organization_id}}/locations/{{location}}'`, - }, - "description": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeString, - Optional: true, - Description: `A description of the info type.`, - }, - "dictionary": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Dictionary which defines the rule.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_stored_info_type_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_stored_info_type_schema.Schema{ - "cloud_storage_path": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeList, - Optional: true, - Description: `Newline-delimited file of words in Cloud Storage. 
Only a single file is accepted.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_stored_info_type_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_stored_info_type_schema.Schema{ - "path": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeString, - Required: true, - Description: `A url representing a file or path (no wildcards) in Cloud Storage. Example: 'gs://[BUCKET_NAME]/dictionary.txt'`, - }, - }, - }, - ExactlyOneOf: []string{"dictionary.0.word_list", "dictionary.0.cloud_storage_path"}, - }, - "word_list": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeList, - Optional: true, - Description: `List of words or phrases to search for.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_stored_info_type_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_stored_info_type_schema.Schema{ - "words": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeList, - Required: true, - Description: `Words or phrases defining the dictionary. 
The dictionary must contain at least one -phrase and every phrase must contain at least 2 characters that are letters or digits.`, - Elem: &resource_data_loss_prevention_stored_info_type_schema.Schema{ - Type: resource_data_loss_prevention_stored_info_type_schema.TypeString, - }, - }, - }, - }, - ExactlyOneOf: []string{"dictionary.0.word_list", "dictionary.0.cloud_storage_path"}, - }, - }, - }, - ExactlyOneOf: []string{"dictionary", "regex", "large_custom_dictionary"}, - }, - "display_name": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeString, - Optional: true, - Description: `User set display name of the info type.`, - }, - "large_custom_dictionary": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Dictionary which defines the rule.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_stored_info_type_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_stored_info_type_schema.Schema{ - "output_path": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeList, - Required: true, - Description: `Location to store dictionary artifacts in Google Cloud Storage. These files will only be accessible by project owners and the DLP API. -If any of these artifacts are modified, the dictionary is considered invalid and can no longer be used.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_stored_info_type_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_stored_info_type_schema.Schema{ - "path": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeString, - Required: true, - Description: `A url representing a file or path (no wildcards) in Cloud Storage. 
Example: 'gs://[BUCKET_NAME]/dictionary.txt'`, - }, - }, - }, - }, - "big_query_field": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeList, - Optional: true, - Description: `Field in a BigQuery table where each cell represents a dictionary phrase.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_stored_info_type_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_stored_info_type_schema.Schema{ - "field": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeList, - Required: true, - Description: `Designated field in the BigQuery table.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_stored_info_type_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_stored_info_type_schema.Schema{ - "name": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeString, - Required: true, - Description: `Name describing the field.`, - }, - }, - }, - }, - "table": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeList, - Required: true, - Description: `Field in a BigQuery table where each cell represents a dictionary phrase.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_stored_info_type_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_stored_info_type_schema.Schema{ - "dataset_id": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeString, - Required: true, - Description: `The dataset ID of the table.`, - }, - "project_id": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeString, - Required: true, - Description: `The Google Cloud Platform project ID of the project containing the table.`, - }, - "table_id": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeString, - Required: true, - Description: `The name of the table.`, - }, - }, - }, - }, - }, - }, - ExactlyOneOf: []string{"large_custom_dictionary.0.cloud_storage_file_set", "large_custom_dictionary.0.big_query_field"}, - }, - 
"cloud_storage_file_set": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeList, - Optional: true, - Description: `Set of files containing newline-delimited lists of dictionary phrases.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_stored_info_type_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_stored_info_type_schema.Schema{ - "url": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeString, - Required: true, - Description: `The url, in the format 'gs:///'. Trailing wildcard in the path is allowed.`, - }, - }, - }, - ExactlyOneOf: []string{"large_custom_dictionary.0.cloud_storage_file_set", "large_custom_dictionary.0.big_query_field"}, - }, - }, - }, - ExactlyOneOf: []string{"dictionary", "regex", "large_custom_dictionary"}, - }, - "regex": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Regular expression which defines the rule.`, - MaxItems: 1, - Elem: &resource_data_loss_prevention_stored_info_type_schema.Resource{ - Schema: map[string]*resource_data_loss_prevention_stored_info_type_schema.Schema{ - "pattern": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeString, - Required: true, - Description: `Pattern defining the regular expression. -Its syntax (https://github.com/google/re2/wiki/Syntax) can be found under the google/re2 repository on GitHub.`, - }, - "group_indexes": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The index of the submatch to extract as findings. When not specified, the entire match is returned. 
No more than 3 may be included.`, - Elem: &resource_data_loss_prevention_stored_info_type_schema.Schema{ - Type: resource_data_loss_prevention_stored_info_type_schema.TypeInt, - }, - }, - }, - }, - ExactlyOneOf: []string{"dictionary", "regex", "large_custom_dictionary"}, - }, - "name": { - Type: resource_data_loss_prevention_stored_info_type_schema.TypeString, - Computed: true, - Description: `The resource name of the info type. Set by the server.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataLossPreventionStoredInfoTypeCreate(d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandDataLossPreventionStoredInfoTypeDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_data_loss_prevention_stored_info_type_reflect.ValueOf(descriptionProp)) && (ok || !resource_data_loss_prevention_stored_info_type_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandDataLossPreventionStoredInfoTypeDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_data_loss_prevention_stored_info_type_reflect.ValueOf(displayNameProp)) && (ok || !resource_data_loss_prevention_stored_info_type_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - regexProp, err := expandDataLossPreventionStoredInfoTypeRegex(d.Get("regex"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("regex"); !isEmptyValue(resource_data_loss_prevention_stored_info_type_reflect.ValueOf(regexProp)) && (ok || 
!resource_data_loss_prevention_stored_info_type_reflect.DeepEqual(v, regexProp)) { - obj["regex"] = regexProp - } - dictionaryProp, err := expandDataLossPreventionStoredInfoTypeDictionary(d.Get("dictionary"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("dictionary"); !isEmptyValue(resource_data_loss_prevention_stored_info_type_reflect.ValueOf(dictionaryProp)) && (ok || !resource_data_loss_prevention_stored_info_type_reflect.DeepEqual(v, dictionaryProp)) { - obj["dictionary"] = dictionaryProp - } - largeCustomDictionaryProp, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionary(d.Get("large_custom_dictionary"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("large_custom_dictionary"); !isEmptyValue(resource_data_loss_prevention_stored_info_type_reflect.ValueOf(largeCustomDictionaryProp)) && (ok || !resource_data_loss_prevention_stored_info_type_reflect.DeepEqual(v, largeCustomDictionaryProp)) { - obj["largeCustomDictionary"] = largeCustomDictionaryProp - } - - obj, err = resourceDataLossPreventionStoredInfoTypeEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/storedInfoTypes") - if err != nil { - return err - } - - resource_data_loss_prevention_stored_info_type_log.Printf("[DEBUG] Creating new StoredInfoType: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_data_loss_prevention_stored_info_type_schema.TimeoutCreate)) - if err != nil { - return resource_data_loss_prevention_stored_info_type_fmt.Errorf("Error creating StoredInfoType: %s", err) - } - if err := d.Set("name", flattenDataLossPreventionStoredInfoTypeName(res["name"], d, config)); err != nil { - return resource_data_loss_prevention_stored_info_type_fmt.Errorf(`Error 
setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{parent}}/storedInfoTypes/{{name}}") - if err != nil { - return resource_data_loss_prevention_stored_info_type_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = PollingWaitTime(resourceDataLossPreventionStoredInfoTypePollRead(d, meta), PollCheckForExistence, "Creating StoredInfoType", d.Timeout(resource_data_loss_prevention_stored_info_type_schema.TimeoutCreate), 1) - if err != nil { - return resource_data_loss_prevention_stored_info_type_fmt.Errorf("Error waiting to create StoredInfoType: %s", err) - } - - resource_data_loss_prevention_stored_info_type_log.Printf("[DEBUG] Finished creating StoredInfoType %q: %#v", d.Id(), res) - - return resourceDataLossPreventionStoredInfoTypeRead(d, meta) -} - -func resourceDataLossPreventionStoredInfoTypePollRead(d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/storedInfoTypes/{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return res, err - } - res, err = resourceDataLossPreventionStoredInfoTypeDecoder(d, meta, res) - if err != nil { - return nil, err - } - if res == nil { - - return nil, &resource_data_loss_prevention_stored_info_type_googleapi.Error{ - Code: 404, - Message: "could not find object DataLossPreventionStoredInfoType", - } - } - - return res, nil - } -} - -func resourceDataLossPreventionStoredInfoTypeRead(d 
*resource_data_loss_prevention_stored_info_type_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/storedInfoTypes/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_data_loss_prevention_stored_info_type_fmt.Sprintf("DataLossPreventionStoredInfoType %q", d.Id())) - } - - res, err = resourceDataLossPreventionStoredInfoTypeDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_data_loss_prevention_stored_info_type_log.Printf("[DEBUG] Removing DataLossPreventionStoredInfoType because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("name", flattenDataLossPreventionStoredInfoTypeName(res["name"], d, config)); err != nil { - return resource_data_loss_prevention_stored_info_type_fmt.Errorf("Error reading StoredInfoType: %s", err) - } - if err := d.Set("description", flattenDataLossPreventionStoredInfoTypeDescription(res["description"], d, config)); err != nil { - return resource_data_loss_prevention_stored_info_type_fmt.Errorf("Error reading StoredInfoType: %s", err) - } - if err := d.Set("display_name", flattenDataLossPreventionStoredInfoTypeDisplayName(res["displayName"], d, config)); err != nil { - return resource_data_loss_prevention_stored_info_type_fmt.Errorf("Error reading StoredInfoType: %s", err) - } - if err := d.Set("regex", flattenDataLossPreventionStoredInfoTypeRegex(res["regex"], d, config)); err != nil { - return resource_data_loss_prevention_stored_info_type_fmt.Errorf("Error reading StoredInfoType: %s", err) - } - if err := d.Set("dictionary", 
flattenDataLossPreventionStoredInfoTypeDictionary(res["dictionary"], d, config)); err != nil { - return resource_data_loss_prevention_stored_info_type_fmt.Errorf("Error reading StoredInfoType: %s", err) - } - if err := d.Set("large_custom_dictionary", flattenDataLossPreventionStoredInfoTypeLargeCustomDictionary(res["largeCustomDictionary"], d, config)); err != nil { - return resource_data_loss_prevention_stored_info_type_fmt.Errorf("Error reading StoredInfoType: %s", err) - } - - return nil -} - -func resourceDataLossPreventionStoredInfoTypeUpdate(d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandDataLossPreventionStoredInfoTypeDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_data_loss_prevention_stored_info_type_reflect.ValueOf(v)) && (ok || !resource_data_loss_prevention_stored_info_type_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandDataLossPreventionStoredInfoTypeDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_data_loss_prevention_stored_info_type_reflect.ValueOf(v)) && (ok || !resource_data_loss_prevention_stored_info_type_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - - obj, err = resourceDataLossPreventionStoredInfoTypeEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/storedInfoTypes/{{name}}") - if err != nil { - return err - } - - resource_data_loss_prevention_stored_info_type_log.Printf("[DEBUG] 
Updating StoredInfoType %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_data_loss_prevention_stored_info_type_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_data_loss_prevention_stored_info_type_schema.TimeoutUpdate)) - - if err != nil { - return resource_data_loss_prevention_stored_info_type_fmt.Errorf("Error updating StoredInfoType %q: %s", d.Id(), err) - } else { - resource_data_loss_prevention_stored_info_type_log.Printf("[DEBUG] Finished updating StoredInfoType %q: %#v", d.Id(), res) - } - - return resourceDataLossPreventionStoredInfoTypeRead(d, meta) -} - -func resourceDataLossPreventionStoredInfoTypeDelete(d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DataLossPreventionBasePath}}{{parent}}/storedInfoTypes/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_data_loss_prevention_stored_info_type_log.Printf("[DEBUG] Deleting StoredInfoType %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_data_loss_prevention_stored_info_type_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "StoredInfoType") - } - - 
resource_data_loss_prevention_stored_info_type_log.Printf("[DEBUG] Finished deleting StoredInfoType %q: %#v", d.Id(), res) - return nil -} - -func resourceDataLossPreventionStoredInfoTypeImport(d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, meta interface{}) ([]*resource_data_loss_prevention_stored_info_type_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - parts := resource_data_loss_prevention_stored_info_type_strings.Split(d.Get("name").(string), "/") - if len(parts) == 6 { - if err := d.Set("name", parts[5]); err != nil { - return nil, resource_data_loss_prevention_stored_info_type_fmt.Errorf("Error setting name: %s", err) - } - } else if len(parts) == 4 { - if err := d.Set("name", parts[3]); err != nil { - return nil, resource_data_loss_prevention_stored_info_type_fmt.Errorf("Error setting name: %s", err) - } - } else { - return nil, resource_data_loss_prevention_stored_info_type_fmt.Errorf("Unexpected import id: %s, expected form {{parent}}/storedInfoType/{{name}}", d.Get("name").(string)) - } - - parts = parts[:len(parts)-2] - if err := d.Set("parent", resource_data_loss_prevention_stored_info_type_strings.Join(parts, "/")); err != nil { - return nil, resource_data_loss_prevention_stored_info_type_fmt.Errorf("Error setting parent: %s", err) - } - - id, err := replaceVars(d, config, "{{parent}}/storedInfoTypes/{{name}}") - if err != nil { - return nil, resource_data_loss_prevention_stored_info_type_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_data_loss_prevention_stored_info_type_schema.ResourceData{d}, nil -} - -func flattenDataLossPreventionStoredInfoTypeName(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func 
flattenDataLossPreventionStoredInfoTypeDescription(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeDisplayName(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeRegex(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pattern"] = - flattenDataLossPreventionStoredInfoTypeRegexPattern(original["pattern"], d, config) - transformed["group_indexes"] = - flattenDataLossPreventionStoredInfoTypeRegexGroupIndexes(original["groupIndexes"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionStoredInfoTypeRegexPattern(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeRegexGroupIndexes(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeDictionary(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["word_list"] = - flattenDataLossPreventionStoredInfoTypeDictionaryWordList(original["wordList"], d, config) - transformed["cloud_storage_path"] = - flattenDataLossPreventionStoredInfoTypeDictionaryCloudStoragePath(original["cloudStoragePath"], d, config) - return 
[]interface{}{transformed} -} - -func flattenDataLossPreventionStoredInfoTypeDictionaryWordList(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["words"] = - flattenDataLossPreventionStoredInfoTypeDictionaryWordListWords(original["words"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionStoredInfoTypeDictionaryWordListWords(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeDictionaryCloudStoragePath(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["path"] = - flattenDataLossPreventionStoredInfoTypeDictionaryCloudStoragePathPath(original["path"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionStoredInfoTypeDictionaryCloudStoragePathPath(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionary(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["output_path"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPath(original["outputPath"], d, config) - transformed["cloud_storage_file_set"] = - 
flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet(original["cloudStorageFileSet"], d, config) - transformed["big_query_field"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryField(original["bigQueryField"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPath(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["path"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPathPath(original["path"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPathPath(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["url"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSetUrl(original["url"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSetUrl(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryField(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) 
interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["table"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable(original["table"], d, config) - transformed["field"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField(original["field"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_id"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableProjectId(original["projectId"], d, config) - transformed["dataset_id"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableDatasetId(original["datasetId"], d, config) - transformed["table_id"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableTableId(original["tableId"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableProjectId(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableDatasetId(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableTableId(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldFieldName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldFieldName(v interface{}, d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDataLossPreventionStoredInfoTypeDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeRegex(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPattern, err := expandDataLossPreventionStoredInfoTypeRegexPattern(original["pattern"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedPattern); val.IsValid() && !isEmptyValue(val) { - transformed["pattern"] = transformedPattern - } - - transformedGroupIndexes, err := expandDataLossPreventionStoredInfoTypeRegexGroupIndexes(original["group_indexes"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedGroupIndexes); val.IsValid() && !isEmptyValue(val) { - transformed["groupIndexes"] = transformedGroupIndexes - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeRegexPattern(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeRegexGroupIndexes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeDictionary(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWordList, err := expandDataLossPreventionStoredInfoTypeDictionaryWordList(original["word_list"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedWordList); val.IsValid() && !isEmptyValue(val) { - transformed["wordList"] = transformedWordList - } - - transformedCloudStoragePath, err := expandDataLossPreventionStoredInfoTypeDictionaryCloudStoragePath(original["cloud_storage_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedCloudStoragePath); val.IsValid() && !isEmptyValue(val) { - transformed["cloudStoragePath"] = transformedCloudStoragePath - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeDictionaryWordList(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWords, err := 
expandDataLossPreventionStoredInfoTypeDictionaryWordListWords(original["words"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedWords); val.IsValid() && !isEmptyValue(val) { - transformed["words"] = transformedWords - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeDictionaryWordListWords(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeDictionaryCloudStoragePath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandDataLossPreventionStoredInfoTypeDictionaryCloudStoragePathPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeDictionaryCloudStoragePathPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionary(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedOutputPath, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPath(original["output_path"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedOutputPath); val.IsValid() && !isEmptyValue(val) { - transformed["outputPath"] = transformedOutputPath - } - - transformedCloudStorageFileSet, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet(original["cloud_storage_file_set"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedCloudStorageFileSet); val.IsValid() && !isEmptyValue(val) { - transformed["cloudStorageFileSet"] = transformedCloudStorageFileSet - } - - transformedBigQueryField, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryField(original["big_query_field"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedBigQueryField); val.IsValid() && !isEmptyValue(val) { - transformed["bigQueryField"] = transformedBigQueryField - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPath, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPathPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryOutputPathPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUrl, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSetUrl(original["url"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedUrl); val.IsValid() && !isEmptyValue(val) { - transformed["url"] = transformedUrl - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryCloudStorageFileSetUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryField(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTable, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable(original["table"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedTable); val.IsValid() && !isEmptyValue(val) { - transformed["table"] = transformedTable - } - - transformedField, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField(original["field"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedField); val.IsValid() && !isEmptyValue(val) { - transformed["field"] = transformedField - } - - return transformed, 
nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProjectId, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - transformedDatasetId, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableDatasetId(original["dataset_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedDatasetId); val.IsValid() && !isEmptyValue(val) { - transformed["datasetId"] = transformedDatasetId - } - - transformedTableId, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableTableId(original["table_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedTableId); val.IsValid() && !isEmptyValue(val) { - transformed["tableId"] = transformedTableId - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableDatasetId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableTableId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldFieldName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_data_loss_prevention_stored_info_type_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandDataLossPreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldFieldName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDataLossPreventionStoredInfoTypeEncoder(d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - newObj := make(map[string]interface{}) - newObj["config"] = obj - return newObj, nil -} - -func resourceDataLossPreventionStoredInfoTypeDecoder(d *resource_data_loss_prevention_stored_info_type_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - - name := res["name"].(string) - v, ok := res["currentVersion"] - if !ok || v == nil { - return nil, nil - } - - current := v.(map[string]interface{}) - configRaw, ok := current["config"] - if !ok || configRaw == nil { - return nil, nil - } - - config := configRaw.(map[string]interface{}) - - config["name"] = name - - return config, nil -} - -const 
resourceDataflowJobGoogleProvidedLabelPrefix = "labels.goog-dataflow-provided" - -var dataflowTerminalStatesMap = map[string]struct{}{ - "JOB_STATE_DONE": {}, - "JOB_STATE_FAILED": {}, - "JOB_STATE_CANCELLED": {}, - "JOB_STATE_UPDATED": {}, - "JOB_STATE_DRAINED": {}, -} - -func resourceDataflowJobLabelDiffSuppress(k, old, new string, d *resource_dataflow_job_schema.ResourceData) bool { - - if resource_dataflow_job_strings.HasPrefix(k, resourceDataflowJobGoogleProvidedLabelPrefix) && new == "" { - - return true - } - - if resource_dataflow_job_strings.HasPrefix(k, "labels.%") { - return true - } - - return false -} - -func resourceDataflowJob() *resource_dataflow_job_schema.Resource { - return &resource_dataflow_job_schema.Resource{ - Create: resourceDataflowJobCreate, - Read: resourceDataflowJobRead, - Update: resourceDataflowJobUpdateByReplacement, - Delete: resourceDataflowJobDelete, - Timeouts: &resource_dataflow_job_schema.ResourceTimeout{ - Update: resource_dataflow_job_schema.DefaultTimeout(10 * resource_dataflow_job_time.Minute), - }, - CustomizeDiff: resource_dataflow_job_customdiff.All( - resourceDataflowJobTypeCustomizeDiff, - ), - Schema: map[string]*resource_dataflow_job_schema.Schema{ - "name": { - Type: resource_dataflow_job_schema.TypeString, - Required: true, - - ForceNew: true, - Description: `A unique name for the resource, required by Dataflow.`, - }, - - "template_gcs_path": { - Type: resource_dataflow_job_schema.TypeString, - Required: true, - Description: `The Google Cloud Storage path to the Dataflow job template.`, - }, - - "temp_gcs_location": { - Type: resource_dataflow_job_schema.TypeString, - Required: true, - Description: `A writeable location on Google Cloud Storage for the Dataflow job to dump its temporary data.`, - }, - - "zone": { - Type: resource_dataflow_job_schema.TypeString, - Optional: true, - - ForceNew: true, - Description: `The zone in which the created job should run. 
If it is not provided, the provider zone is used.`, - }, - - "region": { - Type: resource_dataflow_job_schema.TypeString, - Optional: true, - - ForceNew: true, - Description: `The region in which the created job should run.`, - }, - - "max_workers": { - Type: resource_dataflow_job_schema.TypeInt, - Optional: true, - - ForceNew: true, - Description: `The number of workers permitted to work on the job. More workers may improve processing speed at additional cost.`, - }, - - "parameters": { - Type: resource_dataflow_job_schema.TypeMap, - Optional: true, - Description: `Key/Value pairs to be passed to the Dataflow job (as used in the template).`, - }, - - "labels": { - Type: resource_dataflow_job_schema.TypeMap, - Optional: true, - DiffSuppressFunc: resourceDataflowJobLabelDiffSuppress, - Description: `User labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. NOTE: Google-provided Dataflow templates often provide default labels that begin with goog-dataflow-provided. Unless explicitly set in config, these labels will be ignored to prevent diffs on re-apply.`, - }, - - "transform_name_mapping": { - Type: resource_dataflow_job_schema.TypeMap, - Optional: true, - Description: `Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job.`, - }, - - "on_delete": { - Type: resource_dataflow_job_schema.TypeString, - ValidateFunc: resource_dataflow_job_validation.StringInSlice([]string{"cancel", "drain"}, false), - Optional: true, - Default: "drain", - Description: `One of "drain" or "cancel". 
Specifies behavior of deletion during terraform destroy.`, - }, - - "project": { - Type: resource_dataflow_job_schema.TypeString, - Optional: true, - Computed: true, - - ForceNew: true, - Description: `The project in which the resource belongs.`, - }, - - "state": { - Type: resource_dataflow_job_schema.TypeString, - Computed: true, - Description: `The current state of the resource, selected from the JobState enum.`, - }, - "type": { - Type: resource_dataflow_job_schema.TypeString, - Computed: true, - Description: `The type of this job, selected from the JobType enum.`, - }, - "service_account_email": { - Type: resource_dataflow_job_schema.TypeString, - Optional: true, - Description: `The Service Account email used to create the job.`, - }, - - "network": { - Type: resource_dataflow_job_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The network to which VMs will be assigned. If it is not provided, "default" will be used.`, - }, - - "subnetwork": { - Type: resource_dataflow_job_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK".`, - }, - - "machine_type": { - Type: resource_dataflow_job_schema.TypeString, - Optional: true, - Description: `The machine type to use for the job.`, - }, - - "kms_key_name": { - Type: resource_dataflow_job_schema.TypeString, - Optional: true, - Description: `The name for the Cloud KMS key for the job. Key format is: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY`, - }, - - "ip_configuration": { - Type: resource_dataflow_job_schema.TypeString, - Optional: true, - Description: `The configuration for VM IPs. 
Options are "WORKER_IP_PUBLIC" or "WORKER_IP_PRIVATE".`, - ValidateFunc: resource_dataflow_job_validation.StringInSlice([]string{"WORKER_IP_PUBLIC", "WORKER_IP_PRIVATE", ""}, false), - }, - - "additional_experiments": { - Type: resource_dataflow_job_schema.TypeSet, - Optional: true, - Description: `List of experiments that should be used by the job. An example value is ["enable_stackdriver_agent_metrics"].`, - Elem: &resource_dataflow_job_schema.Schema{ - Type: resource_dataflow_job_schema.TypeString, - }, - }, - - "job_id": { - Type: resource_dataflow_job_schema.TypeString, - Computed: true, - Description: `The unique ID of this job.`, - }, - - "enable_streaming_engine": { - Type: resource_dataflow_job_schema.TypeBool, - Optional: true, - Description: `Indicates if the job should use the streaming engine feature.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataflowJobTypeCustomizeDiff(_ resource_dataflow_job_context.Context, d *resource_dataflow_job_schema.ResourceDiff, meta interface{}) error { - - if d.Get("type") == "JOB_TYPE_BATCH" { - resourceSchema := resourceDataflowJob().Schema - for field := range resourceSchema { - if field == "on_delete" { - continue - } - - if field == "labels" { - if err := resourceDataflowJobIterateMapForceNew(field, d); err != nil { - return err - } - } else if d.HasChange(field) { - if err := d.ForceNew(field); err != nil { - return err - } - } - } - } - - return nil -} - -func resourceDataflowJobCreate(d *resource_dataflow_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - params := expandStringMap(d, "parameters") - - env, err := resourceDataflowJobSetupEnv(d, config) - if err != nil { - return err - } - - request := 
resource_dataflow_job_dataflowdataflow.CreateJobFromTemplateRequest{ - JobName: d.Get("name").(string), - GcsPath: d.Get("template_gcs_path").(string), - Parameters: params, - Environment: &env, - } - - job, err := resourceDataflowJobCreateJob(config, project, region, userAgent, &request) - if err != nil { - return err - } - d.SetId(job.Id) - - return resourceDataflowJobRead(d, meta) -} - -func resourceDataflowJobRead(d *resource_dataflow_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - id := d.Id() - - job, err := resourceDataflowJobGetJob(config, project, region, userAgent, id) - if err != nil { - return handleNotFoundError(err, d, resource_dataflow_job_fmt.Sprintf("Dataflow job %s", id)) - } - - if err := d.Set("job_id", job.Id); err != nil { - return resource_dataflow_job_fmt.Errorf("Error setting job_id: %s", err) - } - if err := d.Set("state", job.CurrentState); err != nil { - return resource_dataflow_job_fmt.Errorf("Error setting state: %s", err) - } - if err := d.Set("name", job.Name); err != nil { - return resource_dataflow_job_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("type", job.Type); err != nil { - return resource_dataflow_job_fmt.Errorf("Error setting type: %s", err) - } - if err := d.Set("project", project); err != nil { - return resource_dataflow_job_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("labels", job.Labels); err != nil { - return resource_dataflow_job_fmt.Errorf("Error setting labels: %s", err) - } - if err := d.Set("kms_key_name", job.Environment.ServiceKmsKeyName); err != nil { - return resource_dataflow_job_fmt.Errorf("Error setting kms_key_name: %s", err) - } - - sdkPipelineOptions, err := 
ConvertToMap(job.Environment.SdkPipelineOptions) - if err != nil { - return err - } - optionsMap := sdkPipelineOptions["options"].(map[string]interface{}) - if err := d.Set("template_gcs_path", optionsMap["templateLocation"]); err != nil { - return resource_dataflow_job_fmt.Errorf("Error setting template_gcs_path: %s", err) - } - if err := d.Set("temp_gcs_location", optionsMap["tempLocation"]); err != nil { - return resource_dataflow_job_fmt.Errorf("Error setting temp_gcs_location: %s", err) - } - if err := d.Set("machine_type", optionsMap["machineType"]); err != nil { - return resource_dataflow_job_fmt.Errorf("Error setting machine_type: %s", err) - } - if err := d.Set("network", optionsMap["network"]); err != nil { - return resource_dataflow_job_fmt.Errorf("Error setting network: %s", err) - } - if err := d.Set("service_account_email", optionsMap["serviceAccountEmail"]); err != nil { - return resource_dataflow_job_fmt.Errorf("Error setting service_account_email: %s", err) - } - if err := d.Set("additional_experiments", optionsMap["experiments"]); err != nil { - return resource_dataflow_job_fmt.Errorf("Error setting additional_experiments: %s", err) - } - - if _, ok := dataflowTerminalStatesMap[job.CurrentState]; ok { - resource_dataflow_job_log.Printf("[DEBUG] Removing resource '%s' because it is in state %s.\n", job.Name, job.CurrentState) - d.SetId("") - return nil - } - d.SetId(job.Id) - - return nil -} - -func resourceDataflowJobUpdateByReplacement(d *resource_dataflow_job_schema.ResourceData, meta interface{}) error { - - if resourceDataflowJobIsVirtualUpdate(d, resourceDataflowJob().Schema) { - return nil - } - - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - params := expandStringMap(d, "parameters") - tnamemapping := 
expandStringMap(d, "transform_name_mapping") - - env, err := resourceDataflowJobSetupEnv(d, config) - if err != nil { - return err - } - - request := resource_dataflow_job_dataflowdataflow.LaunchTemplateParameters{ - JobName: d.Get("name").(string), - Parameters: params, - TransformNameMapping: tnamemapping, - Environment: &env, - Update: true, - } - - var response *resource_dataflow_job_dataflowdataflow.LaunchTemplateResponse - err = retryTimeDuration(func() (updateErr error) { - response, updateErr = resourceDataflowJobLaunchTemplate(config, project, region, userAgent, d.Get("template_gcs_path").(string), &request) - return updateErr - }, resource_dataflow_job_time.Minute*resource_dataflow_job_time.Duration(5), isDataflowJobUpdateRetryableError) - if err != nil { - return err - } - - if err := waitForDataflowJobToBeUpdated(d, config, response.Job.Id, userAgent, d.Timeout(resource_dataflow_job_schema.TimeoutUpdate)); err != nil { - return resource_dataflow_job_fmt.Errorf("Error updating job with job ID %q: %v", d.Id(), err) - } - - d.SetId(response.Job.Id) - - return resourceDataflowJobRead(d, meta) -} - -func resourceDataflowJobDelete(d *resource_dataflow_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - id := d.Id() - - requestedState, err := resourceDataflowJobMapRequestedState(d.Get("on_delete").(string)) - if err != nil { - return err - } - - err = resource_dataflow_job_resource.Retry(resource_dataflow_job_time.Minute*resource_dataflow_job_time.Duration(15), func() *resource_dataflow_job_resource.RetryError { - - job := &resource_dataflow_job_dataflowdataflow.Job{ - RequestedState: requestedState, - } - - _, updateErr := resourceDataflowJobUpdateJob(config, project, region, 
userAgent, id, job) - if updateErr != nil { - gerr, isGoogleErr := updateErr.(*resource_dataflow_job_googleapi.Error) - if !isGoogleErr { - - return resource_dataflow_job_resource.NonRetryableError(updateErr) - } - - if resource_dataflow_job_strings.Contains(gerr.Message, "not yet ready for canceling") { - - resource_dataflow_job_time.Sleep(5 * resource_dataflow_job_time.Second) - return resource_dataflow_job_resource.RetryableError(updateErr) - } - - if resource_dataflow_job_strings.Contains(gerr.Message, "Job has terminated") { - - return nil - } - } - - return nil - }) - if err != nil { - return err - } - - _, ok := dataflowTerminalStatesMap[d.Get("state").(string)] - for !ok { - resource_dataflow_job_log.Printf("[DEBUG] Waiting for job with job state %q to terminate...", d.Get("state").(string)) - resource_dataflow_job_time.Sleep(5 * resource_dataflow_job_time.Second) - - err = resourceDataflowJobRead(d, meta) - if err != nil { - return resource_dataflow_job_fmt.Errorf("Error while reading job to see if it was properly terminated: %v", err) - } - _, ok = dataflowTerminalStatesMap[d.Get("state").(string)] - } - - if _, ok := dataflowTerminalStatesMap[d.Get("state").(string)]; ok { - resource_dataflow_job_log.Printf("[DEBUG] Removing dataflow job with final state %q", d.Get("state").(string)) - d.SetId("") - return nil - } - return resource_dataflow_job_fmt.Errorf("Unable to cancel the dataflow job '%s' - final state was %q.", d.Id(), d.Get("state").(string)) -} - -func resourceDataflowJobMapRequestedState(policy string) (string, error) { - switch policy { - case "cancel": - return "JOB_STATE_CANCELLED", nil - case "drain": - return "JOB_STATE_DRAINING", nil - default: - return "", resource_dataflow_job_fmt.Errorf("Invalid `on_delete` policy: %s", policy) - } -} - -func resourceDataflowJobCreateJob(config *Config, project, region, userAgent string, request *resource_dataflow_job_dataflowdataflow.CreateJobFromTemplateRequest) 
(*resource_dataflow_job_dataflowdataflow.Job, error) { - if region == "" { - return config.NewDataflowClient(userAgent).Projects.Templates.Create(project, request).Do() - } - return config.NewDataflowClient(userAgent).Projects.Locations.Templates.Create(project, region, request).Do() -} - -func resourceDataflowJobGetJob(config *Config, project, region, userAgent string, id string) (*resource_dataflow_job_dataflowdataflow.Job, error) { - if region == "" { - return config.NewDataflowClient(userAgent).Projects.Jobs.Get(project, id).View("JOB_VIEW_ALL").Do() - } - return config.NewDataflowClient(userAgent).Projects.Locations.Jobs.Get(project, region, id).View("JOB_VIEW_ALL").Do() -} - -func resourceDataflowJobUpdateJob(config *Config, project, region, userAgent string, id string, job *resource_dataflow_job_dataflowdataflow.Job) (*resource_dataflow_job_dataflowdataflow.Job, error) { - if region == "" { - return config.NewDataflowClient(userAgent).Projects.Jobs.Update(project, id, job).Do() - } - return config.NewDataflowClient(userAgent).Projects.Locations.Jobs.Update(project, region, id, job).Do() -} - -func resourceDataflowJobLaunchTemplate(config *Config, project, region, userAgent string, gcsPath string, request *resource_dataflow_job_dataflowdataflow.LaunchTemplateParameters) (*resource_dataflow_job_dataflowdataflow.LaunchTemplateResponse, error) { - if region == "" { - return config.NewDataflowClient(userAgent).Projects.Templates.Launch(project, request).GcsPath(gcsPath).Do() - } - return config.NewDataflowClient(userAgent).Projects.Locations.Templates.Launch(project, region, request).GcsPath(gcsPath).Do() -} - -func resourceDataflowJobSetupEnv(d *resource_dataflow_job_schema.ResourceData, config *Config) (resource_dataflow_job_dataflowdataflow.RuntimeEnvironment, error) { - zone, _ := getZone(d, config) - - labels := expandStringMap(d, "labels") - - additionalExperiments := convertStringSet(d.Get("additional_experiments").(*resource_dataflow_job_schema.Set)) - - 
env := resource_dataflow_job_dataflowdataflow.RuntimeEnvironment{ - MaxWorkers: int64(d.Get("max_workers").(int)), - Network: d.Get("network").(string), - ServiceAccountEmail: d.Get("service_account_email").(string), - Subnetwork: d.Get("subnetwork").(string), - TempLocation: d.Get("temp_gcs_location").(string), - MachineType: d.Get("machine_type").(string), - KmsKeyName: d.Get("kms_key_name").(string), - IpConfiguration: d.Get("ip_configuration").(string), - EnableStreamingEngine: d.Get("enable_streaming_engine").(bool), - AdditionalUserLabels: labels, - Zone: zone, - AdditionalExperiments: additionalExperiments, - } - return env, nil -} - -func resourceDataflowJobIterateMapForceNew(mapKey string, d *resource_dataflow_job_schema.ResourceDiff) error { - obj := d.Get(mapKey).(map[string]interface{}) - for k := range obj { - entrySchemaKey := mapKey + "." + k - if d.HasChange(entrySchemaKey) { - - if err := d.ForceNew(mapKey); err != nil { - return err - } - break - } - } - return nil -} - -func resourceDataflowJobIterateMapHasChange(mapKey string, d *resource_dataflow_job_schema.ResourceData) bool { - obj := d.Get(mapKey).(map[string]interface{}) - for k := range obj { - entrySchemaKey := mapKey + "." 
+ k - if d.HasChange(entrySchemaKey) { - return true - } - } - return false -} - -func resourceDataflowJobIsVirtualUpdate(d *resource_dataflow_job_schema.ResourceData, resourceSchema map[string]*resource_dataflow_job_schema.Schema) bool { - - if d.HasChange("on_delete") { - for field := range resourceSchema { - if field == "on_delete" { - continue - } - - if (field == "labels" && resourceDataflowJobIterateMapHasChange(field, d)) || - (field != "labels" && d.HasChange(field)) { - return false - } - } - - return true - } - - return false -} - -func waitForDataflowJobToBeUpdated(d *resource_dataflow_job_schema.ResourceData, config *Config, replacementJobID, userAgent string, timeout resource_dataflow_job_time.Duration) error { - return resource_dataflow_job_resource.Retry(timeout, func() *resource_dataflow_job_resource.RetryError { - project, err := getProject(d, config) - if err != nil { - return resource_dataflow_job_resource.NonRetryableError(err) - } - - region, err := getRegion(d, config) - if err != nil { - return resource_dataflow_job_resource.NonRetryableError(err) - } - - replacementJob, err := resourceDataflowJobGetJob(config, project, region, userAgent, replacementJobID) - if err != nil { - if isRetryableError(err) { - return resource_dataflow_job_resource.RetryableError(err) - } - return resource_dataflow_job_resource.NonRetryableError(err) - } - - state := replacementJob.CurrentState - switch state { - case "", "JOB_STATE_PENDING": - return resource_dataflow_job_resource.RetryableError(resource_dataflow_job_fmt.Errorf("the replacement job with ID %q has pending state %q.", replacementJobID, state)) - case "JOB_STATE_FAILED": - return resource_dataflow_job_resource.NonRetryableError(resource_dataflow_job_fmt.Errorf("the replacement job with ID %q failed with state %q.", replacementJobID, state)) - default: - resource_dataflow_job_log.Printf("[DEBUG] the replacement job with ID %q has state %q.", replacementJobID, state) - return nil - } - }) -} - -func 
resourceDataprocAutoscalingPolicy() *resource_dataproc_autoscaling_policy_schema.Resource { - return &resource_dataproc_autoscaling_policy_schema.Resource{ - Create: resourceDataprocAutoscalingPolicyCreate, - Read: resourceDataprocAutoscalingPolicyRead, - Update: resourceDataprocAutoscalingPolicyUpdate, - Delete: resourceDataprocAutoscalingPolicyDelete, - - Importer: &resource_dataproc_autoscaling_policy_schema.ResourceImporter{ - State: resourceDataprocAutoscalingPolicyImport, - }, - - Timeouts: &resource_dataproc_autoscaling_policy_schema.ResourceTimeout{ - Create: resource_dataproc_autoscaling_policy_schema.DefaultTimeout(4 * resource_dataproc_autoscaling_policy_time.Minute), - Update: resource_dataproc_autoscaling_policy_schema.DefaultTimeout(4 * resource_dataproc_autoscaling_policy_time.Minute), - Delete: resource_dataproc_autoscaling_policy_schema.DefaultTimeout(4 * resource_dataproc_autoscaling_policy_time.Minute), - }, - - Schema: map[string]*resource_dataproc_autoscaling_policy_schema.Schema{ - "policy_id": { - Type: resource_dataproc_autoscaling_policy_schema.TypeString, - Required: true, - Description: `The policy id. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), -and hyphens (-). Cannot begin or end with underscore or hyphen. 
Must consist of between -3 and 50 characters.`, - }, - "basic_algorithm": { - Type: resource_dataproc_autoscaling_policy_schema.TypeList, - Optional: true, - Description: `Basic algorithm for autoscaling.`, - MaxItems: 1, - Elem: &resource_dataproc_autoscaling_policy_schema.Resource{ - Schema: map[string]*resource_dataproc_autoscaling_policy_schema.Schema{ - "yarn_config": { - Type: resource_dataproc_autoscaling_policy_schema.TypeList, - Required: true, - Description: `YARN autoscaling configuration.`, - MaxItems: 1, - Elem: &resource_dataproc_autoscaling_policy_schema.Resource{ - Schema: map[string]*resource_dataproc_autoscaling_policy_schema.Schema{ - "graceful_decommission_timeout": { - Type: resource_dataproc_autoscaling_policy_schema.TypeString, - Required: true, - Description: `Timeout for YARN graceful decommissioning of Node Managers. Specifies the -duration to wait for jobs to complete before forcefully removing workers -(and potentially interrupting jobs). Only applicable to downscaling operations. - -Bounds: [0s, 1d].`, - }, - "scale_down_factor": { - Type: resource_dataproc_autoscaling_policy_schema.TypeFloat, - Required: true, - Description: `Fraction of average pending memory in the last cooldown period for which to -remove workers. A scale-down factor of 1 will result in scaling down so that there -is no available memory remaining after the update (more aggressive scaling). -A scale-down factor of 0 disables removing workers, which can be beneficial for -autoscaling a single job. - -Bounds: [0.0, 1.0].`, - }, - "scale_up_factor": { - Type: resource_dataproc_autoscaling_policy_schema.TypeFloat, - Required: true, - Description: `Fraction of average pending memory in the last cooldown period for which to -add workers. A scale-up factor of 1.0 will result in scaling up so that there -is no pending memory remaining after the update (more aggressive scaling). 
-A scale-up factor closer to 0 will result in a smaller magnitude of scaling up -(less aggressive scaling). - -Bounds: [0.0, 1.0].`, - }, - "scale_down_min_worker_fraction": { - Type: resource_dataproc_autoscaling_policy_schema.TypeFloat, - Optional: true, - Description: `Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. -For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must -recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 -means the autoscaler will scale down on any recommended change. - -Bounds: [0.0, 1.0]. Default: 0.0.`, - Default: 0.0, - }, - "scale_up_min_worker_fraction": { - Type: resource_dataproc_autoscaling_policy_schema.TypeFloat, - Optional: true, - Description: `Minimum scale-up threshold as a fraction of total cluster size before scaling -occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler -must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of -0 means the autoscaler will scale up on any recommended change. - -Bounds: [0.0, 1.0]. Default: 0.0.`, - Default: 0.0, - }, - }, - }, - }, - "cooldown_period": { - Type: resource_dataproc_autoscaling_policy_schema.TypeString, - Optional: true, - Description: `Duration between scaling events. A scaling period starts after the -update operation from the previous event has completed. - -Bounds: [2m, 1d]. Default: 2m.`, - Default: "120s", - }, - }, - }, - }, - "location": { - Type: resource_dataproc_autoscaling_policy_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The location where the autoscaling policy should reside. 
-The default value is 'global'.`, - Default: "global", - }, - "secondary_worker_config": { - Type: resource_dataproc_autoscaling_policy_schema.TypeList, - Optional: true, - Description: `Describes how the autoscaler will operate for secondary workers.`, - MaxItems: 1, - Elem: &resource_dataproc_autoscaling_policy_schema.Resource{ - Schema: map[string]*resource_dataproc_autoscaling_policy_schema.Schema{ - "max_instances": { - Type: resource_dataproc_autoscaling_policy_schema.TypeInt, - Optional: true, - Description: `Maximum number of instances for this group. Note that by default, clusters will not use -secondary workers. Required for secondary workers if the minimum secondary instances is set. -Bounds: [minInstances, ). Defaults to 0.`, - Default: 0, - AtLeastOneOf: []string{"secondary_worker_config.0.min_instances", "secondary_worker_config.0.max_instances", "secondary_worker_config.0.weight"}, - }, - "min_instances": { - Type: resource_dataproc_autoscaling_policy_schema.TypeInt, - Optional: true, - Description: `Minimum number of instances for this group. Bounds: [0, maxInstances]. Defaults to 0.`, - Default: 0, - AtLeastOneOf: []string{"secondary_worker_config.0.min_instances", "secondary_worker_config.0.max_instances", "secondary_worker_config.0.weight"}, - }, - "weight": { - Type: resource_dataproc_autoscaling_policy_schema.TypeInt, - Optional: true, - Description: `Weight for the instance group, which is used to determine the fraction of total workers -in the cluster from this instance group. For example, if primary workers have weight 2, -and secondary workers have weight 1, the cluster will have approximately 2 primary workers -for each secondary worker. - -The cluster may not reach the specified balance if constrained by min/max bounds or other -autoscaling settings. For example, if maxInstances for secondary workers is 0, then only -primary workers will be added. The cluster can also be out of balance when created. 
- -If weight is not set on any instance group, the cluster will default to equal weight for -all groups: the cluster will attempt to maintain an equal number of workers in each group -within the configured size bounds for each group. If weight is set for one group only, -the cluster will default to zero weight on the unset group. For example if weight is set -only on primary workers, the cluster will use primary workers only and no secondary workers.`, - Default: 1, - AtLeastOneOf: []string{"secondary_worker_config.0.min_instances", "secondary_worker_config.0.max_instances", "secondary_worker_config.0.weight"}, - }, - }, - }, - }, - "worker_config": { - Type: resource_dataproc_autoscaling_policy_schema.TypeList, - Optional: true, - Description: `Describes how the autoscaler will operate for primary workers.`, - MaxItems: 1, - Elem: &resource_dataproc_autoscaling_policy_schema.Resource{ - Schema: map[string]*resource_dataproc_autoscaling_policy_schema.Schema{ - "max_instances": { - Type: resource_dataproc_autoscaling_policy_schema.TypeInt, - Required: true, - Description: `Maximum number of instances for this group.`, - }, - "min_instances": { - Type: resource_dataproc_autoscaling_policy_schema.TypeInt, - Optional: true, - Description: `Minimum number of instances for this group. Bounds: [2, maxInstances]. Defaults to 2.`, - Default: 2, - }, - "weight": { - Type: resource_dataproc_autoscaling_policy_schema.TypeInt, - Optional: true, - Description: `Weight for the instance group, which is used to determine the fraction of total workers -in the cluster from this instance group. For example, if primary workers have weight 2, -and secondary workers have weight 1, the cluster will have approximately 2 primary workers -for each secondary worker. - -The cluster may not reach the specified balance if constrained by min/max bounds or other -autoscaling settings. For example, if maxInstances for secondary workers is 0, then only -primary workers will be added. 
The cluster can also be out of balance when created. - -If weight is not set on any instance group, the cluster will default to equal weight for -all groups: the cluster will attempt to maintain an equal number of workers in each group -within the configured size bounds for each group. If weight is set for one group only, -the cluster will default to zero weight on the unset group. For example if weight is set -only on primary workers, the cluster will use primary workers only and no secondary workers.`, - Default: 1, - }, - }, - }, - }, - "name": { - Type: resource_dataproc_autoscaling_policy_schema.TypeString, - Computed: true, - Description: `The "resource name" of the autoscaling policy.`, - }, - "project": { - Type: resource_dataproc_autoscaling_policy_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDataprocAutoscalingPolicyCreate(d *resource_dataproc_autoscaling_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - idProp, err := expandDataprocAutoscalingPolicyPolicyId(d.Get("policy_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("policy_id"); !isEmptyValue(resource_dataproc_autoscaling_policy_reflect.ValueOf(idProp)) && (ok || !resource_dataproc_autoscaling_policy_reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - workerConfigProp, err := expandDataprocAutoscalingPolicyWorkerConfig(d.Get("worker_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("worker_config"); !isEmptyValue(resource_dataproc_autoscaling_policy_reflect.ValueOf(workerConfigProp)) && (ok || !resource_dataproc_autoscaling_policy_reflect.DeepEqual(v, workerConfigProp)) { - obj["workerConfig"] = workerConfigProp - } - secondaryWorkerConfigProp, err := 
expandDataprocAutoscalingPolicySecondaryWorkerConfig(d.Get("secondary_worker_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("secondary_worker_config"); !isEmptyValue(resource_dataproc_autoscaling_policy_reflect.ValueOf(secondaryWorkerConfigProp)) && (ok || !resource_dataproc_autoscaling_policy_reflect.DeepEqual(v, secondaryWorkerConfigProp)) { - obj["secondaryWorkerConfig"] = secondaryWorkerConfigProp - } - basicAlgorithmProp, err := expandDataprocAutoscalingPolicyBasicAlgorithm(d.Get("basic_algorithm"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("basic_algorithm"); !isEmptyValue(resource_dataproc_autoscaling_policy_reflect.ValueOf(basicAlgorithmProp)) && (ok || !resource_dataproc_autoscaling_policy_reflect.DeepEqual(v, basicAlgorithmProp)) { - obj["basicAlgorithm"] = basicAlgorithmProp - } - - url, err := replaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies") - if err != nil { - return err - } - - resource_dataproc_autoscaling_policy_log.Printf("[DEBUG] Creating new AutoscalingPolicy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dataproc_autoscaling_policy_fmt.Errorf("Error fetching project for AutoscalingPolicy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_dataproc_autoscaling_policy_schema.TimeoutCreate)) - if err != nil { - return resource_dataproc_autoscaling_policy_fmt.Errorf("Error creating AutoscalingPolicy: %s", err) - } - if err := d.Set("name", flattenDataprocAutoscalingPolicyName(res["name"], d, config)); err != nil { - return resource_dataproc_autoscaling_policy_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, 
"projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") - if err != nil { - return resource_dataproc_autoscaling_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_dataproc_autoscaling_policy_log.Printf("[DEBUG] Finished creating AutoscalingPolicy %q: %#v", d.Id(), res) - - return resourceDataprocAutoscalingPolicyRead(d, meta) -} - -func resourceDataprocAutoscalingPolicyRead(d *resource_dataproc_autoscaling_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dataproc_autoscaling_policy_fmt.Errorf("Error fetching project for AutoscalingPolicy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_dataproc_autoscaling_policy_fmt.Sprintf("DataprocAutoscalingPolicy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_dataproc_autoscaling_policy_fmt.Errorf("Error reading AutoscalingPolicy: %s", err) - } - - if err := d.Set("policy_id", flattenDataprocAutoscalingPolicyPolicyId(res["id"], d, config)); err != nil { - return resource_dataproc_autoscaling_policy_fmt.Errorf("Error reading AutoscalingPolicy: %s", err) - } - if err := d.Set("name", flattenDataprocAutoscalingPolicyName(res["name"], d, config)); err != nil { - return resource_dataproc_autoscaling_policy_fmt.Errorf("Error reading AutoscalingPolicy: %s", err) - } - if err := d.Set("worker_config", 
flattenDataprocAutoscalingPolicyWorkerConfig(res["workerConfig"], d, config)); err != nil { - return resource_dataproc_autoscaling_policy_fmt.Errorf("Error reading AutoscalingPolicy: %s", err) - } - if err := d.Set("secondary_worker_config", flattenDataprocAutoscalingPolicySecondaryWorkerConfig(res["secondaryWorkerConfig"], d, config)); err != nil { - return resource_dataproc_autoscaling_policy_fmt.Errorf("Error reading AutoscalingPolicy: %s", err) - } - if err := d.Set("basic_algorithm", flattenDataprocAutoscalingPolicyBasicAlgorithm(res["basicAlgorithm"], d, config)); err != nil { - return resource_dataproc_autoscaling_policy_fmt.Errorf("Error reading AutoscalingPolicy: %s", err) - } - - return nil -} - -func resourceDataprocAutoscalingPolicyUpdate(d *resource_dataproc_autoscaling_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dataproc_autoscaling_policy_fmt.Errorf("Error fetching project for AutoscalingPolicy: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - idProp, err := expandDataprocAutoscalingPolicyPolicyId(d.Get("policy_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("policy_id"); !isEmptyValue(resource_dataproc_autoscaling_policy_reflect.ValueOf(v)) && (ok || !resource_dataproc_autoscaling_policy_reflect.DeepEqual(v, idProp)) { - obj["id"] = idProp - } - workerConfigProp, err := expandDataprocAutoscalingPolicyWorkerConfig(d.Get("worker_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("worker_config"); !isEmptyValue(resource_dataproc_autoscaling_policy_reflect.ValueOf(v)) && (ok || !resource_dataproc_autoscaling_policy_reflect.DeepEqual(v, workerConfigProp)) { - obj["workerConfig"] = workerConfigProp - } - 
secondaryWorkerConfigProp, err := expandDataprocAutoscalingPolicySecondaryWorkerConfig(d.Get("secondary_worker_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("secondary_worker_config"); !isEmptyValue(resource_dataproc_autoscaling_policy_reflect.ValueOf(v)) && (ok || !resource_dataproc_autoscaling_policy_reflect.DeepEqual(v, secondaryWorkerConfigProp)) { - obj["secondaryWorkerConfig"] = secondaryWorkerConfigProp - } - basicAlgorithmProp, err := expandDataprocAutoscalingPolicyBasicAlgorithm(d.Get("basic_algorithm"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("basic_algorithm"); !isEmptyValue(resource_dataproc_autoscaling_policy_reflect.ValueOf(v)) && (ok || !resource_dataproc_autoscaling_policy_reflect.DeepEqual(v, basicAlgorithmProp)) { - obj["basicAlgorithm"] = basicAlgorithmProp - } - - url, err := replaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") - if err != nil { - return err - } - - resource_dataproc_autoscaling_policy_log.Printf("[DEBUG] Updating AutoscalingPolicy %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_dataproc_autoscaling_policy_schema.TimeoutUpdate)) - - if err != nil { - return resource_dataproc_autoscaling_policy_fmt.Errorf("Error updating AutoscalingPolicy %q: %s", d.Id(), err) - } else { - resource_dataproc_autoscaling_policy_log.Printf("[DEBUG] Finished updating AutoscalingPolicy %q: %#v", d.Id(), res) - } - - return resourceDataprocAutoscalingPolicyRead(d, meta) -} - -func resourceDataprocAutoscalingPolicyDelete(d *resource_dataproc_autoscaling_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - 
billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dataproc_autoscaling_policy_fmt.Errorf("Error fetching project for AutoscalingPolicy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DataprocBasePath}}projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_dataproc_autoscaling_policy_log.Printf("[DEBUG] Deleting AutoscalingPolicy %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_dataproc_autoscaling_policy_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "AutoscalingPolicy") - } - - resource_dataproc_autoscaling_policy_log.Printf("[DEBUG] Finished deleting AutoscalingPolicy %q: %#v", d.Id(), res) - return nil -} - -func resourceDataprocAutoscalingPolicyImport(d *resource_dataproc_autoscaling_policy_schema.ResourceData, meta interface{}) ([]*resource_dataproc_autoscaling_policy_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/autoscalingPolicies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/autoscalingPolicies/{{policy_id}}") - if err != nil { - return nil, resource_dataproc_autoscaling_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_dataproc_autoscaling_policy_schema.ResourceData{d}, nil -} - -func flattenDataprocAutoscalingPolicyPolicyId(v interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocAutoscalingPolicyName(v 
interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocAutoscalingPolicyWorkerConfig(v interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["min_instances"] = - flattenDataprocAutoscalingPolicyWorkerConfigMinInstances(original["minInstances"], d, config) - transformed["max_instances"] = - flattenDataprocAutoscalingPolicyWorkerConfigMaxInstances(original["maxInstances"], d, config) - transformed["weight"] = - flattenDataprocAutoscalingPolicyWorkerConfigWeight(original["weight"], d, config) - return []interface{}{transformed} -} - -func flattenDataprocAutoscalingPolicyWorkerConfigMinInstances(v interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_dataproc_autoscaling_policy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataprocAutoscalingPolicyWorkerConfigMaxInstances(v interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_dataproc_autoscaling_policy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataprocAutoscalingPolicyWorkerConfigWeight(v interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := 
resource_dataproc_autoscaling_policy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataprocAutoscalingPolicySecondaryWorkerConfig(v interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["min_instances"] = - flattenDataprocAutoscalingPolicySecondaryWorkerConfigMinInstances(original["minInstances"], d, config) - transformed["max_instances"] = - flattenDataprocAutoscalingPolicySecondaryWorkerConfigMaxInstances(original["maxInstances"], d, config) - transformed["weight"] = - flattenDataprocAutoscalingPolicySecondaryWorkerConfigWeight(original["weight"], d, config) - return []interface{}{transformed} -} - -func flattenDataprocAutoscalingPolicySecondaryWorkerConfigMinInstances(v interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_dataproc_autoscaling_policy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataprocAutoscalingPolicySecondaryWorkerConfigMaxInstances(v interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_dataproc_autoscaling_policy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataprocAutoscalingPolicySecondaryWorkerConfigWeight(v interface{}, d 
*resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_dataproc_autoscaling_policy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDataprocAutoscalingPolicyBasicAlgorithm(v interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cooldown_period"] = - flattenDataprocAutoscalingPolicyBasicAlgorithmCooldownPeriod(original["cooldownPeriod"], d, config) - transformed["yarn_config"] = - flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfig(original["yarnConfig"], d, config) - return []interface{}{transformed} -} - -func flattenDataprocAutoscalingPolicyBasicAlgorithmCooldownPeriod(v interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfig(v interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["graceful_decommission_timeout"] = - flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigGracefulDecommissionTimeout(original["gracefulDecommissionTimeout"], d, config) - transformed["scale_up_factor"] = - flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpFactor(original["scaleUpFactor"], d, config) - transformed["scale_down_factor"] = - flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownFactor(original["scaleDownFactor"], d, config) - 
transformed["scale_up_min_worker_fraction"] = - flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpMinWorkerFraction(original["scaleUpMinWorkerFraction"], d, config) - transformed["scale_down_min_worker_fraction"] = - flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownMinWorkerFraction(original["scaleDownMinWorkerFraction"], d, config) - return []interface{}{transformed} -} - -func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigGracefulDecommissionTimeout(v interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpFactor(v interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownFactor(v interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpMinWorkerFraction(v interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownMinWorkerFraction(v interface{}, d *resource_dataproc_autoscaling_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDataprocAutoscalingPolicyPolicyId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocAutoscalingPolicyWorkerConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMinInstances, err := 
expandDataprocAutoscalingPolicyWorkerConfigMinInstances(original["min_instances"], d, config) - if err != nil { - return nil, err - } else if val := resource_dataproc_autoscaling_policy_reflect.ValueOf(transformedMinInstances); val.IsValid() && !isEmptyValue(val) { - transformed["minInstances"] = transformedMinInstances - } - - transformedMaxInstances, err := expandDataprocAutoscalingPolicyWorkerConfigMaxInstances(original["max_instances"], d, config) - if err != nil { - return nil, err - } else if val := resource_dataproc_autoscaling_policy_reflect.ValueOf(transformedMaxInstances); val.IsValid() && !isEmptyValue(val) { - transformed["maxInstances"] = transformedMaxInstances - } - - transformedWeight, err := expandDataprocAutoscalingPolicyWorkerConfigWeight(original["weight"], d, config) - if err != nil { - return nil, err - } else if val := resource_dataproc_autoscaling_policy_reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { - transformed["weight"] = transformedWeight - } - - return transformed, nil -} - -func expandDataprocAutoscalingPolicyWorkerConfigMinInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocAutoscalingPolicyWorkerConfigMaxInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocAutoscalingPolicyWorkerConfigWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocAutoscalingPolicySecondaryWorkerConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMinInstances, err := expandDataprocAutoscalingPolicySecondaryWorkerConfigMinInstances(original["min_instances"], d, config) - if err != 
nil { - return nil, err - } else if val := resource_dataproc_autoscaling_policy_reflect.ValueOf(transformedMinInstances); val.IsValid() && !isEmptyValue(val) { - transformed["minInstances"] = transformedMinInstances - } - - transformedMaxInstances, err := expandDataprocAutoscalingPolicySecondaryWorkerConfigMaxInstances(original["max_instances"], d, config) - if err != nil { - return nil, err - } else if val := resource_dataproc_autoscaling_policy_reflect.ValueOf(transformedMaxInstances); val.IsValid() && !isEmptyValue(val) { - transformed["maxInstances"] = transformedMaxInstances - } - - transformedWeight, err := expandDataprocAutoscalingPolicySecondaryWorkerConfigWeight(original["weight"], d, config) - if err != nil { - return nil, err - } else if val := resource_dataproc_autoscaling_policy_reflect.ValueOf(transformedWeight); val.IsValid() && !isEmptyValue(val) { - transformed["weight"] = transformedWeight - } - - return transformed, nil -} - -func expandDataprocAutoscalingPolicySecondaryWorkerConfigMinInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocAutoscalingPolicySecondaryWorkerConfigMaxInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocAutoscalingPolicySecondaryWorkerConfigWeight(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocAutoscalingPolicyBasicAlgorithm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCooldownPeriod, err := expandDataprocAutoscalingPolicyBasicAlgorithmCooldownPeriod(original["cooldown_period"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_dataproc_autoscaling_policy_reflect.ValueOf(transformedCooldownPeriod); val.IsValid() && !isEmptyValue(val) { - transformed["cooldownPeriod"] = transformedCooldownPeriod - } - - transformedYarnConfig, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfig(original["yarn_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_dataproc_autoscaling_policy_reflect.ValueOf(transformedYarnConfig); val.IsValid() && !isEmptyValue(val) { - transformed["yarnConfig"] = transformedYarnConfig - } - - return transformed, nil -} - -func expandDataprocAutoscalingPolicyBasicAlgorithmCooldownPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedGracefulDecommissionTimeout, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigGracefulDecommissionTimeout(original["graceful_decommission_timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_dataproc_autoscaling_policy_reflect.ValueOf(transformedGracefulDecommissionTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["gracefulDecommissionTimeout"] = transformedGracefulDecommissionTimeout - } - - transformedScaleUpFactor, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpFactor(original["scale_up_factor"], d, config) - if err != nil { - return nil, err - } else if val := resource_dataproc_autoscaling_policy_reflect.ValueOf(transformedScaleUpFactor); val.IsValid() && !isEmptyValue(val) { - transformed["scaleUpFactor"] = transformedScaleUpFactor - } - - transformedScaleDownFactor, err := 
expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownFactor(original["scale_down_factor"], d, config) - if err != nil { - return nil, err - } else if val := resource_dataproc_autoscaling_policy_reflect.ValueOf(transformedScaleDownFactor); val.IsValid() && !isEmptyValue(val) { - transformed["scaleDownFactor"] = transformedScaleDownFactor - } - - transformedScaleUpMinWorkerFraction, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpMinWorkerFraction(original["scale_up_min_worker_fraction"], d, config) - if err != nil { - return nil, err - } else if val := resource_dataproc_autoscaling_policy_reflect.ValueOf(transformedScaleUpMinWorkerFraction); val.IsValid() && !isEmptyValue(val) { - transformed["scaleUpMinWorkerFraction"] = transformedScaleUpMinWorkerFraction - } - - transformedScaleDownMinWorkerFraction, err := expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownMinWorkerFraction(original["scale_down_min_worker_fraction"], d, config) - if err != nil { - return nil, err - } else if val := resource_dataproc_autoscaling_policy_reflect.ValueOf(transformedScaleDownMinWorkerFraction); val.IsValid() && !isEmptyValue(val) { - transformed["scaleDownMinWorkerFraction"] = transformedScaleDownMinWorkerFraction - } - - return transformed, nil -} - -func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigGracefulDecommissionTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpFactor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownFactor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleUpMinWorkerFraction(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} - -func expandDataprocAutoscalingPolicyBasicAlgorithmYarnConfigScaleDownMinWorkerFraction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -var ( - resolveDataprocImageVersion = resource_dataproc_cluster_regexp.MustCompile(`(?P[^\s.-]+)\.(?P[^\s.-]+)(?:\.(?P[^\s.-]+))?(?:\-(?P[^\s.-]+))?`) - - gceClusterConfigKeys = []string{ - "cluster_config.0.gce_cluster_config.0.zone", - "cluster_config.0.gce_cluster_config.0.network", - "cluster_config.0.gce_cluster_config.0.subnetwork", - "cluster_config.0.gce_cluster_config.0.tags", - "cluster_config.0.gce_cluster_config.0.service_account", - "cluster_config.0.gce_cluster_config.0.service_account_scopes", - "cluster_config.0.gce_cluster_config.0.internal_ip_only", - "cluster_config.0.gce_cluster_config.0.shielded_instance_config", - "cluster_config.0.gce_cluster_config.0.metadata", - } - - schieldedInstanceConfigKeys = []string{ - "cluster_config.0.gce_cluster_config.0.shielded_instance_config.0.enable_secure_boot", - "cluster_config.0.gce_cluster_config.0.shielded_instance_config.0.enable_vtpm", - "cluster_config.0.gce_cluster_config.0.shielded_instance_config.0.enable_integrity_monitoring", - } - - preemptibleWorkerDiskConfigKeys = []string{ - "cluster_config.0.preemptible_worker_config.0.disk_config.0.num_local_ssds", - "cluster_config.0.preemptible_worker_config.0.disk_config.0.boot_disk_size_gb", - "cluster_config.0.preemptible_worker_config.0.disk_config.0.boot_disk_type", - } - - clusterSoftwareConfigKeys = []string{ - "cluster_config.0.software_config.0.image_version", - "cluster_config.0.software_config.0.override_properties", - "cluster_config.0.software_config.0.optional_components", - } - - clusterConfigKeys = []string{ - "cluster_config.0.staging_bucket", - "cluster_config.0.temp_bucket", - "cluster_config.0.gce_cluster_config", - "cluster_config.0.master_config", - "cluster_config.0.worker_config", - 
"cluster_config.0.preemptible_worker_config", - "cluster_config.0.security_config", - "cluster_config.0.software_config", - "cluster_config.0.initialization_action", - "cluster_config.0.encryption_config", - "cluster_config.0.autoscaling_config", - } -) - -func resourceDataprocCluster() *resource_dataproc_cluster_schema.Resource { - return &resource_dataproc_cluster_schema.Resource{ - Create: resourceDataprocClusterCreate, - Read: resourceDataprocClusterRead, - Update: resourceDataprocClusterUpdate, - Delete: resourceDataprocClusterDelete, - - Timeouts: &resource_dataproc_cluster_schema.ResourceTimeout{ - Create: resource_dataproc_cluster_schema.DefaultTimeout(20 * resource_dataproc_cluster_time.Minute), - Update: resource_dataproc_cluster_schema.DefaultTimeout(20 * resource_dataproc_cluster_time.Minute), - Delete: resource_dataproc_cluster_schema.DefaultTimeout(20 * resource_dataproc_cluster_time.Minute), - }, - - Schema: map[string]*resource_dataproc_cluster_schema.Schema{ - "name": { - Type: resource_dataproc_cluster_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the cluster, unique within the project and zone.`, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if len(value) > 55 { - errors = append(errors, resource_dataproc_cluster_fmt.Errorf( - "%q cannot be longer than 55 characters", k)) - } - if !resource_dataproc_cluster_regexp.MustCompile("^[a-z0-9-]+$").MatchString(value) { - errors = append(errors, resource_dataproc_cluster_fmt.Errorf( - "%q can only contain lowercase letters, numbers and hyphens", k)) - } - if !resource_dataproc_cluster_regexp.MustCompile("^[a-z]").MatchString(value) { - errors = append(errors, resource_dataproc_cluster_fmt.Errorf( - "%q must start with a letter", k)) - } - if !resource_dataproc_cluster_regexp.MustCompile("[a-z0-9]$").MatchString(value) { - errors = append(errors, resource_dataproc_cluster_fmt.Errorf( - "%q must end with a number 
or a letter", k)) - } - return - }, - }, - - "project": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the cluster will exist. If it is not provided, the provider project is used.`, - }, - - "region": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Default: "global", - ForceNew: true, - Description: `The region in which the cluster and associated nodes will be created in. Defaults to global.`, - }, - - "graceful_decommission_timeout": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Default: "0s", - Description: `The timeout duration which allows graceful decomissioning when you change the number of worker nodes directly through a terraform apply`, - }, - - "labels": { - Type: resource_dataproc_cluster_schema.TypeMap, - Optional: true, - Elem: &resource_dataproc_cluster_schema.Schema{Type: resource_dataproc_cluster_schema.TypeString}, - - Computed: true, - Description: `The list of labels (key/value pairs) to be applied to instances in the cluster. GCP generates some itself including goog-dataproc-cluster-name which is the name of the cluster.`, - }, - - "cluster_config": { - Type: resource_dataproc_cluster_schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: `Allows you to configure various aspects of the cluster.`, - Elem: &resource_dataproc_cluster_schema.Resource{ - Schema: map[string]*resource_dataproc_cluster_schema.Schema{ - - "staging_bucket": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - AtLeastOneOf: clusterConfigKeys, - ForceNew: true, - Description: `The Cloud Storage staging bucket used to stage files, such as Hadoop jars, between client machines and the cluster. Note: If you don't explicitly specify a staging_bucket then GCP will auto create / assign one for you. 
However, you are not guaranteed an auto generated bucket which is solely dedicated to your cluster; it may be shared with other clusters in the same region/zone also choosing to use the auto generation option.`, - }, - - "bucket": { - Type: resource_dataproc_cluster_schema.TypeString, - Computed: true, - Description: ` The name of the cloud storage bucket ultimately used to house the staging data for the cluster. If staging_bucket is specified, it will contain this value, otherwise it will be the auto generated name.`, - }, - - "temp_bucket": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Computed: true, - AtLeastOneOf: clusterConfigKeys, - ForceNew: true, - Description: `The Cloud Storage temp bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. Note: If you don't explicitly specify a temp_bucket then GCP will auto create / assign one for you.`, - }, - - "gce_cluster_config": { - Type: resource_dataproc_cluster_schema.TypeList, - Optional: true, - AtLeastOneOf: clusterConfigKeys, - Computed: true, - MaxItems: 1, - Description: `Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster.`, - Elem: &resource_dataproc_cluster_schema.Resource{ - Schema: map[string]*resource_dataproc_cluster_schema.Schema{ - - "zone": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Computed: true, - AtLeastOneOf: gceClusterConfigKeys, - ForceNew: true, - Description: `The GCP zone where your data is stored and used (i.e. where the master and the worker nodes will be created in). If region is set to 'global' (default) then zone is mandatory, otherwise GCP is able to make use of Auto Zone Placement to determine this automatically for you. 
Note: This setting additionally determines and restricts which computing resources are available for use with other configs such as cluster_config.master_config.machine_type and cluster_config.worker_config.machine_type.`, - }, - - "network": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Computed: true, - AtLeastOneOf: gceClusterConfigKeys, - ForceNew: true, - ConflictsWith: []string{"cluster_config.0.gce_cluster_config.0.subnetwork"}, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name or self_link of the Google Compute Engine network to the cluster will be part of. Conflicts with subnetwork. If neither is specified, this defaults to the "default" network.`, - }, - - "subnetwork": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - AtLeastOneOf: gceClusterConfigKeys, - ForceNew: true, - ConflictsWith: []string{"cluster_config.0.gce_cluster_config.0.network"}, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name or self_link of the Google Compute Engine subnetwork the cluster will be part of. Conflicts with network.`, - }, - - "tags": { - Type: resource_dataproc_cluster_schema.TypeSet, - Optional: true, - AtLeastOneOf: gceClusterConfigKeys, - ForceNew: true, - Elem: &resource_dataproc_cluster_schema.Schema{Type: resource_dataproc_cluster_schema.TypeString}, - Description: `The list of instance tags applied to instances in the cluster. Tags are used to identify valid sources or targets for network firewalls.`, - }, - - "service_account": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - AtLeastOneOf: gceClusterConfigKeys, - ForceNew: true, - Description: `The service account to be used by the Node VMs. 
If not specified, the "default" service account is used.`, - }, - - "service_account_scopes": { - Type: resource_dataproc_cluster_schema.TypeSet, - Optional: true, - Computed: true, - AtLeastOneOf: gceClusterConfigKeys, - ForceNew: true, - Description: `The set of Google API scopes to be made available on all of the node VMs under the service_account specified. These can be either FQDNs, or scope aliases.`, - Elem: &resource_dataproc_cluster_schema.Schema{ - Type: resource_dataproc_cluster_schema.TypeString, - StateFunc: func(v interface{}) string { - return canonicalizeServiceScope(v.(string)) - }, - }, - Set: stringScopeHashcode, - }, - - "internal_ip_only": { - Type: resource_dataproc_cluster_schema.TypeBool, - Optional: true, - AtLeastOneOf: gceClusterConfigKeys, - ForceNew: true, - Default: false, - Description: `By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. If set to true, all instances in the cluster will only have internal IP addresses. 
Note: Private Google Access (also known as privateIpGoogleAccess) must be enabled on the subnetwork that the cluster will be launched in.`, - }, - - "metadata": { - Type: resource_dataproc_cluster_schema.TypeMap, - Optional: true, - AtLeastOneOf: gceClusterConfigKeys, - Elem: &resource_dataproc_cluster_schema.Schema{Type: resource_dataproc_cluster_schema.TypeString}, - ForceNew: true, - Description: `A map of the Compute Engine metadata entries to add to all instances`, - }, - - "shielded_instance_config": { - Type: resource_dataproc_cluster_schema.TypeList, - Optional: true, - AtLeastOneOf: gceClusterConfigKeys, - Computed: true, - MaxItems: 1, - Description: `Shielded Instance Config for clusters using Compute Engine Shielded VMs.`, - Elem: &resource_dataproc_cluster_schema.Resource{ - Schema: map[string]*resource_dataproc_cluster_schema.Schema{ - "enable_secure_boot": { - Type: resource_dataproc_cluster_schema.TypeBool, - Optional: true, - Default: false, - AtLeastOneOf: schieldedInstanceConfigKeys, - ForceNew: true, - Description: `Defines whether instances have Secure Boot enabled.`, - }, - "enable_vtpm": { - Type: resource_dataproc_cluster_schema.TypeBool, - Optional: true, - Default: false, - AtLeastOneOf: schieldedInstanceConfigKeys, - ForceNew: true, - Description: `Defines whether instances have the vTPM enabled.`, - }, - "enable_integrity_monitoring": { - Type: resource_dataproc_cluster_schema.TypeBool, - Optional: true, - Default: false, - AtLeastOneOf: schieldedInstanceConfigKeys, - ForceNew: true, - Description: `Defines whether instances have integrity monitoring enabled.`, - }, - }, - }, - }, - }, - }, - }, - - "master_config": instanceConfigSchema("master_config"), - "worker_config": instanceConfigSchema("worker_config"), - - "preemptible_worker_config": { - Type: resource_dataproc_cluster_schema.TypeList, - Optional: true, - AtLeastOneOf: clusterConfigKeys, - Computed: true, - MaxItems: 1, - Description: `The Google Compute Engine config settings 
for the additional (aka preemptible) instances in a cluster.`, - Elem: &resource_dataproc_cluster_schema.Resource{ - Schema: map[string]*resource_dataproc_cluster_schema.Schema{ - "num_instances": { - Type: resource_dataproc_cluster_schema.TypeInt, - Optional: true, - Computed: true, - Description: `Specifies the number of preemptible nodes to create. Defaults to 0.`, - AtLeastOneOf: []string{ - "cluster_config.0.preemptible_worker_config.0.num_instances", - "cluster_config.0.preemptible_worker_config.0.disk_config", - }, - }, - - "disk_config": { - Type: resource_dataproc_cluster_schema.TypeList, - Optional: true, - Computed: true, - Description: `Disk Config`, - AtLeastOneOf: []string{ - "cluster_config.0.preemptible_worker_config.0.num_instances", - "cluster_config.0.preemptible_worker_config.0.disk_config", - }, - MaxItems: 1, - - Elem: &resource_dataproc_cluster_schema.Resource{ - Schema: map[string]*resource_dataproc_cluster_schema.Schema{ - "num_local_ssds": { - Type: resource_dataproc_cluster_schema.TypeInt, - Optional: true, - Computed: true, - AtLeastOneOf: preemptibleWorkerDiskConfigKeys, - ForceNew: true, - Description: `The amount of local SSD disks that will be attached to each preemptible worker node. Defaults to 0.`, - }, - - "boot_disk_size_gb": { - Type: resource_dataproc_cluster_schema.TypeInt, - Optional: true, - Computed: true, - AtLeastOneOf: preemptibleWorkerDiskConfigKeys, - ForceNew: true, - ValidateFunc: resource_dataproc_cluster_validation.IntAtLeast(10), - Description: `Size of the primary disk attached to each preemptible worker node, specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). 
Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.`, - }, - - "boot_disk_type": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - AtLeastOneOf: preemptibleWorkerDiskConfigKeys, - ForceNew: true, - ValidateFunc: resource_dataproc_cluster_validation.StringInSlice([]string{"pd-standard", "pd-ssd", ""}, false), - Default: "pd-standard", - Description: `The disk type of the primary disk attached to each preemptible worker node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".`, - }, - }, - }, - }, - - "instance_names": { - Type: resource_dataproc_cluster_schema.TypeList, - Computed: true, - Elem: &resource_dataproc_cluster_schema.Schema{Type: resource_dataproc_cluster_schema.TypeString}, - Description: `List of preemptible instance names which have been assigned to the cluster.`, - }, - }, - }, - }, - - "security_config": { - Type: resource_dataproc_cluster_schema.TypeList, - Optional: true, - MaxItems: 1, - Description: `Security related configuration.`, - Elem: &resource_dataproc_cluster_schema.Resource{ - Schema: map[string]*resource_dataproc_cluster_schema.Schema{ - "kerberos_config": { - Type: resource_dataproc_cluster_schema.TypeList, - Required: true, - Description: "Kerberos related configuration", - MaxItems: 1, - Elem: &resource_dataproc_cluster_schema.Resource{ - Schema: map[string]*resource_dataproc_cluster_schema.Schema{ - "cross_realm_trust_admin_server": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Description: `The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.`, - }, - "cross_realm_trust_kdc": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Description: `The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.`, - }, - "cross_realm_trust_realm": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - 
Description: `The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.`, - }, - "cross_realm_trust_shared_password_uri": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Description: `The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster -Kerberos realm and the remote trusted realm, in a cross realm trust relationship.`, - }, - "enable_kerberos": { - Type: resource_dataproc_cluster_schema.TypeBool, - Optional: true, - Description: `Flag to indicate whether to Kerberize the cluster.`, - }, - "kdc_db_key_uri": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Description: `The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.`, - }, - "key_password_uri": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Description: `The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.`, - }, - "keystore_uri": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Description: `The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.`, - }, - "keystore_password_uri": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Description: `The Cloud Storage URI of a KMS encrypted file containing -the password to the user provided keystore. For the self-signed certificate, this password is generated -by Dataproc`, - }, - "kms_key_uri": { - Type: resource_dataproc_cluster_schema.TypeString, - Required: true, - Description: `The uri of the KMS key used to encrypt various sensitive files.`, - }, - "realm": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Description: `The name of the on-cluster Kerberos realm. 
If not specified, the uppercased domain of hostnames will be the realm.`, - }, - "root_principal_password_uri": { - Type: resource_dataproc_cluster_schema.TypeString, - Required: true, - Description: `The cloud Storage URI of a KMS encrypted file containing the root principal password.`, - }, - "tgt_lifetime_hours": { - Type: resource_dataproc_cluster_schema.TypeInt, - Optional: true, - Description: `The lifetime of the ticket granting ticket, in hours.`, - }, - "truststore_password_uri": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Description: `The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.`, - }, - "truststore_uri": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Description: `The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.`, - }, - }, - }, - }, - }, - }, - }, - - "software_config": { - Type: resource_dataproc_cluster_schema.TypeList, - Optional: true, - AtLeastOneOf: clusterConfigKeys, - Computed: true, - MaxItems: 1, - Description: `The config settings for software inside the cluster.`, - Elem: &resource_dataproc_cluster_schema.Resource{ - Schema: map[string]*resource_dataproc_cluster_schema.Schema{ - "image_version": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Computed: true, - AtLeastOneOf: clusterSoftwareConfigKeys, - ForceNew: true, - DiffSuppressFunc: dataprocImageVersionDiffSuppress, - Description: `The Cloud Dataproc image version to use for the cluster - this controls the sets of software versions installed onto the nodes when you create clusters. 
If not specified, defaults to the latest version.`, - }, - "override_properties": { - Type: resource_dataproc_cluster_schema.TypeMap, - Optional: true, - AtLeastOneOf: clusterSoftwareConfigKeys, - ForceNew: true, - Elem: &resource_dataproc_cluster_schema.Schema{Type: resource_dataproc_cluster_schema.TypeString}, - Description: `A list of override and additional properties (key/value pairs) used to modify various aspects of the common configuration files used when creating a cluster.`, - }, - - "properties": { - Type: resource_dataproc_cluster_schema.TypeMap, - Computed: true, - Description: `A list of the properties used to set the daemon config files. This will include any values supplied by the user via cluster_config.software_config.override_properties`, - }, - - "optional_components": { - Type: resource_dataproc_cluster_schema.TypeSet, - Optional: true, - AtLeastOneOf: clusterSoftwareConfigKeys, - Description: `The set of optional components to activate on the cluster.`, - Elem: &resource_dataproc_cluster_schema.Schema{ - Type: resource_dataproc_cluster_schema.TypeString, - ValidateFunc: resource_dataproc_cluster_validation.StringInSlice([]string{"COMPONENT_UNSPECIFIED", "ANACONDA", "DOCKER", "DRUID", "HBASE", "FLINK", - "HIVE_WEBHCAT", "JUPYTER", "KERBEROS", "PRESTO", "RANGER", "SOLR", "ZEPPELIN", "ZOOKEEPER"}, false), - }, - }, - }, - }, - }, - - "initialization_action": { - Type: resource_dataproc_cluster_schema.TypeList, - Optional: true, - AtLeastOneOf: clusterConfigKeys, - ForceNew: true, - Description: `Commands to execute on each node after config is completed. You can specify multiple versions of these.`, - Elem: &resource_dataproc_cluster_schema.Resource{ - Schema: map[string]*resource_dataproc_cluster_schema.Schema{ - "script": { - Type: resource_dataproc_cluster_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The script to be executed during initialization of the cluster. 
The script must be a GCS file with a gs:// prefix.`, - }, - - "timeout_sec": { - Type: resource_dataproc_cluster_schema.TypeInt, - Optional: true, - Default: 300, - ForceNew: true, - Description: `The maximum duration (in seconds) which script is allowed to take to execute its action. GCP will default to a predetermined computed value if not set (currently 300).`, - }, - }, - }, - }, - "encryption_config": { - Type: resource_dataproc_cluster_schema.TypeList, - Optional: true, - AtLeastOneOf: clusterConfigKeys, - MaxItems: 1, - Description: `The Customer managed encryption keys settings for the cluster.`, - Elem: &resource_dataproc_cluster_schema.Resource{ - Schema: map[string]*resource_dataproc_cluster_schema.Schema{ - "kms_key_name": { - Type: resource_dataproc_cluster_schema.TypeString, - Required: true, - Description: `The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.`, - }, - }, - }, - }, - "autoscaling_config": { - Type: resource_dataproc_cluster_schema.TypeList, - Optional: true, - AtLeastOneOf: clusterConfigKeys, - MaxItems: 1, - Description: `The autoscaling policy config associated with the cluster.`, - DiffSuppressFunc: emptyOrUnsetBlockDiffSuppress, - Elem: &resource_dataproc_cluster_schema.Resource{ - Schema: map[string]*resource_dataproc_cluster_schema.Schema{ - "policy_uri": { - Type: resource_dataproc_cluster_schema.TypeString, - Required: true, - Description: `The autoscaling policy used by the cluster.`, - DiffSuppressFunc: locationDiffSuppress, - }, - }, - }, - }, - }, - }, - }, - }, - UseJSONNumber: true, - } -} - -func instanceConfigSchema(parent string) *resource_dataproc_cluster_schema.Schema { - var instanceConfigKeys = []string{ - "cluster_config.0." + parent + ".0.num_instances", - "cluster_config.0." + parent + ".0.image_uri", - "cluster_config.0." + parent + ".0.machine_type", - "cluster_config.0." + parent + ".0.min_cpu_platform", - "cluster_config.0." 
+ parent + ".0.disk_config", - "cluster_config.0." + parent + ".0.accelerators", - } - - return &resource_dataproc_cluster_schema.Schema{ - Type: resource_dataproc_cluster_schema.TypeList, - Optional: true, - Computed: true, - AtLeastOneOf: clusterConfigKeys, - MaxItems: 1, - Description: `The Google Compute Engine config settings for the master/worker instances in a cluster.`, - Elem: &resource_dataproc_cluster_schema.Resource{ - Schema: map[string]*resource_dataproc_cluster_schema.Schema{ - "num_instances": { - Type: resource_dataproc_cluster_schema.TypeInt, - Optional: true, - Computed: true, - Description: `Specifies the number of master/worker nodes to create. If not specified, GCP will default to a predetermined computed value.`, - AtLeastOneOf: instanceConfigKeys, - }, - - "image_uri": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Computed: true, - AtLeastOneOf: instanceConfigKeys, - ForceNew: true, - Description: `The URI for the image to use for this master/worker`, - }, - - "machine_type": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Computed: true, - AtLeastOneOf: instanceConfigKeys, - ForceNew: true, - Description: `The name of a Google Compute Engine machine type to create for the master/worker`, - }, - - "min_cpu_platform": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Computed: true, - AtLeastOneOf: instanceConfigKeys, - ForceNew: true, - Description: `The name of a minimum generation of CPU family for the master/worker. 
If not specified, GCP will default to a predetermined computed value for each zone.`, - }, - "disk_config": { - Type: resource_dataproc_cluster_schema.TypeList, - Optional: true, - Computed: true, - AtLeastOneOf: instanceConfigKeys, - MaxItems: 1, - Description: `Disk Config`, - Elem: &resource_dataproc_cluster_schema.Resource{ - Schema: map[string]*resource_dataproc_cluster_schema.Schema{ - "num_local_ssds": { - Type: resource_dataproc_cluster_schema.TypeInt, - Optional: true, - Computed: true, - Description: `The amount of local SSD disks that will be attached to each master cluster node. Defaults to 0.`, - AtLeastOneOf: []string{ - "cluster_config.0." + parent + ".0.disk_config.0.num_local_ssds", - "cluster_config.0." + parent + ".0.disk_config.0.boot_disk_size_gb", - "cluster_config.0." + parent + ".0.disk_config.0.boot_disk_type", - }, - ForceNew: true, - }, - - "boot_disk_size_gb": { - Type: resource_dataproc_cluster_schema.TypeInt, - Optional: true, - Computed: true, - Description: `Size of the primary disk attached to each node, specified in GB. The primary disk contains the boot volume and system libraries, and the smallest allowed disk size is 10GB. GCP will default to a predetermined computed value if not set (currently 500GB). Note: If SSDs are not attached, it also contains the HDFS data blocks and Hadoop working directories.`, - AtLeastOneOf: []string{ - "cluster_config.0." + parent + ".0.disk_config.0.num_local_ssds", - "cluster_config.0." + parent + ".0.disk_config.0.boot_disk_size_gb", - "cluster_config.0." + parent + ".0.disk_config.0.boot_disk_type", - }, - ForceNew: true, - ValidateFunc: resource_dataproc_cluster_validation.IntAtLeast(10), - }, - - "boot_disk_type": { - Type: resource_dataproc_cluster_schema.TypeString, - Optional: true, - Description: `The disk type of the primary disk attached to each node. One of "pd-ssd" or "pd-standard". Defaults to "pd-standard".`, - AtLeastOneOf: []string{ - "cluster_config.0." 
+ parent + ".0.disk_config.0.num_local_ssds", - "cluster_config.0." + parent + ".0.disk_config.0.boot_disk_size_gb", - "cluster_config.0." + parent + ".0.disk_config.0.boot_disk_type", - }, - ForceNew: true, - ValidateFunc: resource_dataproc_cluster_validation.StringInSlice([]string{"pd-standard", "pd-ssd", ""}, false), - Default: "pd-standard", - }, - }, - }, - }, - - "accelerators": { - Type: resource_dataproc_cluster_schema.TypeSet, - Optional: true, - AtLeastOneOf: instanceConfigKeys, - ForceNew: true, - Elem: acceleratorsSchema(), - Description: `The Compute Engine accelerator (GPU) configuration for these instances. Can be specified multiple times.`, - }, - - "instance_names": { - Type: resource_dataproc_cluster_schema.TypeList, - Computed: true, - Elem: &resource_dataproc_cluster_schema.Schema{Type: resource_dataproc_cluster_schema.TypeString}, - Description: `List of master/worker instance names which have been assigned to the cluster.`, - }, - }, - }, - } -} - -func acceleratorsSchema() *resource_dataproc_cluster_schema.Resource { - return &resource_dataproc_cluster_schema.Resource{ - Schema: map[string]*resource_dataproc_cluster_schema.Schema{ - "accelerator_type": { - Type: resource_dataproc_cluster_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The short name of the accelerator type to expose to this instance. For example, nvidia-tesla-k80.`, - }, - - "accelerator_count": { - Type: resource_dataproc_cluster_schema.TypeInt, - Required: true, - ForceNew: true, - Description: `The number of the accelerator cards of this type exposed to this instance. 
Often restricted to one of 1, 2, 4, or 8.`, - }, - }, - } -} - -func resourceDataprocClusterCreate(d *resource_dataproc_cluster_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - region := d.Get("region").(string) - cluster := &resource_dataproc_cluster_dataproc.Cluster{ - ClusterName: d.Get("name").(string), - ProjectId: project, - } - - cluster.Config, err = expandClusterConfig(d, config) - if err != nil { - return err - } - - if _, ok := d.GetOk("labels"); ok { - cluster.Labels = expandLabels(d) - } - - if region == "global" && cluster.Config.GceClusterConfig.ZoneUri == "" { - return resource_dataproc_cluster_errors.New("zone is mandatory when region is set to 'global'") - } - - op, err := config.NewDataprocClient(userAgent).Projects.Regions.Clusters.Create( - project, region, cluster).Do() - if err != nil { - return resource_dataproc_cluster_fmt.Errorf("Error creating Dataproc cluster: %s", err) - } - - d.SetId(resource_dataproc_cluster_fmt.Sprintf("projects/%s/regions/%s/clusters/%s", project, region, cluster.ClusterName)) - - waitErr := dataprocClusterOperationWait(config, op, "creating Dataproc cluster", userAgent, d.Timeout(resource_dataproc_cluster_schema.TimeoutCreate)) - if waitErr != nil { - - return waitErr - } - - resource_dataproc_cluster_log.Printf("[INFO] Dataproc cluster %s has been created", cluster.ClusterName) - return resourceDataprocClusterRead(d, meta) -} - -func expandClusterConfig(d *resource_dataproc_cluster_schema.ResourceData, config *Config) (*resource_dataproc_cluster_dataproc.ClusterConfig, error) { - conf := &resource_dataproc_cluster_dataproc.ClusterConfig{ - - GceClusterConfig: &resource_dataproc_cluster_dataproc.GceClusterConfig{}, - } - - if v, ok := d.GetOk("cluster_config"); ok { - confs := v.([]interface{}) - if (len(confs)) 
== 0 { - return conf, nil - } - } - - if v, ok := d.GetOk("cluster_config.0.staging_bucket"); ok { - conf.ConfigBucket = v.(string) - } - - if v, ok := d.GetOk("cluster_config.0.temp_bucket"); ok { - conf.TempBucket = v.(string) - } - - c, err := expandGceClusterConfig(d, config) - if err != nil { - return nil, err - } - conf.GceClusterConfig = c - - if cfg, ok := configOptions(d, "cluster_config.0.security_config"); ok { - conf.SecurityConfig = expandSecurityConfig(cfg) - } - - if cfg, ok := configOptions(d, "cluster_config.0.software_config"); ok { - conf.SoftwareConfig = expandSoftwareConfig(cfg) - } - - if v, ok := d.GetOk("cluster_config.0.initialization_action"); ok { - conf.InitializationActions = expandInitializationActions(v) - } - - if cfg, ok := configOptions(d, "cluster_config.0.encryption_config"); ok { - conf.EncryptionConfig = expandEncryptionConfig(cfg) - } - - if cfg, ok := configOptions(d, "cluster_config.0.autoscaling_config"); ok { - conf.AutoscalingConfig = expandAutoscalingConfig(cfg) - } - - if cfg, ok := configOptions(d, "cluster_config.0.master_config"); ok { - resource_dataproc_cluster_log.Println("[INFO] got master_config") - conf.MasterConfig = expandInstanceGroupConfig(cfg) - } - - if cfg, ok := configOptions(d, "cluster_config.0.worker_config"); ok { - resource_dataproc_cluster_log.Println("[INFO] got worker config") - conf.WorkerConfig = expandInstanceGroupConfig(cfg) - } - - if cfg, ok := configOptions(d, "cluster_config.0.preemptible_worker_config"); ok { - resource_dataproc_cluster_log.Println("[INFO] got preemptible worker config") - conf.SecondaryWorkerConfig = expandPreemptibleInstanceGroupConfig(cfg) - if conf.SecondaryWorkerConfig.NumInstances > 0 { - conf.SecondaryWorkerConfig.IsPreemptible = true - } - } - return conf, nil -} - -func expandGceClusterConfig(d *resource_dataproc_cluster_schema.ResourceData, config *Config) (*resource_dataproc_cluster_dataproc.GceClusterConfig, error) { - conf := 
&resource_dataproc_cluster_dataproc.GceClusterConfig{} - - v, ok := d.GetOk("cluster_config.0.gce_cluster_config") - if !ok { - return conf, nil - } - cfg := v.([]interface{})[0].(map[string]interface{}) - - if v, ok := cfg["zone"]; ok { - conf.ZoneUri = v.(string) - } - if v, ok := cfg["network"]; ok { - nf, err := ParseNetworkFieldValue(v.(string), d, config) - if err != nil { - return nil, resource_dataproc_cluster_fmt.Errorf("cannot determine self_link for network %q: %s", v, err) - } - - conf.NetworkUri = nf.RelativeLink() - } - if v, ok := cfg["subnetwork"]; ok { - snf, err := ParseSubnetworkFieldValue(v.(string), d, config) - if err != nil { - return nil, resource_dataproc_cluster_fmt.Errorf("cannot determine self_link for subnetwork %q: %s", v, err) - } - - conf.SubnetworkUri = snf.RelativeLink() - } - if v, ok := cfg["tags"]; ok { - conf.Tags = convertStringSet(v.(*resource_dataproc_cluster_schema.Set)) - } - if v, ok := cfg["service_account"]; ok { - conf.ServiceAccount = v.(string) - } - if scopes, ok := cfg["service_account_scopes"]; ok { - scopesSet := scopes.(*resource_dataproc_cluster_schema.Set) - scopes := make([]string, scopesSet.Len()) - for i, scope := range scopesSet.List() { - scopes[i] = canonicalizeServiceScope(scope.(string)) - } - conf.ServiceAccountScopes = scopes - } - if v, ok := cfg["internal_ip_only"]; ok { - conf.InternalIpOnly = v.(bool) - } - if v, ok := cfg["metadata"]; ok { - conf.Metadata = convertStringMap(v.(map[string]interface{})) - } - if v, ok := d.GetOk("cluster_config.0.gce_cluster_config.0.shielded_instance_config"); ok { - cfgSic := v.([]interface{})[0].(map[string]interface{}) - conf.ShieldedInstanceConfig = &resource_dataproc_cluster_dataproc.ShieldedInstanceConfig{} - if v, ok := cfgSic["enable_integrity_monitoring"]; ok { - conf.ShieldedInstanceConfig.EnableIntegrityMonitoring = v.(bool) - } - if v, ok := cfgSic["enable_secure_boot"]; ok { - conf.ShieldedInstanceConfig.EnableSecureBoot = v.(bool) - } - if v, ok := 
cfgSic["enable_vtpm"]; ok { - conf.ShieldedInstanceConfig.EnableVtpm = v.(bool) - } - } - return conf, nil -} - -func expandSecurityConfig(cfg map[string]interface{}) *resource_dataproc_cluster_dataproc.SecurityConfig { - conf := &resource_dataproc_cluster_dataproc.SecurityConfig{} - if kfg, ok := cfg["kerberos_config"]; ok { - conf.KerberosConfig = expandKerberosConfig(kfg.([]interface{})[0].(map[string]interface{})) - } - return conf -} - -func expandKerberosConfig(cfg map[string]interface{}) *resource_dataproc_cluster_dataproc.KerberosConfig { - conf := &resource_dataproc_cluster_dataproc.KerberosConfig{} - if v, ok := cfg["enable_kerberos"]; ok { - conf.EnableKerberos = v.(bool) - } - if v, ok := cfg["root_principal_password_uri"]; ok { - conf.RootPrincipalPasswordUri = v.(string) - } - if v, ok := cfg["kms_key_uri"]; ok { - conf.KmsKeyUri = v.(string) - } - if v, ok := cfg["keystore_uri"]; ok { - conf.KeystoreUri = v.(string) - } - if v, ok := cfg["truststore_uri"]; ok { - conf.TruststoreUri = v.(string) - } - if v, ok := cfg["keystore_password_uri"]; ok { - conf.KeystorePasswordUri = v.(string) - } - if v, ok := cfg["key_password_uri"]; ok { - conf.KeyPasswordUri = v.(string) - } - if v, ok := cfg["truststore_password_uri"]; ok { - conf.TruststorePasswordUri = v.(string) - } - if v, ok := cfg["cross_realm_trust_realm"]; ok { - conf.CrossRealmTrustRealm = v.(string) - } - if v, ok := cfg["cross_realm_trust_kdc"]; ok { - conf.CrossRealmTrustKdc = v.(string) - } - if v, ok := cfg["cross_realm_trust_admin_server"]; ok { - conf.CrossRealmTrustAdminServer = v.(string) - } - if v, ok := cfg["cross_realm_trust_shared_password_uri"]; ok { - conf.CrossRealmTrustSharedPasswordUri = v.(string) - } - if v, ok := cfg["kdc_db_key_uri"]; ok { - conf.KdcDbKeyUri = v.(string) - } - if v, ok := cfg["tgt_lifetime_hours"]; ok { - conf.TgtLifetimeHours = int64(v.(int)) - } - if v, ok := cfg["realm"]; ok { - conf.Realm = v.(string) - } - - return conf -} - -func 
expandSoftwareConfig(cfg map[string]interface{}) *resource_dataproc_cluster_dataproc.SoftwareConfig { - conf := &resource_dataproc_cluster_dataproc.SoftwareConfig{} - if v, ok := cfg["override_properties"]; ok { - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - conf.Properties = m - } - if v, ok := cfg["image_version"]; ok { - conf.ImageVersion = v.(string) - } - if components, ok := cfg["optional_components"]; ok { - compSet := components.(*resource_dataproc_cluster_schema.Set) - components := make([]string, compSet.Len()) - for i, component := range compSet.List() { - components[i] = component.(string) - } - conf.OptionalComponents = components - } - return conf -} - -func expandEncryptionConfig(cfg map[string]interface{}) *resource_dataproc_cluster_dataproc.EncryptionConfig { - conf := &resource_dataproc_cluster_dataproc.EncryptionConfig{} - if v, ok := cfg["kms_key_name"]; ok { - conf.GcePdKmsKeyName = v.(string) - } - return conf -} - -func expandAutoscalingConfig(cfg map[string]interface{}) *resource_dataproc_cluster_dataproc.AutoscalingConfig { - conf := &resource_dataproc_cluster_dataproc.AutoscalingConfig{} - if v, ok := cfg["policy_uri"]; ok { - conf.PolicyUri = v.(string) - } - return conf -} - -func expandInitializationActions(v interface{}) []*resource_dataproc_cluster_dataproc.NodeInitializationAction { - actionList := v.([]interface{}) - - actions := []*resource_dataproc_cluster_dataproc.NodeInitializationAction{} - for _, v1 := range actionList { - actionItem := v1.(map[string]interface{}) - action := &resource_dataproc_cluster_dataproc.NodeInitializationAction{ - ExecutableFile: actionItem["script"].(string), - } - if x, ok := actionItem["timeout_sec"]; ok { - action.ExecutionTimeout = resource_dataproc_cluster_strconv.Itoa(x.(int)) + "s" - } - actions = append(actions, action) - } - - return actions -} - -func expandPreemptibleInstanceGroupConfig(cfg map[string]interface{}) 
*resource_dataproc_cluster_dataproc.InstanceGroupConfig { - icg := &resource_dataproc_cluster_dataproc.InstanceGroupConfig{} - - if v, ok := cfg["num_instances"]; ok { - icg.NumInstances = int64(v.(int)) - } - if dc, ok := cfg["disk_config"]; ok { - d := dc.([]interface{}) - if len(d) > 0 { - dcfg := d[0].(map[string]interface{}) - icg.DiskConfig = &resource_dataproc_cluster_dataproc.DiskConfig{} - - if v, ok := dcfg["boot_disk_size_gb"]; ok { - icg.DiskConfig.BootDiskSizeGb = int64(v.(int)) - } - if v, ok := dcfg["num_local_ssds"]; ok { - icg.DiskConfig.NumLocalSsds = int64(v.(int)) - } - if v, ok := dcfg["boot_disk_type"]; ok { - icg.DiskConfig.BootDiskType = v.(string) - } - } - } - return icg -} - -func expandInstanceGroupConfig(cfg map[string]interface{}) *resource_dataproc_cluster_dataproc.InstanceGroupConfig { - icg := &resource_dataproc_cluster_dataproc.InstanceGroupConfig{} - - if v, ok := cfg["num_instances"]; ok { - icg.NumInstances = int64(v.(int)) - } - if v, ok := cfg["machine_type"]; ok { - icg.MachineTypeUri = GetResourceNameFromSelfLink(v.(string)) - } - if v, ok := cfg["min_cpu_platform"]; ok { - icg.MinCpuPlatform = v.(string) - } - if v, ok := cfg["image_uri"]; ok { - icg.ImageUri = v.(string) - } - - if dc, ok := cfg["disk_config"]; ok { - d := dc.([]interface{}) - if len(d) > 0 { - dcfg := d[0].(map[string]interface{}) - icg.DiskConfig = &resource_dataproc_cluster_dataproc.DiskConfig{} - - if v, ok := dcfg["boot_disk_size_gb"]; ok { - icg.DiskConfig.BootDiskSizeGb = int64(v.(int)) - } - if v, ok := dcfg["num_local_ssds"]; ok { - icg.DiskConfig.NumLocalSsds = int64(v.(int)) - } - if v, ok := dcfg["boot_disk_type"]; ok { - icg.DiskConfig.BootDiskType = v.(string) - } - } - } - - icg.Accelerators = expandAccelerators(cfg["accelerators"].(*resource_dataproc_cluster_schema.Set).List()) - return icg -} - -func expandAccelerators(configured []interface{}) []*resource_dataproc_cluster_dataproc.AcceleratorConfig { - accelerators := 
make([]*resource_dataproc_cluster_dataproc.AcceleratorConfig, 0, len(configured)) - for _, raw := range configured { - data := raw.(map[string]interface{}) - accelerator := resource_dataproc_cluster_dataproc.AcceleratorConfig{ - AcceleratorTypeUri: data["accelerator_type"].(string), - AcceleratorCount: int64(data["accelerator_count"].(int)), - } - - accelerators = append(accelerators, &accelerator) - } - - return accelerators -} - -func resourceDataprocClusterUpdate(d *resource_dataproc_cluster_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - region := d.Get("region").(string) - clusterName := d.Get("name").(string) - - cluster := &resource_dataproc_cluster_dataproc.Cluster{ - ClusterName: clusterName, - ProjectId: project, - Config: &resource_dataproc_cluster_dataproc.ClusterConfig{}, - } - - updMask := []string{} - - if d.HasChange("labels") { - v := d.Get("labels") - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - cluster.Labels = m - - updMask = append(updMask, "labels") - } - - if d.HasChange("cluster_config.0.worker_config.0.num_instances") { - desiredNumWorks := d.Get("cluster_config.0.worker_config.0.num_instances").(int) - cluster.Config.WorkerConfig = &resource_dataproc_cluster_dataproc.InstanceGroupConfig{ - NumInstances: int64(desiredNumWorks), - } - - updMask = append(updMask, "config.worker_config.num_instances") - } - - if d.HasChange("cluster_config.0.preemptible_worker_config.0.num_instances") { - desiredNumWorks := d.Get("cluster_config.0.preemptible_worker_config.0.num_instances").(int) - cluster.Config.SecondaryWorkerConfig = &resource_dataproc_cluster_dataproc.InstanceGroupConfig{ - NumInstances: int64(desiredNumWorks), - } - - updMask = append(updMask, 
"config.secondary_worker_config.num_instances") - } - - if d.HasChange("cluster_config.0.autoscaling_config") { - desiredPolicy := d.Get("cluster_config.0.autoscaling_config.0.policy_uri").(string) - cluster.Config.AutoscalingConfig = &resource_dataproc_cluster_dataproc.AutoscalingConfig{ - PolicyUri: desiredPolicy, - } - - updMask = append(updMask, "config.autoscaling_config.policy_uri") - } - - if len(updMask) > 0 { - gracefulDecommissionTimeout := d.Get("graceful_decommission_timeout").(string) - - patch := config.NewDataprocClient(userAgent).Projects.Regions.Clusters.Patch( - project, region, clusterName, cluster) - patch.GracefulDecommissionTimeout(gracefulDecommissionTimeout) - patch.UpdateMask(resource_dataproc_cluster_strings.Join(updMask, ",")) - op, err := patch.Do() - if err != nil { - return err - } - - waitErr := dataprocClusterOperationWait(config, op, "updating Dataproc cluster ", userAgent, d.Timeout(resource_dataproc_cluster_schema.TimeoutUpdate)) - if waitErr != nil { - return waitErr - } - - resource_dataproc_cluster_log.Printf("[INFO] Dataproc cluster %s has been updated ", d.Id()) - } - - return resourceDataprocClusterRead(d, meta) -} - -func resourceDataprocClusterRead(d *resource_dataproc_cluster_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - region := d.Get("region").(string) - clusterName := d.Get("name").(string) - - cluster, err := config.NewDataprocClient(userAgent).Projects.Regions.Clusters.Get( - project, region, clusterName).Do() - if err != nil { - return handleNotFoundError(err, d, resource_dataproc_cluster_fmt.Sprintf("Dataproc Cluster %q", clusterName)) - } - - if err := d.Set("name", cluster.ClusterName); err != nil { - return resource_dataproc_cluster_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("project", 
project); err != nil { - return resource_dataproc_cluster_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("region", region); err != nil { - return resource_dataproc_cluster_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("labels", cluster.Labels); err != nil { - return resource_dataproc_cluster_fmt.Errorf("Error setting labels: %s", err) - } - - cfg, err := flattenClusterConfig(d, cluster.Config) - if err != nil { - return err - } - - err = d.Set("cluster_config", cfg) - if err != nil { - return err - } - return nil -} - -func flattenClusterConfig(d *resource_dataproc_cluster_schema.ResourceData, cfg *resource_dataproc_cluster_dataproc.ClusterConfig) ([]map[string]interface{}, error) { - - data := map[string]interface{}{ - "staging_bucket": d.Get("cluster_config.0.staging_bucket").(string), - - "bucket": cfg.ConfigBucket, - "temp_bucket": cfg.TempBucket, - "gce_cluster_config": flattenGceClusterConfig(d, cfg.GceClusterConfig), - "master_config": flattenInstanceGroupConfig(d, cfg.MasterConfig), - "worker_config": flattenInstanceGroupConfig(d, cfg.WorkerConfig), - "software_config": flattenSoftwareConfig(d, cfg.SoftwareConfig), - "encryption_config": flattenEncryptionConfig(d, cfg.EncryptionConfig), - "autoscaling_config": flattenAutoscalingConfig(d, cfg.AutoscalingConfig), - "security_config": flattenSecurityConfig(d, cfg.SecurityConfig), - "preemptible_worker_config": flattenPreemptibleInstanceGroupConfig(d, cfg.SecondaryWorkerConfig), - } - - if len(cfg.InitializationActions) > 0 { - val, err := flattenInitializationActions(cfg.InitializationActions) - if err != nil { - return nil, err - } - data["initialization_action"] = val - } - return []map[string]interface{}{data}, nil -} - -func flattenSecurityConfig(d *resource_dataproc_cluster_schema.ResourceData, sc *resource_dataproc_cluster_dataproc.SecurityConfig) []map[string]interface{} { - if sc == nil { - return nil - } - data := map[string]interface{}{ - "kerberos_config": 
flattenKerberosConfig(d, sc.KerberosConfig), - } - - return []map[string]interface{}{data} -} - -func flattenKerberosConfig(d *resource_dataproc_cluster_schema.ResourceData, kfg *resource_dataproc_cluster_dataproc.KerberosConfig) []map[string]interface{} { - data := map[string]interface{}{ - "enable_kerberos": kfg.EnableKerberos, - "root_principal_password_uri": kfg.RootPrincipalPasswordUri, - "kms_key_uri": kfg.KmsKeyUri, - "keystore_uri": kfg.KeystoreUri, - "truststore_uri": kfg.TruststoreUri, - "keystore_password_uri": kfg.KeystorePasswordUri, - "key_password_uri": kfg.KeyPasswordUri, - "truststore_password_uri": kfg.TruststorePasswordUri, - "cross_realm_trust_realm": kfg.CrossRealmTrustRealm, - "cross_realm_trust_kdc": kfg.CrossRealmTrustKdc, - "cross_realm_trust_admin_server": kfg.CrossRealmTrustAdminServer, - "cross_realm_trust_shared_password_uri": kfg.CrossRealmTrustSharedPasswordUri, - "kdc_db_key_uri": kfg.KdcDbKeyUri, - "tgt_lifetime_hours": kfg.TgtLifetimeHours, - "realm": kfg.Realm, - } - - return []map[string]interface{}{data} -} - -func flattenSoftwareConfig(d *resource_dataproc_cluster_schema.ResourceData, sc *resource_dataproc_cluster_dataproc.SoftwareConfig) []map[string]interface{} { - data := map[string]interface{}{ - "image_version": sc.ImageVersion, - "optional_components": sc.OptionalComponents, - "properties": sc.Properties, - "override_properties": d.Get("cluster_config.0.software_config.0.override_properties").(map[string]interface{}), - } - - return []map[string]interface{}{data} -} - -func flattenEncryptionConfig(d *resource_dataproc_cluster_schema.ResourceData, ec *resource_dataproc_cluster_dataproc.EncryptionConfig) []map[string]interface{} { - if ec == nil { - return nil - } - - data := map[string]interface{}{ - "kms_key_name": ec.GcePdKmsKeyName, - } - - return []map[string]interface{}{data} -} - -func flattenAutoscalingConfig(d *resource_dataproc_cluster_schema.ResourceData, ec *resource_dataproc_cluster_dataproc.AutoscalingConfig) 
[]map[string]interface{} { - if ec == nil { - return nil - } - - data := map[string]interface{}{ - "policy_uri": ec.PolicyUri, - } - - return []map[string]interface{}{data} -} - -func flattenAccelerators(accelerators []*resource_dataproc_cluster_dataproc.AcceleratorConfig) interface{} { - acceleratorsTypeSet := resource_dataproc_cluster_schema.NewSet(resource_dataproc_cluster_schema.HashResource(acceleratorsSchema()), []interface{}{}) - for _, accelerator := range accelerators { - data := map[string]interface{}{ - "accelerator_type": GetResourceNameFromSelfLink(accelerator.AcceleratorTypeUri), - "accelerator_count": int(accelerator.AcceleratorCount), - } - - acceleratorsTypeSet.Add(data) - } - - return acceleratorsTypeSet -} - -func flattenInitializationActions(nia []*resource_dataproc_cluster_dataproc.NodeInitializationAction) ([]map[string]interface{}, error) { - - actions := []map[string]interface{}{} - for _, v := range nia { - action := map[string]interface{}{ - "script": v.ExecutableFile, - } - if len(v.ExecutionTimeout) > 0 { - tsec, err := extractInitTimeout(v.ExecutionTimeout) - if err != nil { - return nil, err - } - action["timeout_sec"] = tsec - } - - actions = append(actions, action) - } - return actions, nil - -} - -func flattenGceClusterConfig(d *resource_dataproc_cluster_schema.ResourceData, gcc *resource_dataproc_cluster_dataproc.GceClusterConfig) []map[string]interface{} { - - gceConfig := map[string]interface{}{ - "tags": resource_dataproc_cluster_schema.NewSet(resource_dataproc_cluster_schema.HashString, convertStringArrToInterface(gcc.Tags)), - "service_account": gcc.ServiceAccount, - "zone": GetResourceNameFromSelfLink(gcc.ZoneUri), - "internal_ip_only": gcc.InternalIpOnly, - "metadata": gcc.Metadata, - } - - if gcc.NetworkUri != "" { - gceConfig["network"] = gcc.NetworkUri - } - if gcc.SubnetworkUri != "" { - gceConfig["subnetwork"] = gcc.SubnetworkUri - } - if len(gcc.ServiceAccountScopes) > 0 { - gceConfig["service_account_scopes"] = 
resource_dataproc_cluster_schema.NewSet(stringScopeHashcode, convertStringArrToInterface(gcc.ServiceAccountScopes)) - } - if gcc.ShieldedInstanceConfig != nil { - gceConfig["shielded_instance_config"] = []map[string]interface{}{ - { - "enable_integrity_monitoring": gcc.ShieldedInstanceConfig.EnableIntegrityMonitoring, - "enable_secure_boot": gcc.ShieldedInstanceConfig.EnableSecureBoot, - "enable_vtpm": gcc.ShieldedInstanceConfig.EnableVtpm, - }, - } - } - - return []map[string]interface{}{gceConfig} -} - -func flattenPreemptibleInstanceGroupConfig(d *resource_dataproc_cluster_schema.ResourceData, icg *resource_dataproc_cluster_dataproc.InstanceGroupConfig) []map[string]interface{} { - - if icg == nil { - icgSchema := d.Get("cluster_config.0.preemptible_worker_config") - resource_dataproc_cluster_log.Printf("[DEBUG] state of preemptible is %#v", icgSchema) - if v, ok := icgSchema.([]interface{}); ok && len(v) > 0 { - if m, ok := v[0].(map[string]interface{}); ok { - return []map[string]interface{}{m} - } - } - } - - disk := map[string]interface{}{} - data := map[string]interface{}{} - - if icg != nil { - data["num_instances"] = icg.NumInstances - data["instance_names"] = icg.InstanceNames - if icg.DiskConfig != nil { - disk["boot_disk_size_gb"] = icg.DiskConfig.BootDiskSizeGb - disk["num_local_ssds"] = icg.DiskConfig.NumLocalSsds - disk["boot_disk_type"] = icg.DiskConfig.BootDiskType - } - } - - data["disk_config"] = []map[string]interface{}{disk} - return []map[string]interface{}{data} -} - -func flattenInstanceGroupConfig(d *resource_dataproc_cluster_schema.ResourceData, icg *resource_dataproc_cluster_dataproc.InstanceGroupConfig) []map[string]interface{} { - disk := map[string]interface{}{} - data := map[string]interface{}{} - - if icg != nil { - data["num_instances"] = icg.NumInstances - data["machine_type"] = GetResourceNameFromSelfLink(icg.MachineTypeUri) - data["min_cpu_platform"] = icg.MinCpuPlatform - data["image_uri"] = icg.ImageUri - 
data["instance_names"] = icg.InstanceNames - if icg.DiskConfig != nil { - disk["boot_disk_size_gb"] = icg.DiskConfig.BootDiskSizeGb - disk["num_local_ssds"] = icg.DiskConfig.NumLocalSsds - disk["boot_disk_type"] = icg.DiskConfig.BootDiskType - } - - data["accelerators"] = flattenAccelerators(icg.Accelerators) - } - - data["disk_config"] = []map[string]interface{}{disk} - return []map[string]interface{}{data} -} - -func extractInitTimeout(t string) (int, error) { - d, err := resource_dataproc_cluster_time.ParseDuration(t) - if err != nil { - return 0, err - } - return int(d.Seconds()), nil -} - -func resourceDataprocClusterDelete(d *resource_dataproc_cluster_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - region := d.Get("region").(string) - clusterName := d.Get("name").(string) - - resource_dataproc_cluster_log.Printf("[DEBUG] Deleting Dataproc cluster %s", clusterName) - op, err := config.NewDataprocClient(userAgent).Projects.Regions.Clusters.Delete( - project, region, clusterName).Do() - if err != nil { - return err - } - - waitErr := dataprocClusterOperationWait(config, op, "deleting Dataproc cluster", userAgent, d.Timeout(resource_dataproc_cluster_schema.TimeoutDelete)) - if waitErr != nil { - return waitErr - } - - resource_dataproc_cluster_log.Printf("[INFO] Dataproc cluster %s has been deleted", d.Id()) - d.SetId("") - - return nil -} - -func configOptions(d *resource_dataproc_cluster_schema.ResourceData, option string) (map[string]interface{}, bool) { - if v, ok := d.GetOk(option); ok { - clist := v.([]interface{}) - if len(clist) == 0 { - return nil, false - } - - if clist[0] != nil { - return clist[0].(map[string]interface{}), true - } - } - return nil, false -} - -func dataprocImageVersionDiffSuppress(_, old, new string, _ 
*resource_dataproc_cluster_schema.ResourceData) bool { - oldV, err := parseDataprocImageVersion(old) - if err != nil { - return false - } - newV, err := parseDataprocImageVersion(new) - if err != nil { - return false - } - - if newV.major != oldV.major { - return false - } - if newV.minor != oldV.minor { - return false - } - - if newV.subminor != "" && newV.subminor != oldV.subminor { - return false - } - - if newV.osName != "" && newV.osName != oldV.osName { - return false - } - return true -} - -type dataprocImageVersion struct { - major string - minor string - subminor string - osName string -} - -func parseDataprocImageVersion(version string) (*dataprocImageVersion, error) { - matches := resolveDataprocImageVersion.FindStringSubmatch(version) - if len(matches) != 5 { - return nil, resource_dataproc_cluster_fmt.Errorf("invalid image version %q", version) - } - - return &dataprocImageVersion{ - major: matches[1], - minor: matches[2], - subminor: matches[3], - osName: matches[4], - }, nil -} - -func resourceDataprocJob() *resource_dataproc_job_schema.Resource { - return &resource_dataproc_job_schema.Resource{ - Create: resourceDataprocJobCreate, - Update: resourceDataprocJobUpdate, - Read: resourceDataprocJobRead, - Delete: resourceDataprocJobDelete, - - Timeouts: &resource_dataproc_job_schema.ResourceTimeout{ - Create: resource_dataproc_job_schema.DefaultTimeout(20 * resource_dataproc_job_time.Minute), - Delete: resource_dataproc_job_schema.DefaultTimeout(20 * resource_dataproc_job_time.Minute), - }, - - Schema: map[string]*resource_dataproc_job_schema.Schema{ - "project": { - Type: resource_dataproc_job_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The project in which the cluster can be found and jobs subsequently run against. 
If it is not provided, the provider project is used.`, - }, - - "region": { - Type: resource_dataproc_job_schema.TypeString, - Optional: true, - Default: "global", - ForceNew: true, - Description: `The Cloud Dataproc region. This essentially determines which clusters are available for this job to be submitted to. If not specified, defaults to global.`, - }, - - "force_delete": { - Type: resource_dataproc_job_schema.TypeBool, - Default: false, - Optional: true, - Description: `By default, you can only delete inactive jobs within Dataproc. Setting this to true, and calling destroy, will ensure that the job is first cancelled before issuing the delete.`, - }, - - "reference": { - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: `The reference of the job`, - Elem: &resource_dataproc_job_schema.Resource{ - Schema: map[string]*resource_dataproc_job_schema.Schema{ - "job_id": { - Type: resource_dataproc_job_schema.TypeString, - Description: "The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs", - Optional: true, - ForceNew: true, - Computed: true, - ValidateFunc: validateRegexp("^[a-zA-Z0-9_-]{1,100}$"), - }, - }, - }, - }, - - "placement": { - Type: resource_dataproc_job_schema.TypeList, - Required: true, - MaxItems: 1, - Description: `The config of job placement.`, - Elem: &resource_dataproc_job_schema.Resource{ - Schema: map[string]*resource_dataproc_job_schema.Schema{ - "cluster_name": { - Type: resource_dataproc_job_schema.TypeString, - Description: "The name of the cluster where the job will be submitted", - Required: true, - ForceNew: true, - }, - "cluster_uuid": { - Type: resource_dataproc_job_schema.TypeString, - Computed: true, - Description: "Output-only. 
A cluster UUID generated by the Cloud Dataproc service when the job is submitted", - }, - }, - }, - }, - - "status": { - Type: resource_dataproc_job_schema.TypeList, - Computed: true, - Description: `The status of the job.`, - Elem: &resource_dataproc_job_schema.Resource{ - Schema: map[string]*resource_dataproc_job_schema.Schema{ - "state": { - Type: resource_dataproc_job_schema.TypeString, - Description: "Output-only. A state message specifying the overall job state", - Computed: true, - }, - "details": { - Type: resource_dataproc_job_schema.TypeString, - Description: "Output-only. Optional job state details, such as an error description if the state is ERROR", - Computed: true, - }, - "state_start_time": { - Type: resource_dataproc_job_schema.TypeString, - Description: "Output-only. The time when this state was entered", - Computed: true, - }, - "substate": { - Type: resource_dataproc_job_schema.TypeString, - Description: "Output-only. Additional state information, which includes status reported by the agent", - Computed: true, - }, - }, - }, - }, - - "driver_output_resource_uri": { - Type: resource_dataproc_job_schema.TypeString, - Description: "Output-only. A URI pointing to the location of the stdout of the job's driver program", - Computed: true, - }, - - "driver_controls_files_uri": { - Type: resource_dataproc_job_schema.TypeString, - Description: "Output-only. If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.", - Computed: true, - }, - - "labels": { - Type: resource_dataproc_job_schema.TypeMap, - Description: "Optional. The labels to associate with this job.", - Optional: true, - ForceNew: true, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "scheduling": { - Type: resource_dataproc_job_schema.TypeList, - Description: "Optional. 
Job scheduling configuration.", - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: &resource_dataproc_job_schema.Resource{ - Schema: map[string]*resource_dataproc_job_schema.Schema{ - "max_failures_per_hour": { - Type: resource_dataproc_job_schema.TypeInt, - Description: "Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed.", - Required: true, - ForceNew: true, - ValidateFunc: resource_dataproc_job_validation.IntAtMost(10), - }, - "max_failures_total": { - Type: resource_dataproc_job_schema.TypeInt, - Description: "Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed.", - Required: true, - ForceNew: true, - ValidateFunc: resource_dataproc_job_validation.IntAtMost(240), - }, - }, - }, - }, - - "pyspark_config": pySparkSchema, - "spark_config": sparkSchema, - "hadoop_config": hadoopSchema, - "hive_config": hiveSchema, - "pig_config": pigSchema, - "sparksql_config": sparkSqlSchema, - }, - UseJSONNumber: true, - } -} - -func resourceDataprocJobUpdate(d *resource_dataproc_job_schema.ResourceData, meta interface{}) error { - - return resourceDataprocJobRead(d, meta) -} - -func resourceDataprocJobCreate(d *resource_dataproc_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - clusterName := d.Get("placement.0.cluster_name").(string) - region := d.Get("region").(string) - - submitReq := &resource_dataproc_job_dataproc.SubmitJobRequest{ - Job: &resource_dataproc_job_dataproc.Job{ - Placement: &resource_dataproc_job_dataproc.JobPlacement{ - ClusterName: clusterName, - }, - Reference: &resource_dataproc_job_dataproc.JobReference{ - ProjectId: project, - }, - }, - } - - if v, ok := 
d.GetOk("reference.0.job_id"); ok { - submitReq.Job.Reference.JobId = v.(string) - } - - if v, ok := d.GetOk("scheduling"); ok { - config := extractFirstMapConfig(v.([]interface{})) - submitReq.Job.Scheduling = expandJobScheduling(config) - } - - if _, ok := d.GetOk("labels"); ok { - submitReq.Job.Labels = expandLabels(d) - } - - if v, ok := d.GetOk("pyspark_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) - submitReq.Job.PysparkJob = expandPySparkJob(config) - } - - if v, ok := d.GetOk("spark_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) - submitReq.Job.SparkJob = expandSparkJob(config) - } - - if v, ok := d.GetOk("hadoop_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) - submitReq.Job.HadoopJob = expandHadoopJob(config) - } - - if v, ok := d.GetOk("hive_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) - submitReq.Job.HiveJob = expandHiveJob(config) - } - - if v, ok := d.GetOk("pig_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) - submitReq.Job.PigJob = expandPigJob(config) - } - - if v, ok := d.GetOk("sparksql_config"); ok { - config := extractFirstMapConfig(v.([]interface{})) - submitReq.Job.SparkSqlJob = expandSparkSqlJob(config) - } - - job, err := config.NewDataprocClient(userAgent).Projects.Regions.Jobs.Submit( - project, region, submitReq).Do() - if err != nil { - return err - } - d.SetId(resource_dataproc_job_fmt.Sprintf("projects/%s/regions/%s/jobs/%s", project, region, job.Reference.JobId)) - - waitErr := dataprocJobOperationWait(config, region, project, job.Reference.JobId, - "Creating Dataproc job", userAgent, d.Timeout(resource_dataproc_job_schema.TimeoutCreate)) - if waitErr != nil { - return waitErr - } - - resource_dataproc_job_log.Printf("[INFO] Dataproc job %s has been submitted", job.Reference.JobId) - return resourceDataprocJobRead(d, meta) -} - -func resourceDataprocJobRead(d *resource_dataproc_job_schema.ResourceData, meta interface{}) error { - 
config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - region := d.Get("region").(string) - - project, err := getProject(d, config) - if err != nil { - return err - } - - parts := resource_dataproc_job_strings.Split(d.Id(), "/") - jobId := parts[len(parts)-1] - job, err := config.NewDataprocClient(userAgent).Projects.Regions.Jobs.Get( - project, region, jobId).Do() - if err != nil { - return handleNotFoundError(err, d, resource_dataproc_job_fmt.Sprintf("Dataproc Job %q", jobId)) - } - - if err := d.Set("force_delete", d.Get("force_delete")); err != nil { - return resource_dataproc_job_fmt.Errorf("Error setting force_delete: %s", err) - } - if err := d.Set("labels", job.Labels); err != nil { - return resource_dataproc_job_fmt.Errorf("Error setting labels: %s", err) - } - if err := d.Set("driver_output_resource_uri", job.DriverOutputResourceUri); err != nil { - return resource_dataproc_job_fmt.Errorf("Error setting driver_output_resource_uri: %s", err) - } - if err := d.Set("driver_controls_files_uri", job.DriverControlFilesUri); err != nil { - return resource_dataproc_job_fmt.Errorf("Error setting driver_controls_files_uri: %s", err) - } - - if err := d.Set("placement", flattenJobPlacement(job.Placement)); err != nil { - return resource_dataproc_job_fmt.Errorf("Error setting placement: %s", err) - } - if err := d.Set("status", flattenJobStatus(job.Status)); err != nil { - return resource_dataproc_job_fmt.Errorf("Error setting status: %s", err) - } - if err := d.Set("reference", flattenJobReference(job.Reference)); err != nil { - return resource_dataproc_job_fmt.Errorf("Error setting reference: %s", err) - } - if err := d.Set("scheduling", flattenJobScheduling(job.Scheduling)); err != nil { - return resource_dataproc_job_fmt.Errorf("Error setting reference: %s", err) - } - if err := d.Set("project", project); err != nil { - return resource_dataproc_job_fmt.Errorf("Error setting project: %s", err) 
- } - - if job.PysparkJob != nil { - if err := d.Set("pyspark_config", flattenPySparkJob(job.PysparkJob)); err != nil { - return resource_dataproc_job_fmt.Errorf("Error setting pyspark_config: %s", err) - } - } - if job.SparkJob != nil { - if err := d.Set("spark_config", flattenSparkJob(job.SparkJob)); err != nil { - return resource_dataproc_job_fmt.Errorf("Error setting spark_config: %s", err) - } - } - if job.HadoopJob != nil { - if err := d.Set("hadoop_config", flattenHadoopJob(job.HadoopJob)); err != nil { - return resource_dataproc_job_fmt.Errorf("Error setting hadoop_config: %s", err) - } - } - if job.HiveJob != nil { - if err := d.Set("hive_config", flattenHiveJob(job.HiveJob)); err != nil { - return resource_dataproc_job_fmt.Errorf("Error setting hive_config: %s", err) - } - } - if job.PigJob != nil { - if err := d.Set("pig_config", flattenPigJob(job.PigJob)); err != nil { - return resource_dataproc_job_fmt.Errorf("Error setting pig_config: %s", err) - } - } - if job.SparkSqlJob != nil { - if err := d.Set("sparksql_config", flattenSparkSqlJob(job.SparkSqlJob)); err != nil { - return resource_dataproc_job_fmt.Errorf("Error setting sparksql_config: %s", err) - } - } - return nil -} - -func resourceDataprocJobDelete(d *resource_dataproc_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - region := d.Get("region").(string) - forceDelete := d.Get("force_delete").(bool) - - parts := resource_dataproc_job_strings.Split(d.Id(), "/") - jobId := parts[len(parts)-1] - if forceDelete { - resource_dataproc_job_log.Printf("[DEBUG] Attempting to first cancel Dataproc job %s if it's still running ...", d.Id()) - - _, _ = config.NewDataprocClient(userAgent).Projects.Regions.Jobs.Cancel(project, region, jobId, &resource_dataproc_job_dataproc.CancelJobRequest{}).Do() - - 
waitErr := dataprocJobOperationWait(config, region, project, jobId, - "Cancelling Dataproc job", userAgent, d.Timeout(resource_dataproc_job_schema.TimeoutDelete)) - if waitErr != nil { - return waitErr - } - - } - - resource_dataproc_job_log.Printf("[DEBUG] Deleting Dataproc job %s", d.Id()) - _, err = config.NewDataprocClient(userAgent).Projects.Regions.Jobs.Delete( - project, region, jobId).Do() - if err != nil { - return err - } - - waitErr := dataprocDeleteOperationWait(config, region, project, jobId, - "Deleting Dataproc job", userAgent, d.Timeout(resource_dataproc_job_schema.TimeoutDelete)) - if waitErr != nil { - return waitErr - } - - resource_dataproc_job_log.Printf("[INFO] Dataproc job %s has been deleted", d.Id()) - d.SetId("") - - return nil -} - -var loggingConfig = &resource_dataproc_job_schema.Schema{ - Type: resource_dataproc_job_schema.TypeList, - Description: "The runtime logging config of the job", - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &resource_dataproc_job_schema.Resource{ - Schema: map[string]*resource_dataproc_job_schema.Schema{ - "driver_log_levels": { - Type: resource_dataproc_job_schema.TypeMap, - Description: "Optional. The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. 
Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'.", - Required: true, - ForceNew: true, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - }, - }, -} - -var pySparkSchema = &resource_dataproc_job_schema.Schema{ - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Description: `The config of pySpark job.`, - ExactlyOneOf: []string{"pyspark_config", "spark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, - Elem: &resource_dataproc_job_schema.Resource{ - Schema: map[string]*resource_dataproc_job_schema.Schema{ - "main_python_file_uri": { - Type: resource_dataproc_job_schema.TypeString, - Description: "Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file", - Required: true, - ForceNew: true, - }, - - "args": { - Type: resource_dataproc_job_schema.TypeList, - Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission", - Optional: true, - ForceNew: true, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "python_file_uris": { - Type: resource_dataproc_job_schema.TypeList, - Description: "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip", - Optional: true, - ForceNew: true, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "jar_file_uris": { - Type: resource_dataproc_job_schema.TypeList, - Description: "Optional. 
HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks", - Optional: true, - ForceNew: true, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "file_uris": { - Type: resource_dataproc_job_schema.TypeList, - Description: "Optional. HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks", - Optional: true, - ForceNew: true, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "archive_uris": { - Type: resource_dataproc_job_schema.TypeList, - Description: "Optional. HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip", - Optional: true, - ForceNew: true, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "properties": { - Type: resource_dataproc_job_schema.TypeMap, - Description: "Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. 
Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code", - Optional: true, - ForceNew: true, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "logging_config": loggingConfig, - }, - }, -} - -func flattenPySparkJob(job *resource_dataproc_job_dataproc.PySparkJob) []map[string]interface{} { - return []map[string]interface{}{ - { - "main_python_file_uri": job.MainPythonFileUri, - "args": job.Args, - "python_file_uris": job.PythonFileUris, - "jar_file_uris": job.JarFileUris, - "file_uris": job.FileUris, - "archive_uris": job.ArchiveUris, - "properties": job.Properties, - "logging_config": flattenLoggingConfig(job.LoggingConfig), - }, - } -} - -func expandPySparkJob(config map[string]interface{}) *resource_dataproc_job_dataproc.PySparkJob { - job := &resource_dataproc_job_dataproc.PySparkJob{} - if v, ok := config["main_python_file_uri"]; ok { - job.MainPythonFileUri = v.(string) - } - if v, ok := config["args"]; ok { - job.Args = convertStringArr(v.([]interface{})) - } - if v, ok := config["python_file_uris"]; ok { - job.PythonFileUris = convertStringArr(v.([]interface{})) - } - if v, ok := config["jar_file_uris"]; ok { - job.JarFileUris = convertStringArr(v.([]interface{})) - } - if v, ok := config["file_uris"]; ok { - job.FileUris = convertStringArr(v.([]interface{})) - } - if v, ok := config["archive_uris"]; ok { - job.ArchiveUris = convertStringArr(v.([]interface{})) - } - if v, ok := config["properties"]; ok { - job.Properties = convertStringMap(v.(map[string]interface{})) - } - if v, ok := config["logging_config"]; ok { - config := extractFirstMapConfig(v.([]interface{})) - job.LoggingConfig = expandLoggingConfig(config) - } - - return job - -} - -func expandJobScheduling(config map[string]interface{}) *resource_dataproc_job_dataproc.JobScheduling { - jobScheduling := &resource_dataproc_job_dataproc.JobScheduling{} - if v, ok := config["max_failures_per_hour"]; ok { - 
jobScheduling.MaxFailuresPerHour = int64(v.(int)) - } - if v, ok := config["max_failures_total"]; ok { - jobScheduling.MaxFailuresTotal = int64(v.(int)) - } - return jobScheduling -} - -var sparkSchema = &resource_dataproc_job_schema.Schema{ - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Description: `The config of the Spark job.`, - ExactlyOneOf: []string{"pyspark_config", "spark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, - Elem: &resource_dataproc_job_schema.Resource{ - Schema: map[string]*resource_dataproc_job_schema.Schema{ - - "main_class": { - Type: resource_dataproc_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The class containing the main method of the driver. Must be in a provided jar or jar that is already on the classpath. Conflicts with main_jar_file_uri`, - ExactlyOneOf: []string{"spark_config.0.main_class", "spark_config.0.main_jar_file_uri"}, - }, - - "main_jar_file_uri": { - Type: resource_dataproc_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The HCFS URI of jar file containing the driver jar. 
Conflicts with main_class`, - ExactlyOneOf: []string{"spark_config.0.main_jar_file_uri", "spark_config.0.main_class"}, - }, - - "args": { - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The arguments to pass to the driver.`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "jar_file_uris": { - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "file_uris": { - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "archive_uris": { - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "properties": { - Type: resource_dataproc_job_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. 
Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "logging_config": loggingConfig, - }, - }, -} - -func flattenSparkJob(job *resource_dataproc_job_dataproc.SparkJob) []map[string]interface{} { - return []map[string]interface{}{ - { - "main_class": job.MainClass, - "main_jar_file_uri": job.MainJarFileUri, - "args": job.Args, - "jar_file_uris": job.JarFileUris, - "file_uris": job.FileUris, - "archive_uris": job.ArchiveUris, - "properties": job.Properties, - "logging_config": flattenLoggingConfig(job.LoggingConfig), - }, - } -} - -func expandSparkJob(config map[string]interface{}) *resource_dataproc_job_dataproc.SparkJob { - job := &resource_dataproc_job_dataproc.SparkJob{} - if v, ok := config["main_class"]; ok { - job.MainClass = v.(string) - } - if v, ok := config["main_jar_file_uri"]; ok { - job.MainJarFileUri = v.(string) - } - - if v, ok := config["args"]; ok { - job.Args = convertStringArr(v.([]interface{})) - } - if v, ok := config["jar_file_uris"]; ok { - job.JarFileUris = convertStringArr(v.([]interface{})) - } - if v, ok := config["file_uris"]; ok { - job.FileUris = convertStringArr(v.([]interface{})) - } - if v, ok := config["archive_uris"]; ok { - job.ArchiveUris = convertStringArr(v.([]interface{})) - } - if v, ok := config["properties"]; ok { - job.Properties = convertStringMap(v.(map[string]interface{})) - } - if v, ok := config["logging_config"]; ok { - config := extractFirstMapConfig(v.([]interface{})) - job.LoggingConfig = expandLoggingConfig(config) - } - - return job - -} - -var hadoopSchema = &resource_dataproc_job_schema.Schema{ - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Description: `The config of Hadoop job`, - ExactlyOneOf: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, - Elem: 
&resource_dataproc_job_schema.Resource{ - Schema: map[string]*resource_dataproc_job_schema.Schema{ - - "main_class": { - Type: resource_dataproc_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The class containing the main method of the driver. Must be in a provided jar or jar that is already on the classpath. Conflicts with main_jar_file_uri`, - ExactlyOneOf: []string{"hadoop_config.0.main_jar_file_uri", "hadoop_config.0.main_class"}, - }, - - "main_jar_file_uri": { - Type: resource_dataproc_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The HCFS URI of jar file containing the driver jar. Conflicts with main_class`, - ExactlyOneOf: []string{"hadoop_config.0.main_jar_file_uri", "hadoop_config.0.main_class"}, - }, - - "args": { - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The arguments to pass to the driver.`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "jar_file_uris": { - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "file_uris": { - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. 
Useful for naively parallel tasks.`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "archive_uris": { - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "properties": { - Type: resource_dataproc_job_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "logging_config": loggingConfig, - }, - }, -} - -func flattenHadoopJob(job *resource_dataproc_job_dataproc.HadoopJob) []map[string]interface{} { - return []map[string]interface{}{ - { - "main_class": job.MainClass, - "main_jar_file_uri": job.MainJarFileUri, - "args": job.Args, - "jar_file_uris": job.JarFileUris, - "file_uris": job.FileUris, - "archive_uris": job.ArchiveUris, - "properties": job.Properties, - "logging_config": flattenLoggingConfig(job.LoggingConfig), - }, - } -} - -func expandHadoopJob(config map[string]interface{}) *resource_dataproc_job_dataproc.HadoopJob { - job := &resource_dataproc_job_dataproc.HadoopJob{} - if v, ok := config["main_class"]; ok { - job.MainClass = v.(string) - } - if v, ok := config["main_jar_file_uri"]; ok { - job.MainJarFileUri = v.(string) - } - - if v, ok := config["args"]; ok { - job.Args = convertStringArr(v.([]interface{})) - } - if v, ok := config["jar_file_uris"]; ok { - job.JarFileUris = convertStringArr(v.([]interface{})) - } - if v, ok := config["file_uris"]; ok { - job.FileUris = 
convertStringArr(v.([]interface{})) - } - if v, ok := config["archive_uris"]; ok { - job.ArchiveUris = convertStringArr(v.([]interface{})) - } - if v, ok := config["properties"]; ok { - job.Properties = convertStringMap(v.(map[string]interface{})) - } - if v, ok := config["logging_config"]; ok { - config := extractFirstMapConfig(v.([]interface{})) - job.LoggingConfig = expandLoggingConfig(config) - } - - return job - -} - -var hiveSchema = &resource_dataproc_job_schema.Schema{ - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Description: `The config of hive job`, - ExactlyOneOf: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, - Elem: &resource_dataproc_job_schema.Resource{ - Schema: map[string]*resource_dataproc_job_schema.Schema{ - - "query_list": { - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The list of Hive queries or statements to execute as part of the job. Conflicts with query_file_uri`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - ExactlyOneOf: []string{"hive_config.0.query_file_uri", "hive_config.0.query_list"}, - }, - - "query_file_uri": { - Type: resource_dataproc_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `HCFS URI of file containing Hive script to execute as the job. Conflicts with query_list`, - ExactlyOneOf: []string{"hive_config.0.query_file_uri", "hive_config.0.query_list"}, - }, - - "continue_on_failure": { - Type: resource_dataproc_job_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. 
Defaults to false.`, - }, - - "script_variables": { - Type: resource_dataproc_job_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Mapping of query variable names to values (equivalent to the Hive command: SET name="value";).`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "properties": { - Type: resource_dataproc_job_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "jar_file_uris": { - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. 
Can contain Hive SerDes and UDFs.`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - }, - }, -} - -func flattenHiveJob(job *resource_dataproc_job_dataproc.HiveJob) []map[string]interface{} { - queries := []string{} - if job.QueryList != nil { - queries = job.QueryList.Queries - } - return []map[string]interface{}{ - { - "query_list": queries, - "query_file_uri": job.QueryFileUri, - "continue_on_failure": job.ContinueOnFailure, - "script_variables": job.ScriptVariables, - "properties": job.Properties, - "jar_file_uris": job.JarFileUris, - }, - } -} - -func expandHiveJob(config map[string]interface{}) *resource_dataproc_job_dataproc.HiveJob { - job := &resource_dataproc_job_dataproc.HiveJob{} - if v, ok := config["query_file_uri"]; ok { - job.QueryFileUri = v.(string) - } - if v, ok := config["query_list"]; ok { - job.QueryList = &resource_dataproc_job_dataproc.QueryList{ - Queries: convertStringArr(v.([]interface{})), - } - } - if v, ok := config["continue_on_failure"]; ok { - job.ContinueOnFailure = v.(bool) - } - if v, ok := config["script_variables"]; ok { - job.ScriptVariables = convertStringMap(v.(map[string]interface{})) - } - if v, ok := config["jar_file_uris"]; ok { - job.JarFileUris = convertStringArr(v.([]interface{})) - } - if v, ok := config["properties"]; ok { - job.Properties = convertStringMap(v.(map[string]interface{})) - } - - return job -} - -var pigSchema = &resource_dataproc_job_schema.Schema{ - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Description: `The config of pag job.`, - ExactlyOneOf: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, - Elem: &resource_dataproc_job_schema.Resource{ - Schema: map[string]*resource_dataproc_job_schema.Schema{ - - "query_list": { - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The list of Hive 
queries or statements to execute as part of the job. Conflicts with query_file_uri`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - ExactlyOneOf: []string{"pig_config.0.query_file_uri", "pig_config.0.query_list"}, - }, - - "query_file_uri": { - Type: resource_dataproc_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `HCFS URI of file containing Hive script to execute as the job. Conflicts with query_list`, - ExactlyOneOf: []string{"pig_config.0.query_file_uri", "pig_config.0.query_list"}, - }, - - "continue_on_failure": { - Type: resource_dataproc_job_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. Defaults to false.`, - }, - - "script_variables": { - Type: resource_dataproc_job_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Mapping of query variable names to values (equivalent to the Pig command: name=[value]).`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "properties": { - Type: resource_dataproc_job_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "jar_file_uris": { - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. 
Can contain Pig UDFs.`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "logging_config": loggingConfig, - }, - }, -} - -func flattenPigJob(job *resource_dataproc_job_dataproc.PigJob) []map[string]interface{} { - queries := []string{} - if job.QueryList != nil { - queries = job.QueryList.Queries - } - return []map[string]interface{}{ - { - "query_list": queries, - "query_file_uri": job.QueryFileUri, - "continue_on_failure": job.ContinueOnFailure, - "script_variables": job.ScriptVariables, - "properties": job.Properties, - "jar_file_uris": job.JarFileUris, - }, - } -} - -func expandPigJob(config map[string]interface{}) *resource_dataproc_job_dataproc.PigJob { - job := &resource_dataproc_job_dataproc.PigJob{} - if v, ok := config["query_file_uri"]; ok { - job.QueryFileUri = v.(string) - } - if v, ok := config["query_list"]; ok { - job.QueryList = &resource_dataproc_job_dataproc.QueryList{ - Queries: convertStringArr(v.([]interface{})), - } - } - if v, ok := config["continue_on_failure"]; ok { - job.ContinueOnFailure = v.(bool) - } - if v, ok := config["script_variables"]; ok { - job.ScriptVariables = convertStringMap(v.(map[string]interface{})) - } - if v, ok := config["jar_file_uris"]; ok { - job.JarFileUris = convertStringArr(v.([]interface{})) - } - if v, ok := config["properties"]; ok { - job.Properties = convertStringMap(v.(map[string]interface{})) - } - - return job - -} - -var sparkSqlSchema = &resource_dataproc_job_schema.Schema{ - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Description: `The config of SparkSql job`, - ExactlyOneOf: []string{"spark_config", "pyspark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config"}, - Elem: &resource_dataproc_job_schema.Resource{ - Schema: map[string]*resource_dataproc_job_schema.Schema{ - - "query_list": { - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - 
Description: `The list of SQL queries or statements to execute as part of the job. Conflicts with query_file_uri`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - ExactlyOneOf: []string{"sparksql_config.0.query_file_uri", "sparksql_config.0.query_list"}, - }, - - "query_file_uri": { - Type: resource_dataproc_job_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The HCFS URI of the script that contains SQL queries. Conflicts with query_list`, - ExactlyOneOf: []string{"sparksql_config.0.query_file_uri", "sparksql_config.0.query_list"}, - }, - - "script_variables": { - Type: resource_dataproc_job_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "properties": { - Type: resource_dataproc_job_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `A mapping of property names to values, used to configure Spark SQL's SparkConf. 
Properties that conflict with values set by the Cloud Dataproc API may be overwritten.`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "jar_file_uris": { - Type: resource_dataproc_job_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `HCFS URIs of jar files to be added to the Spark CLASSPATH.`, - Elem: &resource_dataproc_job_schema.Schema{Type: resource_dataproc_job_schema.TypeString}, - }, - - "logging_config": loggingConfig, - }, - }, -} - -func flattenSparkSqlJob(job *resource_dataproc_job_dataproc.SparkSqlJob) []map[string]interface{} { - queries := []string{} - if job.QueryList != nil { - queries = job.QueryList.Queries - } - return []map[string]interface{}{ - { - "query_list": queries, - "query_file_uri": job.QueryFileUri, - "script_variables": job.ScriptVariables, - "properties": job.Properties, - "jar_file_uris": job.JarFileUris, - }, - } -} - -func expandSparkSqlJob(config map[string]interface{}) *resource_dataproc_job_dataproc.SparkSqlJob { - job := &resource_dataproc_job_dataproc.SparkSqlJob{} - if v, ok := config["query_file_uri"]; ok { - job.QueryFileUri = v.(string) - } - if v, ok := config["query_list"]; ok { - job.QueryList = &resource_dataproc_job_dataproc.QueryList{ - Queries: convertStringArr(v.([]interface{})), - } - } - if v, ok := config["script_variables"]; ok { - job.ScriptVariables = convertStringMap(v.(map[string]interface{})) - } - if v, ok := config["jar_file_uris"]; ok { - job.JarFileUris = convertStringArr(v.([]interface{})) - } - if v, ok := config["properties"]; ok { - job.Properties = convertStringMap(v.(map[string]interface{})) - } - - return job - -} - -func expandLoggingConfig(config map[string]interface{}) *resource_dataproc_job_dataproc.LoggingConfig { - conf := &resource_dataproc_job_dataproc.LoggingConfig{} - if v, ok := config["driver_log_levels"]; ok { - conf.DriverLogLevels = convertStringMap(v.(map[string]interface{})) - } - return conf -} - -func 
flattenLoggingConfig(l *resource_dataproc_job_dataproc.LoggingConfig) []map[string]interface{} { - return []map[string]interface{}{ - { - "driver_log_levels": l.DriverLogLevels, - }, - } -} - -func flattenJobReference(r *resource_dataproc_job_dataproc.JobReference) []map[string]interface{} { - return []map[string]interface{}{ - { - "job_id": r.JobId, - }, - } -} - -func flattenJobScheduling(r *resource_dataproc_job_dataproc.JobScheduling) []map[string]interface{} { - jobScheduling := []map[string]interface{}{} - - if r != nil { - jobScheduling = append(jobScheduling, - map[string]interface{}{ - "max_failures_per_hour": r.MaxFailuresPerHour, - "max_failures_total": r.MaxFailuresTotal, - }) - } - return jobScheduling -} - -func flattenJobStatus(s *resource_dataproc_job_dataproc.JobStatus) []map[string]interface{} { - return []map[string]interface{}{ - { - "state": s.State, - "details": s.Details, - "state_start_time": s.StateStartTime, - "substate": s.Substate, - }, - } -} - -func flattenJobPlacement(jp *resource_dataproc_job_dataproc.JobPlacement) []map[string]interface{} { - return []map[string]interface{}{ - { - "cluster_name": jp.ClusterName, - "cluster_uuid": jp.ClusterUuid, - }, - } -} - -func resourceDataprocWorkflowTemplate() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Create: resourceDataprocWorkflowTemplateCreate, - Read: resourceDataprocWorkflowTemplateRead, - Delete: resourceDataprocWorkflowTemplateDelete, - - Importer: &resource_dataproc_workflow_template_schema.ResourceImporter{ - State: resourceDataprocWorkflowTemplateImport, - }, - - Timeouts: &resource_dataproc_workflow_template_schema.ResourceTimeout{ - Create: resource_dataproc_workflow_template_schema.DefaultTimeout(10 * resource_dataproc_workflow_template_time.Minute), - Delete: resource_dataproc_workflow_template_schema.DefaultTimeout(10 * resource_dataproc_workflow_template_time.Minute), - }, - - Schema: 
map[string]*resource_dataproc_workflow_template_schema.Schema{ - "jobs": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Required: true, - ForceNew: true, - Description: "Required. The Directed Acyclic Graph of Jobs to submit.", - Elem: DataprocWorkflowTemplateJobsSchema(), - }, - - "location": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Required: true, - ForceNew: true, - Description: "The location for the resource", - }, - - "name": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Required: true, - ForceNew: true, - Description: "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For `projects.regions.workflowTemplates`, the resource name of the template has the following format: `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` * For `projects.locations.workflowTemplates`, the resource name of the template has the following format: `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`", - }, - - "placement": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Required: true, - ForceNew: true, - Description: "Required. WorkflowTemplate scheduling information.", - MaxItems: 1, - Elem: DataprocWorkflowTemplatePlacementSchema(), - }, - - "dag_timeout": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. Timeout duration for the DAG of jobs, expressed in seconds (see [JSON representation of duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes (\"600s\") to 24 hours (\"86400s\"). The timer begins when the first job is submitted. 
If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a [managed cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), the cluster is deleted.", - }, - - "labels": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a template.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "parameters": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.", - Elem: DataprocWorkflowTemplateParametersSchema(), - }, - - "project": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The project for the resource", - }, - - "version": { - Type: resource_dataproc_workflow_template_schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: "Output only. 
The current version of this workflow template.", - Deprecated: "version is not useful as a configurable field, and will be removed in the future.", - }, - - "create_time": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Computed: true, - Description: "Output only. The time template was created.", - }, - - "update_time": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Computed: true, - Description: "Output only. The time template was last updated.", - }, - }, - } -} - -func DataprocWorkflowTemplateJobsSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "step_id": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Required: true, - ForceNew: true, - Description: "Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job `goog-dataproc-workflow-step-id` label, and in prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", - }, - - "hadoop_job": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Job is a Hadoop job.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsHadoopJobSchema(), - }, - - "hive_job": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Job is a Hive job.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsHiveJobSchema(), - }, - - "labels": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "Optional. The labels to associate with this job. 
Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given job.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "pig_job": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Job is a Pig job.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsPigJobSchema(), - }, - - "prerequisite_step_ids": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "presto_job": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Job is a Presto job.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsPrestoJobSchema(), - }, - - "pyspark_job": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Job is a PySpark job.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsPysparkJobSchema(), - }, - - "scheduling": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Job scheduling configuration.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsSchedulingSchema(), - }, - - "spark_job": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. 
Job is a Spark job.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsSparkJobSchema(), - }, - - "spark_r_job": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Job is a SparkR job.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsSparkRJobSchema(), - }, - - "spark_sql_job": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Job is a SparkSql job.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsSparkSqlJobSchema(), - }, - }, - } -} - -func DataprocWorkflowTemplateJobsHadoopJobSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "archive_uris": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "args": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "file_uris": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. 
HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "jar_file_uris": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "logging_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. The runtime log config for job execution.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsHadoopJobLoggingConfigSchema(), - }, - - "main_class": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jar_file_uris`.", - }, - - "main_jar_file_uri": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", - }, - - "properties": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. 
Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsHadoopJobLoggingConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "driver_log_levels": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsHiveJobSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "continue_on_failure": { - Type: resource_dataproc_workflow_template_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.", - }, - - "jar_file_uris": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. 
Can contain Hive SerDes and UDFs.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "properties": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "query_file_uri": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The HCFS URI of the script that contains Hive queries.", - }, - - "query_list": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "A list of queries.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsHiveJobQueryListSchema(), - }, - - "script_variables": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "Optional. Mapping of query variable names to values (equivalent to the Hive command: `SET name=\"value\";`).", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsHiveJobQueryListSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "queries": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Required: true, - ForceNew: true, - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. 
Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsPigJobSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "continue_on_failure": { - Type: resource_dataproc_workflow_template_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.", - }, - - "jar_file_uris": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "logging_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. The runtime log config for job execution.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsPigJobLoggingConfigSchema(), - }, - - "properties": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. 
Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "query_file_uri": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The HCFS URI of the script that contains the Pig queries.", - }, - - "query_list": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "A list of queries.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsPigJobQueryListSchema(), - }, - - "script_variables": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "Optional. Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`).", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsPigJobLoggingConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "driver_log_levels": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. 
Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsPigJobQueryListSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "queries": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Required: true, - ForceNew: true, - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsPrestoJobSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "client_tags": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Presto client tags to attach to this query", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "continue_on_failure": { - Type: resource_dataproc_workflow_template_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. 
Setting to `true` can be useful when executing independent parallel queries.", - }, - - "logging_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. The runtime log config for job execution.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsPrestoJobLoggingConfigSchema(), - }, - - "output_format": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats", - }, - - "properties": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "Optional. A mapping of property names to values. Used to set Presto [session properties](https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "query_file_uri": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The HCFS URI of the script that contains SQL queries.", - }, - - "query_list": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "A list of queries.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsPrestoJobQueryListSchema(), - }, - }, - } -} - -func DataprocWorkflowTemplateJobsPrestoJobLoggingConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "driver_log_levels": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "The per-package log levels for the driver. 
This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsPrestoJobQueryListSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "queries": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Required: true, - ForceNew: true, - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsPysparkJobSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "main_python_file_uri": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Required: true, - ForceNew: true, - Description: "Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.", - }, - - "archive_uris": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. 
Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "args": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "file_uris": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "jar_file_uris": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "logging_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. The runtime log config for job execution.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsPysparkJobLoggingConfigSchema(), - }, - - "properties": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. 
Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "python_file_uris": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsPysparkJobLoggingConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "driver_log_levels": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsSchedulingSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "max_failures_per_hour": { - Type: resource_dataproc_workflow_template_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: "Optional. Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. 
Maximum value is 10.", - }, - - "max_failures_total": { - Type: resource_dataproc_workflow_template_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: "Optional. Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240.", - }, - }, - } -} - -func DataprocWorkflowTemplateJobsSparkJobSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "archive_uris": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "args": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "file_uris": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "jar_file_uris": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. 
HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "logging_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. The runtime log config for job execution.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsSparkJobLoggingConfigSchema(), - }, - - "main_class": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jar_file_uris`.", - }, - - "main_jar_file_uri": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The HCFS URI of the jar file that contains the main class.", - }, - - "properties": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsSparkJobLoggingConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "driver_log_levels": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. 
Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsSparkRJobSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "main_r_file_uri": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Required: true, - ForceNew: true, - Description: "Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.", - }, - - "archive_uris": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "args": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "file_uris": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. 
Useful for naively parallel tasks.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "logging_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. The runtime log config for job execution.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsSparkRJobLoggingConfigSchema(), - }, - - "properties": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsSparkRJobLoggingConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "driver_log_levels": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. 
Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsSparkSqlJobSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "jar_file_uris": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "logging_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. The runtime log config for job execution.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsSparkSqlJobLoggingConfigSchema(), - }, - - "properties": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. 
Properties that conflict with values set by the Dataproc API may be overwritten.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "query_file_uri": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The HCFS URI of the script that contains SQL queries.", - }, - - "query_list": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "A list of queries.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateJobsSparkSqlJobQueryListSchema(), - }, - - "script_variables": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name=\"value\";`).", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsSparkSqlJobLoggingConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "driver_log_levels": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. 
Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateJobsSparkSqlJobQueryListSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "queries": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Required: true, - ForceNew: true, - Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplatePlacementSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "cluster_selector": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. A selector that chooses target cluster for jobs based on metadata. 
The selector is evaluated at the time each job is submitted.", - MaxItems: 1, - Elem: DataprocWorkflowTemplatePlacementClusterSelectorSchema(), - }, - - "managed_cluster": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "A cluster that is managed by the workflow.", - MaxItems: 1, - Elem: DataprocWorkflowTemplatePlacementManagedClusterSchema(), - }, - }, - } -} - -func DataprocWorkflowTemplatePlacementClusterSelectorSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "cluster_labels": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Required: true, - ForceNew: true, - Description: "Required. The cluster labels. Cluster must have all labels to match.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "zone": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: "Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used.", - }, - }, - } -} - -func DataprocWorkflowTemplatePlacementManagedClusterSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "cluster_name": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Required: true, - ForceNew: true, - Description: "Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. 
Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.", - }, - - "config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Required: true, - ForceNew: true, - Description: "Required. The cluster configuration.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateClusterClusterConfigSchema(), - }, - - "labels": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given cluster.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateParametersSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "fields": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Required: true, - ForceNew: true, - Description: "Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as `placement.clusterSelector.zone`. 
Also, field paths can reference fields using the following syntax: * Values in maps can be referenced by key: * labels['key'] * placement.clusterSelector.clusterLabels['key'] * placement.managedCluster.labels['key'] * placement.clusterSelector.clusterLabels['key'] * jobs['step-id'].labels['key'] * Jobs in the jobs list can be referenced by step-id: * jobs['step-id'].hadoopJob.mainJarFileUri * jobs['step-id'].hiveJob.queryFileUri * jobs['step-id'].pySparkJob.mainPythonFileUri * jobs['step-id'].hadoopJob.jarFileUris[0] * jobs['step-id'].hadoopJob.archiveUris[0] * jobs['step-id'].hadoopJob.fileUris[0] * jobs['step-id'].pySparkJob.pythonFileUris[0] * Items in repeated fields can be referenced by a zero-based index: * jobs['step-id'].sparkJob.args[0] * Other examples: * jobs['step-id'].hadoopJob.properties['key'] * jobs['step-id'].hadoopJob.args[0] * jobs['step-id'].hiveJob.scriptVariables['key'] * jobs['step-id'].hadoopJob.mainJarFileUri * placement.clusterSelector.zone It may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: - placement.clusterSelector.clusterLabels - jobs['step-id'].sparkJob.args", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "name": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Required: true, - ForceNew: true, - Description: "Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. 
The maximum length is 40 characters.", - }, - - "description": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. Brief description of the parameter. Must not exceed 1024 characters.", - }, - - "validation": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Validation rules to be applied to this parameter's value.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateParametersValidationSchema(), - }, - }, - } -} - -func DataprocWorkflowTemplateParametersValidationSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "regex": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Validation based on regular expressions.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateParametersValidationRegexSchema(), - }, - - "values": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Validation based on a list of allowed values.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateParametersValidationValuesSchema(), - }, - }, - } -} - -func DataprocWorkflowTemplateParametersValidationRegexSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "regexes": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Required: true, - ForceNew: true, - Description: "Required. RE2 regular expressions used to validate the parameter's value. 
The value must match the regex in its entirety (substring matches are not sufficient).", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateParametersValidationValuesSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "values": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Required: true, - ForceNew: true, - Description: "Required. List of allowed values for the parameter.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateClusterInstanceGroupConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "accelerators": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: "Optional. The Compute Engine accelerator configuration for these instances.", - Elem: DataprocWorkflowTemplateClusterInstanceGroupConfigAcceleratorsSchema(), - }, - - "disk_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: "Optional. Disk option config settings.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateClusterInstanceGroupConfigDiskConfigSchema(), - }, - - "image": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. 
Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", - }, - - "machine_type": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", - }, - - "min_cpu_platform": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", - }, - - "num_instances": { - Type: resource_dataproc_workflow_template_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: "Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. 
For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", - }, - - "preemptibility": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", - }, - - "instance_names": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Computed: true, - Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "is_preemptible": { - Type: resource_dataproc_workflow_template_schema.TypeBool, - Computed: true, - Description: "Output only. Specifies that this instance group contains preemptible instances.", - }, - - "managed_group_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Computed: true, - Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. 
This is only used for preemptible instance groups.", - Elem: DataprocWorkflowTemplateClusterInstanceGroupConfigManagedGroupConfigSchema(), - }, - }, - } -} - -func DataprocWorkflowTemplateClusterInstanceGroupConfigAcceleratorsSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "accelerator_count": { - Type: resource_dataproc_workflow_template_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: "The number of the accelerator cards of this type exposed to this instance.", - }, - - "accelerator_type": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", - }, - }, - } -} - -func DataprocWorkflowTemplateClusterInstanceGroupConfigDiskConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "boot_disk_size_gb": { - Type: resource_dataproc_workflow_template_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: "Optional. 
Size in GB of the boot disk (default is 500GB).", - }, - - "boot_disk_type": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", - }, - - "num_local_ssds": { - Type: resource_dataproc_workflow_template_schema.TypeInt, - Computed: true, - Optional: true, - ForceNew: true, - Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", - }, - }, - } -} - -func DataprocWorkflowTemplateClusterInstanceGroupConfigManagedGroupConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "instance_group_manager_name": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Computed: true, - Description: "Output only. The name of the Instance Group Manager for this group.", - }, - - "instance_template_name": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Computed: true, - Description: "Output only. 
The name of the Instance Template used for the Managed Instance Group.", - }, - }, - } -} - -func DataprocWorkflowTemplateClusterClusterConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "autoscaling_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateClusterClusterConfigAutoscalingConfigSchema(), - }, - - "encryption_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Encryption settings for the cluster.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateClusterClusterConfigEncryptionConfigSchema(), - }, - - "endpoint_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Port/endpoint configuration for this cluster", - MaxItems: 1, - Elem: DataprocWorkflowTemplateClusterClusterConfigEndpointConfigSchema(), - }, - - "gce_cluster_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. The shared Compute Engine config settings for all instances in a cluster.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateClusterClusterConfigGceClusterConfigSchema(), - }, - - "initialization_actions": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. 
You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi", - Elem: DataprocWorkflowTemplateClusterClusterConfigInitializationActionsSchema(), - }, - - "lifecycle_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Lifecycle setting for the cluster.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateClusterClusterConfigLifecycleConfigSchema(), - }, - - "master_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: "Optional. The Compute Engine config settings for worker instances in a cluster.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateClusterInstanceGroupConfigSchema(), - }, - - "secondary_worker_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: "Optional. The Compute Engine config settings for worker instances in a cluster.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateClusterInstanceGroupConfigSchema(), - }, - - "security_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Security settings for the cluster.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateClusterClusterConfigSecurityConfigSchema(), - }, - - "software_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. 
The config settings for software inside the cluster.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateClusterClusterConfigSoftwareConfigSchema(), - }, - - "staging_bucket": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). **This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", - }, - - "temp_bucket": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. **This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", - }, - - "worker_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: "Optional. 
The Compute Engine config settings for worker instances in a cluster.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateClusterInstanceGroupConfigSchema(), - }, - }, - } -} - -func DataprocWorkflowTemplateClusterClusterConfigAutoscalingConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "policy": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "Optional. The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` Note that the policy must be in the same project and Dataproc region.", - }, - }, - } -} - -func DataprocWorkflowTemplateClusterClusterConfigEncryptionConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "gce_pd_kms_key_name": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "Optional. 
The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.", - }, - }, - } -} - -func DataprocWorkflowTemplateClusterClusterConfigEndpointConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "enable_http_port_access": { - Type: resource_dataproc_workflow_template_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: "Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false.", - }, - - "http_ports": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Computed: true, - Description: "Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateClusterClusterConfigGceClusterConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "internal_ip_only": { - Type: resource_dataproc_workflow_template_schema.TypeBool, - Computed: true, - Optional: true, - ForceNew: true, - Description: "Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. 
This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.", - }, - - "metadata": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "The Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "network": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `network_uri` nor `subnetwork_uri` is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for more information). A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default` * `projects/[project_id]/regions/global/default` * `default`", - }, - - "node_group_affinity": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Node Group Affinity for sole-tenant clusters.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateClusterClusterConfigGceClusterConfigNodeGroupAffinitySchema(), - }, - - "private_ipv6_google_access": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The type of IPv6 access for a cluster. 
Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL", - }, - - "reservation_affinity": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Reservation Affinity for consuming Zonal reservation.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateClusterClusterConfigGceClusterConfigReservationAffinitySchema(), - }, - - "service_account": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "Optional. The [Dataproc service account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see [VM Data Plane identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the [Compute Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", - }, - - "service_account_scopes": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. The URIs of service account scopes to be included in Compute Engine instances. 
The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "subnetwork": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0` * `projects/[project_id]/regions/us-east1/subnetworks/sub0` * `sub0`", - }, - - "tags": { - Type: resource_dataproc_workflow_template_schema.TypeSet, - Optional: true, - ForceNew: true, - Description: "The Compute Engine tags to add to all instances (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - Set: resource_dataproc_workflow_template_schema.HashString, - }, - - "zone": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: "Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. 
If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` * `projects/[project_id]/zones/[zone]` * `us-central1-f`", - }, - }, - } -} - -func DataprocWorkflowTemplateClusterClusterConfigGceClusterConfigNodeGroupAffinitySchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "node_group": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "Required. The URI of a sole-tenant [node group resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`", - }, - }, - } -} - -func DataprocWorkflowTemplateClusterClusterConfigGceClusterConfigReservationAffinitySchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "consume_reservation_type": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION", - }, - - "key": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. 
Corresponds to the label key of reservation resource.", - }, - - "values": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. Corresponds to the label values of reservation resource.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func DataprocWorkflowTemplateClusterClusterConfigInitializationActionsSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "executable_file": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Required. Cloud Storage URI of executable file.", - }, - - "execution_timeout": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.", - }, - }, - } -} - -func DataprocWorkflowTemplateClusterClusterConfigLifecycleConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "auto_delete_time": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. 
The time when cluster will be auto-deleted (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", - }, - - "auto_delete_ttl": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", - }, - - "idle_delete_ttl": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", - }, - - "idle_start_time": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Computed: true, - Description: "Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", - }, - }, - } -} - -func DataprocWorkflowTemplateClusterClusterConfigSecurityConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "kerberos_config": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. 
Kerberos related configuration.", - MaxItems: 1, - Elem: DataprocWorkflowTemplateClusterClusterConfigSecurityConfigKerberosConfigSchema(), - }, - }, - } -} - -func DataprocWorkflowTemplateClusterClusterConfigSecurityConfigKerberosConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "cross_realm_trust_admin_server": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", - }, - - "cross_realm_trust_kdc": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", - }, - - "cross_realm_trust_realm": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.", - }, - - "cross_realm_trust_shared_password": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.", - }, - - "enable_kerberos": { - Type: resource_dataproc_workflow_template_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: "Optional. Flag to indicate whether to Kerberize the cluster (default: false). 
Set this field to true to enable Kerberos on a cluster.", - }, - - "kdc_db_key": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.", - }, - - "key_password": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.", - }, - - "keystore": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", - }, - - "keystore_password": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.", - }, - - "kms_key": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "Optional. The uri of the KMS key used to encrypt various sensitive files.", - }, - - "realm": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.", - }, - - "root_principal_password": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. 
The Cloud Storage URI of a KMS encrypted file containing the root principal password.", - }, - - "tgt_lifetime_hours": { - Type: resource_dataproc_workflow_template_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: "Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used.", - }, - - "truststore": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", - }, - - "truststore_password": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.", - }, - }, - } -} - -func DataprocWorkflowTemplateClusterClusterConfigSoftwareConfigSchema() *resource_dataproc_workflow_template_schema.Resource { - return &resource_dataproc_workflow_template_schema.Resource{ - Schema: map[string]*resource_dataproc_workflow_template_schema.Schema{ - "image_version": { - Type: resource_dataproc_workflow_template_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The version of software inside the cluster. It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the [\"preview\" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). 
If unspecified, it defaults to the latest Debian version.", - }, - - "optional_components": { - Type: resource_dataproc_workflow_template_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "Optional. The set of components to activate on the cluster.", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - - "properties": { - Type: resource_dataproc_workflow_template_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: "Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", - Elem: &resource_dataproc_workflow_template_schema.Schema{Type: resource_dataproc_workflow_template_schema.TypeString}, - }, - }, - } -} - -func resourceDataprocWorkflowTemplateCreate(d *resource_dataproc_workflow_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplate{ - Jobs: expandDataprocWorkflowTemplateJobsArray(d.Get("jobs")), - Location: resource_dataproc_workflow_template_dcldcl.String(d.Get("location").(string)), - Name: resource_dataproc_workflow_template_dcldcl.String(d.Get("name").(string)), - Placement: expandDataprocWorkflowTemplatePlacement(d.Get("placement")), - DagTimeout: resource_dataproc_workflow_template_dcldcl.String(d.Get("dag_timeout").(string)), - Labels: 
checkStringMap(d.Get("labels")), - Parameters: expandDataprocWorkflowTemplateParametersArray(d.Get("parameters")), - Project: resource_dataproc_workflow_template_dcldcl.String(project), - Version: resource_dataproc_workflow_template_dcldcl.Int64OrNil(int64(d.Get("version").(int))), - } - - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}") - if err != nil { - return resource_dataproc_workflow_template_fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - createDirective := CreateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(resource_dataproc_workflow_template_schema.TimeoutCreate)) - res, err := client.ApplyWorkflowTemplate(resource_dataproc_workflow_template_context.Background(), obj, createDirective...) 
- - if _, ok := err.(resource_dataproc_workflow_template_dcldcl.DiffAfterApplyError); ok { - resource_dataproc_workflow_template_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_dataproc_workflow_template_fmt.Errorf("Error creating WorkflowTemplate: %s", err) - } - - resource_dataproc_workflow_template_log.Printf("[DEBUG] Finished creating WorkflowTemplate %q: %#v", d.Id(), res) - - return resourceDataprocWorkflowTemplateRead(d, meta) -} - -func resourceDataprocWorkflowTemplateRead(d *resource_dataproc_workflow_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplate{ - Jobs: expandDataprocWorkflowTemplateJobsArray(d.Get("jobs")), - Location: resource_dataproc_workflow_template_dcldcl.String(d.Get("location").(string)), - Name: resource_dataproc_workflow_template_dcldcl.String(d.Get("name").(string)), - Placement: expandDataprocWorkflowTemplatePlacement(d.Get("placement")), - DagTimeout: resource_dataproc_workflow_template_dcldcl.String(d.Get("dag_timeout").(string)), - Labels: checkStringMap(d.Get("labels")), - Parameters: expandDataprocWorkflowTemplateParametersArray(d.Get("parameters")), - Project: resource_dataproc_workflow_template_dcldcl.String(project), - Version: resource_dataproc_workflow_template_dcldcl.Int64OrNil(int64(d.Get("version").(int))), - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(resource_dataproc_workflow_template_schema.TimeoutRead)) - res, err := client.GetWorkflowTemplate(resource_dataproc_workflow_template_context.Background(), obj) 
- if err != nil { - resourceName := resource_dataproc_workflow_template_fmt.Sprintf("DataprocWorkflowTemplate %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("jobs", flattenDataprocWorkflowTemplateJobsArray(res.Jobs)); err != nil { - return resource_dataproc_workflow_template_fmt.Errorf("error setting jobs in state: %s", err) - } - if err = d.Set("location", res.Location); err != nil { - return resource_dataproc_workflow_template_fmt.Errorf("error setting location in state: %s", err) - } - if err = d.Set("name", res.Name); err != nil { - return resource_dataproc_workflow_template_fmt.Errorf("error setting name in state: %s", err) - } - if err = d.Set("placement", flattenDataprocWorkflowTemplatePlacement(res.Placement)); err != nil { - return resource_dataproc_workflow_template_fmt.Errorf("error setting placement in state: %s", err) - } - if err = d.Set("dag_timeout", res.DagTimeout); err != nil { - return resource_dataproc_workflow_template_fmt.Errorf("error setting dag_timeout in state: %s", err) - } - if err = d.Set("labels", res.Labels); err != nil { - return resource_dataproc_workflow_template_fmt.Errorf("error setting labels in state: %s", err) - } - if err = d.Set("parameters", flattenDataprocWorkflowTemplateParametersArray(res.Parameters)); err != nil { - return resource_dataproc_workflow_template_fmt.Errorf("error setting parameters in state: %s", err) - } - if err = d.Set("project", res.Project); err != nil { - return resource_dataproc_workflow_template_fmt.Errorf("error setting project in state: %s", err) - } - if err = d.Set("version", res.Version); err != nil { - return resource_dataproc_workflow_template_fmt.Errorf("error setting version in state: %s", err) - } - if err = d.Set("create_time", res.CreateTime); err != nil { - return resource_dataproc_workflow_template_fmt.Errorf("error setting create_time in state: %s", err) - } - if err = d.Set("update_time", res.UpdateTime); err != nil { - return 
resource_dataproc_workflow_template_fmt.Errorf("error setting update_time in state: %s", err) - } - - return nil -} - -func resourceDataprocWorkflowTemplateDelete(d *resource_dataproc_workflow_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplate{ - Jobs: expandDataprocWorkflowTemplateJobsArray(d.Get("jobs")), - Location: resource_dataproc_workflow_template_dcldcl.String(d.Get("location").(string)), - Name: resource_dataproc_workflow_template_dcldcl.String(d.Get("name").(string)), - Placement: expandDataprocWorkflowTemplatePlacement(d.Get("placement")), - DagTimeout: resource_dataproc_workflow_template_dcldcl.String(d.Get("dag_timeout").(string)), - Labels: checkStringMap(d.Get("labels")), - Parameters: expandDataprocWorkflowTemplateParametersArray(d.Get("parameters")), - Project: resource_dataproc_workflow_template_dcldcl.String(project), - Version: resource_dataproc_workflow_template_dcldcl.Int64OrNil(int64(d.Get("version").(int))), - } - - resource_dataproc_workflow_template_log.Printf("[DEBUG] Deleting WorkflowTemplate %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLDataprocClient(config, userAgent, billingProject, d.Timeout(resource_dataproc_workflow_template_schema.TimeoutDelete)) - if err := client.DeleteWorkflowTemplate(resource_dataproc_workflow_template_context.Background(), obj); err != nil { - return resource_dataproc_workflow_template_fmt.Errorf("Error deleting WorkflowTemplate: %s", err) - } - - resource_dataproc_workflow_template_log.Printf("[DEBUG] Finished deleting WorkflowTemplate %q", d.Id()) - return nil -} - -func resourceDataprocWorkflowTemplateImport(d 
*resource_dataproc_workflow_template_schema.ResourceData, meta interface{}) ([]*resource_dataproc_workflow_template_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/workflowTemplates/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/workflowTemplates/{{name}}") - if err != nil { - return nil, resource_dataproc_workflow_template_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_dataproc_workflow_template_schema.ResourceData{d}, nil -} - -func expandDataprocWorkflowTemplateJobsArray(o interface{}) []resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobs { - if o == nil { - return make([]resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobs, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 { - return make([]resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobs, 0) - } - - items := make([]resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobs, 0, len(objs)) - for _, item := range objs { - i := expandDataprocWorkflowTemplateJobs(item) - items = append(items, *i) - } - - return items -} - -func expandDataprocWorkflowTemplateJobs(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobs { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobs - } - - obj := o.(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobs{ - StepId: resource_dataproc_workflow_template_dcldcl.String(obj["step_id"].(string)), - HadoopJob: expandDataprocWorkflowTemplateJobsHadoopJob(obj["hadoop_job"]), - HiveJob: expandDataprocWorkflowTemplateJobsHiveJob(obj["hive_job"]), - Labels: 
checkStringMap(obj["labels"]), - PigJob: expandDataprocWorkflowTemplateJobsPigJob(obj["pig_job"]), - PrerequisiteStepIds: expandStringArray(obj["prerequisite_step_ids"]), - PrestoJob: expandDataprocWorkflowTemplateJobsPrestoJob(obj["presto_job"]), - PysparkJob: expandDataprocWorkflowTemplateJobsPysparkJob(obj["pyspark_job"]), - Scheduling: expandDataprocWorkflowTemplateJobsScheduling(obj["scheduling"]), - SparkJob: expandDataprocWorkflowTemplateJobsSparkJob(obj["spark_job"]), - SparkRJob: expandDataprocWorkflowTemplateJobsSparkRJob(obj["spark_r_job"]), - SparkSqlJob: expandDataprocWorkflowTemplateJobsSparkSqlJob(obj["spark_sql_job"]), - } -} - -func flattenDataprocWorkflowTemplateJobsArray(objs []resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobs) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenDataprocWorkflowTemplateJobs(&item) - items = append(items, i) - } - - return items -} - -func flattenDataprocWorkflowTemplateJobs(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobs) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "step_id": obj.StepId, - "hadoop_job": flattenDataprocWorkflowTemplateJobsHadoopJob(obj.HadoopJob), - "hive_job": flattenDataprocWorkflowTemplateJobsHiveJob(obj.HiveJob), - "labels": obj.Labels, - "pig_job": flattenDataprocWorkflowTemplateJobsPigJob(obj.PigJob), - "prerequisite_step_ids": obj.PrerequisiteStepIds, - "presto_job": flattenDataprocWorkflowTemplateJobsPrestoJob(obj.PrestoJob), - "pyspark_job": flattenDataprocWorkflowTemplateJobsPysparkJob(obj.PysparkJob), - "scheduling": flattenDataprocWorkflowTemplateJobsScheduling(obj.Scheduling), - "spark_job": flattenDataprocWorkflowTemplateJobsSparkJob(obj.SparkJob), - "spark_r_job": flattenDataprocWorkflowTemplateJobsSparkRJob(obj.SparkRJob), - "spark_sql_job": 
flattenDataprocWorkflowTemplateJobsSparkSqlJob(obj.SparkSqlJob), - } - - return transformed - -} - -func expandDataprocWorkflowTemplateJobsHadoopJob(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsHadoopJob { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsHadoopJob - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsHadoopJob - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsHadoopJob{ - ArchiveUris: expandStringArray(obj["archive_uris"]), - Args: expandStringArray(obj["args"]), - FileUris: expandStringArray(obj["file_uris"]), - JarFileUris: expandStringArray(obj["jar_file_uris"]), - LoggingConfig: expandDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(obj["logging_config"]), - MainClass: resource_dataproc_workflow_template_dcldcl.String(obj["main_class"].(string)), - MainJarFileUri: resource_dataproc_workflow_template_dcldcl.String(obj["main_jar_file_uri"].(string)), - Properties: checkStringMap(obj["properties"]), - } -} - -func flattenDataprocWorkflowTemplateJobsHadoopJob(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsHadoopJob) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "archive_uris": obj.ArchiveUris, - "args": obj.Args, - "file_uris": obj.FileUris, - "jar_file_uris": obj.JarFileUris, - "logging_config": flattenDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(obj.LoggingConfig), - "main_class": obj.MainClass, - "main_jar_file_uri": obj.MainJarFileUri, - "properties": obj.Properties, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(o interface{}) 
*resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsHadoopJobLoggingConfig { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsHadoopJobLoggingConfig - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsHadoopJobLoggingConfig - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsHadoopJobLoggingConfig{ - DriverLogLevels: checkStringMap(obj["driver_log_levels"]), - } -} - -func flattenDataprocWorkflowTemplateJobsHadoopJobLoggingConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsHadoopJobLoggingConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "driver_log_levels": obj.DriverLogLevels, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsHiveJob(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsHiveJob { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsHiveJob - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsHiveJob - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsHiveJob{ - ContinueOnFailure: resource_dataproc_workflow_template_dcldcl.Bool(obj["continue_on_failure"].(bool)), - JarFileUris: expandStringArray(obj["jar_file_uris"]), - Properties: checkStringMap(obj["properties"]), - QueryFileUri: resource_dataproc_workflow_template_dcldcl.String(obj["query_file_uri"].(string)), - QueryList: expandDataprocWorkflowTemplateJobsHiveJobQueryList(obj["query_list"]), - ScriptVariables: checkStringMap(obj["script_variables"]), - } -} - 
-func flattenDataprocWorkflowTemplateJobsHiveJob(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsHiveJob) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "continue_on_failure": obj.ContinueOnFailure, - "jar_file_uris": obj.JarFileUris, - "properties": obj.Properties, - "query_file_uri": obj.QueryFileUri, - "query_list": flattenDataprocWorkflowTemplateJobsHiveJobQueryList(obj.QueryList), - "script_variables": obj.ScriptVariables, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsHiveJobQueryList(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsHiveJobQueryList { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsHiveJobQueryList - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsHiveJobQueryList - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsHiveJobQueryList{ - Queries: expandStringArray(obj["queries"]), - } -} - -func flattenDataprocWorkflowTemplateJobsHiveJobQueryList(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsHiveJobQueryList) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "queries": obj.Queries, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsPigJob(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPigJob { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsPigJob - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsPigJob - } - obj := 
objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPigJob{ - ContinueOnFailure: resource_dataproc_workflow_template_dcldcl.Bool(obj["continue_on_failure"].(bool)), - JarFileUris: expandStringArray(obj["jar_file_uris"]), - LoggingConfig: expandDataprocWorkflowTemplateJobsPigJobLoggingConfig(obj["logging_config"]), - Properties: checkStringMap(obj["properties"]), - QueryFileUri: resource_dataproc_workflow_template_dcldcl.String(obj["query_file_uri"].(string)), - QueryList: expandDataprocWorkflowTemplateJobsPigJobQueryList(obj["query_list"]), - ScriptVariables: checkStringMap(obj["script_variables"]), - } -} - -func flattenDataprocWorkflowTemplateJobsPigJob(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPigJob) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "continue_on_failure": obj.ContinueOnFailure, - "jar_file_uris": obj.JarFileUris, - "logging_config": flattenDataprocWorkflowTemplateJobsPigJobLoggingConfig(obj.LoggingConfig), - "properties": obj.Properties, - "query_file_uri": obj.QueryFileUri, - "query_list": flattenDataprocWorkflowTemplateJobsPigJobQueryList(obj.QueryList), - "script_variables": obj.ScriptVariables, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsPigJobLoggingConfig(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPigJobLoggingConfig { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsPigJobLoggingConfig - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsPigJobLoggingConfig - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPigJobLoggingConfig{ - DriverLogLevels: 
checkStringMap(obj["driver_log_levels"]), - } -} - -func flattenDataprocWorkflowTemplateJobsPigJobLoggingConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPigJobLoggingConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "driver_log_levels": obj.DriverLogLevels, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsPigJobQueryList(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPigJobQueryList { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsPigJobQueryList - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsPigJobQueryList - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPigJobQueryList{ - Queries: expandStringArray(obj["queries"]), - } -} - -func flattenDataprocWorkflowTemplateJobsPigJobQueryList(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPigJobQueryList) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "queries": obj.Queries, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsPrestoJob(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPrestoJob { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsPrestoJob - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsPrestoJob - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPrestoJob{ - ClientTags: 
expandStringArray(obj["client_tags"]), - ContinueOnFailure: resource_dataproc_workflow_template_dcldcl.Bool(obj["continue_on_failure"].(bool)), - LoggingConfig: expandDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(obj["logging_config"]), - OutputFormat: resource_dataproc_workflow_template_dcldcl.String(obj["output_format"].(string)), - Properties: checkStringMap(obj["properties"]), - QueryFileUri: resource_dataproc_workflow_template_dcldcl.String(obj["query_file_uri"].(string)), - QueryList: expandDataprocWorkflowTemplateJobsPrestoJobQueryList(obj["query_list"]), - } -} - -func flattenDataprocWorkflowTemplateJobsPrestoJob(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPrestoJob) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "client_tags": obj.ClientTags, - "continue_on_failure": obj.ContinueOnFailure, - "logging_config": flattenDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(obj.LoggingConfig), - "output_format": obj.OutputFormat, - "properties": obj.Properties, - "query_file_uri": obj.QueryFileUri, - "query_list": flattenDataprocWorkflowTemplateJobsPrestoJobQueryList(obj.QueryList), - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPrestoJobLoggingConfig { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsPrestoJobLoggingConfig - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsPrestoJobLoggingConfig - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPrestoJobLoggingConfig{ - DriverLogLevels: checkStringMap(obj["driver_log_levels"]), - } -} - -func 
flattenDataprocWorkflowTemplateJobsPrestoJobLoggingConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPrestoJobLoggingConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "driver_log_levels": obj.DriverLogLevels, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsPrestoJobQueryList(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPrestoJobQueryList { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsPrestoJobQueryList - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsPrestoJobQueryList - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPrestoJobQueryList{ - Queries: expandStringArray(obj["queries"]), - } -} - -func flattenDataprocWorkflowTemplateJobsPrestoJobQueryList(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPrestoJobQueryList) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "queries": obj.Queries, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsPysparkJob(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPysparkJob { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsPysparkJob - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsPysparkJob - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPysparkJob{ - MainPythonFileUri: 
resource_dataproc_workflow_template_dcldcl.String(obj["main_python_file_uri"].(string)), - ArchiveUris: expandStringArray(obj["archive_uris"]), - Args: expandStringArray(obj["args"]), - FileUris: expandStringArray(obj["file_uris"]), - JarFileUris: expandStringArray(obj["jar_file_uris"]), - LoggingConfig: expandDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(obj["logging_config"]), - Properties: checkStringMap(obj["properties"]), - PythonFileUris: expandStringArray(obj["python_file_uris"]), - } -} - -func flattenDataprocWorkflowTemplateJobsPysparkJob(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPysparkJob) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "main_python_file_uri": obj.MainPythonFileUri, - "archive_uris": obj.ArchiveUris, - "args": obj.Args, - "file_uris": obj.FileUris, - "jar_file_uris": obj.JarFileUris, - "logging_config": flattenDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(obj.LoggingConfig), - "properties": obj.Properties, - "python_file_uris": obj.PythonFileUris, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPysparkJobLoggingConfig { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsPysparkJobLoggingConfig - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsPysparkJobLoggingConfig - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPysparkJobLoggingConfig{ - DriverLogLevels: checkStringMap(obj["driver_log_levels"]), - } -} - -func flattenDataprocWorkflowTemplateJobsPysparkJobLoggingConfig(obj 
*resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsPysparkJobLoggingConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "driver_log_levels": obj.DriverLogLevels, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsScheduling(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsScheduling { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsScheduling - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsScheduling - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsScheduling{ - MaxFailuresPerHour: resource_dataproc_workflow_template_dcldcl.Int64(int64(obj["max_failures_per_hour"].(int))), - MaxFailuresTotal: resource_dataproc_workflow_template_dcldcl.Int64(int64(obj["max_failures_total"].(int))), - } -} - -func flattenDataprocWorkflowTemplateJobsScheduling(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsScheduling) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "max_failures_per_hour": obj.MaxFailuresPerHour, - "max_failures_total": obj.MaxFailuresTotal, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsSparkJob(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkJob { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsSparkJob - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsSparkJob - } - obj := objArr[0].(map[string]interface{}) - return 
&resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkJob{ - ArchiveUris: expandStringArray(obj["archive_uris"]), - Args: expandStringArray(obj["args"]), - FileUris: expandStringArray(obj["file_uris"]), - JarFileUris: expandStringArray(obj["jar_file_uris"]), - LoggingConfig: expandDataprocWorkflowTemplateJobsSparkJobLoggingConfig(obj["logging_config"]), - MainClass: resource_dataproc_workflow_template_dcldcl.String(obj["main_class"].(string)), - MainJarFileUri: resource_dataproc_workflow_template_dcldcl.String(obj["main_jar_file_uri"].(string)), - Properties: checkStringMap(obj["properties"]), - } -} - -func flattenDataprocWorkflowTemplateJobsSparkJob(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkJob) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "archive_uris": obj.ArchiveUris, - "args": obj.Args, - "file_uris": obj.FileUris, - "jar_file_uris": obj.JarFileUris, - "logging_config": flattenDataprocWorkflowTemplateJobsSparkJobLoggingConfig(obj.LoggingConfig), - "main_class": obj.MainClass, - "main_jar_file_uri": obj.MainJarFileUri, - "properties": obj.Properties, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsSparkJobLoggingConfig(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkJobLoggingConfig { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsSparkJobLoggingConfig - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsSparkJobLoggingConfig - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkJobLoggingConfig{ - DriverLogLevels: checkStringMap(obj["driver_log_levels"]), - } -} - -func 
flattenDataprocWorkflowTemplateJobsSparkJobLoggingConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkJobLoggingConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "driver_log_levels": obj.DriverLogLevels, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsSparkRJob(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkRJob { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsSparkRJob - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsSparkRJob - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkRJob{ - MainRFileUri: resource_dataproc_workflow_template_dcldcl.String(obj["main_r_file_uri"].(string)), - ArchiveUris: expandStringArray(obj["archive_uris"]), - Args: expandStringArray(obj["args"]), - FileUris: expandStringArray(obj["file_uris"]), - LoggingConfig: expandDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(obj["logging_config"]), - Properties: checkStringMap(obj["properties"]), - } -} - -func flattenDataprocWorkflowTemplateJobsSparkRJob(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkRJob) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "main_r_file_uri": obj.MainRFileUri, - "archive_uris": obj.ArchiveUris, - "args": obj.Args, - "file_uris": obj.FileUris, - "logging_config": flattenDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(obj.LoggingConfig), - "properties": obj.Properties, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(o interface{}) 
*resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkRJobLoggingConfig { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsSparkRJobLoggingConfig - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsSparkRJobLoggingConfig - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkRJobLoggingConfig{ - DriverLogLevels: checkStringMap(obj["driver_log_levels"]), - } -} - -func flattenDataprocWorkflowTemplateJobsSparkRJobLoggingConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkRJobLoggingConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "driver_log_levels": obj.DriverLogLevels, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsSparkSqlJob(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkSqlJob { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsSparkSqlJob - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsSparkSqlJob - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkSqlJob{ - JarFileUris: expandStringArray(obj["jar_file_uris"]), - LoggingConfig: expandDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(obj["logging_config"]), - Properties: checkStringMap(obj["properties"]), - QueryFileUri: resource_dataproc_workflow_template_dcldcl.String(obj["query_file_uri"].(string)), - QueryList: expandDataprocWorkflowTemplateJobsSparkSqlJobQueryList(obj["query_list"]), - ScriptVariables: 
checkStringMap(obj["script_variables"]), - } -} - -func flattenDataprocWorkflowTemplateJobsSparkSqlJob(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkSqlJob) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "jar_file_uris": obj.JarFileUris, - "logging_config": flattenDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(obj.LoggingConfig), - "properties": obj.Properties, - "query_file_uri": obj.QueryFileUri, - "query_list": flattenDataprocWorkflowTemplateJobsSparkSqlJobQueryList(obj.QueryList), - "script_variables": obj.ScriptVariables, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkSqlJobLoggingConfig { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsSparkSqlJobLoggingConfig - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsSparkSqlJobLoggingConfig - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkSqlJobLoggingConfig{ - DriverLogLevels: checkStringMap(obj["driver_log_levels"]), - } -} - -func flattenDataprocWorkflowTemplateJobsSparkSqlJobLoggingConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkSqlJobLoggingConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "driver_log_levels": obj.DriverLogLevels, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateJobsSparkSqlJobQueryList(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkSqlJobQueryList { - if o == nil { - return 
resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsSparkSqlJobQueryList - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateJobsSparkSqlJobQueryList - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkSqlJobQueryList{ - Queries: expandStringArray(obj["queries"]), - } -} - -func flattenDataprocWorkflowTemplateJobsSparkSqlJobQueryList(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateJobsSparkSqlJobQueryList) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "queries": obj.Queries, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplatePlacement(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplatePlacement { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplatePlacement - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplatePlacement - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplatePlacement{ - ClusterSelector: expandDataprocWorkflowTemplatePlacementClusterSelector(obj["cluster_selector"]), - ManagedCluster: expandDataprocWorkflowTemplatePlacementManagedCluster(obj["managed_cluster"]), - } -} - -func flattenDataprocWorkflowTemplatePlacement(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplatePlacement) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "cluster_selector": flattenDataprocWorkflowTemplatePlacementClusterSelector(obj.ClusterSelector), - "managed_cluster": flattenDataprocWorkflowTemplatePlacementManagedCluster(obj.ManagedCluster), - } 
- - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplatePlacementClusterSelector(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplatePlacementClusterSelector { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplatePlacementClusterSelector - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplatePlacementClusterSelector - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplatePlacementClusterSelector{ - ClusterLabels: checkStringMap(obj["cluster_labels"]), - Zone: resource_dataproc_workflow_template_dcldcl.StringOrNil(obj["zone"].(string)), - } -} - -func flattenDataprocWorkflowTemplatePlacementClusterSelector(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplatePlacementClusterSelector) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "cluster_labels": obj.ClusterLabels, - "zone": obj.Zone, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplatePlacementManagedCluster(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplatePlacementManagedCluster { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplatePlacementManagedCluster - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplatePlacementManagedCluster - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplatePlacementManagedCluster{ - ClusterName: resource_dataproc_workflow_template_dcldcl.String(obj["cluster_name"].(string)), - Config: expandDataprocWorkflowTemplateClusterClusterConfig(obj["config"]), - Labels: 
checkStringMap(obj["labels"]), - } -} - -func flattenDataprocWorkflowTemplatePlacementManagedCluster(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplatePlacementManagedCluster) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "cluster_name": obj.ClusterName, - "config": flattenDataprocWorkflowTemplateClusterClusterConfig(obj.Config), - "labels": obj.Labels, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateParametersArray(o interface{}) []resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParameters { - if o == nil { - return make([]resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParameters, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 { - return make([]resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParameters, 0) - } - - items := make([]resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParameters, 0, len(objs)) - for _, item := range objs { - i := expandDataprocWorkflowTemplateParameters(item) - items = append(items, *i) - } - - return items -} - -func expandDataprocWorkflowTemplateParameters(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParameters { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateParameters - } - - obj := o.(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParameters{ - Fields: expandStringArray(obj["fields"]), - Name: resource_dataproc_workflow_template_dcldcl.String(obj["name"].(string)), - Description: resource_dataproc_workflow_template_dcldcl.String(obj["description"].(string)), - Validation: expandDataprocWorkflowTemplateParametersValidation(obj["validation"]), - } -} - -func flattenDataprocWorkflowTemplateParametersArray(objs 
[]resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParameters) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenDataprocWorkflowTemplateParameters(&item) - items = append(items, i) - } - - return items -} - -func flattenDataprocWorkflowTemplateParameters(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParameters) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "fields": obj.Fields, - "name": obj.Name, - "description": obj.Description, - "validation": flattenDataprocWorkflowTemplateParametersValidation(obj.Validation), - } - - return transformed - -} - -func expandDataprocWorkflowTemplateParametersValidation(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParametersValidation { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateParametersValidation - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateParametersValidation - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParametersValidation{ - Regex: expandDataprocWorkflowTemplateParametersValidationRegex(obj["regex"]), - Values: expandDataprocWorkflowTemplateParametersValidationValues(obj["values"]), - } -} - -func flattenDataprocWorkflowTemplateParametersValidation(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParametersValidation) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "regex": flattenDataprocWorkflowTemplateParametersValidationRegex(obj.Regex), - "values": flattenDataprocWorkflowTemplateParametersValidationValues(obj.Values), - } - - return []interface{}{transformed} - -} - -func 
expandDataprocWorkflowTemplateParametersValidationRegex(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParametersValidationRegex { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateParametersValidationRegex - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateParametersValidationRegex - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParametersValidationRegex{ - Regexes: expandStringArray(obj["regexes"]), - } -} - -func flattenDataprocWorkflowTemplateParametersValidationRegex(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParametersValidationRegex) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "regexes": obj.Regexes, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateParametersValidationValues(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParametersValidationValues { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateParametersValidationValues - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyWorkflowTemplateParametersValidationValues - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParametersValidationValues{ - Values: expandStringArray(obj["values"]), - } -} - -func flattenDataprocWorkflowTemplateParametersValidationValues(obj *resource_dataproc_workflow_template_dataprocdataproc.WorkflowTemplateParametersValidationValues) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "values": obj.Values, - } - - 
return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateClusterInstanceGroupConfig(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.ClusterInstanceGroupConfig { - if o == nil { - return nil - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return nil - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.ClusterInstanceGroupConfig{ - Accelerators: expandDataprocWorkflowTemplateClusterInstanceGroupConfigAcceleratorsArray(obj["accelerators"]), - DiskConfig: expandDataprocWorkflowTemplateClusterInstanceGroupConfigDiskConfig(obj["disk_config"]), - Image: resource_dataproc_workflow_template_dcldcl.String(obj["image"].(string)), - MachineType: resource_dataproc_workflow_template_dcldcl.String(obj["machine_type"].(string)), - MinCpuPlatform: resource_dataproc_workflow_template_dcldcl.StringOrNil(obj["min_cpu_platform"].(string)), - NumInstances: resource_dataproc_workflow_template_dcldcl.Int64(int64(obj["num_instances"].(int))), - Preemptibility: resource_dataproc_workflow_template_dataprocdataproc.ClusterInstanceGroupConfigPreemptibilityEnumRef(obj["preemptibility"].(string)), - } -} - -func flattenDataprocWorkflowTemplateClusterInstanceGroupConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.ClusterInstanceGroupConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "accelerators": flattenDataprocWorkflowTemplateClusterInstanceGroupConfigAcceleratorsArray(obj.Accelerators), - "disk_config": flattenDataprocWorkflowTemplateClusterInstanceGroupConfigDiskConfig(obj.DiskConfig), - "image": obj.Image, - "machine_type": obj.MachineType, - "min_cpu_platform": obj.MinCpuPlatform, - "num_instances": obj.NumInstances, - "preemptibility": obj.Preemptibility, - "instance_names": obj.InstanceNames, - "is_preemptible": obj.IsPreemptible, - "managed_group_config": 
flattenDataprocWorkflowTemplateClusterInstanceGroupConfigManagedGroupConfig(obj.ManagedGroupConfig), - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateClusterInstanceGroupConfigAcceleratorsArray(o interface{}) []resource_dataproc_workflow_template_dataprocdataproc.ClusterInstanceGroupConfigAccelerators { - if o == nil { - return nil - } - - objs := o.([]interface{}) - if len(objs) == 0 { - return nil - } - - items := make([]resource_dataproc_workflow_template_dataprocdataproc.ClusterInstanceGroupConfigAccelerators, 0, len(objs)) - for _, item := range objs { - i := expandDataprocWorkflowTemplateClusterInstanceGroupConfigAccelerators(item) - items = append(items, *i) - } - - return items -} - -func expandDataprocWorkflowTemplateClusterInstanceGroupConfigAccelerators(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.ClusterInstanceGroupConfigAccelerators { - if o == nil { - return nil - } - - obj := o.(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.ClusterInstanceGroupConfigAccelerators{ - AcceleratorCount: resource_dataproc_workflow_template_dcldcl.Int64(int64(obj["accelerator_count"].(int))), - AcceleratorType: resource_dataproc_workflow_template_dcldcl.String(obj["accelerator_type"].(string)), - } -} - -func flattenDataprocWorkflowTemplateClusterInstanceGroupConfigAcceleratorsArray(objs []resource_dataproc_workflow_template_dataprocdataproc.ClusterInstanceGroupConfigAccelerators) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenDataprocWorkflowTemplateClusterInstanceGroupConfigAccelerators(&item) - items = append(items, i) - } - - return items -} - -func flattenDataprocWorkflowTemplateClusterInstanceGroupConfigAccelerators(obj *resource_dataproc_workflow_template_dataprocdataproc.ClusterInstanceGroupConfigAccelerators) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed 
:= map[string]interface{}{ - "accelerator_count": obj.AcceleratorCount, - "accelerator_type": obj.AcceleratorType, - } - - return transformed - -} - -func expandDataprocWorkflowTemplateClusterInstanceGroupConfigDiskConfig(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.ClusterInstanceGroupConfigDiskConfig { - if o == nil { - return nil - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return nil - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.ClusterInstanceGroupConfigDiskConfig{ - BootDiskSizeGb: resource_dataproc_workflow_template_dcldcl.Int64(int64(obj["boot_disk_size_gb"].(int))), - BootDiskType: resource_dataproc_workflow_template_dcldcl.String(obj["boot_disk_type"].(string)), - NumLocalSsds: resource_dataproc_workflow_template_dcldcl.Int64OrNil(int64(obj["num_local_ssds"].(int))), - } -} - -func flattenDataprocWorkflowTemplateClusterInstanceGroupConfigDiskConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.ClusterInstanceGroupConfigDiskConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "boot_disk_size_gb": obj.BootDiskSizeGb, - "boot_disk_type": obj.BootDiskType, - "num_local_ssds": obj.NumLocalSsds, - } - - return []interface{}{transformed} - -} - -func flattenDataprocWorkflowTemplateClusterInstanceGroupConfigManagedGroupConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.ClusterInstanceGroupConfigManagedGroupConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "instance_group_manager_name": obj.InstanceGroupManagerName, - "instance_template_name": obj.InstanceTemplateName, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateClusterClusterConfig(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfig { - if o == nil { - return 
resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfig - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfig - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfig{ - AutoscalingConfig: expandDataprocWorkflowTemplateClusterClusterConfigAutoscalingConfig(obj["autoscaling_config"]), - EncryptionConfig: expandDataprocWorkflowTemplateClusterClusterConfigEncryptionConfig(obj["encryption_config"]), - EndpointConfig: expandDataprocWorkflowTemplateClusterClusterConfigEndpointConfig(obj["endpoint_config"]), - GceClusterConfig: expandDataprocWorkflowTemplateClusterClusterConfigGceClusterConfig(obj["gce_cluster_config"]), - InitializationActions: expandDataprocWorkflowTemplateClusterClusterConfigInitializationActionsArray(obj["initialization_actions"]), - LifecycleConfig: expandDataprocWorkflowTemplateClusterClusterConfigLifecycleConfig(obj["lifecycle_config"]), - MasterConfig: expandDataprocWorkflowTemplateClusterInstanceGroupConfig(obj["master_config"]), - SecondaryWorkerConfig: expandDataprocWorkflowTemplateClusterInstanceGroupConfig(obj["secondary_worker_config"]), - SecurityConfig: expandDataprocWorkflowTemplateClusterClusterConfigSecurityConfig(obj["security_config"]), - SoftwareConfig: expandDataprocWorkflowTemplateClusterClusterConfigSoftwareConfig(obj["software_config"]), - StagingBucket: resource_dataproc_workflow_template_dcldcl.String(obj["staging_bucket"].(string)), - TempBucket: resource_dataproc_workflow_template_dcldcl.String(obj["temp_bucket"].(string)), - WorkerConfig: expandDataprocWorkflowTemplateClusterInstanceGroupConfig(obj["worker_config"]), - } -} - -func flattenDataprocWorkflowTemplateClusterClusterConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed 
:= map[string]interface{}{ - "autoscaling_config": flattenDataprocWorkflowTemplateClusterClusterConfigAutoscalingConfig(obj.AutoscalingConfig), - "encryption_config": flattenDataprocWorkflowTemplateClusterClusterConfigEncryptionConfig(obj.EncryptionConfig), - "endpoint_config": flattenDataprocWorkflowTemplateClusterClusterConfigEndpointConfig(obj.EndpointConfig), - "gce_cluster_config": flattenDataprocWorkflowTemplateClusterClusterConfigGceClusterConfig(obj.GceClusterConfig), - "initialization_actions": flattenDataprocWorkflowTemplateClusterClusterConfigInitializationActionsArray(obj.InitializationActions), - "lifecycle_config": flattenDataprocWorkflowTemplateClusterClusterConfigLifecycleConfig(obj.LifecycleConfig), - "master_config": flattenDataprocWorkflowTemplateClusterInstanceGroupConfig(obj.MasterConfig), - "secondary_worker_config": flattenDataprocWorkflowTemplateClusterInstanceGroupConfig(obj.SecondaryWorkerConfig), - "security_config": flattenDataprocWorkflowTemplateClusterClusterConfigSecurityConfig(obj.SecurityConfig), - "software_config": flattenDataprocWorkflowTemplateClusterClusterConfigSoftwareConfig(obj.SoftwareConfig), - "staging_bucket": obj.StagingBucket, - "temp_bucket": obj.TempBucket, - "worker_config": flattenDataprocWorkflowTemplateClusterInstanceGroupConfig(obj.WorkerConfig), - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateClusterClusterConfigAutoscalingConfig(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigAutoscalingConfig { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigAutoscalingConfig - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigAutoscalingConfig - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigAutoscalingConfig{ - Policy: 
resource_dataproc_workflow_template_dcldcl.String(obj["policy"].(string)), - } -} - -func flattenDataprocWorkflowTemplateClusterClusterConfigAutoscalingConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigAutoscalingConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "policy": obj.Policy, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateClusterClusterConfigEncryptionConfig(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigEncryptionConfig { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigEncryptionConfig - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigEncryptionConfig - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigEncryptionConfig{ - GcePdKmsKeyName: resource_dataproc_workflow_template_dcldcl.String(obj["gce_pd_kms_key_name"].(string)), - } -} - -func flattenDataprocWorkflowTemplateClusterClusterConfigEncryptionConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigEncryptionConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "gce_pd_kms_key_name": obj.GcePdKmsKeyName, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateClusterClusterConfigEndpointConfig(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigEndpointConfig { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigEndpointConfig - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigEndpointConfig - } - obj := 
objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigEndpointConfig{ - EnableHttpPortAccess: resource_dataproc_workflow_template_dcldcl.Bool(obj["enable_http_port_access"].(bool)), - } -} - -func flattenDataprocWorkflowTemplateClusterClusterConfigEndpointConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigEndpointConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "enable_http_port_access": obj.EnableHttpPortAccess, - "http_ports": obj.HttpPorts, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateClusterClusterConfigGceClusterConfig(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigGceClusterConfig { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigGceClusterConfig - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigGceClusterConfig - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigGceClusterConfig{ - InternalIPOnly: resource_dataproc_workflow_template_dcldcl.Bool(obj["internal_ip_only"].(bool)), - Metadata: checkStringMap(obj["metadata"]), - Network: resource_dataproc_workflow_template_dcldcl.String(obj["network"].(string)), - NodeGroupAffinity: expandDataprocWorkflowTemplateClusterClusterConfigGceClusterConfigNodeGroupAffinity(obj["node_group_affinity"]), - PrivateIPv6GoogleAccess: resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigGceClusterConfigPrivateIPv6GoogleAccessEnumRef(obj["private_ipv6_google_access"].(string)), - ReservationAffinity: expandDataprocWorkflowTemplateClusterClusterConfigGceClusterConfigReservationAffinity(obj["reservation_affinity"]), - ServiceAccount: 
resource_dataproc_workflow_template_dcldcl.String(obj["service_account"].(string)), - ServiceAccountScopes: expandStringArray(obj["service_account_scopes"]), - Subnetwork: resource_dataproc_workflow_template_dcldcl.String(obj["subnetwork"].(string)), - Tags: expandStringArray(obj["tags"]), - Zone: resource_dataproc_workflow_template_dcldcl.StringOrNil(obj["zone"].(string)), - } -} - -func flattenDataprocWorkflowTemplateClusterClusterConfigGceClusterConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigGceClusterConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "internal_ip_only": obj.InternalIPOnly, - "metadata": obj.Metadata, - "network": obj.Network, - "node_group_affinity": flattenDataprocWorkflowTemplateClusterClusterConfigGceClusterConfigNodeGroupAffinity(obj.NodeGroupAffinity), - "private_ipv6_google_access": obj.PrivateIPv6GoogleAccess, - "reservation_affinity": flattenDataprocWorkflowTemplateClusterClusterConfigGceClusterConfigReservationAffinity(obj.ReservationAffinity), - "service_account": obj.ServiceAccount, - "service_account_scopes": obj.ServiceAccountScopes, - "subnetwork": obj.Subnetwork, - "tags": obj.Tags, - "zone": obj.Zone, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateClusterClusterConfigGceClusterConfigNodeGroupAffinity(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigGceClusterConfigNodeGroupAffinity { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigGceClusterConfigNodeGroupAffinity - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigGceClusterConfigNodeGroupAffinity - } - obj := objArr[0].(map[string]interface{}) - return 
&resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigGceClusterConfigNodeGroupAffinity{ - NodeGroup: resource_dataproc_workflow_template_dcldcl.String(obj["node_group"].(string)), - } -} - -func flattenDataprocWorkflowTemplateClusterClusterConfigGceClusterConfigNodeGroupAffinity(obj *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigGceClusterConfigNodeGroupAffinity) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "node_group": obj.NodeGroup, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateClusterClusterConfigGceClusterConfigReservationAffinity(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigGceClusterConfigReservationAffinity { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigGceClusterConfigReservationAffinity - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigGceClusterConfigReservationAffinity - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigGceClusterConfigReservationAffinity{ - ConsumeReservationType: resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigGceClusterConfigReservationAffinityConsumeReservationTypeEnumRef(obj["consume_reservation_type"].(string)), - Key: resource_dataproc_workflow_template_dcldcl.String(obj["key"].(string)), - Values: expandStringArray(obj["values"]), - } -} - -func flattenDataprocWorkflowTemplateClusterClusterConfigGceClusterConfigReservationAffinity(obj *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigGceClusterConfigReservationAffinity) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "consume_reservation_type": 
obj.ConsumeReservationType, - "key": obj.Key, - "values": obj.Values, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateClusterClusterConfigInitializationActionsArray(o interface{}) []resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigInitializationActions { - if o == nil { - return make([]resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigInitializationActions, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 { - return make([]resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigInitializationActions, 0) - } - - items := make([]resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigInitializationActions, 0, len(objs)) - for _, item := range objs { - i := expandDataprocWorkflowTemplateClusterClusterConfigInitializationActions(item) - items = append(items, *i) - } - - return items -} - -func expandDataprocWorkflowTemplateClusterClusterConfigInitializationActions(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigInitializationActions { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigInitializationActions - } - - obj := o.(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigInitializationActions{ - ExecutableFile: resource_dataproc_workflow_template_dcldcl.String(obj["executable_file"].(string)), - ExecutionTimeout: resource_dataproc_workflow_template_dcldcl.String(obj["execution_timeout"].(string)), - } -} - -func flattenDataprocWorkflowTemplateClusterClusterConfigInitializationActionsArray(objs []resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigInitializationActions) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenDataprocWorkflowTemplateClusterClusterConfigInitializationActions(&item) - items = 
append(items, i) - } - - return items -} - -func flattenDataprocWorkflowTemplateClusterClusterConfigInitializationActions(obj *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigInitializationActions) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "executable_file": obj.ExecutableFile, - "execution_timeout": obj.ExecutionTimeout, - } - - return transformed - -} - -func expandDataprocWorkflowTemplateClusterClusterConfigLifecycleConfig(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigLifecycleConfig { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigLifecycleConfig - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigLifecycleConfig - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigLifecycleConfig{ - AutoDeleteTime: resource_dataproc_workflow_template_dcldcl.String(obj["auto_delete_time"].(string)), - AutoDeleteTtl: resource_dataproc_workflow_template_dcldcl.String(obj["auto_delete_ttl"].(string)), - IdleDeleteTtl: resource_dataproc_workflow_template_dcldcl.String(obj["idle_delete_ttl"].(string)), - } -} - -func flattenDataprocWorkflowTemplateClusterClusterConfigLifecycleConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigLifecycleConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "auto_delete_time": obj.AutoDeleteTime, - "auto_delete_ttl": obj.AutoDeleteTtl, - "idle_delete_ttl": obj.IdleDeleteTtl, - "idle_start_time": obj.IdleStartTime, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateClusterClusterConfigSecurityConfig(o interface{}) 
*resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigSecurityConfig { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigSecurityConfig - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigSecurityConfig - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigSecurityConfig{ - KerberosConfig: expandDataprocWorkflowTemplateClusterClusterConfigSecurityConfigKerberosConfig(obj["kerberos_config"]), - } -} - -func flattenDataprocWorkflowTemplateClusterClusterConfigSecurityConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigSecurityConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "kerberos_config": flattenDataprocWorkflowTemplateClusterClusterConfigSecurityConfigKerberosConfig(obj.KerberosConfig), - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateClusterClusterConfigSecurityConfigKerberosConfig(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigSecurityConfigKerberosConfig { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigSecurityConfigKerberosConfig - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigSecurityConfigKerberosConfig - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigSecurityConfigKerberosConfig{ - CrossRealmTrustAdminServer: resource_dataproc_workflow_template_dcldcl.String(obj["cross_realm_trust_admin_server"].(string)), - CrossRealmTrustKdc: 
resource_dataproc_workflow_template_dcldcl.String(obj["cross_realm_trust_kdc"].(string)), - CrossRealmTrustRealm: resource_dataproc_workflow_template_dcldcl.String(obj["cross_realm_trust_realm"].(string)), - CrossRealmTrustSharedPassword: resource_dataproc_workflow_template_dcldcl.String(obj["cross_realm_trust_shared_password"].(string)), - EnableKerberos: resource_dataproc_workflow_template_dcldcl.Bool(obj["enable_kerberos"].(bool)), - KdcDbKey: resource_dataproc_workflow_template_dcldcl.String(obj["kdc_db_key"].(string)), - KeyPassword: resource_dataproc_workflow_template_dcldcl.String(obj["key_password"].(string)), - Keystore: resource_dataproc_workflow_template_dcldcl.String(obj["keystore"].(string)), - KeystorePassword: resource_dataproc_workflow_template_dcldcl.String(obj["keystore_password"].(string)), - KmsKey: resource_dataproc_workflow_template_dcldcl.String(obj["kms_key"].(string)), - Realm: resource_dataproc_workflow_template_dcldcl.String(obj["realm"].(string)), - RootPrincipalPassword: resource_dataproc_workflow_template_dcldcl.String(obj["root_principal_password"].(string)), - TgtLifetimeHours: resource_dataproc_workflow_template_dcldcl.Int64(int64(obj["tgt_lifetime_hours"].(int))), - Truststore: resource_dataproc_workflow_template_dcldcl.String(obj["truststore"].(string)), - TruststorePassword: resource_dataproc_workflow_template_dcldcl.String(obj["truststore_password"].(string)), - } -} - -func flattenDataprocWorkflowTemplateClusterClusterConfigSecurityConfigKerberosConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigSecurityConfigKerberosConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "cross_realm_trust_admin_server": obj.CrossRealmTrustAdminServer, - "cross_realm_trust_kdc": obj.CrossRealmTrustKdc, - "cross_realm_trust_realm": obj.CrossRealmTrustRealm, - "cross_realm_trust_shared_password": obj.CrossRealmTrustSharedPassword, - "enable_kerberos": 
obj.EnableKerberos, - "kdc_db_key": obj.KdcDbKey, - "key_password": obj.KeyPassword, - "keystore": obj.Keystore, - "keystore_password": obj.KeystorePassword, - "kms_key": obj.KmsKey, - "realm": obj.Realm, - "root_principal_password": obj.RootPrincipalPassword, - "tgt_lifetime_hours": obj.TgtLifetimeHours, - "truststore": obj.Truststore, - "truststore_password": obj.TruststorePassword, - } - - return []interface{}{transformed} - -} - -func expandDataprocWorkflowTemplateClusterClusterConfigSoftwareConfig(o interface{}) *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigSoftwareConfig { - if o == nil { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigSoftwareConfig - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_dataproc_workflow_template_dataprocdataproc.EmptyClusterClusterConfigSoftwareConfig - } - obj := objArr[0].(map[string]interface{}) - return &resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigSoftwareConfig{ - ImageVersion: resource_dataproc_workflow_template_dcldcl.String(obj["image_version"].(string)), - OptionalComponents: expandDataprocWorkflowTemplateClusterClusterConfigSoftwareConfigOptionalComponentsArray(obj["optional_components"]), - Properties: checkStringMap(obj["properties"]), - } -} - -func flattenDataprocWorkflowTemplateClusterClusterConfigSoftwareConfig(obj *resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigSoftwareConfig) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "image_version": obj.ImageVersion, - "optional_components": flattenDataprocWorkflowTemplateClusterClusterConfigSoftwareConfigOptionalComponentsArray(obj.OptionalComponents), - "properties": obj.Properties, - } - - return []interface{}{transformed} - -} - -func flattenDataprocWorkflowTemplateClusterClusterConfigSoftwareConfigOptionalComponentsArray(obj 
[]resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigSoftwareConfigOptionalComponentsEnum) interface{} { - if obj == nil { - return nil - } - items := []string{} - for _, item := range obj { - items = append(items, string(item)) - } - return items -} - -func expandDataprocWorkflowTemplateClusterClusterConfigSoftwareConfigOptionalComponentsArray(o interface{}) []resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigSoftwareConfigOptionalComponentsEnum { - objs := o.([]interface{}) - items := make([]resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigSoftwareConfigOptionalComponentsEnum, 0, len(objs)) - for _, item := range objs { - i := resource_dataproc_workflow_template_dataprocdataproc.ClusterClusterConfigSoftwareConfigOptionalComponentsEnumRef(item.(string)) - items = append(items, *i) - } - return items -} - -func resourceDatastoreIndex() *resource_datastore_index_schema.Resource { - return &resource_datastore_index_schema.Resource{ - Create: resourceDatastoreIndexCreate, - Read: resourceDatastoreIndexRead, - Delete: resourceDatastoreIndexDelete, - - Importer: &resource_datastore_index_schema.ResourceImporter{ - State: resourceDatastoreIndexImport, - }, - - Timeouts: &resource_datastore_index_schema.ResourceTimeout{ - Create: resource_datastore_index_schema.DefaultTimeout(20 * resource_datastore_index_time.Minute), - Delete: resource_datastore_index_schema.DefaultTimeout(10 * resource_datastore_index_time.Minute), - }, - - Schema: map[string]*resource_datastore_index_schema.Schema{ - "kind": { - Type: resource_datastore_index_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The entity kind which the index applies to.`, - }, - "ancestor": { - Type: resource_datastore_index_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_datastore_index_validation.StringInSlice([]string{"NONE", "ALL_ANCESTORS", ""}, false), - Description: `Policy for including 
ancestors in the index. Default value: "NONE" Possible values: ["NONE", "ALL_ANCESTORS"]`, - Default: "NONE", - }, - "properties": { - Type: resource_datastore_index_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `An ordered list of properties to index on.`, - MinItems: 1, - Elem: &resource_datastore_index_schema.Resource{ - Schema: map[string]*resource_datastore_index_schema.Schema{ - "direction": { - Type: resource_datastore_index_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_datastore_index_validation.StringInSlice([]string{"ASCENDING", "DESCENDING"}, false), - Description: `The direction the index should optimize for sorting. Possible values: ["ASCENDING", "DESCENDING"]`, - }, - "name": { - Type: resource_datastore_index_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The property name to index.`, - }, - }, - }, - }, - "index_id": { - Type: resource_datastore_index_schema.TypeString, - Computed: true, - Description: `The index id.`, - }, - "project": { - Type: resource_datastore_index_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDatastoreIndexCreate(d *resource_datastore_index_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - kindProp, err := expandDatastoreIndexKind(d.Get("kind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kind"); !isEmptyValue(resource_datastore_index_reflect.ValueOf(kindProp)) && (ok || !resource_datastore_index_reflect.DeepEqual(v, kindProp)) { - obj["kind"] = kindProp - } - ancestorProp, err := expandDatastoreIndexAncestor(d.Get("ancestor"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ancestor"); 
!isEmptyValue(resource_datastore_index_reflect.ValueOf(ancestorProp)) && (ok || !resource_datastore_index_reflect.DeepEqual(v, ancestorProp)) { - obj["ancestor"] = ancestorProp - } - propertiesProp, err := expandDatastoreIndexProperties(d.Get("properties"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("properties"); !isEmptyValue(resource_datastore_index_reflect.ValueOf(propertiesProp)) && (ok || !resource_datastore_index_reflect.DeepEqual(v, propertiesProp)) { - obj["properties"] = propertiesProp - } - - url, err := replaceVars(d, config, "{{DatastoreBasePath}}projects/{{project}}/indexes") - if err != nil { - return err - } - - resource_datastore_index_log.Printf("[DEBUG] Creating new Index: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_datastore_index_fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_datastore_index_schema.TimeoutCreate), datastoreIndex409Contention) - if err != nil { - return resource_datastore_index_fmt.Errorf("Error creating Index: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/indexes/{{index_id}}") - if err != nil { - return resource_datastore_index_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = datastoreOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Index", userAgent, - d.Timeout(resource_datastore_index_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_datastore_index_fmt.Errorf("Error waiting to create Index: %s", err) - } - - if err := d.Set("index_id", flattenDatastoreIndexIndexId(opRes["indexId"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, 
config, "projects/{{project}}/indexes/{{index_id}}") - if err != nil { - return resource_datastore_index_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_datastore_index_log.Printf("[DEBUG] Finished creating Index %q: %#v", d.Id(), res) - - return resourceDatastoreIndexRead(d, meta) -} - -func resourceDatastoreIndexRead(d *resource_datastore_index_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DatastoreBasePath}}projects/{{project}}/indexes/{{index_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_datastore_index_fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, datastoreIndex409Contention) - if err != nil { - return handleNotFoundError(err, d, resource_datastore_index_fmt.Sprintf("DatastoreIndex %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_datastore_index_fmt.Errorf("Error reading Index: %s", err) - } - - if err := d.Set("index_id", flattenDatastoreIndexIndexId(res["indexId"], d, config)); err != nil { - return resource_datastore_index_fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("kind", flattenDatastoreIndexKind(res["kind"], d, config)); err != nil { - return resource_datastore_index_fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("ancestor", flattenDatastoreIndexAncestor(res["ancestor"], d, config)); err != nil { - return resource_datastore_index_fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("properties", flattenDatastoreIndexProperties(res["properties"], d, config)); err != 
nil { - return resource_datastore_index_fmt.Errorf("Error reading Index: %s", err) - } - - return nil -} - -func resourceDatastoreIndexDelete(d *resource_datastore_index_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_datastore_index_fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DatastoreBasePath}}projects/{{project}}/indexes/{{index_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_datastore_index_log.Printf("[DEBUG] Deleting Index %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_datastore_index_schema.TimeoutDelete), datastoreIndex409Contention) - if err != nil { - return handleNotFoundError(err, d, "Index") - } - - err = datastoreOperationWaitTime( - config, res, project, "Deleting Index", userAgent, - d.Timeout(resource_datastore_index_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_datastore_index_log.Printf("[DEBUG] Finished deleting Index %q: %#v", d.Id(), res) - return nil -} - -func resourceDatastoreIndexImport(d *resource_datastore_index_schema.ResourceData, meta interface{}) ([]*resource_datastore_index_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/indexes/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/indexes/{{index_id}}") - if err != nil { - return nil, resource_datastore_index_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) 
- - return []*resource_datastore_index_schema.ResourceData{d}, nil -} - -func flattenDatastoreIndexIndexId(v interface{}, d *resource_datastore_index_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDatastoreIndexKind(v interface{}, d *resource_datastore_index_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDatastoreIndexAncestor(v interface{}, d *resource_datastore_index_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDatastoreIndexProperties(v interface{}, d *resource_datastore_index_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenDatastoreIndexPropertiesName(original["name"], d, config), - "direction": flattenDatastoreIndexPropertiesDirection(original["direction"], d, config), - }) - } - return transformed -} - -func flattenDatastoreIndexPropertiesName(v interface{}, d *resource_datastore_index_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDatastoreIndexPropertiesDirection(v interface{}, d *resource_datastore_index_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDatastoreIndexKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastoreIndexAncestor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastoreIndexProperties(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedName, err := expandDatastoreIndexPropertiesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_datastore_index_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedDirection, err := expandDatastoreIndexPropertiesDirection(original["direction"], d, config) - if err != nil { - return nil, err - } else if val := resource_datastore_index_reflect.ValueOf(transformedDirection); val.IsValid() && !isEmptyValue(val) { - transformed["direction"] = transformedDirection - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDatastoreIndexPropertiesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDatastoreIndexPropertiesDirection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func customDiffDeploymentManagerDeployment(_ resource_deployment_manager_deployment_context.Context, d *resource_deployment_manager_deployment_schema.ResourceDiff, meta interface{}) error { - if preview := d.Get("preview").(bool); preview { - resource_deployment_manager_deployment_log.Printf("[WARN] Deployment preview set to true - Terraform will treat Deployment as recreate-only") - - if d.HasChange("preview") { - if err := d.ForceNew("preview"); err != nil { - return err - } - } - - if d.HasChange("target") { - if err := d.ForceNew("target"); err != nil { - return err - } - } - - if d.HasChange("labels") { - if err := d.ForceNew("labels"); err != nil { - return err - } - } - } - return nil -} - -func resourceDeploymentManagerDeployment() *resource_deployment_manager_deployment_schema.Resource { - return &resource_deployment_manager_deployment_schema.Resource{ - Create: resourceDeploymentManagerDeploymentCreate, - Read: resourceDeploymentManagerDeploymentRead, - Update: 
resourceDeploymentManagerDeploymentUpdate, - Delete: resourceDeploymentManagerDeploymentDelete, - - Importer: &resource_deployment_manager_deployment_schema.ResourceImporter{ - State: resourceDeploymentManagerDeploymentImport, - }, - - Timeouts: &resource_deployment_manager_deployment_schema.ResourceTimeout{ - Create: resource_deployment_manager_deployment_schema.DefaultTimeout(60 * resource_deployment_manager_deployment_time.Minute), - Update: resource_deployment_manager_deployment_schema.DefaultTimeout(60 * resource_deployment_manager_deployment_time.Minute), - Delete: resource_deployment_manager_deployment_schema.DefaultTimeout(60 * resource_deployment_manager_deployment_time.Minute), - }, - - CustomizeDiff: customDiffDeploymentManagerDeployment, - - Schema: map[string]*resource_deployment_manager_deployment_schema.Schema{ - "name": { - Type: resource_deployment_manager_deployment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Unique name for the deployment`, - }, - "target": { - Type: resource_deployment_manager_deployment_schema.TypeList, - Required: true, - Description: `Parameters that define your deployment, including the deployment -configuration and relevant templates.`, - MaxItems: 1, - Elem: &resource_deployment_manager_deployment_schema.Resource{ - Schema: map[string]*resource_deployment_manager_deployment_schema.Schema{ - "config": { - Type: resource_deployment_manager_deployment_schema.TypeList, - Required: true, - Description: `The root configuration file to use for this deployment.`, - MaxItems: 1, - Elem: &resource_deployment_manager_deployment_schema.Resource{ - Schema: map[string]*resource_deployment_manager_deployment_schema.Schema{ - "content": { - Type: resource_deployment_manager_deployment_schema.TypeString, - Required: true, - Description: `The full YAML contents of your configuration file.`, - }, - }, - }, - }, - "imports": { - Type: resource_deployment_manager_deployment_schema.TypeList, - Optional: true, - 
Description: `Specifies import files for this configuration. This can be -used to import templates or other files. For example, you might -import a text file in order to use the file in a template.`, - Elem: &resource_deployment_manager_deployment_schema.Resource{ - Schema: map[string]*resource_deployment_manager_deployment_schema.Schema{ - "content": { - Type: resource_deployment_manager_deployment_schema.TypeString, - Optional: true, - Description: `The full contents of the template that you want to import.`, - }, - "name": { - Type: resource_deployment_manager_deployment_schema.TypeString, - Optional: true, - Description: `The name of the template to import, as declared in the YAML -configuration.`, - }, - }, - }, - }, - }, - }, - }, - "create_policy": { - Type: resource_deployment_manager_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_deployment_manager_deployment_validation.StringInSlice([]string{"ACQUIRE", "CREATE_OR_ACQUIRE", ""}, false), - Description: `Set the policy to use for creating new resources. Only used on -create and update. Valid values are 'CREATE_OR_ACQUIRE' (default) or -'ACQUIRE'. If set to 'ACQUIRE' and resources do not already exist, -the deployment will fail. Note that updating this field does not -actually affect the deployment, just how it is updated. Default value: "CREATE_OR_ACQUIRE" Possible values: ["ACQUIRE", "CREATE_OR_ACQUIRE"]`, - Default: "CREATE_OR_ACQUIRE", - }, - "delete_policy": { - Type: resource_deployment_manager_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_deployment_manager_deployment_validation.StringInSlice([]string{"ABANDON", "DELETE", ""}, false), - Description: `Set the policy to use for deleting new resources on update/delete. -Valid values are 'DELETE' (default) or 'ABANDON'. If 'DELETE', -resource is deleted after removal from Deployment Manager. 
If -'ABANDON', the resource is only removed from Deployment Manager -and is not actually deleted. Note that updating this field does not -actually change the deployment, just how it is updated. Default value: "DELETE" Possible values: ["ABANDON", "DELETE"]`, - Default: "DELETE", - }, - "description": { - Type: resource_deployment_manager_deployment_schema.TypeString, - Optional: true, - Description: `Optional user-provided description of deployment.`, - }, - "labels": { - Type: resource_deployment_manager_deployment_schema.TypeSet, - Optional: true, - Description: `Key-value pairs to apply to this labels.`, - Elem: deploymentmanagerDeploymentLabelsSchema(), - }, - "preview": { - Type: resource_deployment_manager_deployment_schema.TypeBool, - Optional: true, - Description: `If set to true, a deployment is created with "shell" resources -that are not actually instantiated. This allows you to preview a -deployment. It can be updated to false to actually deploy -with real resources. - ~>**NOTE:** Deployment Manager does not allow update -of a deployment in preview (unless updating to preview=false). Thus, -Terraform will force-recreate deployments if either preview is updated -to true or if other fields are updated while preview is true.`, - Default: false, - }, - "deployment_id": { - Type: resource_deployment_manager_deployment_schema.TypeString, - Computed: true, - Description: `Unique identifier for deployment. Output only.`, - }, - "manifest": { - Type: resource_deployment_manager_deployment_schema.TypeString, - Computed: true, - Description: `Output only. URL of the manifest representing the last manifest that -was successfully deployed.`, - }, - "self_link": { - Type: resource_deployment_manager_deployment_schema.TypeString, - Computed: true, - Description: `Output only. 
Server defined URL for the resource.`, - }, - "project": { - Type: resource_deployment_manager_deployment_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func deploymentmanagerDeploymentLabelsSchema() *resource_deployment_manager_deployment_schema.Resource { - return &resource_deployment_manager_deployment_schema.Resource{ - Schema: map[string]*resource_deployment_manager_deployment_schema.Schema{ - "key": { - Type: resource_deployment_manager_deployment_schema.TypeString, - Optional: true, - Description: `Key for label.`, - }, - "value": { - Type: resource_deployment_manager_deployment_schema.TypeString, - Optional: true, - Description: `Value of label.`, - }, - }, - } -} - -func resourceDeploymentManagerDeploymentCreate(d *resource_deployment_manager_deployment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandDeploymentManagerDeploymentName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_deployment_manager_deployment_reflect.ValueOf(nameProp)) && (ok || !resource_deployment_manager_deployment_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandDeploymentManagerDeploymentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_deployment_manager_deployment_reflect.ValueOf(descriptionProp)) && (ok || !resource_deployment_manager_deployment_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandDeploymentManagerDeploymentLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); ok || 
!resource_deployment_manager_deployment_reflect.DeepEqual(v, labelsProp) { - obj["labels"] = labelsProp - } - targetProp, err := expandDeploymentManagerDeploymentTarget(d.Get("target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target"); !isEmptyValue(resource_deployment_manager_deployment_reflect.ValueOf(targetProp)) && (ok || !resource_deployment_manager_deployment_reflect.DeepEqual(v, targetProp)) { - obj["target"] = targetProp - } - - url, err := replaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments?preview={{preview}}&createPolicy={{create_policy}}") - if err != nil { - return err - } - - resource_deployment_manager_deployment_log.Printf("[DEBUG] Creating new Deployment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_deployment_manager_deployment_fmt.Errorf("Error fetching project for Deployment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_deployment_manager_deployment_schema.TimeoutCreate)) - if err != nil { - return resource_deployment_manager_deployment_fmt.Errorf("Error creating Deployment: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/deployments/{{name}}") - if err != nil { - return resource_deployment_manager_deployment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = deploymentManagerOperationWaitTime( - config, res, project, "Creating Deployment", userAgent, - d.Timeout(resource_deployment_manager_deployment_schema.TimeoutCreate)) - - if err != nil { - resourceDeploymentManagerDeploymentPostCreateFailure(d, meta) - - d.SetId("") - return resource_deployment_manager_deployment_fmt.Errorf("Error waiting to create Deployment: %s", err) - } - - 
resource_deployment_manager_deployment_log.Printf("[DEBUG] Finished creating Deployment %q: %#v", d.Id(), res) - - return resourceDeploymentManagerDeploymentRead(d, meta) -} - -func resourceDeploymentManagerDeploymentRead(d *resource_deployment_manager_deployment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_deployment_manager_deployment_fmt.Errorf("Error fetching project for Deployment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_deployment_manager_deployment_fmt.Sprintf("DeploymentManagerDeployment %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_deployment_manager_deployment_fmt.Errorf("Error reading Deployment: %s", err) - } - - if err := d.Set("name", flattenDeploymentManagerDeploymentName(res["name"], d, config)); err != nil { - return resource_deployment_manager_deployment_fmt.Errorf("Error reading Deployment: %s", err) - } - if err := d.Set("description", flattenDeploymentManagerDeploymentDescription(res["description"], d, config)); err != nil { - return resource_deployment_manager_deployment_fmt.Errorf("Error reading Deployment: %s", err) - } - if err := d.Set("labels", flattenDeploymentManagerDeploymentLabels(res["labels"], d, config)); err != nil { - return resource_deployment_manager_deployment_fmt.Errorf("Error reading Deployment: %s", err) - } - if err := d.Set("deployment_id", 
flattenDeploymentManagerDeploymentDeploymentId(res["id"], d, config)); err != nil { - return resource_deployment_manager_deployment_fmt.Errorf("Error reading Deployment: %s", err) - } - if err := d.Set("manifest", flattenDeploymentManagerDeploymentManifest(res["manifest"], d, config)); err != nil { - return resource_deployment_manager_deployment_fmt.Errorf("Error reading Deployment: %s", err) - } - if err := d.Set("self_link", flattenDeploymentManagerDeploymentSelfLink(res["selfLink"], d, config)); err != nil { - return resource_deployment_manager_deployment_fmt.Errorf("Error reading Deployment: %s", err) - } - - return nil -} - -func resourceDeploymentManagerDeploymentUpdate(d *resource_deployment_manager_deployment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_deployment_manager_deployment_fmt.Errorf("Error fetching project for Deployment: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("preview") { - obj := make(map[string]interface{}) - - getUrl, err := replaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - getRes, err := sendRequest(config, "GET", billingProject, getUrl, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_deployment_manager_deployment_fmt.Sprintf("DeploymentManagerDeployment %q", d.Id())) - } - - obj["fingerprint"] = getRes["fingerprint"] - - url, err := replaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}?preview={{preview}}&createPolicy={{create_policy}}&deletePolicy={{delete_policy}}") - if err != nil { - return err - } - - if bp, err 
:= getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_deployment_manager_deployment_schema.TimeoutUpdate)) - if err != nil { - return resource_deployment_manager_deployment_fmt.Errorf("Error updating Deployment %q: %s", d.Id(), err) - } else { - resource_deployment_manager_deployment_log.Printf("[DEBUG] Finished updating Deployment %q: %#v", d.Id(), res) - } - - err = deploymentManagerOperationWaitTime( - config, res, project, "Updating Deployment", userAgent, - d.Timeout(resource_deployment_manager_deployment_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - if d.HasChange("description") || d.HasChange("labels") || d.HasChange("target") { - obj := make(map[string]interface{}) - - getUrl, err := replaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - getRes, err := sendRequest(config, "GET", billingProject, getUrl, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_deployment_manager_deployment_fmt.Sprintf("DeploymentManagerDeployment %q", d.Id())) - } - - obj["fingerprint"] = getRes["fingerprint"] - - descriptionProp, err := expandDeploymentManagerDeploymentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_deployment_manager_deployment_reflect.ValueOf(v)) && (ok || !resource_deployment_manager_deployment_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandDeploymentManagerDeploymentLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); ok || !resource_deployment_manager_deployment_reflect.DeepEqual(v, 
labelsProp) { - obj["labels"] = labelsProp - } - targetProp, err := expandDeploymentManagerDeploymentTarget(d.Get("target"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("target"); !isEmptyValue(resource_deployment_manager_deployment_reflect.ValueOf(v)) && (ok || !resource_deployment_manager_deployment_reflect.DeepEqual(v, targetProp)) { - obj["target"] = targetProp - } - - url, err := replaceVars(d, config, "{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}?preview={{preview}}&createPolicy={{create_policy}}&deletePolicy={{delete_policy}}") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_deployment_manager_deployment_schema.TimeoutUpdate)) - if err != nil { - return resource_deployment_manager_deployment_fmt.Errorf("Error updating Deployment %q: %s", d.Id(), err) - } else { - resource_deployment_manager_deployment_log.Printf("[DEBUG] Finished updating Deployment %q: %#v", d.Id(), res) - } - - err = deploymentManagerOperationWaitTime( - config, res, project, "Updating Deployment", userAgent, - d.Timeout(resource_deployment_manager_deployment_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceDeploymentManagerDeploymentRead(d, meta) -} - -func resourceDeploymentManagerDeploymentDelete(d *resource_deployment_manager_deployment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_deployment_manager_deployment_fmt.Errorf("Error fetching project for Deployment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, 
"{{DeploymentManagerBasePath}}projects/{{project}}/global/deployments/{{name}}?deletePolicy={{delete_policy}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_deployment_manager_deployment_log.Printf("[DEBUG] Deleting Deployment %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_deployment_manager_deployment_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Deployment") - } - - err = deploymentManagerOperationWaitTime( - config, res, project, "Deleting Deployment", userAgent, - d.Timeout(resource_deployment_manager_deployment_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_deployment_manager_deployment_log.Printf("[DEBUG] Finished deleting Deployment %q: %#v", d.Id(), res) - return nil -} - -func resourceDeploymentManagerDeploymentImport(d *resource_deployment_manager_deployment_schema.ResourceData, meta interface{}) ([]*resource_deployment_manager_deployment_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/deployments/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/deployments/{{name}}") - if err != nil { - return nil, resource_deployment_manager_deployment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_deployment_manager_deployment_schema.ResourceData{d}, nil -} - -func flattenDeploymentManagerDeploymentName(v interface{}, d *resource_deployment_manager_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDeploymentManagerDeploymentDescription(v interface{}, d *resource_deployment_manager_deployment_schema.ResourceData, config *Config) interface{} { - return v -} 
- -func flattenDeploymentManagerDeploymentLabels(v interface{}, d *resource_deployment_manager_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_deployment_manager_deployment_schema.NewSet(resource_deployment_manager_deployment_schema.HashResource(deploymentmanagerDeploymentLabelsSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "key": flattenDeploymentManagerDeploymentLabelsKey(original["key"], d, config), - "value": flattenDeploymentManagerDeploymentLabelsValue(original["value"], d, config), - }) - } - return transformed -} - -func flattenDeploymentManagerDeploymentLabelsKey(v interface{}, d *resource_deployment_manager_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDeploymentManagerDeploymentLabelsValue(v interface{}, d *resource_deployment_manager_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDeploymentManagerDeploymentDeploymentId(v interface{}, d *resource_deployment_manager_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDeploymentManagerDeploymentManifest(v interface{}, d *resource_deployment_manager_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDeploymentManagerDeploymentSelfLink(v interface{}, d *resource_deployment_manager_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDeploymentManagerDeploymentName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDeploymentManagerDeploymentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDeploymentManagerDeploymentLabels(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_deployment_manager_deployment_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandDeploymentManagerDeploymentLabelsKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := resource_deployment_manager_deployment_reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - transformedValue, err := expandDeploymentManagerDeploymentLabelsValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := resource_deployment_manager_deployment_reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDeploymentManagerDeploymentLabelsKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDeploymentManagerDeploymentLabelsValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDeploymentManagerDeploymentTarget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedConfig, err := expandDeploymentManagerDeploymentTargetConfig(original["config"], d, config) - if err != nil { - return nil, err - } else if val := resource_deployment_manager_deployment_reflect.ValueOf(transformedConfig); val.IsValid() && !isEmptyValue(val) { - transformed["config"] = transformedConfig - } - - transformedImports, err := 
expandDeploymentManagerDeploymentTargetImports(original["imports"], d, config) - if err != nil { - return nil, err - } else if val := resource_deployment_manager_deployment_reflect.ValueOf(transformedImports); val.IsValid() && !isEmptyValue(val) { - transformed["imports"] = transformedImports - } - - return transformed, nil -} - -func expandDeploymentManagerDeploymentTargetConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedContent, err := expandDeploymentManagerDeploymentTargetConfigContent(original["content"], d, config) - if err != nil { - return nil, err - } else if val := resource_deployment_manager_deployment_reflect.ValueOf(transformedContent); val.IsValid() && !isEmptyValue(val) { - transformed["content"] = transformedContent - } - - return transformed, nil -} - -func expandDeploymentManagerDeploymentTargetConfigContent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDeploymentManagerDeploymentTargetImports(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedContent, err := expandDeploymentManagerDeploymentTargetImportsContent(original["content"], d, config) - if err != nil { - return nil, err - } else if val := resource_deployment_manager_deployment_reflect.ValueOf(transformedContent); val.IsValid() && !isEmptyValue(val) { - transformed["content"] = transformedContent - } - - transformedName, err := expandDeploymentManagerDeploymentTargetImportsName(original["name"], d, config) - if err != nil { - return nil, err - 
} else if val := resource_deployment_manager_deployment_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDeploymentManagerDeploymentTargetImportsContent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDeploymentManagerDeploymentTargetImportsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDeploymentManagerDeploymentPostCreateFailure(d *resource_deployment_manager_deployment_schema.ResourceData, meta interface{}) { - resource_deployment_manager_deployment_log.Printf("[WARN] Attempt to clean up Deployment if it still exists") - var cleanErr error - if cleanErr = resourceDeploymentManagerDeploymentRead(d, meta); cleanErr == nil { - if d.Id() != "" { - resource_deployment_manager_deployment_log.Printf("[WARN] Deployment %q still exists, attempting to delete...", d.Id()) - if cleanErr = resourceDeploymentManagerDeploymentDelete(d, meta); cleanErr == nil { - resource_deployment_manager_deployment_log.Printf("[WARN] Invalid Deployment was successfully deleted") - d.SetId("") - } - } - } - if cleanErr != nil { - resource_deployment_manager_deployment_log.Printf("[WARN] Could not confirm cleanup of Deployment if created in error state: %v", cleanErr) - } -} - -func resourceDialogflowAgent() *resource_dialogflow_agent_schema.Resource { - return &resource_dialogflow_agent_schema.Resource{ - Create: resourceDialogflowAgentCreate, - Read: resourceDialogflowAgentRead, - Update: resourceDialogflowAgentUpdate, - Delete: resourceDialogflowAgentDelete, - - Importer: &resource_dialogflow_agent_schema.ResourceImporter{ - State: resourceDialogflowAgentImport, - }, - - Timeouts: &resource_dialogflow_agent_schema.ResourceTimeout{ - Create: resource_dialogflow_agent_schema.DefaultTimeout(40 * 
resource_dialogflow_agent_time.Minute), - Update: resource_dialogflow_agent_schema.DefaultTimeout(40 * resource_dialogflow_agent_time.Minute), - Delete: resource_dialogflow_agent_schema.DefaultTimeout(4 * resource_dialogflow_agent_time.Minute), - }, - - Schema: map[string]*resource_dialogflow_agent_schema.Schema{ - "default_language_code": { - Type: resource_dialogflow_agent_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The default language of the agent as a language tag. [See Language Support](https://cloud.google.com/dialogflow/docs/reference/language) -for a list of the currently supported language codes. This field cannot be updated after creation.`, - }, - "display_name": { - Type: resource_dialogflow_agent_schema.TypeString, - Required: true, - Description: `The name of this agent.`, - }, - "time_zone": { - Type: resource_dialogflow_agent_schema.TypeString, - Required: true, - Description: `The time zone of this agent from the [time zone database](https://www.iana.org/time-zones), e.g., America/New_York, -Europe/Paris.`, - }, - "api_version": { - Type: resource_dialogflow_agent_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_dialogflow_agent_validation.StringInSlice([]string{"API_VERSION_V1", "API_VERSION_V2", "API_VERSION_V2_BETA_1", ""}, false), - Description: `API version displayed in Dialogflow console. If not specified, V2 API is assumed. Clients are free to query -different service endpoints for different API versions. However, bots connectors and webhook calls will follow -the specified API version. -* API_VERSION_V1: Legacy V1 API. -* API_VERSION_V2: V2 API. -* API_VERSION_V2_BETA_1: V2beta1 API. Possible values: ["API_VERSION_V1", "API_VERSION_V2", "API_VERSION_V2_BETA_1"]`, - }, - "avatar_uri": { - Type: resource_dialogflow_agent_schema.TypeString, - Optional: true, - Description: `The URI of the agent's avatar, which are used throughout the Dialogflow console. 
When an image URL is entered -into this field, the Dialogflow will save the image in the backend. The address of the backend image returned -from the API will be shown in the [avatarUriBackend] field.`, - }, - "classification_threshold": { - Type: resource_dialogflow_agent_schema.TypeFloat, - Optional: true, - Description: `To filter out false positive results and still get variety in matched natural language inputs for your agent, -you can tune the machine learning classification threshold. If the returned score value is less than the threshold -value, then a fallback intent will be triggered or, if there are no fallback intents defined, no intent will be -triggered. The score values range from 0.0 (completely uncertain) to 1.0 (completely certain). If set to 0.0, the -default of 0.3 is used.`, - }, - "description": { - Type: resource_dialogflow_agent_schema.TypeString, - Optional: true, - ValidateFunc: resource_dialogflow_agent_validation.StringLenBetween(0, 500), - Description: `The description of this agent. The maximum length is 500 characters. If exceeded, the request is rejected.`, - }, - "enable_logging": { - Type: resource_dialogflow_agent_schema.TypeBool, - Optional: true, - Description: `Determines whether this agent should log conversation queries.`, - }, - "match_mode": { - Type: resource_dialogflow_agent_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_dialogflow_agent_validation.StringInSlice([]string{"MATCH_MODE_HYBRID", "MATCH_MODE_ML_ONLY", ""}, false), - Description: `Determines how intents are detected from user queries. -* MATCH_MODE_HYBRID: Best for agents with a small number of examples in intents and/or wide use of templates -syntax and composite entities. -* MATCH_MODE_ML_ONLY: Can be used for agents with a large number of examples in intents, especially the ones -using @sys.any or very large developer entities. 
Possible values: ["MATCH_MODE_HYBRID", "MATCH_MODE_ML_ONLY"]`, - }, - "supported_language_codes": { - Type: resource_dialogflow_agent_schema.TypeList, - Optional: true, - Description: `The list of all languages supported by this agent (except for the defaultLanguageCode).`, - Elem: &resource_dialogflow_agent_schema.Schema{ - Type: resource_dialogflow_agent_schema.TypeString, - }, - }, - "tier": { - Type: resource_dialogflow_agent_schema.TypeString, - Optional: true, - ValidateFunc: resource_dialogflow_agent_validation.StringInSlice([]string{"TIER_STANDARD", "TIER_ENTERPRISE", "TIER_ENTERPRISE_PLUS", ""}, false), - Description: `The agent tier. If not specified, TIER_STANDARD is assumed. -* TIER_STANDARD: Standard tier. -* TIER_ENTERPRISE: Enterprise tier (Essentials). -* TIER_ENTERPRISE_PLUS: Enterprise tier (Plus). -NOTE: Due to consistency issues, the provider will not read this field from the API. Drift is possible between -the Terraform state and Dialogflow if the agent tier is changed outside of Terraform. Possible values: ["TIER_STANDARD", "TIER_ENTERPRISE", "TIER_ENTERPRISE_PLUS"]`, - }, - "avatar_uri_backend": { - Type: resource_dialogflow_agent_schema.TypeString, - Computed: true, - Description: `The URI of the agent's avatar as returned from the API. Output only. 
To provide an image URL for the agent avatar, -the [avatarUri] field can be used.`, - }, - "project": { - Type: resource_dialogflow_agent_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowAgentCreate(d *resource_dialogflow_agent_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowAgentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(displayNameProp)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - defaultLanguageCodeProp, err := expandDialogflowAgentDefaultLanguageCode(d.Get("default_language_code"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_language_code"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(defaultLanguageCodeProp)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, defaultLanguageCodeProp)) { - obj["defaultLanguageCode"] = defaultLanguageCodeProp - } - supportedLanguageCodesProp, err := expandDialogflowAgentSupportedLanguageCodes(d.Get("supported_language_codes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("supported_language_codes"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(supportedLanguageCodesProp)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, supportedLanguageCodesProp)) { - obj["supportedLanguageCodes"] = supportedLanguageCodesProp - } - timeZoneProp, err := expandDialogflowAgentTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); 
!isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(timeZoneProp)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - descriptionProp, err := expandDialogflowAgentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(descriptionProp)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - avatarUriProp, err := expandDialogflowAgentAvatarUri(d.Get("avatar_uri"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("avatar_uri"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(avatarUriProp)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, avatarUriProp)) { - obj["avatarUri"] = avatarUriProp - } - enableLoggingProp, err := expandDialogflowAgentEnableLogging(d.Get("enable_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_logging"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(enableLoggingProp)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, enableLoggingProp)) { - obj["enableLogging"] = enableLoggingProp - } - matchModeProp, err := expandDialogflowAgentMatchMode(d.Get("match_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("match_mode"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(matchModeProp)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, matchModeProp)) { - obj["matchMode"] = matchModeProp - } - classificationThresholdProp, err := expandDialogflowAgentClassificationThreshold(d.Get("classification_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("classification_threshold"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(classificationThresholdProp)) && (ok || 
!resource_dialogflow_agent_reflect.DeepEqual(v, classificationThresholdProp)) { - obj["classificationThreshold"] = classificationThresholdProp - } - apiVersionProp, err := expandDialogflowAgentApiVersion(d.Get("api_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("api_version"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(apiVersionProp)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, apiVersionProp)) { - obj["apiVersion"] = apiVersionProp - } - tierProp, err := expandDialogflowAgentTier(d.Get("tier"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tier"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(tierProp)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, tierProp)) { - obj["tier"] = tierProp - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent") - if err != nil { - return err - } - - resource_dialogflow_agent_log.Printf("[DEBUG] Creating new Agent: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error fetching project for Agent: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_agent_schema.TimeoutCreate)) - if err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error creating Agent: %s", err) - } - - id, err := replaceVars(d, config, "{{project}}") - if err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_dialogflow_agent_log.Printf("[DEBUG] Finished creating Agent %q: %#v", d.Id(), res) - - return resourceDialogflowAgentRead(d, meta) -} - -func resourceDialogflowAgentRead(d *resource_dialogflow_agent_schema.ResourceData, meta interface{}) 
error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error fetching project for Agent: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_dialogflow_agent_fmt.Sprintf("DialogflowAgent %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error reading Agent: %s", err) - } - - if err := d.Set("display_name", flattenDialogflowAgentDisplayName(res["displayName"], d, config)); err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("default_language_code", flattenDialogflowAgentDefaultLanguageCode(res["defaultLanguageCode"], d, config)); err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("supported_language_codes", flattenDialogflowAgentSupportedLanguageCodes(res["supportedLanguageCodes"], d, config)); err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("time_zone", flattenDialogflowAgentTimeZone(res["timeZone"], d, config)); err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("description", flattenDialogflowAgentDescription(res["description"], d, config)); err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("avatar_uri_backend", 
flattenDialogflowAgentAvatarUriBackend(res["avatarUri"], d, config)); err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("enable_logging", flattenDialogflowAgentEnableLogging(res["enableLogging"], d, config)); err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("match_mode", flattenDialogflowAgentMatchMode(res["matchMode"], d, config)); err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("classification_threshold", flattenDialogflowAgentClassificationThreshold(res["classificationThreshold"], d, config)); err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("api_version", flattenDialogflowAgentApiVersion(res["apiVersion"], d, config)); err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error reading Agent: %s", err) - } - - return nil -} - -func resourceDialogflowAgentUpdate(d *resource_dialogflow_agent_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error fetching project for Agent: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowAgentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - defaultLanguageCodeProp, err := expandDialogflowAgentDefaultLanguageCode(d.Get("default_language_code"), d, config) - if err != nil { - return err - } 
else if v, ok := d.GetOkExists("default_language_code"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, defaultLanguageCodeProp)) { - obj["defaultLanguageCode"] = defaultLanguageCodeProp - } - supportedLanguageCodesProp, err := expandDialogflowAgentSupportedLanguageCodes(d.Get("supported_language_codes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("supported_language_codes"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, supportedLanguageCodesProp)) { - obj["supportedLanguageCodes"] = supportedLanguageCodesProp - } - timeZoneProp, err := expandDialogflowAgentTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - descriptionProp, err := expandDialogflowAgentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - avatarUriProp, err := expandDialogflowAgentAvatarUri(d.Get("avatar_uri"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("avatar_uri"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, avatarUriProp)) { - obj["avatarUri"] = avatarUriProp - } - enableLoggingProp, err := expandDialogflowAgentEnableLogging(d.Get("enable_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_logging"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(v)) && (ok || 
!resource_dialogflow_agent_reflect.DeepEqual(v, enableLoggingProp)) { - obj["enableLogging"] = enableLoggingProp - } - matchModeProp, err := expandDialogflowAgentMatchMode(d.Get("match_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("match_mode"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, matchModeProp)) { - obj["matchMode"] = matchModeProp - } - classificationThresholdProp, err := expandDialogflowAgentClassificationThreshold(d.Get("classification_threshold"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("classification_threshold"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, classificationThresholdProp)) { - obj["classificationThreshold"] = classificationThresholdProp - } - apiVersionProp, err := expandDialogflowAgentApiVersion(d.Get("api_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("api_version"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, apiVersionProp)) { - obj["apiVersion"] = apiVersionProp - } - tierProp, err := expandDialogflowAgentTier(d.Get("tier"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tier"); !isEmptyValue(resource_dialogflow_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_agent_reflect.DeepEqual(v, tierProp)) { - obj["tier"] = tierProp - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent") - if err != nil { - return err - } - - resource_dialogflow_agent_log.Printf("[DEBUG] Updating Agent %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, 
d.Timeout(resource_dialogflow_agent_schema.TimeoutUpdate)) - - if err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error updating Agent %q: %s", d.Id(), err) - } else { - resource_dialogflow_agent_log.Printf("[DEBUG] Finished updating Agent %q: %#v", d.Id(), res) - } - - return resourceDialogflowAgentRead(d, meta) -} - -func resourceDialogflowAgentDelete(d *resource_dialogflow_agent_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_agent_fmt.Errorf("Error fetching project for Agent: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_dialogflow_agent_log.Printf("[DEBUG] Deleting Agent %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_agent_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Agent") - } - - resource_dialogflow_agent_log.Printf("[DEBUG] Finished deleting Agent %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowAgentImport(d *resource_dialogflow_agent_schema.ResourceData, meta interface{}) ([]*resource_dialogflow_agent_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{project}}") - if err != nil { - return nil, resource_dialogflow_agent_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_dialogflow_agent_schema.ResourceData{d}, nil -} - -func 
flattenDialogflowAgentDisplayName(v interface{}, d *resource_dialogflow_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentDefaultLanguageCode(v interface{}, d *resource_dialogflow_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentSupportedLanguageCodes(v interface{}, d *resource_dialogflow_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentTimeZone(v interface{}, d *resource_dialogflow_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentDescription(v interface{}, d *resource_dialogflow_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentAvatarUriBackend(v interface{}, d *resource_dialogflow_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentEnableLogging(v interface{}, d *resource_dialogflow_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentMatchMode(v interface{}, d *resource_dialogflow_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentClassificationThreshold(v interface{}, d *resource_dialogflow_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowAgentApiVersion(v interface{}, d *resource_dialogflow_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowAgentDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentDefaultLanguageCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentSupportedLanguageCodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDialogflowAgentTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentAvatarUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentEnableLogging(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentMatchMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentClassificationThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentApiVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowAgentTier(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDialogflowCXAgent() *resource_dialogflow_cx_agent_schema.Resource { - return &resource_dialogflow_cx_agent_schema.Resource{ - Create: resourceDialogflowCXAgentCreate, - Read: resourceDialogflowCXAgentRead, - Update: resourceDialogflowCXAgentUpdate, - Delete: resourceDialogflowCXAgentDelete, - - Importer: &resource_dialogflow_cx_agent_schema.ResourceImporter{ - State: resourceDialogflowCXAgentImport, - }, - - Timeouts: &resource_dialogflow_cx_agent_schema.ResourceTimeout{ - Create: resource_dialogflow_cx_agent_schema.DefaultTimeout(40 * resource_dialogflow_cx_agent_time.Minute), - Update: resource_dialogflow_cx_agent_schema.DefaultTimeout(40 * resource_dialogflow_cx_agent_time.Minute), - Delete: resource_dialogflow_cx_agent_schema.DefaultTimeout(4 * resource_dialogflow_cx_agent_time.Minute), - }, - - Schema: map[string]*resource_dialogflow_cx_agent_schema.Schema{ - 
"default_language_code": { - Type: resource_dialogflow_cx_agent_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The default language of the agent as a language tag. [See Language Support](https://cloud.google.com/dialogflow/cx/docs/reference/language) -for a list of the currently supported language codes. This field cannot be updated after creation.`, - }, - "display_name": { - Type: resource_dialogflow_cx_agent_schema.TypeString, - Required: true, - Description: `The human-readable name of the agent, unique within the location.`, - }, - "location": { - Type: resource_dialogflow_cx_agent_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the location this agent is located in. - -~> **Note:** The first time you are deploying an Agent in your project you must configure location settings. - This is a one time step but at the moment you can only [configure location settings](https://cloud.google.com/dialogflow/cx/docs/concept/region#location-settings) via the Dialogflow CX console. - Another options is to use global location so you don't need to manually configure location settings.`, - }, - "time_zone": { - Type: resource_dialogflow_cx_agent_schema.TypeString, - Required: true, - Description: `The time zone of this agent from the [time zone database](https://www.iana.org/time-zones), e.g., America/New_York, -Europe/Paris.`, - }, - "avatar_uri": { - Type: resource_dialogflow_cx_agent_schema.TypeString, - Optional: true, - Description: `The URI of the agent's avatar. Avatars are used throughout the Dialogflow console and in the self-hosted Web Demo integration.`, - }, - "description": { - Type: resource_dialogflow_cx_agent_schema.TypeString, - Optional: true, - ValidateFunc: resource_dialogflow_cx_agent_validation.StringLenBetween(0, 500), - Description: `The description of this agent. The maximum length is 500 characters. 
If exceeded, the request is rejected.`, - }, - "enable_spell_correction": { - Type: resource_dialogflow_cx_agent_schema.TypeBool, - Optional: true, - Description: `Indicates if automatic spell correction is enabled in detect intent requests.`, - }, - "enable_stackdriver_logging": { - Type: resource_dialogflow_cx_agent_schema.TypeBool, - Optional: true, - Description: `Determines whether this agent should log conversation queries.`, - }, - "security_settings": { - Type: resource_dialogflow_cx_agent_schema.TypeString, - Optional: true, - Description: `Name of the SecuritySettings reference for the agent. Format: projects//locations//securitySettings/.`, - }, - "speech_to_text_settings": { - Type: resource_dialogflow_cx_agent_schema.TypeList, - Optional: true, - Description: `Settings related to speech recognition.`, - MaxItems: 1, - Elem: &resource_dialogflow_cx_agent_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_agent_schema.Schema{ - "enable_speech_adaptation": { - Type: resource_dialogflow_cx_agent_schema.TypeBool, - Optional: true, - Description: `Whether to use speech adaptation for speech recognition.`, - }, - }, - }, - }, - "supported_language_codes": { - Type: resource_dialogflow_cx_agent_schema.TypeList, - Optional: true, - Description: `The list of all languages supported by this agent (except for the default_language_code).`, - Elem: &resource_dialogflow_cx_agent_schema.Schema{ - Type: resource_dialogflow_cx_agent_schema.TypeString, - }, - }, - "name": { - Type: resource_dialogflow_cx_agent_schema.TypeString, - Computed: true, - Description: `The unique identifier of the agent.`, - }, - "start_flow": { - Type: resource_dialogflow_cx_agent_schema.TypeString, - Computed: true, - Description: `Name of the start flow in this agent. A start flow will be automatically created when the agent is created, and can only be deleted by deleting the agent. 
Format: projects//locations//agents//flows/.`, - }, - "project": { - Type: resource_dialogflow_cx_agent_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowCXAgentCreate(d *resource_dialogflow_cx_agent_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXAgentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(displayNameProp)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - defaultLanguageCodeProp, err := expandDialogflowCXAgentDefaultLanguageCode(d.Get("default_language_code"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_language_code"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(defaultLanguageCodeProp)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, defaultLanguageCodeProp)) { - obj["defaultLanguageCode"] = defaultLanguageCodeProp - } - supportedLanguageCodesProp, err := expandDialogflowCXAgentSupportedLanguageCodes(d.Get("supported_language_codes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("supported_language_codes"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(supportedLanguageCodesProp)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, supportedLanguageCodesProp)) { - obj["supportedLanguageCodes"] = supportedLanguageCodesProp - } - timeZoneProp, err := expandDialogflowCXAgentTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); 
!isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(timeZoneProp)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - descriptionProp, err := expandDialogflowCXAgentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(descriptionProp)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - avatarUriProp, err := expandDialogflowCXAgentAvatarUri(d.Get("avatar_uri"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("avatar_uri"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(avatarUriProp)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, avatarUriProp)) { - obj["avatarUri"] = avatarUriProp - } - speechToTextSettingsProp, err := expandDialogflowCXAgentSpeechToTextSettings(d.Get("speech_to_text_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("speech_to_text_settings"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(speechToTextSettingsProp)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, speechToTextSettingsProp)) { - obj["speechToTextSettings"] = speechToTextSettingsProp - } - securitySettingsProp, err := expandDialogflowCXAgentSecuritySettings(d.Get("security_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("security_settings"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(securitySettingsProp)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, securitySettingsProp)) { - obj["securitySettings"] = securitySettingsProp - } - enableStackdriverLoggingProp, err := expandDialogflowCXAgentEnableStackdriverLogging(d.Get("enable_stackdriver_logging"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("enable_stackdriver_logging"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(enableStackdriverLoggingProp)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, enableStackdriverLoggingProp)) { - obj["enableStackdriverLogging"] = enableStackdriverLoggingProp - } - enableSpellCorrectionProp, err := expandDialogflowCXAgentEnableSpellCorrection(d.Get("enable_spell_correction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_spell_correction"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(enableSpellCorrectionProp)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, enableSpellCorrectionProp)) { - obj["enableSpellCorrection"] = enableSpellCorrectionProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}projects/{{project}}/locations/{{location}}/agents") - if err != nil { - return err - } - - resource_dialogflow_cx_agent_log.Printf("[DEBUG] Creating new Agent: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error fetching project for Agent: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_agent_schema.TimeoutCreate)) - if err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error creating Agent: %s", err) - } - if err := d.Set("name", flattenDialogflowCXAgentName(res["name"], d, config)); err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/agents/{{name}}") - if err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - 
resource_dialogflow_cx_agent_log.Printf("[DEBUG] Finished creating Agent %q: %#v", d.Id(), res) - - return resourceDialogflowCXAgentRead(d, meta) -} - -func resourceDialogflowCXAgentRead(d *resource_dialogflow_cx_agent_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}projects/{{project}}/locations/{{location}}/agents/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error fetching project for Agent: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_dialogflow_cx_agent_fmt.Sprintf("DialogflowCXAgent %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error reading Agent: %s", err) - } - - if err := d.Set("name", flattenDialogflowCXAgentName(res["name"], d, config)); err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("display_name", flattenDialogflowCXAgentDisplayName(res["displayName"], d, config)); err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("default_language_code", flattenDialogflowCXAgentDefaultLanguageCode(res["defaultLanguageCode"], d, config)); err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("supported_language_codes", flattenDialogflowCXAgentSupportedLanguageCodes(res["supportedLanguageCodes"], d, config)); err != nil { - return 
resource_dialogflow_cx_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("time_zone", flattenDialogflowCXAgentTimeZone(res["timeZone"], d, config)); err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("description", flattenDialogflowCXAgentDescription(res["description"], d, config)); err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("avatar_uri", flattenDialogflowCXAgentAvatarUri(res["avatarUri"], d, config)); err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("speech_to_text_settings", flattenDialogflowCXAgentSpeechToTextSettings(res["speechToTextSettings"], d, config)); err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("start_flow", flattenDialogflowCXAgentStartFlow(res["startFlow"], d, config)); err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("security_settings", flattenDialogflowCXAgentSecuritySettings(res["securitySettings"], d, config)); err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("enable_stackdriver_logging", flattenDialogflowCXAgentEnableStackdriverLogging(res["enableStackdriverLogging"], d, config)); err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error reading Agent: %s", err) - } - if err := d.Set("enable_spell_correction", flattenDialogflowCXAgentEnableSpellCorrection(res["enableSpellCorrection"], d, config)); err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error reading Agent: %s", err) - } - - return nil -} - -func resourceDialogflowCXAgentUpdate(d *resource_dialogflow_cx_agent_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error fetching project for Agent: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXAgentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - supportedLanguageCodesProp, err := expandDialogflowCXAgentSupportedLanguageCodes(d.Get("supported_language_codes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("supported_language_codes"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, supportedLanguageCodesProp)) { - obj["supportedLanguageCodes"] = supportedLanguageCodesProp - } - timeZoneProp, err := expandDialogflowCXAgentTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - descriptionProp, err := expandDialogflowCXAgentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - avatarUriProp, err := expandDialogflowCXAgentAvatarUri(d.Get("avatar_uri"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("avatar_uri"); 
!isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, avatarUriProp)) { - obj["avatarUri"] = avatarUriProp - } - speechToTextSettingsProp, err := expandDialogflowCXAgentSpeechToTextSettings(d.Get("speech_to_text_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("speech_to_text_settings"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, speechToTextSettingsProp)) { - obj["speechToTextSettings"] = speechToTextSettingsProp - } - securitySettingsProp, err := expandDialogflowCXAgentSecuritySettings(d.Get("security_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("security_settings"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, securitySettingsProp)) { - obj["securitySettings"] = securitySettingsProp - } - enableStackdriverLoggingProp, err := expandDialogflowCXAgentEnableStackdriverLogging(d.Get("enable_stackdriver_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_stackdriver_logging"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, enableStackdriverLoggingProp)) { - obj["enableStackdriverLogging"] = enableStackdriverLoggingProp - } - enableSpellCorrectionProp, err := expandDialogflowCXAgentEnableSpellCorrection(d.Get("enable_spell_correction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_spell_correction"); !isEmptyValue(resource_dialogflow_cx_agent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_agent_reflect.DeepEqual(v, enableSpellCorrectionProp)) { - obj["enableSpellCorrection"] = enableSpellCorrectionProp - } - - url, err := replaceVars(d, config, 
"{{DialogflowCXBasePath}}projects/{{project}}/locations/{{location}}/agents/{{name}}") - if err != nil { - return err - } - - resource_dialogflow_cx_agent_log.Printf("[DEBUG] Updating Agent %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("supported_language_codes") { - updateMask = append(updateMask, "supportedLanguageCodes") - } - - if d.HasChange("time_zone") { - updateMask = append(updateMask, "timeZone") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("avatar_uri") { - updateMask = append(updateMask, "avatarUri") - } - - if d.HasChange("speech_to_text_settings") { - updateMask = append(updateMask, "speechToTextSettings") - } - - if d.HasChange("security_settings") { - updateMask = append(updateMask, "securitySettings") - } - - if d.HasChange("enable_stackdriver_logging") { - updateMask = append(updateMask, "enableStackdriverLogging") - } - - if d.HasChange("enable_spell_correction") { - updateMask = append(updateMask, "enableSpellCorrection") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_dialogflow_cx_agent_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_agent_schema.TimeoutUpdate)) - - if err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error updating Agent %q: %s", d.Id(), err) - } else { - resource_dialogflow_cx_agent_log.Printf("[DEBUG] Finished updating Agent %q: %#v", d.Id(), res) - } - - return resourceDialogflowCXAgentRead(d, meta) -} - -func resourceDialogflowCXAgentDelete(d *resource_dialogflow_cx_agent_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_cx_agent_fmt.Errorf("Error fetching project for Agent: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}projects/{{project}}/locations/{{location}}/agents/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_dialogflow_cx_agent_log.Printf("[DEBUG] Deleting Agent %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_agent_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Agent") - } - - resource_dialogflow_cx_agent_log.Printf("[DEBUG] Finished deleting Agent %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowCXAgentImport(d *resource_dialogflow_cx_agent_schema.ResourceData, meta interface{}) ([]*resource_dialogflow_cx_agent_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/agents/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/agents/{{name}}") - if err != nil { - return nil, resource_dialogflow_cx_agent_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_dialogflow_cx_agent_schema.ResourceData{d}, nil -} - -func flattenDialogflowCXAgentName(v interface{}, d *resource_dialogflow_cx_agent_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDialogflowCXAgentDisplayName(v interface{}, d 
*resource_dialogflow_cx_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentDefaultLanguageCode(v interface{}, d *resource_dialogflow_cx_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentSupportedLanguageCodes(v interface{}, d *resource_dialogflow_cx_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentTimeZone(v interface{}, d *resource_dialogflow_cx_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentDescription(v interface{}, d *resource_dialogflow_cx_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentAvatarUri(v interface{}, d *resource_dialogflow_cx_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentSpeechToTextSettings(v interface{}, d *resource_dialogflow_cx_agent_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enable_speech_adaptation"] = - flattenDialogflowCXAgentSpeechToTextSettingsEnableSpeechAdaptation(original["enableSpeechAdaptation"], d, config) - return []interface{}{transformed} -} - -func flattenDialogflowCXAgentSpeechToTextSettingsEnableSpeechAdaptation(v interface{}, d *resource_dialogflow_cx_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentStartFlow(v interface{}, d *resource_dialogflow_cx_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentSecuritySettings(v interface{}, d *resource_dialogflow_cx_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentEnableStackdriverLogging(v interface{}, d 
*resource_dialogflow_cx_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXAgentEnableSpellCorrection(v interface{}, d *resource_dialogflow_cx_agent_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowCXAgentDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentDefaultLanguageCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentSupportedLanguageCodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentAvatarUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentSpeechToTextSettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnableSpeechAdaptation, err := expandDialogflowCXAgentSpeechToTextSettingsEnableSpeechAdaptation(original["enable_speech_adaptation"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_agent_reflect.ValueOf(transformedEnableSpeechAdaptation); val.IsValid() && !isEmptyValue(val) { - transformed["enableSpeechAdaptation"] = transformedEnableSpeechAdaptation - } - - return transformed, nil -} - -func expandDialogflowCXAgentSpeechToTextSettingsEnableSpeechAdaptation(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentSecuritySettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentEnableStackdriverLogging(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXAgentEnableSpellCorrection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDialogflowCXEntityType() *resource_dialogflow_cx_entity_type_schema.Resource { - return &resource_dialogflow_cx_entity_type_schema.Resource{ - Create: resourceDialogflowCXEntityTypeCreate, - Read: resourceDialogflowCXEntityTypeRead, - Update: resourceDialogflowCXEntityTypeUpdate, - Delete: resourceDialogflowCXEntityTypeDelete, - - Importer: &resource_dialogflow_cx_entity_type_schema.ResourceImporter{ - State: resourceDialogflowCXEntityTypeImport, - }, - - Timeouts: &resource_dialogflow_cx_entity_type_schema.ResourceTimeout{ - Create: resource_dialogflow_cx_entity_type_schema.DefaultTimeout(40 * resource_dialogflow_cx_entity_type_time.Minute), - Update: resource_dialogflow_cx_entity_type_schema.DefaultTimeout(40 * resource_dialogflow_cx_entity_type_time.Minute), - Delete: resource_dialogflow_cx_entity_type_schema.DefaultTimeout(4 * resource_dialogflow_cx_entity_type_time.Minute), - }, - - Schema: map[string]*resource_dialogflow_cx_entity_type_schema.Schema{ - "display_name": { - Type: resource_dialogflow_cx_entity_type_schema.TypeString, - Required: true, - ValidateFunc: resource_dialogflow_cx_entity_type_validation.StringLenBetween(0, 64), - Description: `The human-readable name of the entity type, unique within the agent.`, - }, - "entities": { - Type: resource_dialogflow_cx_entity_type_schema.TypeList, - Required: true, - Description: `The collection of entity entries associated with the entity type.`, - Elem: 
&resource_dialogflow_cx_entity_type_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_entity_type_schema.Schema{ - "synonyms": { - Type: resource_dialogflow_cx_entity_type_schema.TypeList, - Optional: true, - Description: `A collection of value synonyms. For example, if the entity type is vegetable, and value is scallions, a synonym could be green onions. -For KIND_LIST entity types: This collection must contain exactly one synonym equal to value.`, - Elem: &resource_dialogflow_cx_entity_type_schema.Schema{ - Type: resource_dialogflow_cx_entity_type_schema.TypeString, - }, - }, - "value": { - Type: resource_dialogflow_cx_entity_type_schema.TypeString, - Optional: true, - Description: `The primary value associated with this entity entry. For example, if the entity type is vegetable, the value could be scallions. -For KIND_MAP entity types: A canonical value to be used in place of synonyms. -For KIND_LIST entity types: A string that can contain references to other entity types (with or without aliases).`, - }, - }, - }, - }, - "kind": { - Type: resource_dialogflow_cx_entity_type_schema.TypeString, - Required: true, - ValidateFunc: resource_dialogflow_cx_entity_type_validation.StringInSlice([]string{"KIND_MAP", "KIND_LIST", "KIND_REGEXP"}, false), - Description: `Indicates whether the entity type can be automatically expanded. -* KIND_MAP: Map entity types allow mapping of a group of synonyms to a canonical value. -* KIND_LIST: List entity types contain a set of entries that do not map to canonical values. However, list entity types can contain references to other entity types (with or without aliases). -* KIND_REGEXP: Regexp entity types allow to specify regular expressions in entries values. 
Possible values: ["KIND_MAP", "KIND_LIST", "KIND_REGEXP"]`, - }, - "auto_expansion_mode": { - Type: resource_dialogflow_cx_entity_type_schema.TypeString, - Optional: true, - ValidateFunc: resource_dialogflow_cx_entity_type_validation.StringInSlice([]string{"AUTO_EXPANSION_MODE_DEFAULT", "AUTO_EXPANSION_MODE_UNSPECIFIED", ""}, false), - Description: `Represents kinds of entities. -* AUTO_EXPANSION_MODE_UNSPECIFIED: Auto expansion disabled for the entity. -* AUTO_EXPANSION_MODE_DEFAULT: Allows an agent to recognize values that have not been explicitly listed in the entity. Possible values: ["AUTO_EXPANSION_MODE_DEFAULT", "AUTO_EXPANSION_MODE_UNSPECIFIED"]`, - }, - "enable_fuzzy_extraction": { - Type: resource_dialogflow_cx_entity_type_schema.TypeBool, - Optional: true, - Description: `Enables fuzzy entity extraction during classification.`, - }, - "excluded_phrases": { - Type: resource_dialogflow_cx_entity_type_schema.TypeList, - Optional: true, - Description: `Collection of exceptional words and phrases that shouldn't be matched. For example, if you have a size entity type with entry giant(an adjective), you might consider adding giants(a noun) as an exclusion. -If the kind of entity type is KIND_MAP, then the phrases specified by entities and excluded phrases should be mutually exclusive.`, - Elem: &resource_dialogflow_cx_entity_type_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_entity_type_schema.Schema{ - "value": { - Type: resource_dialogflow_cx_entity_type_schema.TypeString, - Optional: true, - Description: `The word or phrase to be excluded.`, - }, - }, - }, - }, - "language_code": { - Type: resource_dialogflow_cx_entity_type_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The language of the following fields in entityType: -EntityType.entities.value -EntityType.entities.synonyms -EntityType.excluded_phrases.value -If not specified, the agent's default language is used. Many languages are supported. 
Note: languages must be enabled in the agent before they can be used.`, - }, - "parent": { - Type: resource_dialogflow_cx_entity_type_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The agent to create a entity type for. -Format: projects//locations//agents/.`, - }, - "redact": { - Type: resource_dialogflow_cx_entity_type_schema.TypeBool, - Optional: true, - Description: `Indicates whether parameters of the entity type should be redacted in log. If redaction is enabled, page parameters and intent parameters referring to the entity type will be replaced by parameter name when logging.`, - }, - "name": { - Type: resource_dialogflow_cx_entity_type_schema.TypeString, - Computed: true, - Description: `The unique identifier of the entity type. -Format: projects//locations//agents//entityTypes/.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowCXEntityTypeCreate(d *resource_dialogflow_cx_entity_type_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXEntityTypeDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_cx_entity_type_reflect.ValueOf(displayNameProp)) && (ok || !resource_dialogflow_cx_entity_type_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - kindProp, err := expandDialogflowCXEntityTypeKind(d.Get("kind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kind"); !isEmptyValue(resource_dialogflow_cx_entity_type_reflect.ValueOf(kindProp)) && (ok || !resource_dialogflow_cx_entity_type_reflect.DeepEqual(v, kindProp)) { - obj["kind"] = kindProp - } - autoExpansionModeProp, err := 
expandDialogflowCXEntityTypeAutoExpansionMode(d.Get("auto_expansion_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("auto_expansion_mode"); !isEmptyValue(resource_dialogflow_cx_entity_type_reflect.ValueOf(autoExpansionModeProp)) && (ok || !resource_dialogflow_cx_entity_type_reflect.DeepEqual(v, autoExpansionModeProp)) { - obj["autoExpansionMode"] = autoExpansionModeProp - } - entitiesProp, err := expandDialogflowCXEntityTypeEntities(d.Get("entities"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entities"); !isEmptyValue(resource_dialogflow_cx_entity_type_reflect.ValueOf(entitiesProp)) && (ok || !resource_dialogflow_cx_entity_type_reflect.DeepEqual(v, entitiesProp)) { - obj["entities"] = entitiesProp - } - excludedPhrasesProp, err := expandDialogflowCXEntityTypeExcludedPhrases(d.Get("excluded_phrases"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("excluded_phrases"); !isEmptyValue(resource_dialogflow_cx_entity_type_reflect.ValueOf(excludedPhrasesProp)) && (ok || !resource_dialogflow_cx_entity_type_reflect.DeepEqual(v, excludedPhrasesProp)) { - obj["excludedPhrases"] = excludedPhrasesProp - } - enableFuzzyExtractionProp, err := expandDialogflowCXEntityTypeEnableFuzzyExtraction(d.Get("enable_fuzzy_extraction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_fuzzy_extraction"); !isEmptyValue(resource_dialogflow_cx_entity_type_reflect.ValueOf(enableFuzzyExtractionProp)) && (ok || !resource_dialogflow_cx_entity_type_reflect.DeepEqual(v, enableFuzzyExtractionProp)) { - obj["enableFuzzyExtraction"] = enableFuzzyExtractionProp - } - redactProp, err := expandDialogflowCXEntityTypeRedact(d.Get("redact"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("redact"); !isEmptyValue(resource_dialogflow_cx_entity_type_reflect.ValueOf(redactProp)) && (ok || !resource_dialogflow_cx_entity_type_reflect.DeepEqual(v, 
redactProp)) { - obj["redact"] = redactProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/entityTypes") - if err != nil { - return err - } - - resource_dialogflow_cx_entity_type_log.Printf("[DEBUG] Creating new EntityType: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_entity_type_schema.TimeoutCreate)) - if err != nil { - return resource_dialogflow_cx_entity_type_fmt.Errorf("Error creating EntityType: %s", err) - } - if err := d.Set("name", flattenDialogflowCXEntityTypeName(res["name"], d, config)); err != nil { - return resource_dialogflow_cx_entity_type_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{parent}}/entityTypes/{{name}}") - if err != nil { - return resource_dialogflow_cx_entity_type_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_dialogflow_cx_entity_type_log.Printf("[DEBUG] Finished creating EntityType %q: %#v", d.Id(), res) - - return resourceDialogflowCXEntityTypeRead(d, meta) -} - -func resourceDialogflowCXEntityTypeRead(d *resource_dialogflow_cx_entity_type_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/entityTypes/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_dialogflow_cx_entity_type_fmt.Sprintf("DialogflowCXEntityType %q", d.Id())) - } - - if err := d.Set("name", 
flattenDialogflowCXEntityTypeName(res["name"], d, config)); err != nil { - return resource_dialogflow_cx_entity_type_fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("display_name", flattenDialogflowCXEntityTypeDisplayName(res["displayName"], d, config)); err != nil { - return resource_dialogflow_cx_entity_type_fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("kind", flattenDialogflowCXEntityTypeKind(res["kind"], d, config)); err != nil { - return resource_dialogflow_cx_entity_type_fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("auto_expansion_mode", flattenDialogflowCXEntityTypeAutoExpansionMode(res["autoExpansionMode"], d, config)); err != nil { - return resource_dialogflow_cx_entity_type_fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("entities", flattenDialogflowCXEntityTypeEntities(res["entities"], d, config)); err != nil { - return resource_dialogflow_cx_entity_type_fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("excluded_phrases", flattenDialogflowCXEntityTypeExcludedPhrases(res["excludedPhrases"], d, config)); err != nil { - return resource_dialogflow_cx_entity_type_fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("enable_fuzzy_extraction", flattenDialogflowCXEntityTypeEnableFuzzyExtraction(res["enableFuzzyExtraction"], d, config)); err != nil { - return resource_dialogflow_cx_entity_type_fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("redact", flattenDialogflowCXEntityTypeRedact(res["redact"], d, config)); err != nil { - return resource_dialogflow_cx_entity_type_fmt.Errorf("Error reading EntityType: %s", err) - } - - return nil -} - -func resourceDialogflowCXEntityTypeUpdate(d *resource_dialogflow_cx_entity_type_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := 
"" - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXEntityTypeDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_cx_entity_type_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_entity_type_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - kindProp, err := expandDialogflowCXEntityTypeKind(d.Get("kind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kind"); !isEmptyValue(resource_dialogflow_cx_entity_type_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_entity_type_reflect.DeepEqual(v, kindProp)) { - obj["kind"] = kindProp - } - autoExpansionModeProp, err := expandDialogflowCXEntityTypeAutoExpansionMode(d.Get("auto_expansion_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("auto_expansion_mode"); !isEmptyValue(resource_dialogflow_cx_entity_type_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_entity_type_reflect.DeepEqual(v, autoExpansionModeProp)) { - obj["autoExpansionMode"] = autoExpansionModeProp - } - entitiesProp, err := expandDialogflowCXEntityTypeEntities(d.Get("entities"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entities"); !isEmptyValue(resource_dialogflow_cx_entity_type_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_entity_type_reflect.DeepEqual(v, entitiesProp)) { - obj["entities"] = entitiesProp - } - excludedPhrasesProp, err := expandDialogflowCXEntityTypeExcludedPhrases(d.Get("excluded_phrases"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("excluded_phrases"); !isEmptyValue(resource_dialogflow_cx_entity_type_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_entity_type_reflect.DeepEqual(v, excludedPhrasesProp)) { - obj["excludedPhrases"] = excludedPhrasesProp - } - enableFuzzyExtractionProp, err := 
expandDialogflowCXEntityTypeEnableFuzzyExtraction(d.Get("enable_fuzzy_extraction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_fuzzy_extraction"); !isEmptyValue(resource_dialogflow_cx_entity_type_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_entity_type_reflect.DeepEqual(v, enableFuzzyExtractionProp)) { - obj["enableFuzzyExtraction"] = enableFuzzyExtractionProp - } - redactProp, err := expandDialogflowCXEntityTypeRedact(d.Get("redact"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("redact"); !isEmptyValue(resource_dialogflow_cx_entity_type_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_entity_type_reflect.DeepEqual(v, redactProp)) { - obj["redact"] = redactProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/entityTypes/{{name}}") - if err != nil { - return err - } - - resource_dialogflow_cx_entity_type_log.Printf("[DEBUG] Updating EntityType %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("kind") { - updateMask = append(updateMask, "kind") - } - - if d.HasChange("auto_expansion_mode") { - updateMask = append(updateMask, "autoExpansionMode") - } - - if d.HasChange("entities") { - updateMask = append(updateMask, "entities") - } - - if d.HasChange("excluded_phrases") { - updateMask = append(updateMask, "excludedPhrases") - } - - if d.HasChange("enable_fuzzy_extraction") { - updateMask = append(updateMask, "enableFuzzyExtraction") - } - - if d.HasChange("redact") { - updateMask = append(updateMask, "redact") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_dialogflow_cx_entity_type_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, 
userAgent, obj, d.Timeout(resource_dialogflow_cx_entity_type_schema.TimeoutUpdate)) - - if err != nil { - return resource_dialogflow_cx_entity_type_fmt.Errorf("Error updating EntityType %q: %s", d.Id(), err) - } else { - resource_dialogflow_cx_entity_type_log.Printf("[DEBUG] Finished updating EntityType %q: %#v", d.Id(), res) - } - - return resourceDialogflowCXEntityTypeRead(d, meta) -} - -func resourceDialogflowCXEntityTypeDelete(d *resource_dialogflow_cx_entity_type_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/entityTypes/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_dialogflow_cx_entity_type_log.Printf("[DEBUG] Deleting EntityType %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_entity_type_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "EntityType") - } - - resource_dialogflow_cx_entity_type_log.Printf("[DEBUG] Finished deleting EntityType %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowCXEntityTypeImport(d *resource_dialogflow_cx_entity_type_schema.ResourceData, meta interface{}) ([]*resource_dialogflow_cx_entity_type_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "(?P.+)/entityTypes/(?P[^/]+)", - "(?P.+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{parent}}/entityTypes/{{name}}") - if err != nil { - return nil, resource_dialogflow_cx_entity_type_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return 
[]*resource_dialogflow_cx_entity_type_schema.ResourceData{d}, nil -} - -func flattenDialogflowCXEntityTypeName(v interface{}, d *resource_dialogflow_cx_entity_type_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDialogflowCXEntityTypeDisplayName(v interface{}, d *resource_dialogflow_cx_entity_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEntityTypeKind(v interface{}, d *resource_dialogflow_cx_entity_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEntityTypeAutoExpansionMode(v interface{}, d *resource_dialogflow_cx_entity_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEntityTypeEntities(v interface{}, d *resource_dialogflow_cx_entity_type_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "value": flattenDialogflowCXEntityTypeEntitiesValue(original["value"], d, config), - "synonyms": flattenDialogflowCXEntityTypeEntitiesSynonyms(original["synonyms"], d, config), - }) - } - return transformed -} - -func flattenDialogflowCXEntityTypeEntitiesValue(v interface{}, d *resource_dialogflow_cx_entity_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEntityTypeEntitiesSynonyms(v interface{}, d *resource_dialogflow_cx_entity_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEntityTypeExcludedPhrases(v interface{}, d *resource_dialogflow_cx_entity_type_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := 
make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "value": flattenDialogflowCXEntityTypeExcludedPhrasesValue(original["value"], d, config), - }) - } - return transformed -} - -func flattenDialogflowCXEntityTypeExcludedPhrasesValue(v interface{}, d *resource_dialogflow_cx_entity_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEntityTypeEnableFuzzyExtraction(v interface{}, d *resource_dialogflow_cx_entity_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEntityTypeRedact(v interface{}, d *resource_dialogflow_cx_entity_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowCXEntityTypeDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEntityTypeKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEntityTypeAutoExpansionMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEntityTypeEntities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedValue, err := expandDialogflowCXEntityTypeEntitiesValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_entity_type_reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - transformedSynonyms, err := 
expandDialogflowCXEntityTypeEntitiesSynonyms(original["synonyms"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_entity_type_reflect.ValueOf(transformedSynonyms); val.IsValid() && !isEmptyValue(val) { - transformed["synonyms"] = transformedSynonyms - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXEntityTypeEntitiesValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEntityTypeEntitiesSynonyms(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEntityTypeExcludedPhrases(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedValue, err := expandDialogflowCXEntityTypeExcludedPhrasesValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_entity_type_reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXEntityTypeExcludedPhrasesValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEntityTypeEnableFuzzyExtraction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEntityTypeRedact(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDialogflowCXEnvironment() *resource_dialogflow_cx_environment_schema.Resource { - return &resource_dialogflow_cx_environment_schema.Resource{ - Create: 
resourceDialogflowCXEnvironmentCreate, - Read: resourceDialogflowCXEnvironmentRead, - Update: resourceDialogflowCXEnvironmentUpdate, - Delete: resourceDialogflowCXEnvironmentDelete, - - Importer: &resource_dialogflow_cx_environment_schema.ResourceImporter{ - State: resourceDialogflowCXEnvironmentImport, - }, - - Timeouts: &resource_dialogflow_cx_environment_schema.ResourceTimeout{ - Create: resource_dialogflow_cx_environment_schema.DefaultTimeout(40 * resource_dialogflow_cx_environment_time.Minute), - Update: resource_dialogflow_cx_environment_schema.DefaultTimeout(40 * resource_dialogflow_cx_environment_time.Minute), - Delete: resource_dialogflow_cx_environment_schema.DefaultTimeout(4 * resource_dialogflow_cx_environment_time.Minute), - }, - - Schema: map[string]*resource_dialogflow_cx_environment_schema.Schema{ - "display_name": { - Type: resource_dialogflow_cx_environment_schema.TypeString, - Required: true, - ValidateFunc: resource_dialogflow_cx_environment_validation.StringLenBetween(0, 64), - Description: `The human-readable name of the environment (unique in an agent). Limit of 64 characters.`, - }, - "version_configs": { - Type: resource_dialogflow_cx_environment_schema.TypeList, - Required: true, - Description: `A list of configurations for flow versions. You should include version configs for all flows that are reachable from [Start Flow][Agent.start_flow] in the agent. 
Otherwise, an error will be returned.`, - Elem: &resource_dialogflow_cx_environment_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_environment_schema.Schema{ - "version": { - Type: resource_dialogflow_cx_environment_schema.TypeString, - Required: true, - Description: `Format: projects/{{project}}/locations/{{location}}/agents/{{agent}}/flows/{{flow}}/versions/{{version}}.`, - }, - }, - }, - }, - "description": { - Type: resource_dialogflow_cx_environment_schema.TypeString, - Optional: true, - ValidateFunc: resource_dialogflow_cx_environment_validation.StringLenBetween(0, 500), - Description: `The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected.`, - }, - "parent": { - Type: resource_dialogflow_cx_environment_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The Agent to create an Environment for. -Format: projects//locations//agents/.`, - }, - "name": { - Type: resource_dialogflow_cx_environment_schema.TypeString, - Computed: true, - Description: `The name of the environment.`, - }, - "update_time": { - Type: resource_dialogflow_cx_environment_schema.TypeString, - Computed: true, - Description: `Update time of this environment. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowCXEnvironmentCreate(d *resource_dialogflow_cx_environment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXEnvironmentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_cx_environment_reflect.ValueOf(displayNameProp)) && (ok || !resource_dialogflow_cx_environment_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDialogflowCXEnvironmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_dialogflow_cx_environment_reflect.ValueOf(descriptionProp)) && (ok || !resource_dialogflow_cx_environment_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - versionConfigsProp, err := expandDialogflowCXEnvironmentVersionConfigs(d.Get("version_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version_configs"); !isEmptyValue(resource_dialogflow_cx_environment_reflect.ValueOf(versionConfigsProp)) && (ok || !resource_dialogflow_cx_environment_reflect.DeepEqual(v, versionConfigsProp)) { - obj["versionConfigs"] = versionConfigsProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/environments") - if err != nil { - return err - } - - resource_dialogflow_cx_environment_log.Printf("[DEBUG] Creating new Environment: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_environment_schema.TimeoutCreate)) - if err != nil { - return resource_dialogflow_cx_environment_fmt.Errorf("Error creating Environment: %s", err) - } - - id, err := replaceVars(d, config, "{{parent}}/environments/{{name}}") - if err != nil { - return resource_dialogflow_cx_environment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = dialogflowCXOperationWaitTimeWithResponse( - config, res, &opRes, "Creating Environment", userAgent, - d.Timeout(resource_dialogflow_cx_environment_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_dialogflow_cx_environment_fmt.Errorf("Error waiting to create Environment: %s", err) - } - - if err := d.Set("name", flattenDialogflowCXEnvironmentName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{parent}}/environments/{{name}}") - if err != nil { - return resource_dialogflow_cx_environment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_dialogflow_cx_environment_log.Printf("[DEBUG] Finished creating Environment %q: %#v", d.Id(), res) - - return resourceDialogflowCXEnvironmentRead(d, meta) -} - -func resourceDialogflowCXEnvironmentRead(d *resource_dialogflow_cx_environment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/environments/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, 
resource_dialogflow_cx_environment_fmt.Sprintf("DialogflowCXEnvironment %q", d.Id())) - } - - if err := d.Set("name", flattenDialogflowCXEnvironmentName(res["name"], d, config)); err != nil { - return resource_dialogflow_cx_environment_fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("display_name", flattenDialogflowCXEnvironmentDisplayName(res["displayName"], d, config)); err != nil { - return resource_dialogflow_cx_environment_fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("description", flattenDialogflowCXEnvironmentDescription(res["description"], d, config)); err != nil { - return resource_dialogflow_cx_environment_fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("version_configs", flattenDialogflowCXEnvironmentVersionConfigs(res["versionConfigs"], d, config)); err != nil { - return resource_dialogflow_cx_environment_fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("update_time", flattenDialogflowCXEnvironmentUpdateTime(res["updateTime"], d, config)); err != nil { - return resource_dialogflow_cx_environment_fmt.Errorf("Error reading Environment: %s", err) - } - - return nil -} - -func resourceDialogflowCXEnvironmentUpdate(d *resource_dialogflow_cx_environment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXEnvironmentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_cx_environment_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_environment_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDialogflowCXEnvironmentDescription(d.Get("description"), d, config) - if 
err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_dialogflow_cx_environment_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_environment_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - versionConfigsProp, err := expandDialogflowCXEnvironmentVersionConfigs(d.Get("version_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version_configs"); !isEmptyValue(resource_dialogflow_cx_environment_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_environment_reflect.DeepEqual(v, versionConfigsProp)) { - obj["versionConfigs"] = versionConfigsProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/environments/{{name}}") - if err != nil { - return err - } - - resource_dialogflow_cx_environment_log.Printf("[DEBUG] Updating Environment %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("version_configs") { - updateMask = append(updateMask, "versionConfigs") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_dialogflow_cx_environment_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_environment_schema.TimeoutUpdate)) - - if err != nil { - return resource_dialogflow_cx_environment_fmt.Errorf("Error updating Environment %q: %s", d.Id(), err) - } else { - resource_dialogflow_cx_environment_log.Printf("[DEBUG] Finished updating Environment %q: %#v", d.Id(), res) - } - - err = dialogflowCXOperationWaitTime( - config, res, "Updating Environment", userAgent, - 
d.Timeout(resource_dialogflow_cx_environment_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceDialogflowCXEnvironmentRead(d, meta) -} - -func resourceDialogflowCXEnvironmentDelete(d *resource_dialogflow_cx_environment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/environments/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_dialogflow_cx_environment_log.Printf("[DEBUG] Deleting Environment %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_environment_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Environment") - } - - err = dialogflowCXOperationWaitTime( - config, res, "Deleting Environment", userAgent, - d.Timeout(resource_dialogflow_cx_environment_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_dialogflow_cx_environment_log.Printf("[DEBUG] Finished deleting Environment %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowCXEnvironmentImport(d *resource_dialogflow_cx_environment_schema.ResourceData, meta interface{}) ([]*resource_dialogflow_cx_environment_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "(?P.+)/environments/(?P[^/]+)", - "(?P.+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{parent}}/environments/{{name}}") - if err != nil { - return nil, resource_dialogflow_cx_environment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_dialogflow_cx_environment_schema.ResourceData{d}, 
nil -} - -func flattenDialogflowCXEnvironmentName(v interface{}, d *resource_dialogflow_cx_environment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDialogflowCXEnvironmentDisplayName(v interface{}, d *resource_dialogflow_cx_environment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEnvironmentDescription(v interface{}, d *resource_dialogflow_cx_environment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEnvironmentVersionConfigs(v interface{}, d *resource_dialogflow_cx_environment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "version": flattenDialogflowCXEnvironmentVersionConfigsVersion(original["version"], d, config), - }) - } - return transformed -} - -func flattenDialogflowCXEnvironmentVersionConfigsVersion(v interface{}, d *resource_dialogflow_cx_environment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXEnvironmentUpdateTime(v interface{}, d *resource_dialogflow_cx_environment_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowCXEnvironmentDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEnvironmentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXEnvironmentVersionConfigs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == 
nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedVersion, err := expandDialogflowCXEnvironmentVersionConfigsVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_environment_reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXEnvironmentVersionConfigsVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDialogflowCXFlow() *resource_dialogflow_cx_flow_schema.Resource { - return &resource_dialogflow_cx_flow_schema.Resource{ - Create: resourceDialogflowCXFlowCreate, - Read: resourceDialogflowCXFlowRead, - Update: resourceDialogflowCXFlowUpdate, - Delete: resourceDialogflowCXFlowDelete, - - Importer: &resource_dialogflow_cx_flow_schema.ResourceImporter{ - State: resourceDialogflowCXFlowImport, - }, - - Timeouts: &resource_dialogflow_cx_flow_schema.ResourceTimeout{ - Create: resource_dialogflow_cx_flow_schema.DefaultTimeout(40 * resource_dialogflow_cx_flow_time.Minute), - Update: resource_dialogflow_cx_flow_schema.DefaultTimeout(40 * resource_dialogflow_cx_flow_time.Minute), - Delete: resource_dialogflow_cx_flow_schema.DefaultTimeout(4 * resource_dialogflow_cx_flow_time.Minute), - }, - - Schema: map[string]*resource_dialogflow_cx_flow_schema.Schema{ - "display_name": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Required: true, - Description: `The human-readable name of the flow.`, - }, - "description": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Optional: true, - ValidateFunc: resource_dialogflow_cx_flow_validation.StringLenBetween(0, 500), - Description: `The description of the flow. The maximum length is 500 characters. 
If exceeded, the request is rejected.`, - }, - "event_handlers": { - Type: resource_dialogflow_cx_flow_schema.TypeList, - Computed: true, - Optional: true, - Description: `A flow's event handlers serve two purposes: -They are responsible for handling events (e.g. no match, webhook errors) in the flow. -They are inherited by every page's [event handlers][Page.event_handlers], which can be used to handle common events regardless of the current page. Event handlers defined in the page have higher priority than those defined in the flow. -Unlike transitionRoutes, these handlers are evaluated on a first-match basis. The first one that matches the event get executed, with the rest being ignored.`, - Elem: &resource_dialogflow_cx_flow_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_flow_schema.Schema{ - "event": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Optional: true, - Description: `The name of the event to handle.`, - }, - "target_flow": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Optional: true, - Description: `The target flow to transition to. -Format: projects//locations//agents//flows/.`, - }, - "target_page": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Optional: true, - Description: `The target page to transition to. -Format: projects//locations//agents//flows//pages/.`, - }, - "trigger_fulfillment": { - Type: resource_dialogflow_cx_flow_schema.TypeList, - Optional: true, - Description: `The fulfillment to call when the event occurs. Handling webhook errors with a fulfillment enabled with webhook could cause infinite loop. 
It is invalid to specify such fulfillment for a handler handling webhooks.`, - MaxItems: 1, - Elem: &resource_dialogflow_cx_flow_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_flow_schema.Schema{ - "messages": { - Type: resource_dialogflow_cx_flow_schema.TypeList, - Optional: true, - Description: `The list of rich message responses to present to the user.`, - Elem: &resource_dialogflow_cx_flow_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_flow_schema.Schema{ - "text": { - Type: resource_dialogflow_cx_flow_schema.TypeList, - Optional: true, - Description: `The text response message.`, - MaxItems: 1, - Elem: &resource_dialogflow_cx_flow_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_flow_schema.Schema{ - "text": { - Type: resource_dialogflow_cx_flow_schema.TypeList, - Optional: true, - Description: `A collection of text responses.`, - Elem: &resource_dialogflow_cx_flow_schema.Schema{ - Type: resource_dialogflow_cx_flow_schema.TypeString, - }, - }, - "allow_playback_interruption": { - Type: resource_dialogflow_cx_flow_schema.TypeBool, - Computed: true, - Description: `Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.`, - }, - }, - }, - }, - }, - }, - }, - "return_partial_responses": { - Type: resource_dialogflow_cx_flow_schema.TypeBool, - Optional: true, - Description: `Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. 
You may only want to apply it to fulfillments that have slow webhooks.`, - }, - "tag": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Optional: true, - Description: `The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified.`, - }, - "webhook": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Optional: true, - Description: `The webhook to call. Format: projects//locations//agents//webhooks/.`, - }, - }, - }, - }, - "name": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Computed: true, - Description: `The unique identifier of this event handler.`, - }, - }, - }, - }, - "language_code": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The language of the following fields in flow: -Flow.event_handlers.trigger_fulfillment.messages -Flow.event_handlers.trigger_fulfillment.conditional_cases -Flow.transition_routes.trigger_fulfillment.messages -Flow.transition_routes.trigger_fulfillment.conditional_cases -If not specified, the agent's default language is used. Many languages are supported. Note: languages must be enabled in the agent before they can be used.`, - }, - "nlu_settings": { - Type: resource_dialogflow_cx_flow_schema.TypeList, - Optional: true, - Description: `NLU related settings of the flow.`, - MaxItems: 1, - Elem: &resource_dialogflow_cx_flow_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_flow_schema.Schema{ - "classification_threshold": { - Type: resource_dialogflow_cx_flow_schema.TypeFloat, - Optional: true, - Description: `To filter out false positive results and still get variety in matched natural language inputs for your agent, you can tune the machine learning classification threshold. -If the returned score value is less than the threshold value, then a no-match event will be triggered. The score values range from 0.0 (completely uncertain) to 1.0 (completely certain). 
If set to 0.0, the default of 0.3 is used.`, - }, - "model_training_mode": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Optional: true, - ValidateFunc: resource_dialogflow_cx_flow_validation.StringInSlice([]string{"MODEL_TRAINING_MODE_AUTOMATIC", "MODEL_TRAINING_MODE_MANUAL", ""}, false), - Description: `Indicates NLU model training mode. -* MODEL_TRAINING_MODE_AUTOMATIC: NLU model training is automatically triggered when a flow gets modified. User can also manually trigger model training in this mode. -* MODEL_TRAINING_MODE_MANUAL: User needs to manually trigger NLU model training. Best for large flows whose models take long time to train. Possible values: ["MODEL_TRAINING_MODE_AUTOMATIC", "MODEL_TRAINING_MODE_MANUAL"]`, - }, - "model_type": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Optional: true, - ValidateFunc: resource_dialogflow_cx_flow_validation.StringInSlice([]string{"MODEL_TYPE_STANDARD", "MODEL_TYPE_ADVANCED", ""}, false), - Description: `Indicates the type of NLU model. -* MODEL_TYPE_STANDARD: Use standard NLU model. -* MODEL_TYPE_ADVANCED: Use advanced NLU model. Possible values: ["MODEL_TYPE_STANDARD", "MODEL_TYPE_ADVANCED"]`, - }, - }, - }, - }, - "parent": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The agent to create a flow for. -Format: projects//locations//agents/.`, - }, - "transition_route_groups": { - Type: resource_dialogflow_cx_flow_schema.TypeList, - Optional: true, - Description: `A flow's transition route group serve two purposes: -They are responsible for matching the user's first utterances in the flow. -They are inherited by every page's [transition route groups][Page.transition_route_groups]. Transition route groups defined in the page have higher priority than those defined in the flow. 
-Format:projects//locations//agents//flows//transitionRouteGroups/.`, - Elem: &resource_dialogflow_cx_flow_schema.Schema{ - Type: resource_dialogflow_cx_flow_schema.TypeString, - }, - }, - "transition_routes": { - Type: resource_dialogflow_cx_flow_schema.TypeList, - Optional: true, - Description: `A flow's transition routes serve two purposes: -They are responsible for matching the user's first utterances in the flow. -They are inherited by every page's [transition routes][Page.transition_routes] and can support use cases such as the user saying "help" or "can I talk to a human?", which can be handled in a common way regardless of the current page. Transition routes defined in the page have higher priority than those defined in the flow. - -TransitionRoutes are evalauted in the following order: - TransitionRoutes with intent specified. - TransitionRoutes with only condition specified. - TransitionRoutes with intent specified are inherited by pages in the flow.`, - Elem: &resource_dialogflow_cx_flow_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_flow_schema.Schema{ - "condition": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Optional: true, - Description: `The condition to evaluate against form parameters or session parameters. -At least one of intent or condition must be specified. When both intent and condition are specified, the transition can only happen when both are fulfilled.`, - }, - "intent": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Optional: true, - Description: `The unique identifier of an Intent. -Format: projects//locations//agents//intents/. Indicates that the transition can only happen when the given intent is matched. At least one of intent or condition must be specified. 
When both intent and condition are specified, the transition can only happen when both are fulfilled.`, - }, - "target_flow": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Optional: true, - Description: `The target flow to transition to. -Format: projects//locations//agents//flows/.`, - }, - "target_page": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Optional: true, - Description: `The target page to transition to. -Format: projects//locations//agents//flows//pages/.`, - }, - "trigger_fulfillment": { - Type: resource_dialogflow_cx_flow_schema.TypeList, - Optional: true, - Description: `The fulfillment to call when the condition is satisfied. At least one of triggerFulfillment and target must be specified. When both are defined, triggerFulfillment is executed first.`, - MaxItems: 1, - Elem: &resource_dialogflow_cx_flow_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_flow_schema.Schema{ - "messages": { - Type: resource_dialogflow_cx_flow_schema.TypeList, - Optional: true, - Description: `The list of rich message responses to present to the user.`, - Elem: &resource_dialogflow_cx_flow_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_flow_schema.Schema{ - "text": { - Type: resource_dialogflow_cx_flow_schema.TypeList, - Optional: true, - Description: `The text response message.`, - MaxItems: 1, - Elem: &resource_dialogflow_cx_flow_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_flow_schema.Schema{ - "text": { - Type: resource_dialogflow_cx_flow_schema.TypeList, - Optional: true, - Description: `A collection of text responses.`, - Elem: &resource_dialogflow_cx_flow_schema.Schema{ - Type: resource_dialogflow_cx_flow_schema.TypeString, - }, - }, - "allow_playback_interruption": { - Type: resource_dialogflow_cx_flow_schema.TypeBool, - Computed: true, - Description: `Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.`, 
- }, - }, - }, - }, - }, - }, - }, - "return_partial_responses": { - Type: resource_dialogflow_cx_flow_schema.TypeBool, - Optional: true, - Description: `Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks.`, - }, - "tag": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Optional: true, - Description: `The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified.`, - }, - "webhook": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Optional: true, - Description: `The webhook to call. Format: projects//locations//agents//webhooks/.`, - }, - }, - }, - }, - "name": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Computed: true, - Description: `The unique identifier of this transition route.`, - }, - }, - }, - }, - "name": { - Type: resource_dialogflow_cx_flow_schema.TypeString, - Computed: true, - Description: `The unique identifier of the flow. 
-Format: projects//locations//agents//flows/.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowCXFlowCreate(d *resource_dialogflow_cx_flow_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXFlowDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_cx_flow_reflect.ValueOf(displayNameProp)) && (ok || !resource_dialogflow_cx_flow_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDialogflowCXFlowDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_dialogflow_cx_flow_reflect.ValueOf(descriptionProp)) && (ok || !resource_dialogflow_cx_flow_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - transitionRoutesProp, err := expandDialogflowCXFlowTransitionRoutes(d.Get("transition_routes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("transition_routes"); !isEmptyValue(resource_dialogflow_cx_flow_reflect.ValueOf(transitionRoutesProp)) && (ok || !resource_dialogflow_cx_flow_reflect.DeepEqual(v, transitionRoutesProp)) { - obj["transitionRoutes"] = transitionRoutesProp - } - eventHandlersProp, err := expandDialogflowCXFlowEventHandlers(d.Get("event_handlers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("event_handlers"); !isEmptyValue(resource_dialogflow_cx_flow_reflect.ValueOf(eventHandlersProp)) && (ok || !resource_dialogflow_cx_flow_reflect.DeepEqual(v, eventHandlersProp)) { - obj["eventHandlers"] = eventHandlersProp - } - transitionRouteGroupsProp, err := 
expandDialogflowCXFlowTransitionRouteGroups(d.Get("transition_route_groups"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("transition_route_groups"); !isEmptyValue(resource_dialogflow_cx_flow_reflect.ValueOf(transitionRouteGroupsProp)) && (ok || !resource_dialogflow_cx_flow_reflect.DeepEqual(v, transitionRouteGroupsProp)) { - obj["transitionRouteGroups"] = transitionRouteGroupsProp - } - nluSettingsProp, err := expandDialogflowCXFlowNluSettings(d.Get("nlu_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("nlu_settings"); !isEmptyValue(resource_dialogflow_cx_flow_reflect.ValueOf(nluSettingsProp)) && (ok || !resource_dialogflow_cx_flow_reflect.DeepEqual(v, nluSettingsProp)) { - obj["nluSettings"] = nluSettingsProp - } - languageCodeProp, err := expandDialogflowCXFlowLanguageCode(d.Get("language_code"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("language_code"); !isEmptyValue(resource_dialogflow_cx_flow_reflect.ValueOf(languageCodeProp)) && (ok || !resource_dialogflow_cx_flow_reflect.DeepEqual(v, languageCodeProp)) { - obj["languageCode"] = languageCodeProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/flows") - if err != nil { - return err - } - - resource_dialogflow_cx_flow_log.Printf("[DEBUG] Creating new Flow: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_flow_schema.TimeoutCreate)) - if err != nil { - return resource_dialogflow_cx_flow_fmt.Errorf("Error creating Flow: %s", err) - } - if err := d.Set("name", flattenDialogflowCXFlowName(res["name"], d, config)); err != nil { - return resource_dialogflow_cx_flow_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, 
"{{parent}}/flows/{{name}}") - if err != nil { - return resource_dialogflow_cx_flow_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_dialogflow_cx_flow_log.Printf("[DEBUG] Finished creating Flow %q: %#v", d.Id(), res) - - return resourceDialogflowCXFlowRead(d, meta) -} - -func resourceDialogflowCXFlowRead(d *resource_dialogflow_cx_flow_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/flows/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_dialogflow_cx_flow_fmt.Sprintf("DialogflowCXFlow %q", d.Id())) - } - - if err := d.Set("name", flattenDialogflowCXFlowName(res["name"], d, config)); err != nil { - return resource_dialogflow_cx_flow_fmt.Errorf("Error reading Flow: %s", err) - } - if err := d.Set("display_name", flattenDialogflowCXFlowDisplayName(res["displayName"], d, config)); err != nil { - return resource_dialogflow_cx_flow_fmt.Errorf("Error reading Flow: %s", err) - } - if err := d.Set("description", flattenDialogflowCXFlowDescription(res["description"], d, config)); err != nil { - return resource_dialogflow_cx_flow_fmt.Errorf("Error reading Flow: %s", err) - } - if err := d.Set("transition_routes", flattenDialogflowCXFlowTransitionRoutes(res["transitionRoutes"], d, config)); err != nil { - return resource_dialogflow_cx_flow_fmt.Errorf("Error reading Flow: %s", err) - } - if err := d.Set("event_handlers", flattenDialogflowCXFlowEventHandlers(res["eventHandlers"], d, config)); err != nil { - return resource_dialogflow_cx_flow_fmt.Errorf("Error reading Flow: %s", err) - } - if err := 
d.Set("transition_route_groups", flattenDialogflowCXFlowTransitionRouteGroups(res["transitionRouteGroups"], d, config)); err != nil { - return resource_dialogflow_cx_flow_fmt.Errorf("Error reading Flow: %s", err) - } - if err := d.Set("nlu_settings", flattenDialogflowCXFlowNluSettings(res["nluSettings"], d, config)); err != nil { - return resource_dialogflow_cx_flow_fmt.Errorf("Error reading Flow: %s", err) - } - if err := d.Set("language_code", flattenDialogflowCXFlowLanguageCode(res["languageCode"], d, config)); err != nil { - return resource_dialogflow_cx_flow_fmt.Errorf("Error reading Flow: %s", err) - } - - return nil -} - -func resourceDialogflowCXFlowUpdate(d *resource_dialogflow_cx_flow_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXFlowDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_cx_flow_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_flow_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDialogflowCXFlowDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_dialogflow_cx_flow_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_flow_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - transitionRoutesProp, err := expandDialogflowCXFlowTransitionRoutes(d.Get("transition_routes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("transition_routes"); !isEmptyValue(resource_dialogflow_cx_flow_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_flow_reflect.DeepEqual(v, 
transitionRoutesProp)) { - obj["transitionRoutes"] = transitionRoutesProp - } - eventHandlersProp, err := expandDialogflowCXFlowEventHandlers(d.Get("event_handlers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("event_handlers"); !isEmptyValue(resource_dialogflow_cx_flow_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_flow_reflect.DeepEqual(v, eventHandlersProp)) { - obj["eventHandlers"] = eventHandlersProp - } - transitionRouteGroupsProp, err := expandDialogflowCXFlowTransitionRouteGroups(d.Get("transition_route_groups"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("transition_route_groups"); !isEmptyValue(resource_dialogflow_cx_flow_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_flow_reflect.DeepEqual(v, transitionRouteGroupsProp)) { - obj["transitionRouteGroups"] = transitionRouteGroupsProp - } - nluSettingsProp, err := expandDialogflowCXFlowNluSettings(d.Get("nlu_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("nlu_settings"); !isEmptyValue(resource_dialogflow_cx_flow_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_flow_reflect.DeepEqual(v, nluSettingsProp)) { - obj["nluSettings"] = nluSettingsProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/flows/{{name}}") - if err != nil { - return err - } - - resource_dialogflow_cx_flow_log.Printf("[DEBUG] Updating Flow %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("transition_routes") { - updateMask = append(updateMask, "transitionRoutes") - } - - if d.HasChange("event_handlers") { - updateMask = append(updateMask, "eventHandlers") - } - - if d.HasChange("transition_route_groups") { - updateMask = append(updateMask, "transitionRouteGroups") - } - - if 
d.HasChange("nlu_settings") { - updateMask = append(updateMask, "nluSettings") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_dialogflow_cx_flow_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_flow_schema.TimeoutUpdate)) - - if err != nil { - return resource_dialogflow_cx_flow_fmt.Errorf("Error updating Flow %q: %s", d.Id(), err) - } else { - resource_dialogflow_cx_flow_log.Printf("[DEBUG] Finished updating Flow %q: %#v", d.Id(), res) - } - - return resourceDialogflowCXFlowRead(d, meta) -} - -func resourceDialogflowCXFlowDelete(d *resource_dialogflow_cx_flow_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/flows/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_dialogflow_cx_flow_log.Printf("[DEBUG] Deleting Flow %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_flow_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Flow") - } - - resource_dialogflow_cx_flow_log.Printf("[DEBUG] Finished deleting Flow %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowCXFlowImport(d *resource_dialogflow_cx_flow_schema.ResourceData, meta interface{}) ([]*resource_dialogflow_cx_flow_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "(?P.+)/flows/(?P[^/]+)", - "(?P.+)/(?P[^/]+)", - }, d, config); err 
!= nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{parent}}/flows/{{name}}") - if err != nil { - return nil, resource_dialogflow_cx_flow_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_dialogflow_cx_flow_schema.ResourceData{d}, nil -} - -func flattenDialogflowCXFlowName(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDialogflowCXFlowDisplayName(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowDescription(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowTransitionRoutes(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenDialogflowCXFlowTransitionRoutesName(original["name"], d, config), - "intent": flattenDialogflowCXFlowTransitionRoutesIntent(original["intent"], d, config), - "condition": flattenDialogflowCXFlowTransitionRoutesCondition(original["condition"], d, config), - "trigger_fulfillment": flattenDialogflowCXFlowTransitionRoutesTriggerFulfillment(original["triggerFulfillment"], d, config), - "target_page": flattenDialogflowCXFlowTransitionRoutesTargetPage(original["targetPage"], d, config), - "target_flow": flattenDialogflowCXFlowTransitionRoutesTargetFlow(original["targetFlow"], d, config), - }) - } - return transformed -} - -func flattenDialogflowCXFlowTransitionRoutesName(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenDialogflowCXFlowTransitionRoutesIntent(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowTransitionRoutesCondition(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillment(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["messages"] = - flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessages(original["messages"], d, config) - transformed["webhook"] = - flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentWebhook(original["webhook"], d, config) - transformed["return_partial_responses"] = - flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentReturnPartialResponses(original["returnPartialResponses"], d, config) - transformed["tag"] = - flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentTag(original["tag"], d, config) - return []interface{}{transformed} -} - -func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessages(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "text": flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesText(original["text"], d, config), - }) - } - return transformed -} - -func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesText(v interface{}, d 
*resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["text"] = - flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextText(original["text"], d, config) - transformed["allow_playback_interruption"] = - flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(original["allowPlaybackInterruption"], d, config) - return []interface{}{transformed} -} - -func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextText(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentWebhook(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentReturnPartialResponses(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowTransitionRoutesTriggerFulfillmentTag(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowTransitionRoutesTargetPage(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowTransitionRoutesTargetFlow(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowEventHandlers(v interface{}, d 
*resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenDialogflowCXFlowEventHandlersName(original["name"], d, config), - "event": flattenDialogflowCXFlowEventHandlersEvent(original["event"], d, config), - "trigger_fulfillment": flattenDialogflowCXFlowEventHandlersTriggerFulfillment(original["triggerFulfillment"], d, config), - "target_page": flattenDialogflowCXFlowEventHandlersTargetPage(original["targetPage"], d, config), - "target_flow": flattenDialogflowCXFlowEventHandlersTargetFlow(original["targetFlow"], d, config), - }) - } - return transformed -} - -func flattenDialogflowCXFlowEventHandlersName(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowEventHandlersEvent(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowEventHandlersTriggerFulfillment(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["messages"] = - flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessages(original["messages"], d, config) - transformed["webhook"] = - flattenDialogflowCXFlowEventHandlersTriggerFulfillmentWebhook(original["webhook"], d, config) - transformed["return_partial_responses"] = - flattenDialogflowCXFlowEventHandlersTriggerFulfillmentReturnPartialResponses(original["returnPartialResponses"], d, config) - transformed["tag"] = - 
flattenDialogflowCXFlowEventHandlersTriggerFulfillmentTag(original["tag"], d, config) - return []interface{}{transformed} -} - -func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessages(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "text": flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesText(original["text"], d, config), - }) - } - return transformed -} - -func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesText(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["text"] = - flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextText(original["text"], d, config) - transformed["allow_playback_interruption"] = - flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(original["allowPlaybackInterruption"], d, config) - return []interface{}{transformed} -} - -func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextText(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentWebhook(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDialogflowCXFlowEventHandlersTriggerFulfillmentReturnPartialResponses(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowEventHandlersTriggerFulfillmentTag(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowEventHandlersTargetPage(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowEventHandlersTargetFlow(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowTransitionRouteGroups(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowNluSettings(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["model_type"] = - flattenDialogflowCXFlowNluSettingsModelType(original["modelType"], d, config) - transformed["classification_threshold"] = - flattenDialogflowCXFlowNluSettingsClassificationThreshold(original["classificationThreshold"], d, config) - transformed["model_training_mode"] = - flattenDialogflowCXFlowNluSettingsModelTrainingMode(original["modelTrainingMode"], d, config) - return []interface{}{transformed} -} - -func flattenDialogflowCXFlowNluSettingsModelType(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowNluSettingsClassificationThreshold(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDialogflowCXFlowNluSettingsModelTrainingMode(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXFlowLanguageCode(v interface{}, d *resource_dialogflow_cx_flow_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowCXFlowDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowTransitionRoutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDialogflowCXFlowTransitionRoutesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedIntent, err := expandDialogflowCXFlowTransitionRoutesIntent(original["intent"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedIntent); val.IsValid() && !isEmptyValue(val) { - transformed["intent"] = transformedIntent - } - - transformedCondition, err := expandDialogflowCXFlowTransitionRoutesCondition(original["condition"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedCondition); val.IsValid() && !isEmptyValue(val) { - transformed["condition"] = transformedCondition - } - - transformedTriggerFulfillment, err := 
expandDialogflowCXFlowTransitionRoutesTriggerFulfillment(original["trigger_fulfillment"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedTriggerFulfillment); val.IsValid() && !isEmptyValue(val) { - transformed["triggerFulfillment"] = transformedTriggerFulfillment - } - - transformedTargetPage, err := expandDialogflowCXFlowTransitionRoutesTargetPage(original["target_page"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedTargetPage); val.IsValid() && !isEmptyValue(val) { - transformed["targetPage"] = transformedTargetPage - } - - transformedTargetFlow, err := expandDialogflowCXFlowTransitionRoutesTargetFlow(original["target_flow"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedTargetFlow); val.IsValid() && !isEmptyValue(val) { - transformed["targetFlow"] = transformedTargetFlow - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXFlowTransitionRoutesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowTransitionRoutesIntent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowTransitionRoutesCondition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowTransitionRoutesTriggerFulfillment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMessages, err := expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessages(original["messages"], d, config) - if err != 
nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedMessages); val.IsValid() && !isEmptyValue(val) { - transformed["messages"] = transformedMessages - } - - transformedWebhook, err := expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentWebhook(original["webhook"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedWebhook); val.IsValid() && !isEmptyValue(val) { - transformed["webhook"] = transformedWebhook - } - - transformedReturnPartialResponses, err := expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentReturnPartialResponses(original["return_partial_responses"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !isEmptyValue(val) { - transformed["returnPartialResponses"] = transformedReturnPartialResponses - } - - transformedTag, err := expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentTag(original["tag"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - return transformed, nil -} - -func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedText, err := expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesText(original["text"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { - transformed["text"] = transformedText - } - - 
req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedText, err := expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextText(original["text"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { - transformed["text"] = transformedText - } - - transformedAllowPlaybackInterruption, err := expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(original["allow_playback_interruption"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !isEmptyValue(val) { - transformed["allowPlaybackInterruption"] = transformedAllowPlaybackInterruption - } - - return transformed, nil -} - -func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentWebhook(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentReturnPartialResponses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDialogflowCXFlowTransitionRoutesTriggerFulfillmentTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowTransitionRoutesTargetPage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowTransitionRoutesTargetFlow(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowEventHandlers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDialogflowCXFlowEventHandlersName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedEvent, err := expandDialogflowCXFlowEventHandlersEvent(original["event"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedEvent); val.IsValid() && !isEmptyValue(val) { - transformed["event"] = transformedEvent - } - - transformedTriggerFulfillment, err := expandDialogflowCXFlowEventHandlersTriggerFulfillment(original["trigger_fulfillment"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedTriggerFulfillment); val.IsValid() && !isEmptyValue(val) { - transformed["triggerFulfillment"] = transformedTriggerFulfillment - } - - transformedTargetPage, err := expandDialogflowCXFlowEventHandlersTargetPage(original["target_page"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_dialogflow_cx_flow_reflect.ValueOf(transformedTargetPage); val.IsValid() && !isEmptyValue(val) { - transformed["targetPage"] = transformedTargetPage - } - - transformedTargetFlow, err := expandDialogflowCXFlowEventHandlersTargetFlow(original["target_flow"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedTargetFlow); val.IsValid() && !isEmptyValue(val) { - transformed["targetFlow"] = transformedTargetFlow - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXFlowEventHandlersName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowEventHandlersEvent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowEventHandlersTriggerFulfillment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMessages, err := expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessages(original["messages"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedMessages); val.IsValid() && !isEmptyValue(val) { - transformed["messages"] = transformedMessages - } - - transformedWebhook, err := expandDialogflowCXFlowEventHandlersTriggerFulfillmentWebhook(original["webhook"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedWebhook); val.IsValid() && !isEmptyValue(val) { - transformed["webhook"] = transformedWebhook - } - - transformedReturnPartialResponses, err := 
expandDialogflowCXFlowEventHandlersTriggerFulfillmentReturnPartialResponses(original["return_partial_responses"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !isEmptyValue(val) { - transformed["returnPartialResponses"] = transformedReturnPartialResponses - } - - transformedTag, err := expandDialogflowCXFlowEventHandlersTriggerFulfillmentTag(original["tag"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - return transformed, nil -} - -func expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedText, err := expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesText(original["text"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { - transformed["text"] = transformedText - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedText, err := expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextText(original["text"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_dialogflow_cx_flow_reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { - transformed["text"] = transformedText - } - - transformedAllowPlaybackInterruption, err := expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(original["allow_playback_interruption"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !isEmptyValue(val) { - transformed["allowPlaybackInterruption"] = transformedAllowPlaybackInterruption - } - - return transformed, nil -} - -func expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowEventHandlersTriggerFulfillmentWebhook(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowEventHandlersTriggerFulfillmentReturnPartialResponses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowEventHandlersTriggerFulfillmentTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowEventHandlersTargetPage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowEventHandlersTargetFlow(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowTransitionRouteGroups(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDialogflowCXFlowNluSettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedModelType, err := expandDialogflowCXFlowNluSettingsModelType(original["model_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedModelType); val.IsValid() && !isEmptyValue(val) { - transformed["modelType"] = transformedModelType - } - - transformedClassificationThreshold, err := expandDialogflowCXFlowNluSettingsClassificationThreshold(original["classification_threshold"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedClassificationThreshold); val.IsValid() && !isEmptyValue(val) { - transformed["classificationThreshold"] = transformedClassificationThreshold - } - - transformedModelTrainingMode, err := expandDialogflowCXFlowNluSettingsModelTrainingMode(original["model_training_mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_flow_reflect.ValueOf(transformedModelTrainingMode); val.IsValid() && !isEmptyValue(val) { - transformed["modelTrainingMode"] = transformedModelTrainingMode - } - - return transformed, nil -} - -func expandDialogflowCXFlowNluSettingsModelType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowNluSettingsClassificationThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowNluSettingsModelTrainingMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXFlowLanguageCode(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} - -func resourceDialogflowCXIntent() *resource_dialogflow_cx_intent_schema.Resource { - return &resource_dialogflow_cx_intent_schema.Resource{ - Create: resourceDialogflowCXIntentCreate, - Read: resourceDialogflowCXIntentRead, - Update: resourceDialogflowCXIntentUpdate, - Delete: resourceDialogflowCXIntentDelete, - - Importer: &resource_dialogflow_cx_intent_schema.ResourceImporter{ - State: resourceDialogflowCXIntentImport, - }, - - Timeouts: &resource_dialogflow_cx_intent_schema.ResourceTimeout{ - Create: resource_dialogflow_cx_intent_schema.DefaultTimeout(40 * resource_dialogflow_cx_intent_time.Minute), - Update: resource_dialogflow_cx_intent_schema.DefaultTimeout(40 * resource_dialogflow_cx_intent_time.Minute), - Delete: resource_dialogflow_cx_intent_schema.DefaultTimeout(4 * resource_dialogflow_cx_intent_time.Minute), - }, - - Schema: map[string]*resource_dialogflow_cx_intent_schema.Schema{ - "display_name": { - Type: resource_dialogflow_cx_intent_schema.TypeString, - Required: true, - ValidateFunc: resource_dialogflow_cx_intent_validation.StringLenBetween(0, 64), - Description: `The human-readable name of the intent, unique within the agent.`, - }, - "description": { - Type: resource_dialogflow_cx_intent_schema.TypeString, - Optional: true, - ValidateFunc: resource_dialogflow_cx_intent_validation.StringLenBetween(0, 140), - Description: `Human readable description for better understanding an intent like its scope, content, result etc. Maximum character limit: 140 characters.`, - }, - "is_fallback": { - Type: resource_dialogflow_cx_intent_schema.TypeBool, - Optional: true, - Description: `Indicates whether this is a fallback intent. Currently only default fallback intent is allowed in the agent, which is added upon agent creation. 
-Adding training phrases to fallback intent is useful in the case of requests that are mistakenly matched, since training phrases assigned to fallback intents act as negative examples that triggers no-match event.`, - }, - "labels": { - Type: resource_dialogflow_cx_intent_schema.TypeMap, - Optional: true, - Description: `The key/value metadata to label an intent. Labels can contain lowercase letters, digits and the symbols '-' and '_'. International characters are allowed, including letters from unicase alphabets. Keys must start with a letter. Keys and values can be no longer than 63 characters and no more than 128 bytes. -Prefix "sys-" is reserved for Dialogflow defined labels. Currently allowed Dialogflow defined labels include: * sys-head * sys-contextual The above labels do not require value. "sys-head" means the intent is a head intent. "sys.contextual" means the intent is a contextual intent. -An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &resource_dialogflow_cx_intent_schema.Schema{Type: resource_dialogflow_cx_intent_schema.TypeString}, - }, - "language_code": { - Type: resource_dialogflow_cx_intent_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The language of the following fields in intent: -Intent.training_phrases.parts.text -If not specified, the agent's default language is used. Many languages are supported. Note: languages must be enabled in the agent before they can be used.`, - }, - "parameters": { - Type: resource_dialogflow_cx_intent_schema.TypeList, - Optional: true, - Description: `The collection of parameters associated with the intent.`, - Elem: &resource_dialogflow_cx_intent_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_intent_schema.Schema{ - "entity_type": { - Type: resource_dialogflow_cx_intent_schema.TypeString, - Required: true, - Description: `The entity type of the parameter. 
-Format: projects/-/locations/-/agents/-/entityTypes/ for system entity types (for example, projects/-/locations/-/agents/-/entityTypes/sys.date), or projects//locations//agents//entityTypes/ for developer entity types.`, - }, - "id": { - Type: resource_dialogflow_cx_intent_schema.TypeString, - Required: true, - Description: `The unique identifier of the parameter. This field is used by training phrases to annotate their parts.`, - }, - "is_list": { - Type: resource_dialogflow_cx_intent_schema.TypeBool, - Optional: true, - Description: `Indicates whether the parameter represents a list of values.`, - }, - "redact": { - Type: resource_dialogflow_cx_intent_schema.TypeBool, - Optional: true, - Description: `Indicates whether the parameter content should be redacted in log. If redaction is enabled, the parameter content will be replaced by parameter name during logging. -Note: the parameter content is subject to redaction if either parameter level redaction or entity type level redaction is enabled.`, - }, - }, - }, - }, - "parent": { - Type: resource_dialogflow_cx_intent_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The agent to create an intent for. -Format: projects//locations//agents/.`, - }, - "priority": { - Type: resource_dialogflow_cx_intent_schema.TypeInt, - Optional: true, - Description: `The priority of this intent. Higher numbers represent higher priorities. -If the supplied value is unspecified or 0, the service translates the value to 500,000, which corresponds to the Normal priority in the console. 
-If the supplied value is negative, the intent is ignored in runtime detect intent requests.`, - }, - "training_phrases": { - Type: resource_dialogflow_cx_intent_schema.TypeList, - Optional: true, - Description: `The collection of training phrases the agent is trained on to identify the intent.`, - Elem: &resource_dialogflow_cx_intent_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_intent_schema.Schema{ - "parts": { - Type: resource_dialogflow_cx_intent_schema.TypeList, - Required: true, - Description: `The ordered list of training phrase parts. The parts are concatenated in order to form the training phrase. -Note: The API does not automatically annotate training phrases like the Dialogflow Console does. -Note: Do not forget to include whitespace at part boundaries, so the training phrase is well formatted when the parts are concatenated. -If the training phrase does not need to be annotated with parameters, you just need a single part with only the Part.text field set. -If you want to annotate the training phrase, you must create multiple parts, where the fields of each part are populated in one of two ways: -Part.text is set to a part of the phrase that has no parameters. -Part.text is set to a part of the phrase that you want to annotate, and the parameterId field is set.`, - Elem: &resource_dialogflow_cx_intent_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_intent_schema.Schema{ - "text": { - Type: resource_dialogflow_cx_intent_schema.TypeString, - Required: true, - Description: `The text for this part.`, - }, - "parameter_id": { - Type: resource_dialogflow_cx_intent_schema.TypeString, - Optional: true, - Description: `The parameter used to annotate this part of the training phrase. 
This field is required for annotated parts of the training phrase.`, - }, - }, - }, - }, - "repeat_count": { - Type: resource_dialogflow_cx_intent_schema.TypeInt, - Optional: true, - Description: `Indicates how many times this example was added to the intent.`, - }, - "id": { - Type: resource_dialogflow_cx_intent_schema.TypeString, - Computed: true, - Description: `The unique identifier of the training phrase.`, - }, - }, - }, - }, - "name": { - Type: resource_dialogflow_cx_intent_schema.TypeString, - Computed: true, - Description: `The unique identifier of the intent. -Format: projects//locations//agents//intents/.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowCXIntentCreate(d *resource_dialogflow_cx_intent_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXIntentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_cx_intent_reflect.ValueOf(displayNameProp)) && (ok || !resource_dialogflow_cx_intent_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - trainingPhrasesProp, err := expandDialogflowCXIntentTrainingPhrases(d.Get("training_phrases"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("training_phrases"); !isEmptyValue(resource_dialogflow_cx_intent_reflect.ValueOf(trainingPhrasesProp)) && (ok || !resource_dialogflow_cx_intent_reflect.DeepEqual(v, trainingPhrasesProp)) { - obj["trainingPhrases"] = trainingPhrasesProp - } - parametersProp, err := expandDialogflowCXIntentParameters(d.Get("parameters"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parameters"); !isEmptyValue(resource_dialogflow_cx_intent_reflect.ValueOf(parametersProp)) 
&& (ok || !resource_dialogflow_cx_intent_reflect.DeepEqual(v, parametersProp)) { - obj["parameters"] = parametersProp - } - priorityProp, err := expandDialogflowCXIntentPriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); !isEmptyValue(resource_dialogflow_cx_intent_reflect.ValueOf(priorityProp)) && (ok || !resource_dialogflow_cx_intent_reflect.DeepEqual(v, priorityProp)) { - obj["priority"] = priorityProp - } - isFallbackProp, err := expandDialogflowCXIntentIsFallback(d.Get("is_fallback"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("is_fallback"); !isEmptyValue(resource_dialogflow_cx_intent_reflect.ValueOf(isFallbackProp)) && (ok || !resource_dialogflow_cx_intent_reflect.DeepEqual(v, isFallbackProp)) { - obj["isFallback"] = isFallbackProp - } - labelsProp, err := expandDialogflowCXIntentLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_dialogflow_cx_intent_reflect.ValueOf(labelsProp)) && (ok || !resource_dialogflow_cx_intent_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - descriptionProp, err := expandDialogflowCXIntentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_dialogflow_cx_intent_reflect.ValueOf(descriptionProp)) && (ok || !resource_dialogflow_cx_intent_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - languageCodeProp, err := expandDialogflowCXIntentLanguageCode(d.Get("language_code"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("language_code"); !isEmptyValue(resource_dialogflow_cx_intent_reflect.ValueOf(languageCodeProp)) && (ok || !resource_dialogflow_cx_intent_reflect.DeepEqual(v, languageCodeProp)) { - obj["languageCode"] = languageCodeProp - } - - url, err := replaceVars(d, 
config, "{{DialogflowCXBasePath}}{{parent}}/intents") - if err != nil { - return err - } - - resource_dialogflow_cx_intent_log.Printf("[DEBUG] Creating new Intent: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_intent_schema.TimeoutCreate)) - if err != nil { - return resource_dialogflow_cx_intent_fmt.Errorf("Error creating Intent: %s", err) - } - if err := d.Set("name", flattenDialogflowCXIntentName(res["name"], d, config)); err != nil { - return resource_dialogflow_cx_intent_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{parent}}/intents/{{name}}") - if err != nil { - return resource_dialogflow_cx_intent_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_dialogflow_cx_intent_log.Printf("[DEBUG] Finished creating Intent %q: %#v", d.Id(), res) - - return resourceDialogflowCXIntentRead(d, meta) -} - -func resourceDialogflowCXIntentRead(d *resource_dialogflow_cx_intent_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/intents/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_dialogflow_cx_intent_fmt.Sprintf("DialogflowCXIntent %q", d.Id())) - } - - if err := d.Set("name", flattenDialogflowCXIntentName(res["name"], d, config)); err != nil { - return resource_dialogflow_cx_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := 
d.Set("display_name", flattenDialogflowCXIntentDisplayName(res["displayName"], d, config)); err != nil { - return resource_dialogflow_cx_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("training_phrases", flattenDialogflowCXIntentTrainingPhrases(res["trainingPhrases"], d, config)); err != nil { - return resource_dialogflow_cx_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("parameters", flattenDialogflowCXIntentParameters(res["parameters"], d, config)); err != nil { - return resource_dialogflow_cx_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("priority", flattenDialogflowCXIntentPriority(res["priority"], d, config)); err != nil { - return resource_dialogflow_cx_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("is_fallback", flattenDialogflowCXIntentIsFallback(res["isFallback"], d, config)); err != nil { - return resource_dialogflow_cx_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("labels", flattenDialogflowCXIntentLabels(res["labels"], d, config)); err != nil { - return resource_dialogflow_cx_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("description", flattenDialogflowCXIntentDescription(res["description"], d, config)); err != nil { - return resource_dialogflow_cx_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("language_code", flattenDialogflowCXIntentLanguageCode(res["languageCode"], d, config)); err != nil { - return resource_dialogflow_cx_intent_fmt.Errorf("Error reading Intent: %s", err) - } - - return nil -} - -func resourceDialogflowCXIntentUpdate(d *resource_dialogflow_cx_intent_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := 
expandDialogflowCXIntentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_cx_intent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_intent_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - trainingPhrasesProp, err := expandDialogflowCXIntentTrainingPhrases(d.Get("training_phrases"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("training_phrases"); !isEmptyValue(resource_dialogflow_cx_intent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_intent_reflect.DeepEqual(v, trainingPhrasesProp)) { - obj["trainingPhrases"] = trainingPhrasesProp - } - parametersProp, err := expandDialogflowCXIntentParameters(d.Get("parameters"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parameters"); !isEmptyValue(resource_dialogflow_cx_intent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_intent_reflect.DeepEqual(v, parametersProp)) { - obj["parameters"] = parametersProp - } - priorityProp, err := expandDialogflowCXIntentPriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); !isEmptyValue(resource_dialogflow_cx_intent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_intent_reflect.DeepEqual(v, priorityProp)) { - obj["priority"] = priorityProp - } - isFallbackProp, err := expandDialogflowCXIntentIsFallback(d.Get("is_fallback"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("is_fallback"); !isEmptyValue(resource_dialogflow_cx_intent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_intent_reflect.DeepEqual(v, isFallbackProp)) { - obj["isFallback"] = isFallbackProp - } - labelsProp, err := expandDialogflowCXIntentLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); 
!isEmptyValue(resource_dialogflow_cx_intent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_intent_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - descriptionProp, err := expandDialogflowCXIntentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_dialogflow_cx_intent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_intent_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/intents/{{name}}") - if err != nil { - return err - } - - resource_dialogflow_cx_intent_log.Printf("[DEBUG] Updating Intent %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("training_phrases") { - updateMask = append(updateMask, "trainingPhrases") - } - - if d.HasChange("parameters") { - updateMask = append(updateMask, "parameters") - } - - if d.HasChange("priority") { - updateMask = append(updateMask, "priority") - } - - if d.HasChange("is_fallback") { - updateMask = append(updateMask, "isFallback") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_dialogflow_cx_intent_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_intent_schema.TimeoutUpdate)) - - if err != nil { - return resource_dialogflow_cx_intent_fmt.Errorf("Error updating Intent %q: %s", d.Id(), err) - } else { - resource_dialogflow_cx_intent_log.Printf("[DEBUG] Finished 
updating Intent %q: %#v", d.Id(), res) - } - - return resourceDialogflowCXIntentRead(d, meta) -} - -func resourceDialogflowCXIntentDelete(d *resource_dialogflow_cx_intent_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/intents/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_dialogflow_cx_intent_log.Printf("[DEBUG] Deleting Intent %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_intent_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Intent") - } - - resource_dialogflow_cx_intent_log.Printf("[DEBUG] Finished deleting Intent %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowCXIntentImport(d *resource_dialogflow_cx_intent_schema.ResourceData, meta interface{}) ([]*resource_dialogflow_cx_intent_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "(?P.+)/intents/(?P[^/]+)", - "(?P.+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{parent}}/intents/{{name}}") - if err != nil { - return nil, resource_dialogflow_cx_intent_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_dialogflow_cx_intent_schema.ResourceData{d}, nil -} - -func flattenDialogflowCXIntentName(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDialogflowCXIntentDisplayName(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenDialogflowCXIntentTrainingPhrases(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "id": flattenDialogflowCXIntentTrainingPhrasesId(original["id"], d, config), - "parts": flattenDialogflowCXIntentTrainingPhrasesParts(original["parts"], d, config), - "repeat_count": flattenDialogflowCXIntentTrainingPhrasesRepeatCount(original["repeatCount"], d, config), - }) - } - return transformed -} - -func flattenDialogflowCXIntentTrainingPhrasesId(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentTrainingPhrasesParts(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "text": flattenDialogflowCXIntentTrainingPhrasesPartsText(original["text"], d, config), - "parameter_id": flattenDialogflowCXIntentTrainingPhrasesPartsParameterId(original["parameterId"], d, config), - }) - } - return transformed -} - -func flattenDialogflowCXIntentTrainingPhrasesPartsText(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentTrainingPhrasesPartsParameterId(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentTrainingPhrasesRepeatCount(v 
interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_dialogflow_cx_intent_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDialogflowCXIntentParameters(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "id": flattenDialogflowCXIntentParametersId(original["id"], d, config), - "entity_type": flattenDialogflowCXIntentParametersEntityType(original["entityType"], d, config), - "is_list": flattenDialogflowCXIntentParametersIsList(original["isList"], d, config), - "redact": flattenDialogflowCXIntentParametersRedact(original["redact"], d, config), - }) - } - return transformed -} - -func flattenDialogflowCXIntentParametersId(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentParametersEntityType(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentParametersIsList(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentParametersRedact(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentPriority(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, 
err := resource_dialogflow_cx_intent_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDialogflowCXIntentIsFallback(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentLabels(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentDescription(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXIntentLanguageCode(v interface{}, d *resource_dialogflow_cx_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowCXIntentDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentTrainingPhrases(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedId, err := expandDialogflowCXIntentTrainingPhrasesId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_intent_reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedParts, err := expandDialogflowCXIntentTrainingPhrasesParts(original["parts"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_intent_reflect.ValueOf(transformedParts); val.IsValid() && !isEmptyValue(val) { - transformed["parts"] = transformedParts - } - - transformedRepeatCount, err := 
expandDialogflowCXIntentTrainingPhrasesRepeatCount(original["repeat_count"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_intent_reflect.ValueOf(transformedRepeatCount); val.IsValid() && !isEmptyValue(val) { - transformed["repeatCount"] = transformedRepeatCount - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXIntentTrainingPhrasesId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentTrainingPhrasesParts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedText, err := expandDialogflowCXIntentTrainingPhrasesPartsText(original["text"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_intent_reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { - transformed["text"] = transformedText - } - - transformedParameterId, err := expandDialogflowCXIntentTrainingPhrasesPartsParameterId(original["parameter_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_intent_reflect.ValueOf(transformedParameterId); val.IsValid() && !isEmptyValue(val) { - transformed["parameterId"] = transformedParameterId - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXIntentTrainingPhrasesPartsText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentTrainingPhrasesPartsParameterId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentTrainingPhrasesRepeatCount(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentParameters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedId, err := expandDialogflowCXIntentParametersId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_intent_reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedEntityType, err := expandDialogflowCXIntentParametersEntityType(original["entity_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_intent_reflect.ValueOf(transformedEntityType); val.IsValid() && !isEmptyValue(val) { - transformed["entityType"] = transformedEntityType - } - - transformedIsList, err := expandDialogflowCXIntentParametersIsList(original["is_list"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_intent_reflect.ValueOf(transformedIsList); val.IsValid() && !isEmptyValue(val) { - transformed["isList"] = transformedIsList - } - - transformedRedact, err := expandDialogflowCXIntentParametersRedact(original["redact"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_intent_reflect.ValueOf(transformedRedact); val.IsValid() && !isEmptyValue(val) { - transformed["redact"] = transformedRedact - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXIntentParametersId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentParametersEntityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDialogflowCXIntentParametersIsList(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentParametersRedact(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentPriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentIsFallback(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandDialogflowCXIntentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXIntentLanguageCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDialogflowCXPage() *resource_dialogflow_cx_page_schema.Resource { - return &resource_dialogflow_cx_page_schema.Resource{ - Create: resourceDialogflowCXPageCreate, - Read: resourceDialogflowCXPageRead, - Update: resourceDialogflowCXPageUpdate, - Delete: resourceDialogflowCXPageDelete, - - Importer: &resource_dialogflow_cx_page_schema.ResourceImporter{ - State: resourceDialogflowCXPageImport, - }, - - Timeouts: &resource_dialogflow_cx_page_schema.ResourceTimeout{ - Create: resource_dialogflow_cx_page_schema.DefaultTimeout(40 * resource_dialogflow_cx_page_time.Minute), - Update: resource_dialogflow_cx_page_schema.DefaultTimeout(40 * resource_dialogflow_cx_page_time.Minute), - Delete: resource_dialogflow_cx_page_schema.DefaultTimeout(4 * resource_dialogflow_cx_page_time.Minute), - }, - - Schema: 
map[string]*resource_dialogflow_cx_page_schema.Schema{ - "display_name": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Required: true, - ValidateFunc: resource_dialogflow_cx_page_validation.StringLenBetween(0, 64), - Description: `The human-readable name of the page, unique within the agent.`, - }, - "entry_fulfillment": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `The fulfillment to call when the session is entering the page.`, - MaxItems: 1, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "messages": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `The list of rich message responses to present to the user.`, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "text": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `The text response message.`, - MaxItems: 1, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "text": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `A collection of text responses.`, - Elem: &resource_dialogflow_cx_page_schema.Schema{ - Type: resource_dialogflow_cx_page_schema.TypeString, - }, - }, - "allow_playback_interruption": { - Type: resource_dialogflow_cx_page_schema.TypeBool, - Computed: true, - Description: `Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.`, - }, - }, - }, - }, - }, - }, - }, - "return_partial_responses": { - Type: resource_dialogflow_cx_page_schema.TypeBool, - Optional: true, - Description: `Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. 
If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks.`, - }, - "tag": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified.`, - }, - "webhook": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The webhook to call. Format: projects//locations//agents//webhooks/.`, - }, - }, - }, - }, - "event_handlers": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `Handlers associated with the page to handle events such as webhook errors, no match or no input.`, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "event": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The name of the event to handle.`, - }, - "target_flow": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The target flow to transition to. -Format: projects//locations//agents//flows/.`, - }, - "target_page": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The target page to transition to. -Format: projects//locations//agents//flows//pages/.`, - }, - "trigger_fulfillment": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `The fulfillment to call when the event occurs. Handling webhook errors with a fulfillment enabled with webhook could cause infinite loop. 
It is invalid to specify such fulfillment for a handler handling webhooks.`, - MaxItems: 1, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "messages": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `The list of rich message responses to present to the user.`, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "text": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `The text response message.`, - MaxItems: 1, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "text": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `A collection of text responses.`, - Elem: &resource_dialogflow_cx_page_schema.Schema{ - Type: resource_dialogflow_cx_page_schema.TypeString, - }, - }, - "allow_playback_interruption": { - Type: resource_dialogflow_cx_page_schema.TypeBool, - Computed: true, - Description: `Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.`, - }, - }, - }, - }, - }, - }, - }, - "return_partial_responses": { - Type: resource_dialogflow_cx_page_schema.TypeBool, - Optional: true, - Description: `Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. 
You may only want to apply it to fulfillments that have slow webhooks.`, - }, - "tag": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified.`, - }, - "webhook": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The webhook to call. Format: projects//locations//agents//webhooks/.`, - }, - }, - }, - }, - "name": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Computed: true, - Description: `The unique identifier of this event handler.`, - }, - }, - }, - }, - "form": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `The form associated with the page, used for collecting parameters relevant to the page.`, - MaxItems: 1, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "parameters": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `Parameters to collect from the user.`, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "display_name": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The human-readable name of the parameter, unique within the form.`, - }, - "entity_type": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The entity type of the parameter. 
-Format: projects/-/locations/-/agents/-/entityTypes/ for system entity types (for example, projects/-/locations/-/agents/-/entityTypes/sys.date), or projects//locations//agents//entityTypes/ for developer entity types.`, - }, - "fill_behavior": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `Defines fill behavior for the parameter.`, - MaxItems: 1, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "initial_prompt_fulfillment": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `The fulfillment to provide the initial prompt that the agent can present to the user in order to fill the parameter.`, - MaxItems: 1, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "messages": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `The list of rich message responses to present to the user.`, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "text": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `The text response message.`, - MaxItems: 1, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "text": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `A collection of text responses.`, - Elem: &resource_dialogflow_cx_page_schema.Schema{ - Type: resource_dialogflow_cx_page_schema.TypeString, - }, - }, - "allow_playback_interruption": { - Type: resource_dialogflow_cx_page_schema.TypeBool, - Computed: true, - Description: `Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.`, - }, - }, - }, - }, - }, - }, - }, - 
"return_partial_responses": { - Type: resource_dialogflow_cx_page_schema.TypeBool, - Optional: true, - Description: `Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks.`, - }, - "tag": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified.`, - }, - "webhook": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The webhook to call. Format: projects//locations//agents//webhooks/.`, - }, - }, - }, - }, - }, - }, - }, - "is_list": { - Type: resource_dialogflow_cx_page_schema.TypeBool, - Optional: true, - Description: `Indicates whether the parameter represents a list of values.`, - }, - "redact": { - Type: resource_dialogflow_cx_page_schema.TypeBool, - Optional: true, - Description: `Indicates whether the parameter content should be redacted in log. -If redaction is enabled, the parameter content will be replaced by parameter name during logging. Note: the parameter content is subject to redaction if either parameter level redaction or entity type level redaction is enabled.`, - }, - "required": { - Type: resource_dialogflow_cx_page_schema.TypeBool, - Optional: true, - Description: `Indicates whether the parameter is required. Optional parameters will not trigger prompts; however, they are filled if the user specifies them. 
-Required parameters must be filled before form filling concludes.`, - }, - }, - }, - }, - }, - }, - }, - "language_code": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The language of the following fields in page: - -Page.entry_fulfillment.messages -Page.entry_fulfillment.conditional_cases -Page.event_handlers.trigger_fulfillment.messages -Page.event_handlers.trigger_fulfillment.conditional_cases -Page.form.parameters.fill_behavior.initial_prompt_fulfillment.messages -Page.form.parameters.fill_behavior.initial_prompt_fulfillment.conditional_cases -Page.form.parameters.fill_behavior.reprompt_event_handlers.messages -Page.form.parameters.fill_behavior.reprompt_event_handlers.conditional_cases -Page.transition_routes.trigger_fulfillment.messages -Page.transition_routes.trigger_fulfillment.conditional_cases -If not specified, the agent's default language is used. Many languages are supported. Note: languages must be enabled in the agent before they can be used.`, - }, - "parent": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The flow to create a page for. -Format: projects//locations//agents//flows/.`, - }, - "transition_route_groups": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `Ordered list of TransitionRouteGroups associated with the page. Transition route groups must be unique within a page. -If multiple transition routes within a page scope refer to the same intent, then the precedence order is: page's transition route -> page's transition route group -> flow's transition routes. -If multiple transition route groups within a page contain the same intent, then the first group in the ordered list takes precedence. 
-Format:projects//locations//agents//flows//transitionRouteGroups/.`, - Elem: &resource_dialogflow_cx_page_schema.Schema{ - Type: resource_dialogflow_cx_page_schema.TypeString, - }, - }, - "transition_routes": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `A list of transitions for the transition rules of this page. They route the conversation to another page in the same flow, or another flow. -When we are in a certain page, the TransitionRoutes are evalauted in the following order: -TransitionRoutes defined in the page with intent specified. -TransitionRoutes defined in the transition route groups with intent specified. -TransitionRoutes defined in flow with intent specified. -TransitionRoutes defined in the transition route groups with intent specified. -TransitionRoutes defined in the page with only condition specified. -TransitionRoutes defined in the transition route groups with only condition specified.`, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "condition": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The condition to evaluate against form parameters or session parameters. -At least one of intent or condition must be specified. When both intent and condition are specified, the transition can only happen when both are fulfilled.`, - }, - "intent": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The unique identifier of an Intent. -Format: projects//locations//agents//intents/. Indicates that the transition can only happen when the given intent is matched. At least one of intent or condition must be specified. 
When both intent and condition are specified, the transition can only happen when both are fulfilled.`, - }, - "target_flow": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The target flow to transition to. -Format: projects//locations//agents//flows/.`, - }, - "target_page": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The target page to transition to. -Format: projects//locations//agents//flows//pages/.`, - }, - "trigger_fulfillment": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `The fulfillment to call when the event occurs. Handling webhook errors with a fulfillment enabled with webhook could cause infinite loop. It is invalid to specify such fulfillment for a handler handling webhooks.`, - MaxItems: 1, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "messages": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `The list of rich message responses to present to the user.`, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "text": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `The text response message.`, - MaxItems: 1, - Elem: &resource_dialogflow_cx_page_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_page_schema.Schema{ - "text": { - Type: resource_dialogflow_cx_page_schema.TypeList, - Optional: true, - Description: `A collection of text responses.`, - Elem: &resource_dialogflow_cx_page_schema.Schema{ - Type: resource_dialogflow_cx_page_schema.TypeString, - }, - }, - "allow_playback_interruption": { - Type: resource_dialogflow_cx_page_schema.TypeBool, - Computed: true, - Description: `Whether the playback of this message can be interrupted by the end user's speech and the client can then starts 
the next Dialogflow request.`, - }, - }, - }, - }, - }, - }, - }, - "return_partial_responses": { - Type: resource_dialogflow_cx_page_schema.TypeBool, - Optional: true, - Description: `Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks.`, - }, - "tag": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified.`, - }, - "webhook": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Optional: true, - Description: `The webhook to call. Format: projects//locations//agents//webhooks/.`, - }, - }, - }, - }, - "name": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Computed: true, - Description: `The unique identifier of this transition route.`, - }, - }, - }, - }, - "name": { - Type: resource_dialogflow_cx_page_schema.TypeString, - Computed: true, - Description: `The unique identifier of the page. 
-Format: projects//locations//agents//flows//pages/.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowCXPageCreate(d *resource_dialogflow_cx_page_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXPageDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_cx_page_reflect.ValueOf(displayNameProp)) && (ok || !resource_dialogflow_cx_page_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - entryFulfillmentProp, err := expandDialogflowCXPageEntryFulfillment(d.Get("entry_fulfillment"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entry_fulfillment"); !isEmptyValue(resource_dialogflow_cx_page_reflect.ValueOf(entryFulfillmentProp)) && (ok || !resource_dialogflow_cx_page_reflect.DeepEqual(v, entryFulfillmentProp)) { - obj["entryFulfillment"] = entryFulfillmentProp - } - formProp, err := expandDialogflowCXPageForm(d.Get("form"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("form"); !isEmptyValue(resource_dialogflow_cx_page_reflect.ValueOf(formProp)) && (ok || !resource_dialogflow_cx_page_reflect.DeepEqual(v, formProp)) { - obj["form"] = formProp - } - transitionRouteGroupsProp, err := expandDialogflowCXPageTransitionRouteGroups(d.Get("transition_route_groups"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("transition_route_groups"); !isEmptyValue(resource_dialogflow_cx_page_reflect.ValueOf(transitionRouteGroupsProp)) && (ok || !resource_dialogflow_cx_page_reflect.DeepEqual(v, transitionRouteGroupsProp)) { - obj["transitionRouteGroups"] = transitionRouteGroupsProp - } - transitionRoutesProp, err := 
expandDialogflowCXPageTransitionRoutes(d.Get("transition_routes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("transition_routes"); !isEmptyValue(resource_dialogflow_cx_page_reflect.ValueOf(transitionRoutesProp)) && (ok || !resource_dialogflow_cx_page_reflect.DeepEqual(v, transitionRoutesProp)) { - obj["transitionRoutes"] = transitionRoutesProp - } - eventHandlersProp, err := expandDialogflowCXPageEventHandlers(d.Get("event_handlers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("event_handlers"); !isEmptyValue(resource_dialogflow_cx_page_reflect.ValueOf(eventHandlersProp)) && (ok || !resource_dialogflow_cx_page_reflect.DeepEqual(v, eventHandlersProp)) { - obj["eventHandlers"] = eventHandlersProp - } - languageCodeProp, err := expandDialogflowCXPageLanguageCode(d.Get("language_code"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("language_code"); !isEmptyValue(resource_dialogflow_cx_page_reflect.ValueOf(languageCodeProp)) && (ok || !resource_dialogflow_cx_page_reflect.DeepEqual(v, languageCodeProp)) { - obj["languageCode"] = languageCodeProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/pages") - if err != nil { - return err - } - - resource_dialogflow_cx_page_log.Printf("[DEBUG] Creating new Page: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_page_schema.TimeoutCreate)) - if err != nil { - return resource_dialogflow_cx_page_fmt.Errorf("Error creating Page: %s", err) - } - if err := d.Set("name", flattenDialogflowCXPageName(res["name"], d, config)); err != nil { - return resource_dialogflow_cx_page_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{parent}}/pages/{{name}}") - if 
err != nil { - return resource_dialogflow_cx_page_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_dialogflow_cx_page_log.Printf("[DEBUG] Finished creating Page %q: %#v", d.Id(), res) - - return resourceDialogflowCXPageRead(d, meta) -} - -func resourceDialogflowCXPageRead(d *resource_dialogflow_cx_page_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/pages/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_dialogflow_cx_page_fmt.Sprintf("DialogflowCXPage %q", d.Id())) - } - - if err := d.Set("name", flattenDialogflowCXPageName(res["name"], d, config)); err != nil { - return resource_dialogflow_cx_page_fmt.Errorf("Error reading Page: %s", err) - } - if err := d.Set("display_name", flattenDialogflowCXPageDisplayName(res["displayName"], d, config)); err != nil { - return resource_dialogflow_cx_page_fmt.Errorf("Error reading Page: %s", err) - } - if err := d.Set("entry_fulfillment", flattenDialogflowCXPageEntryFulfillment(res["entryFulfillment"], d, config)); err != nil { - return resource_dialogflow_cx_page_fmt.Errorf("Error reading Page: %s", err) - } - if err := d.Set("form", flattenDialogflowCXPageForm(res["form"], d, config)); err != nil { - return resource_dialogflow_cx_page_fmt.Errorf("Error reading Page: %s", err) - } - if err := d.Set("transition_route_groups", flattenDialogflowCXPageTransitionRouteGroups(res["transitionRouteGroups"], d, config)); err != nil { - return resource_dialogflow_cx_page_fmt.Errorf("Error reading Page: %s", err) - } - if err := d.Set("transition_routes", 
flattenDialogflowCXPageTransitionRoutes(res["transitionRoutes"], d, config)); err != nil { - return resource_dialogflow_cx_page_fmt.Errorf("Error reading Page: %s", err) - } - if err := d.Set("event_handlers", flattenDialogflowCXPageEventHandlers(res["eventHandlers"], d, config)); err != nil { - return resource_dialogflow_cx_page_fmt.Errorf("Error reading Page: %s", err) - } - if err := d.Set("language_code", flattenDialogflowCXPageLanguageCode(res["languageCode"], d, config)); err != nil { - return resource_dialogflow_cx_page_fmt.Errorf("Error reading Page: %s", err) - } - - return nil -} - -func resourceDialogflowCXPageUpdate(d *resource_dialogflow_cx_page_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXPageDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_cx_page_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_page_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - entryFulfillmentProp, err := expandDialogflowCXPageEntryFulfillment(d.Get("entry_fulfillment"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entry_fulfillment"); !isEmptyValue(resource_dialogflow_cx_page_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_page_reflect.DeepEqual(v, entryFulfillmentProp)) { - obj["entryFulfillment"] = entryFulfillmentProp - } - formProp, err := expandDialogflowCXPageForm(d.Get("form"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("form"); !isEmptyValue(resource_dialogflow_cx_page_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_page_reflect.DeepEqual(v, formProp)) { - obj["form"] = formProp - } - 
transitionRouteGroupsProp, err := expandDialogflowCXPageTransitionRouteGroups(d.Get("transition_route_groups"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("transition_route_groups"); !isEmptyValue(resource_dialogflow_cx_page_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_page_reflect.DeepEqual(v, transitionRouteGroupsProp)) { - obj["transitionRouteGroups"] = transitionRouteGroupsProp - } - transitionRoutesProp, err := expandDialogflowCXPageTransitionRoutes(d.Get("transition_routes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("transition_routes"); !isEmptyValue(resource_dialogflow_cx_page_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_page_reflect.DeepEqual(v, transitionRoutesProp)) { - obj["transitionRoutes"] = transitionRoutesProp - } - eventHandlersProp, err := expandDialogflowCXPageEventHandlers(d.Get("event_handlers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("event_handlers"); !isEmptyValue(resource_dialogflow_cx_page_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_page_reflect.DeepEqual(v, eventHandlersProp)) { - obj["eventHandlers"] = eventHandlersProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/pages/{{name}}") - if err != nil { - return err - } - - resource_dialogflow_cx_page_log.Printf("[DEBUG] Updating Page %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("entry_fulfillment") { - updateMask = append(updateMask, "entryFulfillment") - } - - if d.HasChange("form") { - updateMask = append(updateMask, "form") - } - - if d.HasChange("transition_route_groups") { - updateMask = append(updateMask, "transitionRouteGroups") - } - - if d.HasChange("transition_routes") { - updateMask = append(updateMask, "transitionRoutes") - } - - if d.HasChange("event_handlers") { - updateMask = append(updateMask, 
"eventHandlers") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_dialogflow_cx_page_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_page_schema.TimeoutUpdate)) - - if err != nil { - return resource_dialogflow_cx_page_fmt.Errorf("Error updating Page %q: %s", d.Id(), err) - } else { - resource_dialogflow_cx_page_log.Printf("[DEBUG] Finished updating Page %q: %#v", d.Id(), res) - } - - return resourceDialogflowCXPageRead(d, meta) -} - -func resourceDialogflowCXPageDelete(d *resource_dialogflow_cx_page_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/pages/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_dialogflow_cx_page_log.Printf("[DEBUG] Deleting Page %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_page_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Page") - } - - resource_dialogflow_cx_page_log.Printf("[DEBUG] Finished deleting Page %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowCXPageImport(d *resource_dialogflow_cx_page_schema.ResourceData, meta interface{}) ([]*resource_dialogflow_cx_page_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "(?P.+)/pages/(?P[^/]+)", - "(?P.+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, 
config, "{{parent}}/pages/{{name}}") - if err != nil { - return nil, resource_dialogflow_cx_page_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_dialogflow_cx_page_schema.ResourceData{d}, nil -} - -func flattenDialogflowCXPageName(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDialogflowCXPageDisplayName(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageEntryFulfillment(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["messages"] = - flattenDialogflowCXPageEntryFulfillmentMessages(original["messages"], d, config) - transformed["webhook"] = - flattenDialogflowCXPageEntryFulfillmentWebhook(original["webhook"], d, config) - transformed["return_partial_responses"] = - flattenDialogflowCXPageEntryFulfillmentReturnPartialResponses(original["returnPartialResponses"], d, config) - transformed["tag"] = - flattenDialogflowCXPageEntryFulfillmentTag(original["tag"], d, config) - return []interface{}{transformed} -} - -func flattenDialogflowCXPageEntryFulfillmentMessages(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "text": flattenDialogflowCXPageEntryFulfillmentMessagesText(original["text"], d, config), - }) - } - return transformed -} - -func 
flattenDialogflowCXPageEntryFulfillmentMessagesText(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["text"] = - flattenDialogflowCXPageEntryFulfillmentMessagesTextText(original["text"], d, config) - transformed["allow_playback_interruption"] = - flattenDialogflowCXPageEntryFulfillmentMessagesTextAllowPlaybackInterruption(original["allowPlaybackInterruption"], d, config) - return []interface{}{transformed} -} - -func flattenDialogflowCXPageEntryFulfillmentMessagesTextText(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageEntryFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageEntryFulfillmentWebhook(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageEntryFulfillmentReturnPartialResponses(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageEntryFulfillmentTag(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageForm(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["parameters"] = - flattenDialogflowCXPageFormParameters(original["parameters"], d, config) - return []interface{}{transformed} -} - -func 
flattenDialogflowCXPageFormParameters(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "display_name": flattenDialogflowCXPageFormParametersDisplayName(original["displayName"], d, config), - "required": flattenDialogflowCXPageFormParametersRequired(original["required"], d, config), - "entity_type": flattenDialogflowCXPageFormParametersEntityType(original["entityType"], d, config), - "is_list": flattenDialogflowCXPageFormParametersIsList(original["isList"], d, config), - "fill_behavior": flattenDialogflowCXPageFormParametersFillBehavior(original["fillBehavior"], d, config), - "redact": flattenDialogflowCXPageFormParametersRedact(original["redact"], d, config), - }) - } - return transformed -} - -func flattenDialogflowCXPageFormParametersDisplayName(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageFormParametersRequired(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageFormParametersEntityType(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageFormParametersIsList(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageFormParametersFillBehavior(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["initial_prompt_fulfillment"] = - flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillment(original["initialPromptFulfillment"], d, config) - return []interface{}{transformed} -} - -func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillment(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["messages"] = - flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessages(original["messages"], d, config) - transformed["webhook"] = - flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentWebhook(original["webhook"], d, config) - transformed["return_partial_responses"] = - flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentReturnPartialResponses(original["returnPartialResponses"], d, config) - transformed["tag"] = - flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentTag(original["tag"], d, config) - return []interface{}{transformed} -} - -func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessages(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "text": flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesText(original["text"], d, config), - }) - } - return transformed -} - -func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesText(v interface{}, d 
*resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["text"] = - flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextText(original["text"], d, config) - transformed["allow_playback_interruption"] = - flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextAllowPlaybackInterruption(original["allowPlaybackInterruption"], d, config) - return []interface{}{transformed} -} - -func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextText(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentWebhook(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentReturnPartialResponses(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentTag(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageFormParametersRedact(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageTransitionRouteGroups(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { 
- return v -} - -func flattenDialogflowCXPageTransitionRoutes(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenDialogflowCXPageTransitionRoutesName(original["name"], d, config), - "intent": flattenDialogflowCXPageTransitionRoutesIntent(original["intent"], d, config), - "condition": flattenDialogflowCXPageTransitionRoutesCondition(original["condition"], d, config), - "trigger_fulfillment": flattenDialogflowCXPageTransitionRoutesTriggerFulfillment(original["triggerFulfillment"], d, config), - "target_page": flattenDialogflowCXPageTransitionRoutesTargetPage(original["targetPage"], d, config), - "target_flow": flattenDialogflowCXPageTransitionRoutesTargetFlow(original["targetFlow"], d, config), - }) - } - return transformed -} - -func flattenDialogflowCXPageTransitionRoutesName(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageTransitionRoutesIntent(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageTransitionRoutesCondition(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageTransitionRoutesTriggerFulfillment(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["messages"] = - 
flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessages(original["messages"], d, config) - transformed["webhook"] = - flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentWebhook(original["webhook"], d, config) - transformed["return_partial_responses"] = - flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentReturnPartialResponses(original["returnPartialResponses"], d, config) - transformed["tag"] = - flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentTag(original["tag"], d, config) - return []interface{}{transformed} -} - -func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessages(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "text": flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesText(original["text"], d, config), - }) - } - return transformed -} - -func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesText(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["text"] = - flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextText(original["text"], d, config) - transformed["allow_playback_interruption"] = - flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(original["allowPlaybackInterruption"], d, config) - return []interface{}{transformed} -} - -func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextText(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentWebhook(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentReturnPartialResponses(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageTransitionRoutesTriggerFulfillmentTag(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageTransitionRoutesTargetPage(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageTransitionRoutesTargetFlow(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageEventHandlers(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenDialogflowCXPageEventHandlersName(original["name"], d, config), - "event": flattenDialogflowCXPageEventHandlersEvent(original["event"], d, config), - "trigger_fulfillment": flattenDialogflowCXPageEventHandlersTriggerFulfillment(original["triggerFulfillment"], d, config), - "target_page": flattenDialogflowCXPageEventHandlersTargetPage(original["targetPage"], d, config), - "target_flow": 
flattenDialogflowCXPageEventHandlersTargetFlow(original["targetFlow"], d, config), - }) - } - return transformed -} - -func flattenDialogflowCXPageEventHandlersName(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageEventHandlersEvent(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageEventHandlersTriggerFulfillment(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["messages"] = - flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessages(original["messages"], d, config) - transformed["webhook"] = - flattenDialogflowCXPageEventHandlersTriggerFulfillmentWebhook(original["webhook"], d, config) - transformed["return_partial_responses"] = - flattenDialogflowCXPageEventHandlersTriggerFulfillmentReturnPartialResponses(original["returnPartialResponses"], d, config) - transformed["tag"] = - flattenDialogflowCXPageEventHandlersTriggerFulfillmentTag(original["tag"], d, config) - return []interface{}{transformed} -} - -func flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessages(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "text": flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessagesText(original["text"], d, config), - }) - } - return transformed -} - -func flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessagesText(v 
interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["text"] = - flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextText(original["text"], d, config) - transformed["allow_playback_interruption"] = - flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(original["allowPlaybackInterruption"], d, config) - return []interface{}{transformed} -} - -func flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextText(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageEventHandlersTriggerFulfillmentWebhook(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageEventHandlersTriggerFulfillmentReturnPartialResponses(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageEventHandlersTriggerFulfillmentTag(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageEventHandlersTargetPage(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageEventHandlersTargetFlow(v interface{}, d *resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXPageLanguageCode(v interface{}, d 
*resource_dialogflow_cx_page_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowCXPageDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageEntryFulfillment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMessages, err := expandDialogflowCXPageEntryFulfillmentMessages(original["messages"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedMessages); val.IsValid() && !isEmptyValue(val) { - transformed["messages"] = transformedMessages - } - - transformedWebhook, err := expandDialogflowCXPageEntryFulfillmentWebhook(original["webhook"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedWebhook); val.IsValid() && !isEmptyValue(val) { - transformed["webhook"] = transformedWebhook - } - - transformedReturnPartialResponses, err := expandDialogflowCXPageEntryFulfillmentReturnPartialResponses(original["return_partial_responses"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !isEmptyValue(val) { - transformed["returnPartialResponses"] = transformedReturnPartialResponses - } - - transformedTag, err := expandDialogflowCXPageEntryFulfillmentTag(original["tag"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - return transformed, nil -} - -func expandDialogflowCXPageEntryFulfillmentMessages(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedText, err := expandDialogflowCXPageEntryFulfillmentMessagesText(original["text"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { - transformed["text"] = transformedText - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXPageEntryFulfillmentMessagesText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedText, err := expandDialogflowCXPageEntryFulfillmentMessagesTextText(original["text"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { - transformed["text"] = transformedText - } - - transformedAllowPlaybackInterruption, err := expandDialogflowCXPageEntryFulfillmentMessagesTextAllowPlaybackInterruption(original["allow_playback_interruption"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !isEmptyValue(val) { - transformed["allowPlaybackInterruption"] = transformedAllowPlaybackInterruption - } - - return transformed, nil -} - -func expandDialogflowCXPageEntryFulfillmentMessagesTextText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageEntryFulfillmentMessagesTextAllowPlaybackInterruption(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageEntryFulfillmentWebhook(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageEntryFulfillmentReturnPartialResponses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageEntryFulfillmentTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageForm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedParameters, err := expandDialogflowCXPageFormParameters(original["parameters"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedParameters); val.IsValid() && !isEmptyValue(val) { - transformed["parameters"] = transformedParameters - } - - return transformed, nil -} - -func expandDialogflowCXPageFormParameters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDisplayName, err := expandDialogflowCXPageFormParametersDisplayName(original["display_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedDisplayName); val.IsValid() && !isEmptyValue(val) { - transformed["displayName"] = transformedDisplayName - } - - transformedRequired, err := expandDialogflowCXPageFormParametersRequired(original["required"], d, 
config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedRequired); val.IsValid() && !isEmptyValue(val) { - transformed["required"] = transformedRequired - } - - transformedEntityType, err := expandDialogflowCXPageFormParametersEntityType(original["entity_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedEntityType); val.IsValid() && !isEmptyValue(val) { - transformed["entityType"] = transformedEntityType - } - - transformedIsList, err := expandDialogflowCXPageFormParametersIsList(original["is_list"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedIsList); val.IsValid() && !isEmptyValue(val) { - transformed["isList"] = transformedIsList - } - - transformedFillBehavior, err := expandDialogflowCXPageFormParametersFillBehavior(original["fill_behavior"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedFillBehavior); val.IsValid() && !isEmptyValue(val) { - transformed["fillBehavior"] = transformedFillBehavior - } - - transformedRedact, err := expandDialogflowCXPageFormParametersRedact(original["redact"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedRedact); val.IsValid() && !isEmptyValue(val) { - transformed["redact"] = transformedRedact - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXPageFormParametersDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageFormParametersRequired(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageFormParametersEntityType(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageFormParametersIsList(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageFormParametersFillBehavior(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedInitialPromptFulfillment, err := expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillment(original["initial_prompt_fulfillment"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedInitialPromptFulfillment); val.IsValid() && !isEmptyValue(val) { - transformed["initialPromptFulfillment"] = transformedInitialPromptFulfillment - } - - return transformed, nil -} - -func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMessages, err := expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessages(original["messages"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedMessages); val.IsValid() && !isEmptyValue(val) { - transformed["messages"] = transformedMessages - } - - transformedWebhook, err := expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentWebhook(original["webhook"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedWebhook); val.IsValid() && !isEmptyValue(val) { - transformed["webhook"] 
= transformedWebhook - } - - transformedReturnPartialResponses, err := expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentReturnPartialResponses(original["return_partial_responses"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !isEmptyValue(val) { - transformed["returnPartialResponses"] = transformedReturnPartialResponses - } - - transformedTag, err := expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentTag(original["tag"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - return transformed, nil -} - -func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedText, err := expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesText(original["text"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { - transformed["text"] = transformedText - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedText, err := 
expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextText(original["text"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { - transformed["text"] = transformedText - } - - transformedAllowPlaybackInterruption, err := expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextAllowPlaybackInterruption(original["allow_playback_interruption"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !isEmptyValue(val) { - transformed["allowPlaybackInterruption"] = transformedAllowPlaybackInterruption - } - - return transformed, nil -} - -func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentWebhook(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentReturnPartialResponses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageFormParametersFillBehaviorInitialPromptFulfillmentTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageFormParametersRedact(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDialogflowCXPageTransitionRouteGroups(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageTransitionRoutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDialogflowCXPageTransitionRoutesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedIntent, err := expandDialogflowCXPageTransitionRoutesIntent(original["intent"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedIntent); val.IsValid() && !isEmptyValue(val) { - transformed["intent"] = transformedIntent - } - - transformedCondition, err := expandDialogflowCXPageTransitionRoutesCondition(original["condition"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedCondition); val.IsValid() && !isEmptyValue(val) { - transformed["condition"] = transformedCondition - } - - transformedTriggerFulfillment, err := expandDialogflowCXPageTransitionRoutesTriggerFulfillment(original["trigger_fulfillment"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedTriggerFulfillment); val.IsValid() && !isEmptyValue(val) { - transformed["triggerFulfillment"] = transformedTriggerFulfillment - } - - transformedTargetPage, err := expandDialogflowCXPageTransitionRoutesTargetPage(original["target_page"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_dialogflow_cx_page_reflect.ValueOf(transformedTargetPage); val.IsValid() && !isEmptyValue(val) { - transformed["targetPage"] = transformedTargetPage - } - - transformedTargetFlow, err := expandDialogflowCXPageTransitionRoutesTargetFlow(original["target_flow"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedTargetFlow); val.IsValid() && !isEmptyValue(val) { - transformed["targetFlow"] = transformedTargetFlow - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXPageTransitionRoutesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageTransitionRoutesIntent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageTransitionRoutesCondition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageTransitionRoutesTriggerFulfillment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMessages, err := expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessages(original["messages"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedMessages); val.IsValid() && !isEmptyValue(val) { - transformed["messages"] = transformedMessages - } - - transformedWebhook, err := expandDialogflowCXPageTransitionRoutesTriggerFulfillmentWebhook(original["webhook"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedWebhook); val.IsValid() && !isEmptyValue(val) { - transformed["webhook"] = 
transformedWebhook - } - - transformedReturnPartialResponses, err := expandDialogflowCXPageTransitionRoutesTriggerFulfillmentReturnPartialResponses(original["return_partial_responses"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !isEmptyValue(val) { - transformed["returnPartialResponses"] = transformedReturnPartialResponses - } - - transformedTag, err := expandDialogflowCXPageTransitionRoutesTriggerFulfillmentTag(original["tag"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - return transformed, nil -} - -func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedText, err := expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesText(original["text"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { - transformed["text"] = transformedText - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedText, err := 
expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextText(original["text"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { - transformed["text"] = transformedText - } - - transformedAllowPlaybackInterruption, err := expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(original["allow_playback_interruption"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !isEmptyValue(val) { - transformed["allowPlaybackInterruption"] = transformedAllowPlaybackInterruption - } - - return transformed, nil -} - -func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentWebhook(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentReturnPartialResponses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageTransitionRoutesTriggerFulfillmentTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageTransitionRoutesTargetPage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageTransitionRoutesTargetFlow(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} 
- -func expandDialogflowCXPageEventHandlers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandDialogflowCXPageEventHandlersName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedEvent, err := expandDialogflowCXPageEventHandlersEvent(original["event"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedEvent); val.IsValid() && !isEmptyValue(val) { - transformed["event"] = transformedEvent - } - - transformedTriggerFulfillment, err := expandDialogflowCXPageEventHandlersTriggerFulfillment(original["trigger_fulfillment"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedTriggerFulfillment); val.IsValid() && !isEmptyValue(val) { - transformed["triggerFulfillment"] = transformedTriggerFulfillment - } - - transformedTargetPage, err := expandDialogflowCXPageEventHandlersTargetPage(original["target_page"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedTargetPage); val.IsValid() && !isEmptyValue(val) { - transformed["targetPage"] = transformedTargetPage - } - - transformedTargetFlow, err := expandDialogflowCXPageEventHandlersTargetFlow(original["target_flow"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedTargetFlow); val.IsValid() && !isEmptyValue(val) { - transformed["targetFlow"] = transformedTargetFlow - } - - req = 
append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXPageEventHandlersName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageEventHandlersEvent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageEventHandlersTriggerFulfillment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMessages, err := expandDialogflowCXPageEventHandlersTriggerFulfillmentMessages(original["messages"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedMessages); val.IsValid() && !isEmptyValue(val) { - transformed["messages"] = transformedMessages - } - - transformedWebhook, err := expandDialogflowCXPageEventHandlersTriggerFulfillmentWebhook(original["webhook"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedWebhook); val.IsValid() && !isEmptyValue(val) { - transformed["webhook"] = transformedWebhook - } - - transformedReturnPartialResponses, err := expandDialogflowCXPageEventHandlersTriggerFulfillmentReturnPartialResponses(original["return_partial_responses"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedReturnPartialResponses); val.IsValid() && !isEmptyValue(val) { - transformed["returnPartialResponses"] = transformedReturnPartialResponses - } - - transformedTag, err := expandDialogflowCXPageEventHandlersTriggerFulfillmentTag(original["tag"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedTag); 
val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - return transformed, nil -} - -func expandDialogflowCXPageEventHandlersTriggerFulfillmentMessages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedText, err := expandDialogflowCXPageEventHandlersTriggerFulfillmentMessagesText(original["text"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { - transformed["text"] = transformedText - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowCXPageEventHandlersTriggerFulfillmentMessagesText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedText, err := expandDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextText(original["text"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedText); val.IsValid() && !isEmptyValue(val) { - transformed["text"] = transformedText - } - - transformedAllowPlaybackInterruption, err := expandDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(original["allow_playback_interruption"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_cx_page_reflect.ValueOf(transformedAllowPlaybackInterruption); val.IsValid() && !isEmptyValue(val) { - transformed["allowPlaybackInterruption"] = transformedAllowPlaybackInterruption - } - - return transformed, nil 
-} - -func expandDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextText(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageEventHandlersTriggerFulfillmentMessagesTextAllowPlaybackInterruption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageEventHandlersTriggerFulfillmentWebhook(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageEventHandlersTriggerFulfillmentReturnPartialResponses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageEventHandlersTriggerFulfillmentTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageEventHandlersTargetPage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageEventHandlersTargetFlow(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXPageLanguageCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDialogflowCXVersion() *resource_dialogflow_cx_version_schema.Resource { - return &resource_dialogflow_cx_version_schema.Resource{ - Create: resourceDialogflowCXVersionCreate, - Read: resourceDialogflowCXVersionRead, - Update: resourceDialogflowCXVersionUpdate, - Delete: resourceDialogflowCXVersionDelete, - - Importer: &resource_dialogflow_cx_version_schema.ResourceImporter{ - State: resourceDialogflowCXVersionImport, - }, - - Timeouts: &resource_dialogflow_cx_version_schema.ResourceTimeout{ - Create: resource_dialogflow_cx_version_schema.DefaultTimeout(40 * resource_dialogflow_cx_version_time.Minute), - Update: 
resource_dialogflow_cx_version_schema.DefaultTimeout(40 * resource_dialogflow_cx_version_time.Minute), - Delete: resource_dialogflow_cx_version_schema.DefaultTimeout(4 * resource_dialogflow_cx_version_time.Minute), - }, - - Schema: map[string]*resource_dialogflow_cx_version_schema.Schema{ - "display_name": { - Type: resource_dialogflow_cx_version_schema.TypeString, - Required: true, - ValidateFunc: resource_dialogflow_cx_version_validation.StringLenBetween(0, 64), - Description: `The human-readable name of the version. Limit of 64 characters.`, - }, - "description": { - Type: resource_dialogflow_cx_version_schema.TypeString, - Optional: true, - ValidateFunc: resource_dialogflow_cx_version_validation.StringLenBetween(0, 500), - Description: `The description of the version. The maximum length is 500 characters. If exceeded, the request is rejected.`, - }, - "parent": { - Type: resource_dialogflow_cx_version_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The Flow to create an Version for. -Format: projects//locations//agents//flows/.`, - }, - "create_time": { - Type: resource_dialogflow_cx_version_schema.TypeString, - Computed: true, - Description: `A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "name": { - Type: resource_dialogflow_cx_version_schema.TypeString, - Computed: true, - Description: `Format: projects//locations//agents//flows//versions/. 
Version ID is a self-increasing number generated by Dialogflow upon version creation.`, - }, - "nlu_settings": { - Type: resource_dialogflow_cx_version_schema.TypeList, - Computed: true, - Description: `The NLU settings of the flow at version creation.`, - Elem: &resource_dialogflow_cx_version_schema.Resource{ - Schema: map[string]*resource_dialogflow_cx_version_schema.Schema{ - "classification_threshold": { - Type: resource_dialogflow_cx_version_schema.TypeFloat, - Optional: true, - Description: `To filter out false positive results and still get variety in matched natural language inputs for your agent, you can tune the machine learning classification threshold. If the returned score value is less than the threshold value, then a no-match event will be triggered. -The score values range from 0.0 (completely uncertain) to 1.0 (completely certain). If set to 0.0, the default of 0.3 is used.`, - }, - "model_training_mode": { - Type: resource_dialogflow_cx_version_schema.TypeString, - Optional: true, - ValidateFunc: resource_dialogflow_cx_version_validation.StringInSlice([]string{"MODEL_TRAINING_MODE_AUTOMATIC", "MODEL_TRAINING_MODE_MANUAL", ""}, false), - Description: `Indicates NLU model training mode. -* MODEL_TRAINING_MODE_AUTOMATIC: NLU model training is automatically triggered when a flow gets modified. User can also manually trigger model training in this mode. -* MODEL_TRAINING_MODE_MANUAL: User needs to manually trigger NLU model training. Best for large flows whose models take long time to train. Possible values: ["MODEL_TRAINING_MODE_AUTOMATIC", "MODEL_TRAINING_MODE_MANUAL"]`, - }, - "model_type": { - Type: resource_dialogflow_cx_version_schema.TypeString, - Optional: true, - ValidateFunc: resource_dialogflow_cx_version_validation.StringInSlice([]string{"MODEL_TYPE_STANDARD", "MODEL_TYPE_ADVANCED", ""}, false), - Description: `Indicates the type of NLU model. -* MODEL_TYPE_STANDARD: Use standard NLU model. -* MODEL_TYPE_ADVANCED: Use advanced NLU model. 
Possible values: ["MODEL_TYPE_STANDARD", "MODEL_TYPE_ADVANCED"]`, - }, - }, - }, - }, - "state": { - Type: resource_dialogflow_cx_version_schema.TypeString, - Computed: true, - Description: `The state of this version. -* RUNNING: Version is not ready to serve (e.g. training is running). -* SUCCEEDED: Training has succeeded and this version is ready to serve. -* FAILED: Version training failed.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowCXVersionCreate(d *resource_dialogflow_cx_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXVersionDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_cx_version_reflect.ValueOf(displayNameProp)) && (ok || !resource_dialogflow_cx_version_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDialogflowCXVersionDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_dialogflow_cx_version_reflect.ValueOf(descriptionProp)) && (ok || !resource_dialogflow_cx_version_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/versions") - if err != nil { - return err - } - - resource_dialogflow_cx_version_log.Printf("[DEBUG] Creating new Version: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_version_schema.TimeoutCreate)) - if err != nil { - return 
resource_dialogflow_cx_version_fmt.Errorf("Error creating Version: %s", err) - } - - id, err := replaceVars(d, config, "{{parent}}/versions/{{name}}") - if err != nil { - return resource_dialogflow_cx_version_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = dialogflowCXOperationWaitTimeWithResponse( - config, res, &opRes, "Creating Version", userAgent, - d.Timeout(resource_dialogflow_cx_version_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_dialogflow_cx_version_fmt.Errorf("Error waiting to create Version: %s", err) - } - - if err := d.Set("name", flattenDialogflowCXVersionName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{parent}}/versions/{{name}}") - if err != nil { - return resource_dialogflow_cx_version_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_dialogflow_cx_version_log.Printf("[DEBUG] Finished creating Version %q: %#v", d.Id(), res) - - return resourceDialogflowCXVersionRead(d, meta) -} - -func resourceDialogflowCXVersionRead(d *resource_dialogflow_cx_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/versions/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_dialogflow_cx_version_fmt.Sprintf("DialogflowCXVersion %q", d.Id())) - } - - if err := d.Set("name", flattenDialogflowCXVersionName(res["name"], d, config)); err != nil { - return resource_dialogflow_cx_version_fmt.Errorf("Error reading Version: %s", err) - } - if err := 
d.Set("display_name", flattenDialogflowCXVersionDisplayName(res["displayName"], d, config)); err != nil { - return resource_dialogflow_cx_version_fmt.Errorf("Error reading Version: %s", err) - } - if err := d.Set("description", flattenDialogflowCXVersionDescription(res["description"], d, config)); err != nil { - return resource_dialogflow_cx_version_fmt.Errorf("Error reading Version: %s", err) - } - if err := d.Set("nlu_settings", flattenDialogflowCXVersionNluSettings(res["nluSettings"], d, config)); err != nil { - return resource_dialogflow_cx_version_fmt.Errorf("Error reading Version: %s", err) - } - if err := d.Set("create_time", flattenDialogflowCXVersionCreateTime(res["createTime"], d, config)); err != nil { - return resource_dialogflow_cx_version_fmt.Errorf("Error reading Version: %s", err) - } - if err := d.Set("state", flattenDialogflowCXVersionState(res["state"], d, config)); err != nil { - return resource_dialogflow_cx_version_fmt.Errorf("Error reading Version: %s", err) - } - - return nil -} - -func resourceDialogflowCXVersionUpdate(d *resource_dialogflow_cx_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowCXVersionDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_cx_version_reflect.ValueOf(v)) && (ok || !resource_dialogflow_cx_version_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandDialogflowCXVersionDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_dialogflow_cx_version_reflect.ValueOf(v)) && (ok || 
!resource_dialogflow_cx_version_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/versions/{{name}}") - if err != nil { - return err - } - - resource_dialogflow_cx_version_log.Printf("[DEBUG] Updating Version %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_dialogflow_cx_version_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_version_schema.TimeoutUpdate)) - - if err != nil { - return resource_dialogflow_cx_version_fmt.Errorf("Error updating Version %q: %s", d.Id(), err) - } else { - resource_dialogflow_cx_version_log.Printf("[DEBUG] Finished updating Version %q: %#v", d.Id(), res) - } - - err = dialogflowCXOperationWaitTime( - config, res, "Updating Version", userAgent, - d.Timeout(resource_dialogflow_cx_version_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceDialogflowCXVersionRead(d, meta) -} - -func resourceDialogflowCXVersionDelete(d *resource_dialogflow_cx_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{DialogflowCXBasePath}}{{parent}}/versions/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_dialogflow_cx_version_log.Printf("[DEBUG] Deleting Version %q", d.Id()) - - if bp, err := getBillingProject(d, 
config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_cx_version_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Version") - } - - err = dialogflowCXOperationWaitTime( - config, res, "Deleting Version", userAgent, - d.Timeout(resource_dialogflow_cx_version_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_dialogflow_cx_version_log.Printf("[DEBUG] Finished deleting Version %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowCXVersionImport(d *resource_dialogflow_cx_version_schema.ResourceData, meta interface{}) ([]*resource_dialogflow_cx_version_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "(?P.+)/versions/(?P[^/]+)", - "(?P.+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{parent}}/versions/{{name}}") - if err != nil { - return nil, resource_dialogflow_cx_version_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_dialogflow_cx_version_schema.ResourceData{d}, nil -} - -func flattenDialogflowCXVersionName(v interface{}, d *resource_dialogflow_cx_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenDialogflowCXVersionDisplayName(v interface{}, d *resource_dialogflow_cx_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXVersionDescription(v interface{}, d *resource_dialogflow_cx_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXVersionNluSettings(v interface{}, d *resource_dialogflow_cx_version_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - 
} - transformed := make(map[string]interface{}) - transformed["model_type"] = - flattenDialogflowCXVersionNluSettingsModelType(original["modelType"], d, config) - transformed["classification_threshold"] = - flattenDialogflowCXVersionNluSettingsClassificationThreshold(original["classificationThreshold"], d, config) - transformed["model_training_mode"] = - flattenDialogflowCXVersionNluSettingsModelTrainingMode(original["modelTrainingMode"], d, config) - return []interface{}{transformed} -} - -func flattenDialogflowCXVersionNluSettingsModelType(v interface{}, d *resource_dialogflow_cx_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXVersionNluSettingsClassificationThreshold(v interface{}, d *resource_dialogflow_cx_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXVersionNluSettingsModelTrainingMode(v interface{}, d *resource_dialogflow_cx_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXVersionCreateTime(v interface{}, d *resource_dialogflow_cx_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowCXVersionState(v interface{}, d *resource_dialogflow_cx_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowCXVersionDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowCXVersionDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDialogflowEntityType() *resource_dialogflow_entity_type_schema.Resource { - return &resource_dialogflow_entity_type_schema.Resource{ - Create: resourceDialogflowEntityTypeCreate, - Read: resourceDialogflowEntityTypeRead, - Update: resourceDialogflowEntityTypeUpdate, - Delete: resourceDialogflowEntityTypeDelete, - - Importer: 
&resource_dialogflow_entity_type_schema.ResourceImporter{ - State: resourceDialogflowEntityTypeImport, - }, - - Timeouts: &resource_dialogflow_entity_type_schema.ResourceTimeout{ - Create: resource_dialogflow_entity_type_schema.DefaultTimeout(4 * resource_dialogflow_entity_type_time.Minute), - Update: resource_dialogflow_entity_type_schema.DefaultTimeout(4 * resource_dialogflow_entity_type_time.Minute), - Delete: resource_dialogflow_entity_type_schema.DefaultTimeout(4 * resource_dialogflow_entity_type_time.Minute), - }, - - Schema: map[string]*resource_dialogflow_entity_type_schema.Schema{ - "display_name": { - Type: resource_dialogflow_entity_type_schema.TypeString, - Required: true, - Description: `The name of this entity type to be displayed on the console.`, - }, - "kind": { - Type: resource_dialogflow_entity_type_schema.TypeString, - Required: true, - ValidateFunc: resource_dialogflow_entity_type_validation.StringInSlice([]string{"KIND_MAP", "KIND_LIST", "KIND_REGEXP"}, false), - Description: `Indicates the kind of entity type. -* KIND_MAP: Map entity types allow mapping of a group of synonyms to a reference value. -* KIND_LIST: List entity types contain a set of entries that do not map to reference values. However, list entity -types can contain references to other entity types (with or without aliases). -* KIND_REGEXP: Regexp entity types allow to specify regular expressions in entries values. 
Possible values: ["KIND_MAP", "KIND_LIST", "KIND_REGEXP"]`, - }, - "enable_fuzzy_extraction": { - Type: resource_dialogflow_entity_type_schema.TypeBool, - Optional: true, - Description: `Enables fuzzy entity extraction during classification.`, - }, - "entities": { - Type: resource_dialogflow_entity_type_schema.TypeList, - Optional: true, - Description: `The collection of entity entries associated with the entity type.`, - Elem: &resource_dialogflow_entity_type_schema.Resource{ - Schema: map[string]*resource_dialogflow_entity_type_schema.Schema{ - "synonyms": { - Type: resource_dialogflow_entity_type_schema.TypeList, - Required: true, - Description: `A collection of value synonyms. For example, if the entity type is vegetable, and value is scallions, a synonym -could be green onions. -For KIND_LIST entity types: -* This collection must contain exactly one synonym equal to value.`, - Elem: &resource_dialogflow_entity_type_schema.Schema{ - Type: resource_dialogflow_entity_type_schema.TypeString, - }, - }, - "value": { - Type: resource_dialogflow_entity_type_schema.TypeString, - Required: true, - Description: `The primary value associated with this entity entry. For example, if the entity type is vegetable, the value -could be scallions. -For KIND_MAP entity types: -* A reference value to be used in place of synonyms. -For KIND_LIST entity types: -* A string that can contain references to other entity types (with or without aliases).`, - }, - }, - }, - }, - "name": { - Type: resource_dialogflow_entity_type_schema.TypeString, - Computed: true, - Description: `The unique identifier of the entity type. 
-Format: projects//agent/entityTypes/.`, - }, - "project": { - Type: resource_dialogflow_entity_type_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowEntityTypeCreate(d *resource_dialogflow_entity_type_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowEntityTypeDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_entity_type_reflect.ValueOf(displayNameProp)) && (ok || !resource_dialogflow_entity_type_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - kindProp, err := expandDialogflowEntityTypeKind(d.Get("kind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kind"); !isEmptyValue(resource_dialogflow_entity_type_reflect.ValueOf(kindProp)) && (ok || !resource_dialogflow_entity_type_reflect.DeepEqual(v, kindProp)) { - obj["kind"] = kindProp - } - enableFuzzyExtractionProp, err := expandDialogflowEntityTypeEnableFuzzyExtraction(d.Get("enable_fuzzy_extraction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_fuzzy_extraction"); !isEmptyValue(resource_dialogflow_entity_type_reflect.ValueOf(enableFuzzyExtractionProp)) && (ok || !resource_dialogflow_entity_type_reflect.DeepEqual(v, enableFuzzyExtractionProp)) { - obj["enableFuzzyExtraction"] = enableFuzzyExtractionProp - } - entitiesProp, err := expandDialogflowEntityTypeEntities(d.Get("entities"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entities"); !isEmptyValue(resource_dialogflow_entity_type_reflect.ValueOf(entitiesProp)) && (ok || 
!resource_dialogflow_entity_type_reflect.DeepEqual(v, entitiesProp)) { - obj["entities"] = entitiesProp - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent/entityTypes/") - if err != nil { - return err - } - - resource_dialogflow_entity_type_log.Printf("[DEBUG] Creating new EntityType: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_entity_type_fmt.Errorf("Error fetching project for EntityType: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_entity_type_schema.TimeoutCreate)) - if err != nil { - return resource_dialogflow_entity_type_fmt.Errorf("Error creating EntityType: %s", err) - } - if err := d.Set("name", flattenDialogflowEntityTypeName(res["name"], d, config)); err != nil { - return resource_dialogflow_entity_type_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_dialogflow_entity_type_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return resource_dialogflow_entity_type_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return resource_dialogflow_entity_type_fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return resource_dialogflow_entity_type_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - resource_dialogflow_entity_type_log.Printf("[DEBUG] Finished creating EntityType %q: %#v", d.Id(), res) - - return resourceDialogflowEntityTypeRead(d, meta) -} - -func resourceDialogflowEntityTypeRead(d *resource_dialogflow_entity_type_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_entity_type_fmt.Errorf("Error fetching project for EntityType: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_dialogflow_entity_type_fmt.Sprintf("DialogflowEntityType %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_dialogflow_entity_type_fmt.Errorf("Error reading EntityType: %s", err) - } - - if err := d.Set("name", flattenDialogflowEntityTypeName(res["name"], d, config)); err != nil { - return resource_dialogflow_entity_type_fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("display_name", flattenDialogflowEntityTypeDisplayName(res["displayName"], d, config)); err != nil { - return resource_dialogflow_entity_type_fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("kind", flattenDialogflowEntityTypeKind(res["kind"], d, config)); err != nil { - return resource_dialogflow_entity_type_fmt.Errorf("Error reading EntityType: %s", err) - } - if 
err := d.Set("enable_fuzzy_extraction", flattenDialogflowEntityTypeEnableFuzzyExtraction(res["enableFuzzyExtraction"], d, config)); err != nil { - return resource_dialogflow_entity_type_fmt.Errorf("Error reading EntityType: %s", err) - } - if err := d.Set("entities", flattenDialogflowEntityTypeEntities(res["entities"], d, config)); err != nil { - return resource_dialogflow_entity_type_fmt.Errorf("Error reading EntityType: %s", err) - } - - return nil -} - -func resourceDialogflowEntityTypeUpdate(d *resource_dialogflow_entity_type_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_entity_type_fmt.Errorf("Error fetching project for EntityType: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowEntityTypeDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_entity_type_reflect.ValueOf(v)) && (ok || !resource_dialogflow_entity_type_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - kindProp, err := expandDialogflowEntityTypeKind(d.Get("kind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kind"); !isEmptyValue(resource_dialogflow_entity_type_reflect.ValueOf(v)) && (ok || !resource_dialogflow_entity_type_reflect.DeepEqual(v, kindProp)) { - obj["kind"] = kindProp - } - enableFuzzyExtractionProp, err := expandDialogflowEntityTypeEnableFuzzyExtraction(d.Get("enable_fuzzy_extraction"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_fuzzy_extraction"); !isEmptyValue(resource_dialogflow_entity_type_reflect.ValueOf(v)) && (ok || 
!resource_dialogflow_entity_type_reflect.DeepEqual(v, enableFuzzyExtractionProp)) { - obj["enableFuzzyExtraction"] = enableFuzzyExtractionProp - } - entitiesProp, err := expandDialogflowEntityTypeEntities(d.Get("entities"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entities"); !isEmptyValue(resource_dialogflow_entity_type_reflect.ValueOf(v)) && (ok || !resource_dialogflow_entity_type_reflect.DeepEqual(v, entitiesProp)) { - obj["entities"] = entitiesProp - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}{{name}}") - if err != nil { - return err - } - - resource_dialogflow_entity_type_log.Printf("[DEBUG] Updating EntityType %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_entity_type_schema.TimeoutUpdate)) - - if err != nil { - return resource_dialogflow_entity_type_fmt.Errorf("Error updating EntityType %q: %s", d.Id(), err) - } else { - resource_dialogflow_entity_type_log.Printf("[DEBUG] Finished updating EntityType %q: %#v", d.Id(), res) - } - - return resourceDialogflowEntityTypeRead(d, meta) -} - -func resourceDialogflowEntityTypeDelete(d *resource_dialogflow_entity_type_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_entity_type_fmt.Errorf("Error fetching project for EntityType: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_dialogflow_entity_type_log.Printf("[DEBUG] Deleting EntityType %q", d.Id()) - - if bp, err := getBillingProject(d, config); err 
== nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_entity_type_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "EntityType") - } - - resource_dialogflow_entity_type_log.Printf("[DEBUG] Finished deleting EntityType %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowEntityTypeImport(d *resource_dialogflow_entity_type_schema.ResourceData, meta interface{}) ([]*resource_dialogflow_entity_type_schema.ResourceData, error) { - - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - stringParts := resource_dialogflow_entity_type_strings.Split(d.Get("name").(string), "/") - if len(stringParts) < 2 { - return nil, resource_dialogflow_entity_type_fmt.Errorf( - "Could not split project from name: %s", - d.Get("name"), - ) - } - - if err := d.Set("project", stringParts[1]); err != nil { - return nil, resource_dialogflow_entity_type_fmt.Errorf("Error setting project: %s", err) - } - return []*resource_dialogflow_entity_type_schema.ResourceData{d}, nil -} - -func flattenDialogflowEntityTypeName(v interface{}, d *resource_dialogflow_entity_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowEntityTypeDisplayName(v interface{}, d *resource_dialogflow_entity_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowEntityTypeKind(v interface{}, d *resource_dialogflow_entity_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowEntityTypeEnableFuzzyExtraction(v interface{}, d *resource_dialogflow_entity_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowEntityTypeEntities(v interface{}, d *resource_dialogflow_entity_type_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - 
l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "value": flattenDialogflowEntityTypeEntitiesValue(original["value"], d, config), - "synonyms": flattenDialogflowEntityTypeEntitiesSynonyms(original["synonyms"], d, config), - }) - } - return transformed -} - -func flattenDialogflowEntityTypeEntitiesValue(v interface{}, d *resource_dialogflow_entity_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowEntityTypeEntitiesSynonyms(v interface{}, d *resource_dialogflow_entity_type_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowEntityTypeDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowEntityTypeKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowEntityTypeEnableFuzzyExtraction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowEntityTypeEntities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedValue, err := expandDialogflowEntityTypeEntitiesValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_entity_type_reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - transformedSynonyms, err := expandDialogflowEntityTypeEntitiesSynonyms(original["synonyms"], d, config) - if err != nil { - return nil, err 
- } else if val := resource_dialogflow_entity_type_reflect.ValueOf(transformedSynonyms); val.IsValid() && !isEmptyValue(val) { - transformed["synonyms"] = transformedSynonyms - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowEntityTypeEntitiesValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowEntityTypeEntitiesSynonyms(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDialogflowFulfillment() *resource_dialogflow_fulfillment_schema.Resource { - return &resource_dialogflow_fulfillment_schema.Resource{ - Create: resourceDialogflowFulfillmentCreate, - Read: resourceDialogflowFulfillmentRead, - Update: resourceDialogflowFulfillmentUpdate, - Delete: resourceDialogflowFulfillmentDelete, - - Importer: &resource_dialogflow_fulfillment_schema.ResourceImporter{ - State: resourceDialogflowFulfillmentImport, - }, - - Timeouts: &resource_dialogflow_fulfillment_schema.ResourceTimeout{ - Create: resource_dialogflow_fulfillment_schema.DefaultTimeout(4 * resource_dialogflow_fulfillment_time.Minute), - Update: resource_dialogflow_fulfillment_schema.DefaultTimeout(4 * resource_dialogflow_fulfillment_time.Minute), - Delete: resource_dialogflow_fulfillment_schema.DefaultTimeout(4 * resource_dialogflow_fulfillment_time.Minute), - }, - - Schema: map[string]*resource_dialogflow_fulfillment_schema.Schema{ - "display_name": { - Type: resource_dialogflow_fulfillment_schema.TypeString, - Required: true, - Description: `The human-readable name of the fulfillment, unique within the agent.`, - }, - "enabled": { - Type: resource_dialogflow_fulfillment_schema.TypeBool, - Optional: true, - Description: `Whether fulfillment is enabled.`, - }, - "features": { - Type: resource_dialogflow_fulfillment_schema.TypeList, - Optional: true, - Description: `The field defines whether the fulfillment is enabled for certain features.`, - 
Elem: &resource_dialogflow_fulfillment_schema.Resource{ - Schema: map[string]*resource_dialogflow_fulfillment_schema.Schema{ - "type": { - Type: resource_dialogflow_fulfillment_schema.TypeString, - Required: true, - ValidateFunc: resource_dialogflow_fulfillment_validation.StringInSlice([]string{"SMALLTALK"}, false), - Description: `The type of the feature that enabled for fulfillment. -* SMALLTALK: Fulfillment is enabled for SmallTalk. Possible values: ["SMALLTALK"]`, - }, - }, - }, - }, - "generic_web_service": { - Type: resource_dialogflow_fulfillment_schema.TypeList, - Optional: true, - Description: `Represents configuration for a generic web service. Dialogflow supports two mechanisms for authentications: - Basic authentication with username and password. - Authentication with additional authentication headers.`, - MaxItems: 1, - Elem: &resource_dialogflow_fulfillment_schema.Resource{ - Schema: map[string]*resource_dialogflow_fulfillment_schema.Schema{ - "uri": { - Type: resource_dialogflow_fulfillment_schema.TypeString, - Required: true, - Description: `The fulfillment URI for receiving POST requests. It must use https protocol.`, - }, - "password": { - Type: resource_dialogflow_fulfillment_schema.TypeString, - Optional: true, - Description: `The password for HTTP Basic authentication.`, - }, - "request_headers": { - Type: resource_dialogflow_fulfillment_schema.TypeMap, - Optional: true, - Description: `The HTTP request headers to send together with fulfillment requests.`, - Elem: &resource_dialogflow_fulfillment_schema.Schema{Type: resource_dialogflow_fulfillment_schema.TypeString}, - }, - "username": { - Type: resource_dialogflow_fulfillment_schema.TypeString, - Optional: true, - Description: `The user name for HTTP Basic authentication.`, - }, - }, - }, - }, - "name": { - Type: resource_dialogflow_fulfillment_schema.TypeString, - Computed: true, - Description: `The unique identifier of the fulfillment. 
-Format: projects//agent/fulfillment - projects//locations//agent/fulfillment`, - }, - "project": { - Type: resource_dialogflow_fulfillment_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowFulfillmentCreate(d *resource_dialogflow_fulfillment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowFulfillmentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_fulfillment_reflect.ValueOf(displayNameProp)) && (ok || !resource_dialogflow_fulfillment_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandDialogflowFulfillmentEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(resource_dialogflow_fulfillment_reflect.ValueOf(enabledProp)) && (ok || !resource_dialogflow_fulfillment_reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - featuresProp, err := expandDialogflowFulfillmentFeatures(d.Get("features"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("features"); !isEmptyValue(resource_dialogflow_fulfillment_reflect.ValueOf(featuresProp)) && (ok || !resource_dialogflow_fulfillment_reflect.DeepEqual(v, featuresProp)) { - obj["features"] = featuresProp - } - genericWebServiceProp, err := expandDialogflowFulfillmentGenericWebService(d.Get("generic_web_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("generic_web_service"); !isEmptyValue(resource_dialogflow_fulfillment_reflect.ValueOf(genericWebServiceProp)) && (ok || 
!resource_dialogflow_fulfillment_reflect.DeepEqual(v, genericWebServiceProp)) { - obj["genericWebService"] = genericWebServiceProp - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent/fulfillment/?updateMask=name,displayName,enabled,genericWebService,features") - if err != nil { - return err - } - - resource_dialogflow_fulfillment_log.Printf("[DEBUG] Creating new Fulfillment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_fulfillment_fmt.Errorf("Error fetching project for Fulfillment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_fulfillment_schema.TimeoutCreate)) - if err != nil { - return resource_dialogflow_fulfillment_fmt.Errorf("Error creating Fulfillment: %s", err) - } - if err := d.Set("name", flattenDialogflowFulfillmentName(res["name"], d, config)); err != nil { - return resource_dialogflow_fulfillment_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_dialogflow_fulfillment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return resource_dialogflow_fulfillment_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return resource_dialogflow_fulfillment_fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return resource_dialogflow_fulfillment_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - resource_dialogflow_fulfillment_log.Printf("[DEBUG] Finished creating Fulfillment %q: %#v", d.Id(), res) - - return resourceDialogflowFulfillmentRead(d, meta) -} - -func resourceDialogflowFulfillmentRead(d *resource_dialogflow_fulfillment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_fulfillment_fmt.Errorf("Error fetching project for Fulfillment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_dialogflow_fulfillment_fmt.Sprintf("DialogflowFulfillment %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_dialogflow_fulfillment_fmt.Errorf("Error reading Fulfillment: %s", err) - } - - if err := d.Set("name", flattenDialogflowFulfillmentName(res["name"], d, config)); err != nil { - return resource_dialogflow_fulfillment_fmt.Errorf("Error reading Fulfillment: %s", err) - } - if err := d.Set("display_name", flattenDialogflowFulfillmentDisplayName(res["displayName"], d, config)); err != nil { - return resource_dialogflow_fulfillment_fmt.Errorf("Error reading Fulfillment: %s", err) - } - if err := d.Set("enabled", flattenDialogflowFulfillmentEnabled(res["enabled"], d, config)); err != nil { - return resource_dialogflow_fulfillment_fmt.Errorf("Error reading 
Fulfillment: %s", err) - } - if err := d.Set("features", flattenDialogflowFulfillmentFeatures(res["features"], d, config)); err != nil { - return resource_dialogflow_fulfillment_fmt.Errorf("Error reading Fulfillment: %s", err) - } - if err := d.Set("generic_web_service", flattenDialogflowFulfillmentGenericWebService(res["genericWebService"], d, config)); err != nil { - return resource_dialogflow_fulfillment_fmt.Errorf("Error reading Fulfillment: %s", err) - } - - return nil -} - -func resourceDialogflowFulfillmentUpdate(d *resource_dialogflow_fulfillment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_fulfillment_fmt.Errorf("Error fetching project for Fulfillment: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowFulfillmentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_fulfillment_reflect.ValueOf(v)) && (ok || !resource_dialogflow_fulfillment_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandDialogflowFulfillmentEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(resource_dialogflow_fulfillment_reflect.ValueOf(v)) && (ok || !resource_dialogflow_fulfillment_reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - featuresProp, err := expandDialogflowFulfillmentFeatures(d.Get("features"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("features"); !isEmptyValue(resource_dialogflow_fulfillment_reflect.ValueOf(v)) && (ok || 
!resource_dialogflow_fulfillment_reflect.DeepEqual(v, featuresProp)) { - obj["features"] = featuresProp - } - genericWebServiceProp, err := expandDialogflowFulfillmentGenericWebService(d.Get("generic_web_service"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("generic_web_service"); !isEmptyValue(resource_dialogflow_fulfillment_reflect.ValueOf(v)) && (ok || !resource_dialogflow_fulfillment_reflect.DeepEqual(v, genericWebServiceProp)) { - obj["genericWebService"] = genericWebServiceProp - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent/fulfillment/") - if err != nil { - return err - } - - resource_dialogflow_fulfillment_log.Printf("[DEBUG] Updating Fulfillment %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("enabled") { - updateMask = append(updateMask, "enabled") - } - - if d.HasChange("features") { - updateMask = append(updateMask, "features") - } - - if d.HasChange("generic_web_service") { - updateMask = append(updateMask, "genericWebService") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_dialogflow_fulfillment_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_fulfillment_schema.TimeoutUpdate)) - - if err != nil { - return resource_dialogflow_fulfillment_fmt.Errorf("Error updating Fulfillment %q: %s", d.Id(), err) - } else { - resource_dialogflow_fulfillment_log.Printf("[DEBUG] Finished updating Fulfillment %q: %#v", d.Id(), res) - } - - return resourceDialogflowFulfillmentRead(d, meta) -} - -func resourceDialogflowFulfillmentDelete(d *resource_dialogflow_fulfillment_schema.ResourceData, meta interface{}) error { - config := 
meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_fulfillment_fmt.Errorf("Error fetching project for Fulfillment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent/fulfillment/?updateMask=name,displayName,enabled,genericWebService,features") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_dialogflow_fulfillment_log.Printf("[DEBUG] Deleting Fulfillment %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_fulfillment_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Fulfillment") - } - - resource_dialogflow_fulfillment_log.Printf("[DEBUG] Finished deleting Fulfillment %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowFulfillmentImport(d *resource_dialogflow_fulfillment_schema.ResourceData, meta interface{}) ([]*resource_dialogflow_fulfillment_schema.ResourceData, error) { - - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - stringParts := resource_dialogflow_fulfillment_strings.Split(d.Get("name").(string), "/") - if len(stringParts) < 2 { - return nil, resource_dialogflow_fulfillment_fmt.Errorf( - "Could not split project from name: %s", - d.Get("name"), - ) - } - - if err := d.Set("project", stringParts[1]); err != nil { - return nil, resource_dialogflow_fulfillment_fmt.Errorf("Error setting project: %s", err) - } - return []*resource_dialogflow_fulfillment_schema.ResourceData{d}, nil -} - -func flattenDialogflowFulfillmentName(v interface{}, d 
*resource_dialogflow_fulfillment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowFulfillmentDisplayName(v interface{}, d *resource_dialogflow_fulfillment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowFulfillmentEnabled(v interface{}, d *resource_dialogflow_fulfillment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowFulfillmentFeatures(v interface{}, d *resource_dialogflow_fulfillment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "type": flattenDialogflowFulfillmentFeaturesType(original["type"], d, config), - }) - } - return transformed -} - -func flattenDialogflowFulfillmentFeaturesType(v interface{}, d *resource_dialogflow_fulfillment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowFulfillmentGenericWebService(v interface{}, d *resource_dialogflow_fulfillment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["uri"] = - flattenDialogflowFulfillmentGenericWebServiceUri(original["uri"], d, config) - transformed["username"] = - flattenDialogflowFulfillmentGenericWebServiceUsername(original["username"], d, config) - transformed["password"] = - flattenDialogflowFulfillmentGenericWebServicePassword(original["password"], d, config) - transformed["request_headers"] = - flattenDialogflowFulfillmentGenericWebServiceRequestHeaders(original["requestHeaders"], d, config) - return []interface{}{transformed} -} - -func 
flattenDialogflowFulfillmentGenericWebServiceUri(v interface{}, d *resource_dialogflow_fulfillment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowFulfillmentGenericWebServiceUsername(v interface{}, d *resource_dialogflow_fulfillment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowFulfillmentGenericWebServicePassword(v interface{}, d *resource_dialogflow_fulfillment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowFulfillmentGenericWebServiceRequestHeaders(v interface{}, d *resource_dialogflow_fulfillment_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowFulfillmentDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowFulfillmentEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowFulfillmentFeatures(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedType, err := expandDialogflowFulfillmentFeaturesType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_fulfillment_reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDialogflowFulfillmentFeaturesType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowFulfillmentGenericWebService(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) 
== 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUri, err := expandDialogflowFulfillmentGenericWebServiceUri(original["uri"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_fulfillment_reflect.ValueOf(transformedUri); val.IsValid() && !isEmptyValue(val) { - transformed["uri"] = transformedUri - } - - transformedUsername, err := expandDialogflowFulfillmentGenericWebServiceUsername(original["username"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_fulfillment_reflect.ValueOf(transformedUsername); val.IsValid() && !isEmptyValue(val) { - transformed["username"] = transformedUsername - } - - transformedPassword, err := expandDialogflowFulfillmentGenericWebServicePassword(original["password"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_fulfillment_reflect.ValueOf(transformedPassword); val.IsValid() && !isEmptyValue(val) { - transformed["password"] = transformedPassword - } - - transformedRequestHeaders, err := expandDialogflowFulfillmentGenericWebServiceRequestHeaders(original["request_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_dialogflow_fulfillment_reflect.ValueOf(transformedRequestHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeaders"] = transformedRequestHeaders - } - - return transformed, nil -} - -func expandDialogflowFulfillmentGenericWebServiceUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowFulfillmentGenericWebServiceUsername(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowFulfillmentGenericWebServicePassword(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandDialogflowFulfillmentGenericWebServiceRequestHeaders(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceDialogflowIntent() *resource_dialogflow_intent_schema.Resource { - return &resource_dialogflow_intent_schema.Resource{ - Create: resourceDialogflowIntentCreate, - Read: resourceDialogflowIntentRead, - Update: resourceDialogflowIntentUpdate, - Delete: resourceDialogflowIntentDelete, - - Importer: &resource_dialogflow_intent_schema.ResourceImporter{ - State: resourceDialogflowIntentImport, - }, - - Timeouts: &resource_dialogflow_intent_schema.ResourceTimeout{ - Create: resource_dialogflow_intent_schema.DefaultTimeout(4 * resource_dialogflow_intent_time.Minute), - Update: resource_dialogflow_intent_schema.DefaultTimeout(4 * resource_dialogflow_intent_time.Minute), - Delete: resource_dialogflow_intent_schema.DefaultTimeout(4 * resource_dialogflow_intent_time.Minute), - }, - - Schema: map[string]*resource_dialogflow_intent_schema.Schema{ - "display_name": { - Type: resource_dialogflow_intent_schema.TypeString, - Required: true, - Description: `The name of this intent to be displayed on the console.`, - }, - "action": { - Type: resource_dialogflow_intent_schema.TypeString, - Computed: true, - Optional: true, - Description: `The name of the action associated with the intent. -Note: The action name must not contain whitespaces.`, - }, - "default_response_platforms": { - Type: resource_dialogflow_intent_schema.TypeList, - Optional: true, - Description: `The list of platforms for which the first responses will be copied from the messages in PLATFORM_UNSPECIFIED -(i.e. default platform). 
Possible values: ["FACEBOOK", "SLACK", "TELEGRAM", "KIK", "SKYPE", "LINE", "VIBER", "ACTIONS_ON_GOOGLE", "GOOGLE_HANGOUTS"]`, - Elem: &resource_dialogflow_intent_schema.Schema{ - Type: resource_dialogflow_intent_schema.TypeString, - ValidateFunc: resource_dialogflow_intent_validation.StringInSlice([]string{"FACEBOOK", "SLACK", "TELEGRAM", "KIK", "SKYPE", "LINE", "VIBER", "ACTIONS_ON_GOOGLE", "GOOGLE_HANGOUTS"}, false), - }, - }, - "events": { - Type: resource_dialogflow_intent_schema.TypeList, - Optional: true, - Description: `The collection of event names that trigger the intent. If the collection of input contexts is not empty, all of -the contexts must be present in the active user session for an event to trigger this intent. See the -[events reference](https://cloud.google.com/dialogflow/docs/events-overview) for more details.`, - Elem: &resource_dialogflow_intent_schema.Schema{ - Type: resource_dialogflow_intent_schema.TypeString, - }, - }, - "input_context_names": { - Type: resource_dialogflow_intent_schema.TypeList, - Optional: true, - Description: `The list of context names required for this intent to be triggered. -Format: projects//agent/sessions/-/contexts/.`, - Elem: &resource_dialogflow_intent_schema.Schema{ - Type: resource_dialogflow_intent_schema.TypeString, - }, - }, - "is_fallback": { - Type: resource_dialogflow_intent_schema.TypeBool, - Computed: true, - Optional: true, - Description: `Indicates whether this is a fallback intent.`, - }, - "ml_disabled": { - Type: resource_dialogflow_intent_schema.TypeBool, - Computed: true, - Optional: true, - Description: `Indicates whether Machine Learning is disabled for the intent. -Note: If mlDisabled setting is set to true, then this intent is not taken into account during inference in ML -ONLY match mode. 
Also, auto-markup in the UI is turned off.`, - }, - "parent_followup_intent_name": { - Type: resource_dialogflow_intent_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The unique identifier of the parent intent in the chain of followup intents. -Format: projects//agent/intents/.`, - }, - "priority": { - Type: resource_dialogflow_intent_schema.TypeInt, - Computed: true, - Optional: true, - Description: `The priority of this intent. Higher numbers represent higher priorities. - - If the supplied value is unspecified or 0, the service translates the value to 500,000, which corresponds - to the Normal priority in the console. - - If the supplied value is negative, the intent is ignored in runtime detect intent requests.`, - }, - "reset_contexts": { - Type: resource_dialogflow_intent_schema.TypeBool, - Computed: true, - Optional: true, - Description: `Indicates whether to delete all contexts in the current session when this intent is matched.`, - }, - "webhook_state": { - Type: resource_dialogflow_intent_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_dialogflow_intent_validation.StringInSlice([]string{"WEBHOOK_STATE_ENABLED", "WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING", ""}, false), - Description: `Indicates whether webhooks are enabled for the intent. -* WEBHOOK_STATE_ENABLED: Webhook is enabled in the agent and in the intent. -* WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING: Webhook is enabled in the agent and in the intent. Also, each slot -filling prompt is forwarded to the webhook. Possible values: ["WEBHOOK_STATE_ENABLED", "WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING"]`, - }, - "followup_intent_info": { - Type: resource_dialogflow_intent_schema.TypeList, - Computed: true, - Description: `Information about all followup intents that have this intent as a direct or indirect parent. 
We populate this field -only in the output.`, - Elem: &resource_dialogflow_intent_schema.Resource{ - Schema: map[string]*resource_dialogflow_intent_schema.Schema{ - "followup_intent_name": { - Type: resource_dialogflow_intent_schema.TypeString, - Optional: true, - Description: `The unique identifier of the followup intent. -Format: projects//agent/intents/.`, - }, - "parent_followup_intent_name": { - Type: resource_dialogflow_intent_schema.TypeString, - Optional: true, - Description: `The unique identifier of the followup intent's parent. -Format: projects//agent/intents/.`, - }, - }, - }, - }, - "name": { - Type: resource_dialogflow_intent_schema.TypeString, - Computed: true, - Description: `The unique identifier of this intent. -Format: projects//agent/intents/.`, - }, - "root_followup_intent_name": { - Type: resource_dialogflow_intent_schema.TypeString, - Computed: true, - Description: `The unique identifier of the root intent in the chain of followup intents. It identifies the correct followup -intents chain for this intent. 
-Format: projects//agent/intents/.`, - }, - "project": { - Type: resource_dialogflow_intent_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDialogflowIntentCreate(d *resource_dialogflow_intent_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowIntentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(displayNameProp)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - webhookStateProp, err := expandDialogflowIntentWebhookState(d.Get("webhook_state"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("webhook_state"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(webhookStateProp)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, webhookStateProp)) { - obj["webhookState"] = webhookStateProp - } - priorityProp, err := expandDialogflowIntentPriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(priorityProp)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, priorityProp)) { - obj["priority"] = priorityProp - } - isFallbackProp, err := expandDialogflowIntentIsFallback(d.Get("is_fallback"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("is_fallback"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(isFallbackProp)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, isFallbackProp)) { - obj["isFallback"] = isFallbackProp - } - mlDisabledProp, err := 
expandDialogflowIntentMlDisabled(d.Get("ml_disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ml_disabled"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(mlDisabledProp)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, mlDisabledProp)) { - obj["mlDisabled"] = mlDisabledProp - } - inputContextNamesProp, err := expandDialogflowIntentInputContextNames(d.Get("input_context_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("input_context_names"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(inputContextNamesProp)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, inputContextNamesProp)) { - obj["inputContextNames"] = inputContextNamesProp - } - eventsProp, err := expandDialogflowIntentEvents(d.Get("events"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("events"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(eventsProp)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, eventsProp)) { - obj["events"] = eventsProp - } - actionProp, err := expandDialogflowIntentAction(d.Get("action"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("action"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(actionProp)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, actionProp)) { - obj["action"] = actionProp - } - resetContextsProp, err := expandDialogflowIntentResetContexts(d.Get("reset_contexts"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("reset_contexts"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(resetContextsProp)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, resetContextsProp)) { - obj["resetContexts"] = resetContextsProp - } - defaultResponsePlatformsProp, err := expandDialogflowIntentDefaultResponsePlatforms(d.Get("default_response_platforms"), d, config) - if err != nil { - return err - } else if v, 
ok := d.GetOkExists("default_response_platforms"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(defaultResponsePlatformsProp)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, defaultResponsePlatformsProp)) { - obj["defaultResponsePlatforms"] = defaultResponsePlatformsProp - } - parentFollowupIntentNameProp, err := expandDialogflowIntentParentFollowupIntentName(d.Get("parent_followup_intent_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent_followup_intent_name"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(parentFollowupIntentNameProp)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, parentFollowupIntentNameProp)) { - obj["parentFollowupIntentName"] = parentFollowupIntentNameProp - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}projects/{{project}}/agent/intents/") - if err != nil { - return err - } - - resource_dialogflow_intent_log.Printf("[DEBUG] Creating new Intent: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error fetching project for Intent: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_intent_schema.TimeoutCreate)) - if err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error creating Intent: %s", err) - } - if err := d.Set("name", flattenDialogflowIntentName(res["name"], d, config)); err != nil { - return resource_dialogflow_intent_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok 
{ - return resource_dialogflow_intent_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return resource_dialogflow_intent_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - resource_dialogflow_intent_log.Printf("[DEBUG] Finished creating Intent %q: %#v", d.Id(), res) - - return resourceDialogflowIntentRead(d, meta) -} - -func resourceDialogflowIntentRead(d *resource_dialogflow_intent_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error fetching project for Intent: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_dialogflow_intent_fmt.Sprintf("DialogflowIntent %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error reading Intent: %s", err) - } - - if err := d.Set("name", flattenDialogflowIntentName(res["name"], d, config)); err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("display_name", flattenDialogflowIntentDisplayName(res["displayName"], d, config)); err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error 
reading Intent: %s", err) - } - if err := d.Set("webhook_state", flattenDialogflowIntentWebhookState(res["webhookState"], d, config)); err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("priority", flattenDialogflowIntentPriority(res["priority"], d, config)); err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("is_fallback", flattenDialogflowIntentIsFallback(res["isFallback"], d, config)); err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("ml_disabled", flattenDialogflowIntentMlDisabled(res["mlDisabled"], d, config)); err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("input_context_names", flattenDialogflowIntentInputContextNames(res["inputContextNames"], d, config)); err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("events", flattenDialogflowIntentEvents(res["events"], d, config)); err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("action", flattenDialogflowIntentAction(res["action"], d, config)); err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("reset_contexts", flattenDialogflowIntentResetContexts(res["resetContexts"], d, config)); err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("default_response_platforms", flattenDialogflowIntentDefaultResponsePlatforms(res["defaultResponsePlatforms"], d, config)); err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("root_followup_intent_name", flattenDialogflowIntentRootFollowupIntentName(res["rootFollowupIntentName"], d, config)); err != nil { - return 
resource_dialogflow_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("parent_followup_intent_name", flattenDialogflowIntentParentFollowupIntentName(res["parentFollowupIntentName"], d, config)); err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error reading Intent: %s", err) - } - if err := d.Set("followup_intent_info", flattenDialogflowIntentFollowupIntentInfo(res["followupIntentInfo"], d, config)); err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error reading Intent: %s", err) - } - - return nil -} - -func resourceDialogflowIntentUpdate(d *resource_dialogflow_intent_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error fetching project for Intent: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandDialogflowIntentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - webhookStateProp, err := expandDialogflowIntentWebhookState(d.Get("webhook_state"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("webhook_state"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, webhookStateProp)) { - obj["webhookState"] = webhookStateProp - } - priorityProp, err := expandDialogflowIntentPriority(d.Get("priority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("priority"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(v)) && 
(ok || !resource_dialogflow_intent_reflect.DeepEqual(v, priorityProp)) { - obj["priority"] = priorityProp - } - isFallbackProp, err := expandDialogflowIntentIsFallback(d.Get("is_fallback"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("is_fallback"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, isFallbackProp)) { - obj["isFallback"] = isFallbackProp - } - mlDisabledProp, err := expandDialogflowIntentMlDisabled(d.Get("ml_disabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ml_disabled"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, mlDisabledProp)) { - obj["mlDisabled"] = mlDisabledProp - } - inputContextNamesProp, err := expandDialogflowIntentInputContextNames(d.Get("input_context_names"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("input_context_names"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, inputContextNamesProp)) { - obj["inputContextNames"] = inputContextNamesProp - } - eventsProp, err := expandDialogflowIntentEvents(d.Get("events"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("events"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, eventsProp)) { - obj["events"] = eventsProp - } - actionProp, err := expandDialogflowIntentAction(d.Get("action"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("action"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, actionProp)) { - obj["action"] = actionProp - } - resetContextsProp, err := expandDialogflowIntentResetContexts(d.Get("reset_contexts"), d, config) - if err != nil { - return err - 
} else if v, ok := d.GetOkExists("reset_contexts"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, resetContextsProp)) { - obj["resetContexts"] = resetContextsProp - } - defaultResponsePlatformsProp, err := expandDialogflowIntentDefaultResponsePlatforms(d.Get("default_response_platforms"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_response_platforms"); !isEmptyValue(resource_dialogflow_intent_reflect.ValueOf(v)) && (ok || !resource_dialogflow_intent_reflect.DeepEqual(v, defaultResponsePlatformsProp)) { - obj["defaultResponsePlatforms"] = defaultResponsePlatformsProp - } - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}{{name}}") - if err != nil { - return err - } - - resource_dialogflow_intent_log.Printf("[DEBUG] Updating Intent %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_intent_schema.TimeoutUpdate)) - - if err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error updating Intent %q: %s", d.Id(), err) - } else { - resource_dialogflow_intent_log.Printf("[DEBUG] Finished updating Intent %q: %#v", d.Id(), res) - } - - return resourceDialogflowIntentRead(d, meta) -} - -func resourceDialogflowIntentDelete(d *resource_dialogflow_intent_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dialogflow_intent_fmt.Errorf("Error fetching project for Intent: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DialogflowBasePath}}{{name}}") - if err != nil { - return err - } - - var obj 
map[string]interface{} - resource_dialogflow_intent_log.Printf("[DEBUG] Deleting Intent %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_dialogflow_intent_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Intent") - } - - resource_dialogflow_intent_log.Printf("[DEBUG] Finished deleting Intent %q: %#v", d.Id(), res) - return nil -} - -func resourceDialogflowIntentImport(d *resource_dialogflow_intent_schema.ResourceData, meta interface{}) ([]*resource_dialogflow_intent_schema.ResourceData, error) { - - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - stringParts := resource_dialogflow_intent_strings.Split(d.Get("name").(string), "/") - if len(stringParts) < 2 { - return nil, resource_dialogflow_intent_fmt.Errorf( - "Could not split project from name: %s", - d.Get("name"), - ) - } - - if err := d.Set("project", stringParts[1]); err != nil { - return nil, resource_dialogflow_intent_fmt.Errorf("Error setting project: %s", err) - } - return []*resource_dialogflow_intent_schema.ResourceData{d}, nil -} - -func flattenDialogflowIntentName(v interface{}, d *resource_dialogflow_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentDisplayName(v interface{}, d *resource_dialogflow_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentWebhookState(v interface{}, d *resource_dialogflow_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentPriority(v interface{}, d *resource_dialogflow_intent_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_dialogflow_intent_strconv.ParseInt(strVal, 10, 64); err == nil { 
- return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDialogflowIntentIsFallback(v interface{}, d *resource_dialogflow_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentMlDisabled(v interface{}, d *resource_dialogflow_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentInputContextNames(v interface{}, d *resource_dialogflow_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentEvents(v interface{}, d *resource_dialogflow_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentAction(v interface{}, d *resource_dialogflow_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentResetContexts(v interface{}, d *resource_dialogflow_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentDefaultResponsePlatforms(v interface{}, d *resource_dialogflow_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentRootFollowupIntentName(v interface{}, d *resource_dialogflow_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentParentFollowupIntentName(v interface{}, d *resource_dialogflow_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentFollowupIntentInfo(v interface{}, d *resource_dialogflow_intent_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "followup_intent_name": 
flattenDialogflowIntentFollowupIntentInfoFollowupIntentName(original["followupIntentName"], d, config), - "parent_followup_intent_name": flattenDialogflowIntentFollowupIntentInfoParentFollowupIntentName(original["parentFollowupIntentName"], d, config), - }) - } - return transformed -} - -func flattenDialogflowIntentFollowupIntentInfoFollowupIntentName(v interface{}, d *resource_dialogflow_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDialogflowIntentFollowupIntentInfoParentFollowupIntentName(v interface{}, d *resource_dialogflow_intent_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDialogflowIntentDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentWebhookState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentPriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentIsFallback(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentMlDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentInputContextNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentEvents(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentResetContexts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentDefaultResponsePlatforms(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandDialogflowIntentParentFollowupIntentName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceDNSManagedZone() *resource_dns_managed_zone_schema.Resource { - return &resource_dns_managed_zone_schema.Resource{ - Create: resourceDNSManagedZoneCreate, - Read: resourceDNSManagedZoneRead, - Update: resourceDNSManagedZoneUpdate, - Delete: resourceDNSManagedZoneDelete, - - Importer: &resource_dns_managed_zone_schema.ResourceImporter{ - State: resourceDNSManagedZoneImport, - }, - - Timeouts: &resource_dns_managed_zone_schema.ResourceTimeout{ - Create: resource_dns_managed_zone_schema.DefaultTimeout(4 * resource_dns_managed_zone_time.Minute), - Update: resource_dns_managed_zone_schema.DefaultTimeout(4 * resource_dns_managed_zone_time.Minute), - Delete: resource_dns_managed_zone_schema.DefaultTimeout(4 * resource_dns_managed_zone_time.Minute), - }, - - Schema: map[string]*resource_dns_managed_zone_schema.Schema{ - "dns_name": { - Type: resource_dns_managed_zone_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The DNS name of this managed zone, for instance "example.com.".`, - }, - "name": { - Type: resource_dns_managed_zone_schema.TypeString, - Required: true, - ForceNew: true, - Description: `User assigned name for this resource. -Must be unique within the project.`, - }, - "description": { - Type: resource_dns_managed_zone_schema.TypeString, - Optional: true, - Description: `A textual description field. 
Defaults to 'Managed by Terraform'.`, - Default: "Managed by Terraform", - }, - "dnssec_config": { - Type: resource_dns_managed_zone_schema.TypeList, - Optional: true, - Description: `DNSSEC configuration`, - MaxItems: 1, - Elem: &resource_dns_managed_zone_schema.Resource{ - Schema: map[string]*resource_dns_managed_zone_schema.Schema{ - "default_key_specs": { - Type: resource_dns_managed_zone_schema.TypeList, - Computed: true, - Optional: true, - Description: `Specifies parameters that will be used for generating initial DnsKeys -for this ManagedZone. If you provide a spec for keySigning or zoneSigning, -you must also provide one for the other. -default_key_specs can only be updated when the state is 'off'.`, - Elem: &resource_dns_managed_zone_schema.Resource{ - Schema: map[string]*resource_dns_managed_zone_schema.Schema{ - "algorithm": { - Type: resource_dns_managed_zone_schema.TypeString, - Optional: true, - ValidateFunc: resource_dns_managed_zone_validation.StringInSlice([]string{"ecdsap256sha256", "ecdsap384sha384", "rsasha1", "rsasha256", "rsasha512", ""}, false), - Description: `String mnemonic specifying the DNSSEC algorithm of this key Possible values: ["ecdsap256sha256", "ecdsap384sha384", "rsasha1", "rsasha256", "rsasha512"]`, - }, - "key_length": { - Type: resource_dns_managed_zone_schema.TypeInt, - Optional: true, - Description: `Length of the keys in bits`, - }, - "key_type": { - Type: resource_dns_managed_zone_schema.TypeString, - Optional: true, - ValidateFunc: resource_dns_managed_zone_validation.StringInSlice([]string{"keySigning", "zoneSigning", ""}, false), - Description: `Specifies whether this is a key signing key (KSK) or a zone -signing key (ZSK). Key signing keys have the Secure Entry -Point flag set and, when active, will only be used to sign -resource record sets of type DNSKEY. Zone signing keys do -not have the Secure Entry Point flag set and will be used -to sign all other types of resource record sets. 
Possible values: ["keySigning", "zoneSigning"]`, - }, - "kind": { - Type: resource_dns_managed_zone_schema.TypeString, - Optional: true, - Description: `Identifies what kind of resource this is`, - Default: "dns#dnsKeySpec", - }, - }, - }, - AtLeastOneOf: []string{"dnssec_config.0.kind", "dnssec_config.0.non_existence", "dnssec_config.0.state", "dnssec_config.0.default_key_specs"}, - }, - "kind": { - Type: resource_dns_managed_zone_schema.TypeString, - Optional: true, - Description: `Identifies what kind of resource this is`, - Default: "dns#managedZoneDnsSecConfig", - AtLeastOneOf: []string{"dnssec_config.0.kind", "dnssec_config.0.non_existence", "dnssec_config.0.state", "dnssec_config.0.default_key_specs"}, - }, - "non_existence": { - Type: resource_dns_managed_zone_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_dns_managed_zone_validation.StringInSlice([]string{"nsec", "nsec3", ""}, false), - Description: `Specifies the mechanism used to provide authenticated denial-of-existence responses. -non_existence can only be updated when the state is 'off'. Possible values: ["nsec", "nsec3"]`, - AtLeastOneOf: []string{"dnssec_config.0.kind", "dnssec_config.0.non_existence", "dnssec_config.0.state", "dnssec_config.0.default_key_specs"}, - }, - "state": { - Type: resource_dns_managed_zone_schema.TypeString, - Optional: true, - ValidateFunc: resource_dns_managed_zone_validation.StringInSlice([]string{"off", "on", "transfer", ""}, false), - Description: `Specifies whether DNSSEC is enabled, and what mode it is in Possible values: ["off", "on", "transfer"]`, - AtLeastOneOf: []string{"dnssec_config.0.kind", "dnssec_config.0.non_existence", "dnssec_config.0.state", "dnssec_config.0.default_key_specs"}, - }, - }, - }, - }, - "forwarding_config": { - Type: resource_dns_managed_zone_schema.TypeList, - Optional: true, - Description: `The presence for this field indicates that outbound forwarding is enabled -for this zone. 
The value of this field contains the set of destinations -to forward to.`, - MaxItems: 1, - Elem: &resource_dns_managed_zone_schema.Resource{ - Schema: map[string]*resource_dns_managed_zone_schema.Schema{ - "target_name_servers": { - Type: resource_dns_managed_zone_schema.TypeSet, - Required: true, - Description: `List of target name servers to forward to. Cloud DNS will -select the best available name server if more than -one target is given.`, - Elem: dnsManagedZoneForwardingConfigTargetNameServersSchema(), - Set: func(v interface{}) int { - raw := v.(map[string]interface{}) - if address, ok := raw["ipv4_address"]; ok { - hashcode(address.(string)) - } - var buf resource_dns_managed_zone_bytes.Buffer - resource_dns_managed_zone_schema.SerializeResourceForHash(&buf, raw, dnsManagedZoneForwardingConfigTargetNameServersSchema()) - return hashcode(buf.String()) - }, - }, - }, - }, - }, - "labels": { - Type: resource_dns_managed_zone_schema.TypeMap, - Optional: true, - Description: `A set of key/value label pairs to assign to this ManagedZone.`, - Elem: &resource_dns_managed_zone_schema.Schema{Type: resource_dns_managed_zone_schema.TypeString}, - }, - "peering_config": { - Type: resource_dns_managed_zone_schema.TypeList, - Optional: true, - Description: `The presence of this field indicates that DNS Peering is enabled for this -zone. 
The value of this field contains the network to peer with.`, - MaxItems: 1, - Elem: &resource_dns_managed_zone_schema.Resource{ - Schema: map[string]*resource_dns_managed_zone_schema.Schema{ - "target_network": { - Type: resource_dns_managed_zone_schema.TypeList, - Required: true, - Description: `The network with which to peer.`, - MaxItems: 1, - Elem: &resource_dns_managed_zone_schema.Resource{ - Schema: map[string]*resource_dns_managed_zone_schema.Schema{ - "network_url": { - Type: resource_dns_managed_zone_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The id or fully qualified URL of the VPC network to forward queries to. -This should be formatted like 'projects/{project}/global/networks/{network}' or -'https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}'`, - }, - }, - }, - }, - }, - }, - }, - "private_visibility_config": { - Type: resource_dns_managed_zone_schema.TypeList, - Optional: true, - Description: `For privately visible zones, the set of Virtual Private Cloud -resources that the zone is visible from.`, - MaxItems: 1, - Elem: &resource_dns_managed_zone_schema.Resource{ - Schema: map[string]*resource_dns_managed_zone_schema.Schema{ - "networks": { - Type: resource_dns_managed_zone_schema.TypeSet, - Required: true, - Description: `The list of VPC networks that can see this zone. Until the provider updates to use the Terraform 0.12 SDK in a future release, you -may experience issues with this resource while updating. If you've defined a 'networks' block and -add another 'networks' block while keeping the old block, Terraform will see an incorrect diff -and apply an incorrect update to the resource. 
If you encounter this issue, remove all 'networks' -blocks in an update and then apply another update adding all of them back simultaneously.`, - Elem: dnsManagedZonePrivateVisibilityConfigNetworksSchema(), - Set: func(v interface{}) int { - if v == nil { - return 0 - } - raw := v.(map[string]interface{}) - if url, ok := raw["network_url"]; ok { - return selfLinkNameHash(url) - } - var buf resource_dns_managed_zone_bytes.Buffer - resource_dns_managed_zone_schema.SerializeResourceForHash(&buf, raw, dnsManagedZonePrivateVisibilityConfigNetworksSchema()) - return hashcode(buf.String()) - }, - }, - }, - }, - }, - "visibility": { - Type: resource_dns_managed_zone_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_dns_managed_zone_validation.StringInSlice([]string{"private", "public", ""}, false), - DiffSuppressFunc: caseDiffSuppress, - Description: `The zone's visibility: public zones are exposed to the Internet, -while private zones are visible only to Virtual Private Cloud resources. 
Default value: "public" Possible values: ["private", "public"]`, - Default: "public", - }, - "name_servers": { - Type: resource_dns_managed_zone_schema.TypeList, - Computed: true, - Description: `Delegate your managed_zone to these virtual name servers; -defined by the server`, - Elem: &resource_dns_managed_zone_schema.Schema{ - Type: resource_dns_managed_zone_schema.TypeString, - }, - }, - "force_destroy": { - Type: resource_dns_managed_zone_schema.TypeBool, - Optional: true, - Default: false, - }, - "project": { - Type: resource_dns_managed_zone_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func dnsManagedZonePrivateVisibilityConfigNetworksSchema() *resource_dns_managed_zone_schema.Resource { - return &resource_dns_managed_zone_schema.Resource{ - Schema: map[string]*resource_dns_managed_zone_schema.Schema{ - "network_url": { - Type: resource_dns_managed_zone_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The id or fully qualified URL of the VPC network to bind to. -This should be formatted like 'projects/{project}/global/networks/{network}' or -'https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}'`, - }, - }, - } -} - -func dnsManagedZoneForwardingConfigTargetNameServersSchema() *resource_dns_managed_zone_schema.Resource { - return &resource_dns_managed_zone_schema.Resource{ - Schema: map[string]*resource_dns_managed_zone_schema.Schema{ - "ipv4_address": { - Type: resource_dns_managed_zone_schema.TypeString, - Required: true, - Description: `IPv4 address of a target name server.`, - }, - "forwarding_path": { - Type: resource_dns_managed_zone_schema.TypeString, - Optional: true, - ValidateFunc: resource_dns_managed_zone_validation.StringInSlice([]string{"default", "private", ""}, false), - Description: `Forwarding path for this TargetNameServer. 
If unset or 'default' Cloud DNS will make forwarding -decision based on address ranges, i.e. RFC1918 addresses go to the VPC, Non-RFC1918 addresses go -to the Internet. When set to 'private', Cloud DNS will always send queries through VPC for this target Possible values: ["default", "private"]`, - }, - }, - } -} - -func resourceDNSManagedZoneCreate(d *resource_dns_managed_zone_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandDNSManagedZoneDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_dns_managed_zone_reflect.ValueOf(descriptionProp)) && (ok || !resource_dns_managed_zone_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - dnsNameProp, err := expandDNSManagedZoneDnsName(d.Get("dns_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("dns_name"); !isEmptyValue(resource_dns_managed_zone_reflect.ValueOf(dnsNameProp)) && (ok || !resource_dns_managed_zone_reflect.DeepEqual(v, dnsNameProp)) { - obj["dnsName"] = dnsNameProp - } - dnssecConfigProp, err := expandDNSManagedZoneDnssecConfig(d.Get("dnssec_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("dnssec_config"); !isEmptyValue(resource_dns_managed_zone_reflect.ValueOf(dnssecConfigProp)) && (ok || !resource_dns_managed_zone_reflect.DeepEqual(v, dnssecConfigProp)) { - obj["dnssecConfig"] = dnssecConfigProp - } - nameProp, err := expandDNSManagedZoneName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_dns_managed_zone_reflect.ValueOf(nameProp)) && (ok || !resource_dns_managed_zone_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - 
labelsProp, err := expandDNSManagedZoneLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_dns_managed_zone_reflect.ValueOf(labelsProp)) && (ok || !resource_dns_managed_zone_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - visibilityProp, err := expandDNSManagedZoneVisibility(d.Get("visibility"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("visibility"); !isEmptyValue(resource_dns_managed_zone_reflect.ValueOf(visibilityProp)) && (ok || !resource_dns_managed_zone_reflect.DeepEqual(v, visibilityProp)) { - obj["visibility"] = visibilityProp - } - privateVisibilityConfigProp, err := expandDNSManagedZonePrivateVisibilityConfig(d.Get("private_visibility_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("private_visibility_config"); ok || !resource_dns_managed_zone_reflect.DeepEqual(v, privateVisibilityConfigProp) { - obj["privateVisibilityConfig"] = privateVisibilityConfigProp - } - forwardingConfigProp, err := expandDNSManagedZoneForwardingConfig(d.Get("forwarding_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("forwarding_config"); !isEmptyValue(resource_dns_managed_zone_reflect.ValueOf(forwardingConfigProp)) && (ok || !resource_dns_managed_zone_reflect.DeepEqual(v, forwardingConfigProp)) { - obj["forwardingConfig"] = forwardingConfigProp - } - peeringConfigProp, err := expandDNSManagedZonePeeringConfig(d.Get("peering_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peering_config"); !isEmptyValue(resource_dns_managed_zone_reflect.ValueOf(peeringConfigProp)) && (ok || !resource_dns_managed_zone_reflect.DeepEqual(v, peeringConfigProp)) { - obj["peeringConfig"] = peeringConfigProp - } - - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/managedZones") - if err != nil { - return err - } - - 
resource_dns_managed_zone_log.Printf("[DEBUG] Creating new ManagedZone: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error fetching project for ManagedZone: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_dns_managed_zone_schema.TimeoutCreate)) - if err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error creating ManagedZone: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/managedZones/{{name}}") - if err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_dns_managed_zone_log.Printf("[DEBUG] Finished creating ManagedZone %q: %#v", d.Id(), res) - - return resourceDNSManagedZoneRead(d, meta) -} - -func resourceDNSManagedZoneRead(d *resource_dns_managed_zone_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/managedZones/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error fetching project for ManagedZone: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_dns_managed_zone_fmt.Sprintf("DNSManagedZone %q", d.Id())) - } - - if _, ok := d.GetOkExists("force_destroy"); !ok { - if err := d.Set("force_destroy", false); err != nil { - return 
resource_dns_managed_zone_fmt.Errorf("Error setting force_destroy: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error reading ManagedZone: %s", err) - } - - if err := d.Set("description", flattenDNSManagedZoneDescription(res["description"], d, config)); err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error reading ManagedZone: %s", err) - } - if err := d.Set("dns_name", flattenDNSManagedZoneDnsName(res["dnsName"], d, config)); err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error reading ManagedZone: %s", err) - } - if err := d.Set("dnssec_config", flattenDNSManagedZoneDnssecConfig(res["dnssecConfig"], d, config)); err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error reading ManagedZone: %s", err) - } - if err := d.Set("name", flattenDNSManagedZoneName(res["name"], d, config)); err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error reading ManagedZone: %s", err) - } - if err := d.Set("name_servers", flattenDNSManagedZoneNameServers(res["nameServers"], d, config)); err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error reading ManagedZone: %s", err) - } - if err := d.Set("labels", flattenDNSManagedZoneLabels(res["labels"], d, config)); err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error reading ManagedZone: %s", err) - } - if err := d.Set("visibility", flattenDNSManagedZoneVisibility(res["visibility"], d, config)); err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error reading ManagedZone: %s", err) - } - if err := d.Set("private_visibility_config", flattenDNSManagedZonePrivateVisibilityConfig(res["privateVisibilityConfig"], d, config)); err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error reading ManagedZone: %s", err) - } - if err := d.Set("forwarding_config", flattenDNSManagedZoneForwardingConfig(res["forwardingConfig"], d, config)); err != nil { - return 
resource_dns_managed_zone_fmt.Errorf("Error reading ManagedZone: %s", err) - } - if err := d.Set("peering_config", flattenDNSManagedZonePeeringConfig(res["peeringConfig"], d, config)); err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error reading ManagedZone: %s", err) - } - - return nil -} - -func resourceDNSManagedZoneUpdate(d *resource_dns_managed_zone_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error fetching project for ManagedZone: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandDNSManagedZoneDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_dns_managed_zone_reflect.ValueOf(v)) && (ok || !resource_dns_managed_zone_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - dnssecConfigProp, err := expandDNSManagedZoneDnssecConfig(d.Get("dnssec_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("dnssec_config"); !isEmptyValue(resource_dns_managed_zone_reflect.ValueOf(v)) && (ok || !resource_dns_managed_zone_reflect.DeepEqual(v, dnssecConfigProp)) { - obj["dnssecConfig"] = dnssecConfigProp - } - labelsProp, err := expandDNSManagedZoneLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_dns_managed_zone_reflect.ValueOf(v)) && (ok || !resource_dns_managed_zone_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - privateVisibilityConfigProp, err := expandDNSManagedZonePrivateVisibilityConfig(d.Get("private_visibility_config"), d, config) - if err != nil { - return err - } 
else if v, ok := d.GetOkExists("private_visibility_config"); ok || !resource_dns_managed_zone_reflect.DeepEqual(v, privateVisibilityConfigProp) { - obj["privateVisibilityConfig"] = privateVisibilityConfigProp - } - forwardingConfigProp, err := expandDNSManagedZoneForwardingConfig(d.Get("forwarding_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("forwarding_config"); !isEmptyValue(resource_dns_managed_zone_reflect.ValueOf(v)) && (ok || !resource_dns_managed_zone_reflect.DeepEqual(v, forwardingConfigProp)) { - obj["forwardingConfig"] = forwardingConfigProp - } - peeringConfigProp, err := expandDNSManagedZonePeeringConfig(d.Get("peering_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("peering_config"); !isEmptyValue(resource_dns_managed_zone_reflect.ValueOf(v)) && (ok || !resource_dns_managed_zone_reflect.DeepEqual(v, peeringConfigProp)) { - obj["peeringConfig"] = peeringConfigProp - } - - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/managedZones/{{name}}") - if err != nil { - return err - } - - resource_dns_managed_zone_log.Printf("[DEBUG] Updating ManagedZone %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_dns_managed_zone_schema.TimeoutUpdate)) - - if err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error updating ManagedZone %q: %s", d.Id(), err) - } else { - resource_dns_managed_zone_log.Printf("[DEBUG] Finished updating ManagedZone %q: %#v", d.Id(), res) - } - - return resourceDNSManagedZoneRead(d, meta) -} - -func resourceDNSManagedZoneDelete(d *resource_dns_managed_zone_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - 
project, err := getProject(d, config) - if err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error fetching project for ManagedZone: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/managedZones/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - if d.Get("force_destroy").(bool) { - zone := d.Get("name").(string) - token := "" - for paginate := true; paginate; { - var resp *resource_dns_managed_zone_dns.ResourceRecordSetsListResponse - if token == "" { - resp, err = config.NewDnsClient(userAgent).ResourceRecordSets.List(project, zone).Do() - if err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error reading ResourceRecordSets: %s", err) - } - } else { - resp, err = config.NewDnsClient(userAgent).ResourceRecordSets.List(project, zone).PageToken(token).Do() - if err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error reading ResourceRecordSets: %s", err) - } - } - - for _, rr := range resp.Rrsets { - - chg := &resource_dns_managed_zone_dns.Change{ - Deletions: []*resource_dns_managed_zone_dns.ResourceRecordSet{ - { - Name: rr.Name, - Type: rr.Type, - Ttl: rr.Ttl, - Rrdatas: rr.Rrdatas, - }, - }, - } - - if rr.Type == "NS" { - mz, err := config.NewDnsClient(userAgent).ManagedZones.Get(project, zone).Do() - if err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error retrieving managed zone %q from %q: %s", zone, project, err) - } - domain := mz.DnsName - - if domain == rr.Name { - resource_dns_managed_zone_log.Println("[DEBUG] NS records can't be deleted due to API restrictions, so they're being left in place. 
See https://www.terraform.io/docs/providers/google/r/dns_record_set.html for more information.") - continue - } - } - - if rr.Type == "SOA" { - resource_dns_managed_zone_log.Println("[DEBUG] SOA records can't be deleted due to API restrictions, so they're being left in place.") - continue - } - - resource_dns_managed_zone_log.Printf("[DEBUG] DNS Record delete request via MZ: %#v", chg) - chg, err = config.NewDnsClient(userAgent).Changes.Create(project, zone, chg).Do() - if err != nil { - return resource_dns_managed_zone_fmt.Errorf("Unable to delete ResourceRecordSets: %s", err) - } - - w := &DnsChangeWaiter{ - Service: config.NewDnsClient(userAgent), - Change: chg, - Project: project, - ManagedZone: zone, - } - _, err = w.Conf().WaitForState() - if err != nil { - return resource_dns_managed_zone_fmt.Errorf("Error waiting for Google DNS change: %s", err) - } - } - - token = resp.NextPageToken - paginate = token != "" - } - } - resource_dns_managed_zone_log.Printf("[DEBUG] Deleting ManagedZone %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_dns_managed_zone_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ManagedZone") - } - - resource_dns_managed_zone_log.Printf("[DEBUG] Finished deleting ManagedZone %q: %#v", d.Id(), res) - return nil -} - -func resourceDNSManagedZoneImport(d *resource_dns_managed_zone_schema.ResourceData, meta interface{}) ([]*resource_dns_managed_zone_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/managedZones/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/managedZones/{{name}}") - if err != nil { - return nil, resource_dns_managed_zone_fmt.Errorf("Error constructing id: 
%s", err) - } - d.SetId(id) - - if err := d.Set("force_destroy", false); err != nil { - return nil, resource_dns_managed_zone_fmt.Errorf("Error setting force_destroy: %s", err) - } - - return []*resource_dns_managed_zone_schema.ResourceData{d}, nil -} - -func flattenDNSManagedZoneDescription(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSManagedZoneDnsName(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSManagedZoneDnssecConfig(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["kind"] = - flattenDNSManagedZoneDnssecConfigKind(original["kind"], d, config) - transformed["non_existence"] = - flattenDNSManagedZoneDnssecConfigNonExistence(original["nonExistence"], d, config) - transformed["state"] = - flattenDNSManagedZoneDnssecConfigState(original["state"], d, config) - transformed["default_key_specs"] = - flattenDNSManagedZoneDnssecConfigDefaultKeySpecs(original["defaultKeySpecs"], d, config) - return []interface{}{transformed} -} - -func flattenDNSManagedZoneDnssecConfigKind(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSManagedZoneDnssecConfigNonExistence(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSManagedZoneDnssecConfigState(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSManagedZoneDnssecConfigDefaultKeySpecs(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := 
v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "algorithm": flattenDNSManagedZoneDnssecConfigDefaultKeySpecsAlgorithm(original["algorithm"], d, config), - "key_length": flattenDNSManagedZoneDnssecConfigDefaultKeySpecsKeyLength(original["keyLength"], d, config), - "key_type": flattenDNSManagedZoneDnssecConfigDefaultKeySpecsKeyType(original["keyType"], d, config), - "kind": flattenDNSManagedZoneDnssecConfigDefaultKeySpecsKind(original["kind"], d, config), - }) - } - return transformed -} - -func flattenDNSManagedZoneDnssecConfigDefaultKeySpecsAlgorithm(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSManagedZoneDnssecConfigDefaultKeySpecsKeyLength(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_dns_managed_zone_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenDNSManagedZoneDnssecConfigDefaultKeySpecsKeyType(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSManagedZoneDnssecConfigDefaultKeySpecsKind(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSManagedZoneName(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSManagedZoneNameServers(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSManagedZoneLabels(v interface{}, d 
*resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSManagedZoneVisibility(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(resource_dns_managed_zone_reflect.ValueOf(v)) { - return "public" - } - - return v -} - -func flattenDNSManagedZonePrivateVisibilityConfig(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["networks"] = - flattenDNSManagedZonePrivateVisibilityConfigNetworks(original["networks"], d, config) - return []interface{}{transformed} -} - -func flattenDNSManagedZonePrivateVisibilityConfigNetworks(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_dns_managed_zone_schema.NewSet(func(v interface{}) int { - if v == nil { - return 0 - } - raw := v.(map[string]interface{}) - if url, ok := raw["network_url"]; ok { - return selfLinkNameHash(url) - } - var buf resource_dns_managed_zone_bytes.Buffer - resource_dns_managed_zone_schema.SerializeResourceForHash(&buf, raw, dnsManagedZonePrivateVisibilityConfigNetworksSchema()) - return hashcode(buf.String()) - }, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "network_url": flattenDNSManagedZonePrivateVisibilityConfigNetworksNetworkUrl(original["networkUrl"], d, config), - }) - } - return transformed -} - -func flattenDNSManagedZonePrivateVisibilityConfigNetworksNetworkUrl(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenDNSManagedZoneForwardingConfig(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["target_name_servers"] = - flattenDNSManagedZoneForwardingConfigTargetNameServers(original["targetNameServers"], d, config) - return []interface{}{transformed} -} - -func flattenDNSManagedZoneForwardingConfigTargetNameServers(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_dns_managed_zone_schema.NewSet(func(v interface{}) int { - raw := v.(map[string]interface{}) - if address, ok := raw["ipv4_address"]; ok { - hashcode(address.(string)) - } - var buf resource_dns_managed_zone_bytes.Buffer - resource_dns_managed_zone_schema.SerializeResourceForHash(&buf, raw, dnsManagedZoneForwardingConfigTargetNameServersSchema()) - return hashcode(buf.String()) - }, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "ipv4_address": flattenDNSManagedZoneForwardingConfigTargetNameServersIpv4Address(original["ipv4Address"], d, config), - "forwarding_path": flattenDNSManagedZoneForwardingConfigTargetNameServersForwardingPath(original["forwardingPath"], d, config), - }) - } - return transformed -} - -func flattenDNSManagedZoneForwardingConfigTargetNameServersIpv4Address(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSManagedZoneForwardingConfigTargetNameServersForwardingPath(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSManagedZonePeeringConfig(v interface{}, d 
*resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["target_network"] = - flattenDNSManagedZonePeeringConfigTargetNetwork(original["targetNetwork"], d, config) - return []interface{}{transformed} -} - -func flattenDNSManagedZonePeeringConfigTargetNetwork(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["network_url"] = - flattenDNSManagedZonePeeringConfigTargetNetworkNetworkUrl(original["networkUrl"], d, config) - return []interface{}{transformed} -} - -func flattenDNSManagedZonePeeringConfigTargetNetworkNetworkUrl(v interface{}, d *resource_dns_managed_zone_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDNSManagedZoneDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSManagedZoneDnsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSManagedZoneDnssecConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKind, err := expandDNSManagedZoneDnssecConfigKind(original["kind"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_managed_zone_reflect.ValueOf(transformedKind); val.IsValid() && !isEmptyValue(val) { - transformed["kind"] = transformedKind - } - - transformedNonExistence, err := 
expandDNSManagedZoneDnssecConfigNonExistence(original["non_existence"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_managed_zone_reflect.ValueOf(transformedNonExistence); val.IsValid() && !isEmptyValue(val) { - transformed["nonExistence"] = transformedNonExistence - } - - transformedState, err := expandDNSManagedZoneDnssecConfigState(original["state"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_managed_zone_reflect.ValueOf(transformedState); val.IsValid() && !isEmptyValue(val) { - transformed["state"] = transformedState - } - - transformedDefaultKeySpecs, err := expandDNSManagedZoneDnssecConfigDefaultKeySpecs(original["default_key_specs"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_managed_zone_reflect.ValueOf(transformedDefaultKeySpecs); val.IsValid() && !isEmptyValue(val) { - transformed["defaultKeySpecs"] = transformedDefaultKeySpecs - } - - return transformed, nil -} - -func expandDNSManagedZoneDnssecConfigKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSManagedZoneDnssecConfigNonExistence(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSManagedZoneDnssecConfigState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSManagedZoneDnssecConfigDefaultKeySpecs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAlgorithm, err := expandDNSManagedZoneDnssecConfigDefaultKeySpecsAlgorithm(original["algorithm"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_dns_managed_zone_reflect.ValueOf(transformedAlgorithm); val.IsValid() && !isEmptyValue(val) { - transformed["algorithm"] = transformedAlgorithm - } - - transformedKeyLength, err := expandDNSManagedZoneDnssecConfigDefaultKeySpecsKeyLength(original["key_length"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_managed_zone_reflect.ValueOf(transformedKeyLength); val.IsValid() && !isEmptyValue(val) { - transformed["keyLength"] = transformedKeyLength - } - - transformedKeyType, err := expandDNSManagedZoneDnssecConfigDefaultKeySpecsKeyType(original["key_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_managed_zone_reflect.ValueOf(transformedKeyType); val.IsValid() && !isEmptyValue(val) { - transformed["keyType"] = transformedKeyType - } - - transformedKind, err := expandDNSManagedZoneDnssecConfigDefaultKeySpecsKind(original["kind"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_managed_zone_reflect.ValueOf(transformedKind); val.IsValid() && !isEmptyValue(val) { - transformed["kind"] = transformedKind - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDNSManagedZoneDnssecConfigDefaultKeySpecsAlgorithm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSManagedZoneDnssecConfigDefaultKeySpecsKeyLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSManagedZoneDnssecConfigDefaultKeySpecsKeyType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSManagedZoneDnssecConfigDefaultKeySpecsKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSManagedZoneName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSManagedZoneLabels(v 
interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandDNSManagedZoneVisibility(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSManagedZonePrivateVisibilityConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - - transformed := make(map[string]interface{}) - emptyNetwork := make([]interface{}, 0) - transformed["networks"] = emptyNetwork - return transformed, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNetworks, err := expandDNSManagedZonePrivateVisibilityConfigNetworks(original["networks"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_managed_zone_reflect.ValueOf(transformedNetworks); val.IsValid() && !isEmptyValue(val) { - transformed["networks"] = transformedNetworks - } - - return transformed, nil -} - -func expandDNSManagedZonePrivateVisibilityConfigNetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_dns_managed_zone_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNetworkUrl, err := expandDNSManagedZonePrivateVisibilityConfigNetworksNetworkUrl(original["network_url"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_managed_zone_reflect.ValueOf(transformedNetworkUrl); val.IsValid() && !isEmptyValue(val) { - transformed["networkUrl"] = transformedNetworkUrl - } - - req = append(req, transformed) 
- } - return req, nil -} - -func expandDNSManagedZonePrivateVisibilityConfigNetworksNetworkUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil || v.(string) == "" { - return "", nil - } else if resource_dns_managed_zone_strings.HasPrefix(v.(string), "https://") { - return v, nil - } - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return ConvertSelfLinkToV1(url), nil -} - -func expandDNSManagedZoneForwardingConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTargetNameServers, err := expandDNSManagedZoneForwardingConfigTargetNameServers(original["target_name_servers"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_managed_zone_reflect.ValueOf(transformedTargetNameServers); val.IsValid() && !isEmptyValue(val) { - transformed["targetNameServers"] = transformedTargetNameServers - } - - return transformed, nil -} - -func expandDNSManagedZoneForwardingConfigTargetNameServers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_dns_managed_zone_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIpv4Address, err := expandDNSManagedZoneForwardingConfigTargetNameServersIpv4Address(original["ipv4_address"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_managed_zone_reflect.ValueOf(transformedIpv4Address); val.IsValid() && !isEmptyValue(val) { - transformed["ipv4Address"] = transformedIpv4Address - } - - transformedForwardingPath, err := 
expandDNSManagedZoneForwardingConfigTargetNameServersForwardingPath(original["forwarding_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_managed_zone_reflect.ValueOf(transformedForwardingPath); val.IsValid() && !isEmptyValue(val) { - transformed["forwardingPath"] = transformedForwardingPath - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDNSManagedZoneForwardingConfigTargetNameServersIpv4Address(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSManagedZoneForwardingConfigTargetNameServersForwardingPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSManagedZonePeeringConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTargetNetwork, err := expandDNSManagedZonePeeringConfigTargetNetwork(original["target_network"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_managed_zone_reflect.ValueOf(transformedTargetNetwork); val.IsValid() && !isEmptyValue(val) { - transformed["targetNetwork"] = transformedTargetNetwork - } - - return transformed, nil -} - -func expandDNSManagedZonePeeringConfigTargetNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNetworkUrl, err := expandDNSManagedZonePeeringConfigTargetNetworkNetworkUrl(original["network_url"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_managed_zone_reflect.ValueOf(transformedNetworkUrl); 
val.IsValid() && !isEmptyValue(val) { - transformed["networkUrl"] = transformedNetworkUrl - } - - return transformed, nil -} - -func expandDNSManagedZonePeeringConfigTargetNetworkNetworkUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil || v.(string) == "" { - return "", nil - } else if resource_dns_managed_zone_strings.HasPrefix(v.(string), "https://") { - return v, nil - } - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return ConvertSelfLinkToV1(url), nil -} - -func resourceDNSPolicy() *resource_dns_policy_schema.Resource { - return &resource_dns_policy_schema.Resource{ - Create: resourceDNSPolicyCreate, - Read: resourceDNSPolicyRead, - Update: resourceDNSPolicyUpdate, - Delete: resourceDNSPolicyDelete, - - Importer: &resource_dns_policy_schema.ResourceImporter{ - State: resourceDNSPolicyImport, - }, - - Timeouts: &resource_dns_policy_schema.ResourceTimeout{ - Create: resource_dns_policy_schema.DefaultTimeout(4 * resource_dns_policy_time.Minute), - Update: resource_dns_policy_schema.DefaultTimeout(4 * resource_dns_policy_time.Minute), - Delete: resource_dns_policy_schema.DefaultTimeout(4 * resource_dns_policy_time.Minute), - }, - - Schema: map[string]*resource_dns_policy_schema.Schema{ - "name": { - Type: resource_dns_policy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `User assigned name for this policy.`, - }, - "alternative_name_server_config": { - Type: resource_dns_policy_schema.TypeList, - Optional: true, - Description: `Sets an alternative name server for the associated networks. -When specified, all DNS queries are forwarded to a name server that you choose. 
-Names such as .internal are not available when an alternative name server is specified.`, - MaxItems: 1, - Elem: &resource_dns_policy_schema.Resource{ - Schema: map[string]*resource_dns_policy_schema.Schema{ - "target_name_servers": { - Type: resource_dns_policy_schema.TypeSet, - Required: true, - Description: `Sets an alternative name server for the associated networks. When specified, -all DNS queries are forwarded to a name server that you choose. Names such as .internal -are not available when an alternative name server is specified.`, - Elem: dnsPolicyAlternativeNameServerConfigTargetNameServersSchema(), - Set: func(v interface{}) int { - raw := v.(map[string]interface{}) - if address, ok := raw["ipv4_address"]; ok { - hashcode(address.(string)) - } - var buf resource_dns_policy_bytes.Buffer - resource_dns_policy_schema.SerializeResourceForHash(&buf, raw, dnsPolicyAlternativeNameServerConfigTargetNameServersSchema()) - return hashcode(buf.String()) - }, - }, - }, - }, - }, - "description": { - Type: resource_dns_policy_schema.TypeString, - Optional: true, - Description: `A textual description field. Defaults to 'Managed by Terraform'.`, - Default: "Managed by Terraform", - }, - "enable_inbound_forwarding": { - Type: resource_dns_policy_schema.TypeBool, - Optional: true, - Description: `Allows networks bound to this policy to receive DNS queries sent -by VMs or applications over VPN connections. When enabled, a -virtual IP address will be allocated from each of the sub-networks -that are bound to this policy.`, - }, - "enable_logging": { - Type: resource_dns_policy_schema.TypeBool, - Optional: true, - Description: `Controls whether logging is enabled for the networks bound to this policy. 
-Defaults to no logging if not set.`, - }, - "networks": { - Type: resource_dns_policy_schema.TypeSet, - Optional: true, - Description: `List of network names specifying networks to which this policy is applied.`, - Elem: dnsPolicyNetworksSchema(), - Set: func(v interface{}) int { - raw := v.(map[string]interface{}) - if url, ok := raw["network_url"]; ok { - return selfLinkNameHash(url) - } - var buf resource_dns_policy_bytes.Buffer - resource_dns_policy_schema.SerializeResourceForHash(&buf, raw, dnsPolicyNetworksSchema()) - return hashcode(buf.String()) - }, - }, - "project": { - Type: resource_dns_policy_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func dnsPolicyAlternativeNameServerConfigTargetNameServersSchema() *resource_dns_policy_schema.Resource { - return &resource_dns_policy_schema.Resource{ - Schema: map[string]*resource_dns_policy_schema.Schema{ - "ipv4_address": { - Type: resource_dns_policy_schema.TypeString, - Required: true, - Description: `IPv4 address to forward to.`, - }, - "forwarding_path": { - Type: resource_dns_policy_schema.TypeString, - Optional: true, - ValidateFunc: resource_dns_policy_validation.StringInSlice([]string{"default", "private", ""}, false), - Description: `Forwarding path for this TargetNameServer. If unset or 'default' Cloud DNS will make forwarding -decision based on address ranges, i.e. RFC1918 addresses go to the VPC, Non-RFC1918 addresses go -to the Internet. 
When set to 'private', Cloud DNS will always send queries through VPC for this target Possible values: ["default", "private"]`, - }, - }, - } -} - -func dnsPolicyNetworksSchema() *resource_dns_policy_schema.Resource { - return &resource_dns_policy_schema.Resource{ - Schema: map[string]*resource_dns_policy_schema.Schema{ - "network_url": { - Type: resource_dns_policy_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The id or fully qualified URL of the VPC network to forward queries to. -This should be formatted like 'projects/{project}/global/networks/{network}' or -'https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}'`, - }, - }, - } -} - -func resourceDNSPolicyCreate(d *resource_dns_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - alternativeNameServerConfigProp, err := expandDNSPolicyAlternativeNameServerConfig(d.Get("alternative_name_server_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("alternative_name_server_config"); !isEmptyValue(resource_dns_policy_reflect.ValueOf(alternativeNameServerConfigProp)) && (ok || !resource_dns_policy_reflect.DeepEqual(v, alternativeNameServerConfigProp)) { - obj["alternativeNameServerConfig"] = alternativeNameServerConfigProp - } - descriptionProp, err := expandDNSPolicyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_dns_policy_reflect.ValueOf(descriptionProp)) && (ok || !resource_dns_policy_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - enableInboundForwardingProp, err := expandDNSPolicyEnableInboundForwarding(d.Get("enable_inbound_forwarding"), d, config) - if err != nil { - return err - } else 
if v, ok := d.GetOkExists("enable_inbound_forwarding"); ok || !resource_dns_policy_reflect.DeepEqual(v, enableInboundForwardingProp) { - obj["enableInboundForwarding"] = enableInboundForwardingProp - } - enableLoggingProp, err := expandDNSPolicyEnableLogging(d.Get("enable_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_logging"); ok || !resource_dns_policy_reflect.DeepEqual(v, enableLoggingProp) { - obj["enableLogging"] = enableLoggingProp - } - nameProp, err := expandDNSPolicyName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_dns_policy_reflect.ValueOf(nameProp)) && (ok || !resource_dns_policy_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - networksProp, err := expandDNSPolicyNetworks(d.Get("networks"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("networks"); !isEmptyValue(resource_dns_policy_reflect.ValueOf(networksProp)) && (ok || !resource_dns_policy_reflect.DeepEqual(v, networksProp)) { - obj["networks"] = networksProp - } - - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/policies") - if err != nil { - return err - } - - resource_dns_policy_log.Printf("[DEBUG] Creating new Policy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dns_policy_fmt.Errorf("Error fetching project for Policy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_dns_policy_schema.TimeoutCreate)) - if err != nil { - return resource_dns_policy_fmt.Errorf("Error creating Policy: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/policies/{{name}}") - if err != nil { - return resource_dns_policy_fmt.Errorf("Error 
constructing id: %s", err) - } - d.SetId(id) - - resource_dns_policy_log.Printf("[DEBUG] Finished creating Policy %q: %#v", d.Id(), res) - - return resourceDNSPolicyRead(d, meta) -} - -func resourceDNSPolicyRead(d *resource_dns_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/policies/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dns_policy_fmt.Errorf("Error fetching project for Policy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_dns_policy_fmt.Sprintf("DNSPolicy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_dns_policy_fmt.Errorf("Error reading Policy: %s", err) - } - - if err := d.Set("alternative_name_server_config", flattenDNSPolicyAlternativeNameServerConfig(res["alternativeNameServerConfig"], d, config)); err != nil { - return resource_dns_policy_fmt.Errorf("Error reading Policy: %s", err) - } - if err := d.Set("description", flattenDNSPolicyDescription(res["description"], d, config)); err != nil { - return resource_dns_policy_fmt.Errorf("Error reading Policy: %s", err) - } - if err := d.Set("enable_inbound_forwarding", flattenDNSPolicyEnableInboundForwarding(res["enableInboundForwarding"], d, config)); err != nil { - return resource_dns_policy_fmt.Errorf("Error reading Policy: %s", err) - } - if err := d.Set("enable_logging", flattenDNSPolicyEnableLogging(res["enableLogging"], d, config)); err != nil { - return resource_dns_policy_fmt.Errorf("Error reading Policy: %s", err) - } - if err 
:= d.Set("name", flattenDNSPolicyName(res["name"], d, config)); err != nil { - return resource_dns_policy_fmt.Errorf("Error reading Policy: %s", err) - } - if err := d.Set("networks", flattenDNSPolicyNetworks(res["networks"], d, config)); err != nil { - return resource_dns_policy_fmt.Errorf("Error reading Policy: %s", err) - } - - return nil -} - -func resourceDNSPolicyUpdate(d *resource_dns_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dns_policy_fmt.Errorf("Error fetching project for Policy: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("alternative_name_server_config") || d.HasChange("description") || d.HasChange("enable_inbound_forwarding") || d.HasChange("enable_logging") || d.HasChange("networks") { - obj := make(map[string]interface{}) - - alternativeNameServerConfigProp, err := expandDNSPolicyAlternativeNameServerConfig(d.Get("alternative_name_server_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("alternative_name_server_config"); !isEmptyValue(resource_dns_policy_reflect.ValueOf(v)) && (ok || !resource_dns_policy_reflect.DeepEqual(v, alternativeNameServerConfigProp)) { - obj["alternativeNameServerConfig"] = alternativeNameServerConfigProp - } - descriptionProp, err := expandDNSPolicyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_dns_policy_reflect.ValueOf(v)) && (ok || !resource_dns_policy_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - enableInboundForwardingProp, err := expandDNSPolicyEnableInboundForwarding(d.Get("enable_inbound_forwarding"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("enable_inbound_forwarding"); ok || !resource_dns_policy_reflect.DeepEqual(v, enableInboundForwardingProp) { - obj["enableInboundForwarding"] = enableInboundForwardingProp - } - enableLoggingProp, err := expandDNSPolicyEnableLogging(d.Get("enable_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_logging"); ok || !resource_dns_policy_reflect.DeepEqual(v, enableLoggingProp) { - obj["enableLogging"] = enableLoggingProp - } - networksProp, err := expandDNSPolicyNetworks(d.Get("networks"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("networks"); !isEmptyValue(resource_dns_policy_reflect.ValueOf(v)) && (ok || !resource_dns_policy_reflect.DeepEqual(v, networksProp)) { - obj["networks"] = networksProp - } - - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/policies/{{name}}") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_dns_policy_schema.TimeoutUpdate)) - if err != nil { - return resource_dns_policy_fmt.Errorf("Error updating Policy %q: %s", d.Id(), err) - } else { - resource_dns_policy_log.Printf("[DEBUG] Finished updating Policy %q: %#v", d.Id(), res) - } - - } - - d.Partial(false) - - return resourceDNSPolicyRead(d, meta) -} - -func resourceDNSPolicyDelete(d *resource_dns_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_dns_policy_fmt.Errorf("Error fetching project for Policy: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/policies/{{name}}") - if err != nil { - return 
err - } - - var obj map[string]interface{} - - if d.Get("networks.#").(int) > 0 { - patched := make(map[string]interface{}) - patched["networks"] = nil - - url, err := replaceVars(d, config, "{{DNSBasePath}}projects/{{project}}/policies/{{name}}") - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(config, "PATCH", project, url, userAgent, patched, d.Timeout(resource_dns_policy_schema.TimeoutUpdate)) - if err != nil { - return resource_dns_policy_fmt.Errorf("Error updating Policy %q: %s", d.Id(), err) - } - } - resource_dns_policy_log.Printf("[DEBUG] Deleting Policy %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_dns_policy_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Policy") - } - - resource_dns_policy_log.Printf("[DEBUG] Finished deleting Policy %q: %#v", d.Id(), res) - return nil -} - -func resourceDNSPolicyImport(d *resource_dns_policy_schema.ResourceData, meta interface{}) ([]*resource_dns_policy_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/policies/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/policies/{{name}}") - if err != nil { - return nil, resource_dns_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_dns_policy_schema.ResourceData{d}, nil -} - -func flattenDNSPolicyAlternativeNameServerConfig(v interface{}, d *resource_dns_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["target_name_servers"] = - 
flattenDNSPolicyAlternativeNameServerConfigTargetNameServers(original["targetNameServers"], d, config) - return []interface{}{transformed} -} - -func flattenDNSPolicyAlternativeNameServerConfigTargetNameServers(v interface{}, d *resource_dns_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_dns_policy_schema.NewSet(func(v interface{}) int { - raw := v.(map[string]interface{}) - if address, ok := raw["ipv4_address"]; ok { - hashcode(address.(string)) - } - var buf resource_dns_policy_bytes.Buffer - resource_dns_policy_schema.SerializeResourceForHash(&buf, raw, dnsPolicyAlternativeNameServerConfigTargetNameServersSchema()) - return hashcode(buf.String()) - }, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "ipv4_address": flattenDNSPolicyAlternativeNameServerConfigTargetNameServersIpv4Address(original["ipv4Address"], d, config), - "forwarding_path": flattenDNSPolicyAlternativeNameServerConfigTargetNameServersForwardingPath(original["forwardingPath"], d, config), - }) - } - return transformed -} - -func flattenDNSPolicyAlternativeNameServerConfigTargetNameServersIpv4Address(v interface{}, d *resource_dns_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSPolicyAlternativeNameServerConfigTargetNameServersForwardingPath(v interface{}, d *resource_dns_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSPolicyDescription(v interface{}, d *resource_dns_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSPolicyEnableInboundForwarding(v interface{}, d *resource_dns_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSPolicyEnableLogging(v interface{}, d *resource_dns_policy_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenDNSPolicyName(v interface{}, d *resource_dns_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenDNSPolicyNetworks(v interface{}, d *resource_dns_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_dns_policy_schema.NewSet(func(v interface{}) int { - raw := v.(map[string]interface{}) - if url, ok := raw["network_url"]; ok { - return selfLinkNameHash(url) - } - var buf resource_dns_policy_bytes.Buffer - resource_dns_policy_schema.SerializeResourceForHash(&buf, raw, dnsPolicyNetworksSchema()) - return hashcode(buf.String()) - }, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "network_url": flattenDNSPolicyNetworksNetworkUrl(original["networkUrl"], d, config), - }) - } - return transformed -} - -func flattenDNSPolicyNetworksNetworkUrl(v interface{}, d *resource_dns_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandDNSPolicyAlternativeNameServerConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTargetNameServers, err := expandDNSPolicyAlternativeNameServerConfigTargetNameServers(original["target_name_servers"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_policy_reflect.ValueOf(transformedTargetNameServers); val.IsValid() && !isEmptyValue(val) { - transformed["targetNameServers"] = transformedTargetNameServers - } - - return transformed, nil -} - -func expandDNSPolicyAlternativeNameServerConfigTargetNameServers(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - v = v.(*resource_dns_policy_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIpv4Address, err := expandDNSPolicyAlternativeNameServerConfigTargetNameServersIpv4Address(original["ipv4_address"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_policy_reflect.ValueOf(transformedIpv4Address); val.IsValid() && !isEmptyValue(val) { - transformed["ipv4Address"] = transformedIpv4Address - } - - transformedForwardingPath, err := expandDNSPolicyAlternativeNameServerConfigTargetNameServersForwardingPath(original["forwarding_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_policy_reflect.ValueOf(transformedForwardingPath); val.IsValid() && !isEmptyValue(val) { - transformed["forwardingPath"] = transformedForwardingPath - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDNSPolicyAlternativeNameServerConfigTargetNameServersIpv4Address(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSPolicyAlternativeNameServerConfigTargetNameServersForwardingPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSPolicyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSPolicyEnableInboundForwarding(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSPolicyEnableLogging(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSPolicyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandDNSPolicyNetworks(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_dns_policy_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNetworkUrl, err := expandDNSPolicyNetworksNetworkUrl(original["network_url"], d, config) - if err != nil { - return nil, err - } else if val := resource_dns_policy_reflect.ValueOf(transformedNetworkUrl); val.IsValid() && !isEmptyValue(val) { - transformed["networkUrl"] = transformedNetworkUrl - } - - req = append(req, transformed) - } - return req, nil -} - -func expandDNSPolicyNetworksNetworkUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil || v.(string) == "" { - return "", nil - } else if resource_dns_policy_strings.HasPrefix(v.(string), "https://") { - return v, nil - } - url, err := replaceVars(d, config, "{{ComputeBasePath}}"+v.(string)) - if err != nil { - return "", err - } - return ConvertSelfLinkToV1(url), nil -} - -func rrdatasDnsDiffSuppress(k, old, new string, d *resource_dns_record_set_schema.ResourceData) bool { - o, n := d.GetChange("rrdatas") - if o == nil || n == nil { - return false - } - - oList := convertStringArr(o.([]interface{})) - nList := convertStringArr(n.([]interface{})) - - parseFunc := func(record string) string { - switch d.Get("type") { - case "AAAA": - - return resource_dns_record_set_net.ParseIP(record).String() - case "MX", "DS": - return resource_dns_record_set_strings.ToLower(record) - case "TXT": - return resource_dns_record_set_strings.ToLower(resource_dns_record_set_strings.Trim(record, `"`)) - default: - return record - } - } - return rrdatasListDiffSuppress(oList, nList, parseFunc, d) -} - -func rrdatasListDiffSuppress(oldList, newList []string, fun func(x string) string, _ *resource_dns_record_set_schema.ResourceData) bool { - - 
diff := make(map[string]bool, len(oldList)) - for _, oldRecord := range oldList { - - diff[fun(oldRecord)] = true - } - for _, newRecord := range newList { - - if diff[fun(newRecord)] { - diff[fun(newRecord)] = false - } else { - return false - } - } - - for _, element := range diff { - if element { - return false - } - } - return true -} - -func resourceDnsRecordSet() *resource_dns_record_set_schema.Resource { - return &resource_dns_record_set_schema.Resource{ - Create: resourceDnsRecordSetCreate, - Read: resourceDnsRecordSetRead, - Delete: resourceDnsRecordSetDelete, - Update: resourceDnsRecordSetUpdate, - Importer: &resource_dns_record_set_schema.ResourceImporter{ - State: resourceDnsRecordSetImportState, - }, - - Schema: map[string]*resource_dns_record_set_schema.Schema{ - "managed_zone": { - Type: resource_dns_record_set_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the zone in which this record set will reside.`, - }, - - "name": { - Type: resource_dns_record_set_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The DNS name this record set will apply to.`, - }, - - "rrdatas": { - Type: resource_dns_record_set_schema.TypeList, - Required: true, - Elem: &resource_dns_record_set_schema.Schema{ - Type: resource_dns_record_set_schema.TypeString, - }, - DiffSuppressFunc: rrdatasDnsDiffSuppress, - Description: `The string data for the records in this record set whose meaning depends on the DNS type. For TXT record, if the string data contains spaces, add surrounding \" if you don't want your string to get split on spaces. To specify a single record value longer than 255 characters such as a TXT record for DKIM, add \"\" inside the Terraform configuration string (e.g. 
"first255characters\"\"morecharacters").`, - }, - - "ttl": { - Type: resource_dns_record_set_schema.TypeInt, - Optional: true, - Description: `The time-to-live of this record set (seconds).`, - }, - - "type": { - Type: resource_dns_record_set_schema.TypeString, - Required: true, - Description: `The DNS record set type.`, - }, - - "project": { - Type: resource_dns_record_set_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceDnsRecordSetCreate(d *resource_dns_record_set_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - zone := d.Get("managed_zone").(string) - rType := d.Get("type").(string) - - chg := &resource_dns_record_set_dns.Change{ - Additions: []*resource_dns_record_set_dns.ResourceRecordSet{ - { - Name: name, - Type: rType, - Ttl: int64(d.Get("ttl").(int)), - Rrdatas: rrdata(d), - }, - }, - } - - resource_dns_record_set_log.Printf("[DEBUG] DNS record list request for %q", zone) - res, err := config.NewDnsClient(userAgent).ResourceRecordSets.List(project, zone).Do() - if err != nil { - return resource_dns_record_set_fmt.Errorf("Error retrieving record sets for %q: %s", zone, err) - } - var deletions []*resource_dns_record_set_dns.ResourceRecordSet - - for _, record := range res.Rrsets { - if record.Type != rType || record.Name != name { - continue - } - deletions = append(deletions, record) - } - if len(deletions) > 0 { - chg.Deletions = deletions - } - - resource_dns_record_set_log.Printf("[DEBUG] DNS Record create request: %#v", chg) - chg, err = config.NewDnsClient(userAgent).Changes.Create(project, zone, chg).Do() - if err 
!= nil { - return resource_dns_record_set_fmt.Errorf("Error creating DNS RecordSet: %s", err) - } - - d.SetId(resource_dns_record_set_fmt.Sprintf("projects/%s/managedZones/%s/rrsets/%s/%s", project, zone, name, rType)) - - w := &DnsChangeWaiter{ - Service: config.NewDnsClient(userAgent), - Change: chg, - Project: project, - ManagedZone: zone, - } - _, err = w.Conf().WaitForState() - if err != nil { - return resource_dns_record_set_fmt.Errorf("Error waiting for Google DNS change: %s", err) - } - - return resourceDnsRecordSetRead(d, meta) -} - -func resourceDnsRecordSetRead(d *resource_dns_record_set_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone := d.Get("managed_zone").(string) - - name := d.Get("name").(string) - dnsType := d.Get("type").(string) - - var resp *resource_dns_record_set_dns.ResourceRecordSetsListResponse - err = retry(func() error { - var reqErr error - resp, reqErr = config.NewDnsClient(userAgent).ResourceRecordSets.List( - project, zone).Name(name).Type(dnsType).Do() - return reqErr - }) - if err != nil { - return handleNotFoundError(err, d, resource_dns_record_set_fmt.Sprintf("DNS Record Set %q", d.Get("name").(string))) - } - if len(resp.Rrsets) == 0 { - - d.SetId("") - return nil - } - - if len(resp.Rrsets) > 1 { - return resource_dns_record_set_fmt.Errorf("Only expected 1 record set, got %d", len(resp.Rrsets)) - } - - if err := d.Set("type", resp.Rrsets[0].Type); err != nil { - return resource_dns_record_set_fmt.Errorf("Error setting type: %s", err) - } - if err := d.Set("ttl", resp.Rrsets[0].Ttl); err != nil { - return resource_dns_record_set_fmt.Errorf("Error setting ttl: %s", err) - } - if err := d.Set("rrdatas", resp.Rrsets[0].Rrdatas); err != nil { - return resource_dns_record_set_fmt.Errorf("Error setting rrdatas: %s", err) - } - 
if err := d.Set("project", project); err != nil { - return resource_dns_record_set_fmt.Errorf("Error setting project: %s", err) - } - - return nil -} - -func resourceDnsRecordSetDelete(d *resource_dns_record_set_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone := d.Get("managed_zone").(string) - - if d.Get("type").(string) == "NS" { - mz, err := config.NewDnsClient(userAgent).ManagedZones.Get(project, zone).Do() - if err != nil { - return resource_dns_record_set_fmt.Errorf("Error retrieving managed zone %q from %q: %s", zone, project, err) - } - domain := mz.DnsName - - if domain == d.Get("name").(string) { - resource_dns_record_set_log.Println("[DEBUG] NS records can't be deleted due to API restrictions, so they're being left in place. See https://www.terraform.io/docs/providers/google/r/dns_record_set.html for more information.") - return nil - } - } - - chg := &resource_dns_record_set_dns.Change{ - Deletions: []*resource_dns_record_set_dns.ResourceRecordSet{ - { - Name: d.Get("name").(string), - Type: d.Get("type").(string), - Ttl: int64(d.Get("ttl").(int)), - Rrdatas: rrdata(d), - }, - }, - } - - resource_dns_record_set_log.Printf("[DEBUG] DNS Record delete request: %#v", chg) - chg, err = config.NewDnsClient(userAgent).Changes.Create(project, zone, chg).Do() - if err != nil { - return handleNotFoundError(err, d, "google_dns_record_set") - } - - w := &DnsChangeWaiter{ - Service: config.NewDnsClient(userAgent), - Change: chg, - Project: project, - ManagedZone: zone, - } - _, err = w.Conf().WaitForState() - if err != nil { - return resource_dns_record_set_fmt.Errorf("Error waiting for Google DNS change: %s", err) - } - - d.SetId("") - return nil -} - -func resourceDnsRecordSetUpdate(d *resource_dns_record_set_schema.ResourceData, meta interface{}) error { 
- config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - zone := d.Get("managed_zone").(string) - recordName := d.Get("name").(string) - - oldTtl, newTtl := d.GetChange("ttl") - oldType, newType := d.GetChange("type") - - oldCountRaw, _ := d.GetChange("rrdatas.#") - oldCount := oldCountRaw.(int) - - chg := &resource_dns_record_set_dns.Change{ - Deletions: []*resource_dns_record_set_dns.ResourceRecordSet{ - { - Name: recordName, - Type: oldType.(string), - Ttl: int64(oldTtl.(int)), - Rrdatas: make([]string, oldCount), - }, - }, - Additions: []*resource_dns_record_set_dns.ResourceRecordSet{ - { - Name: recordName, - Type: newType.(string), - Ttl: int64(newTtl.(int)), - Rrdatas: rrdata(d), - }, - }, - } - - for i := 0; i < oldCount; i++ { - rrKey := resource_dns_record_set_fmt.Sprintf("rrdatas.%d", i) - oldRR, _ := d.GetChange(rrKey) - chg.Deletions[0].Rrdatas[i] = oldRR.(string) - } - resource_dns_record_set_log.Printf("[DEBUG] DNS Record change request: %#v old: %#v new: %#v", chg, chg.Deletions[0], chg.Additions[0]) - chg, err = config.NewDnsClient(userAgent).Changes.Create(project, zone, chg).Do() - if err != nil { - return resource_dns_record_set_fmt.Errorf("Error changing DNS RecordSet: %s", err) - } - - w := &DnsChangeWaiter{ - Service: config.NewDnsClient(userAgent), - Change: chg, - Project: project, - ManagedZone: zone, - } - if _, err = w.Conf().WaitForState(); err != nil { - return resource_dns_record_set_fmt.Errorf("Error waiting for Google DNS change: %s", err) - } - - d.SetId(resource_dns_record_set_fmt.Sprintf("projects/%s/managedZones/%s/rrsets/%s/%s", project, zone, recordName, newType)) - - return resourceDnsRecordSetRead(d, meta) -} - -func resourceDnsRecordSetImportState(d *resource_dns_record_set_schema.ResourceData, meta interface{}) ([]*resource_dns_record_set_schema.ResourceData, error) { - 
config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/managedZones/(?P[^/]+)/rrsets/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/managedZones/{{managed_zone}}/rrsets/{{name}}/{{type}}") - if err != nil { - return nil, resource_dns_record_set_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_dns_record_set_schema.ResourceData{d}, nil -} - -func rrdata( - d *resource_dns_record_set_schema.ResourceData, -) []string { - rrdatasCount := d.Get("rrdatas.#").(int) - data := make([]string, rrdatasCount) - for i := 0; i < rrdatasCount; i++ { - data[i] = d.Get(resource_dns_record_set_fmt.Sprintf("rrdatas.%d", i)).(string) - } - return data -} - -func resourceEndpointsService() *resource_endpoints_service_schema.Resource { - return &resource_endpoints_service_schema.Resource{ - Create: resourceEndpointsServiceCreate, - Read: resourceEndpointsServiceRead, - Delete: resourceEndpointsServiceDelete, - Update: resourceEndpointsServiceUpdate, - - SchemaVersion: 1, - MigrateState: migrateEndpointsService, - - Timeouts: &resource_endpoints_service_schema.ResourceTimeout{ - Create: resource_endpoints_service_schema.DefaultTimeout(10 * resource_endpoints_service_time.Minute), - Update: resource_endpoints_service_schema.DefaultTimeout(10 * resource_endpoints_service_time.Minute), - Delete: resource_endpoints_service_schema.DefaultTimeout(10 * resource_endpoints_service_time.Minute), - }, - - Schema: map[string]*resource_endpoints_service_schema.Schema{ - "service_name": { - Type: resource_endpoints_service_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the service. 
Usually of the form $apiname.endpoints.$projectid.cloud.goog.`, - }, - "openapi_config": { - Type: resource_endpoints_service_schema.TypeString, - Optional: true, - ConflictsWith: []string{"grpc_config", "protoc_output_base64"}, - Description: `The full text of the OpenAPI YAML configuration as described here. Either this, or both of grpc_config and protoc_output_base64 must be specified.`, - }, - "grpc_config": { - Type: resource_endpoints_service_schema.TypeString, - Optional: true, - Description: `The full text of the Service Config YAML file (Example located here). If provided, must also provide protoc_output_base64. open_api config must not be provided.`, - }, - "protoc_output_base64": { - Type: resource_endpoints_service_schema.TypeString, - Optional: true, - Description: `The full contents of the Service Descriptor File generated by protoc. This should be a compiled .pb file, base64-encoded.`, - }, - "project": { - Type: resource_endpoints_service_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The project ID that the service belongs to. If not provided, provider project is used.`, - }, - "config_id": { - Type: resource_endpoints_service_schema.TypeString, - Computed: true, - Description: `The autogenerated ID for the configuration that is rolled out as part of the creation of this resource. 
Must be provided to compute engine instances as a tag.`, - }, - "apis": { - Type: resource_endpoints_service_schema.TypeList, - Computed: true, - Description: `A list of API objects.`, - Elem: &resource_endpoints_service_schema.Resource{ - Schema: map[string]*resource_endpoints_service_schema.Schema{ - "name": { - Type: resource_endpoints_service_schema.TypeString, - Computed: true, - Description: `The FQDN of the API as described in the provided config.`, - }, - "syntax": { - Type: resource_endpoints_service_schema.TypeString, - Computed: true, - Description: `SYNTAX_PROTO2 or SYNTAX_PROTO3.`, - }, - "version": { - Type: resource_endpoints_service_schema.TypeString, - Computed: true, - Description: `A version string for this api. If specified, will have the form major-version.minor-version, e.g. 1.10.`, - }, - "methods": { - Type: resource_endpoints_service_schema.TypeList, - Computed: true, - Description: `A list of Method objects.`, - Elem: &resource_endpoints_service_schema.Resource{ - Schema: map[string]*resource_endpoints_service_schema.Schema{ - "name": { - Type: resource_endpoints_service_schema.TypeString, - Computed: true, - Description: `The simple name of this method as described in the provided config.`, - }, - "syntax": { - Type: resource_endpoints_service_schema.TypeString, - Computed: true, - Description: `SYNTAX_PROTO2 or SYNTAX_PROTO3.`, - }, - "request_type": { - Type: resource_endpoints_service_schema.TypeString, - Computed: true, - Description: `The type URL for the request to this API.`, - }, - "response_type": { - Type: resource_endpoints_service_schema.TypeString, - Computed: true, - Description: `The type URL for the response from this API.`, - }, - }, - }, - }, - }, - }, - }, - "dns_address": { - Type: resource_endpoints_service_schema.TypeString, - Computed: true, - Description: `The address at which the service can be found - usually the same as the service name.`, - }, - "endpoints": { - Type: resource_endpoints_service_schema.TypeList, 
- Computed: true, - Description: `A list of Endpoint objects.`, - Elem: &resource_endpoints_service_schema.Resource{ - Schema: map[string]*resource_endpoints_service_schema.Schema{ - "name": { - Type: resource_endpoints_service_schema.TypeString, - Computed: true, - Description: `The simple name of the endpoint as described in the config.`, - }, - "address": { - Type: resource_endpoints_service_schema.TypeString, - Computed: true, - Description: `The FQDN of the endpoint as described in the config.`, - }, - }, - }, - }, - }, - CustomizeDiff: predictServiceId, - UseJSONNumber: true, - } -} - -func predictServiceId(_ resource_endpoints_service_context.Context, d *resource_endpoints_service_schema.ResourceDiff, meta interface{}) error { - if !d.HasChange("openapi_config") && !d.HasChange("grpc_config") && !d.HasChange("protoc_output_base64") { - return nil - } - baseDate := resource_endpoints_service_time.Now().Format("2006-01-02") - oldConfigId := d.Get("config_id").(string) - if match, err := resource_endpoints_service_regexp.MatchString(`\d\d\d\d-\d\d-\d\dr\d*`, oldConfigId); !match || err != nil { - - return nil - } - if resource_endpoints_service_strings.HasPrefix(oldConfigId, baseDate) { - n, err := resource_endpoints_service_strconv.Atoi(resource_endpoints_service_strings.Split(oldConfigId, "r")[1]) - if err != nil { - return err - } - if err := d.SetNew("config_id", resource_endpoints_service_fmt.Sprintf("%sr%d", baseDate, n+1)); err != nil { - return err - } - } else { - if err := d.SetNew("config_id", baseDate+"r0"); err != nil { - return err - } - } - return nil -} - -func getEndpointServiceOpenAPIConfigSource(configText string) *resource_endpoints_service_servicemanagement.ConfigSource { - - configfile := resource_endpoints_service_servicemanagement.ConfigFile{ - FileContents: resource_endpoints_service_base64.StdEncoding.EncodeToString([]byte(configText)), - FileType: "OPEN_API_YAML", - FilePath: "heredoc.yaml", - } - return 
&resource_endpoints_service_servicemanagement.ConfigSource{ - Files: []*resource_endpoints_service_servicemanagement.ConfigFile{&configfile}, - } -} - -func getEndpointServiceGRPCConfigSource(serviceConfig, protoConfig string) *resource_endpoints_service_servicemanagement.ConfigSource { - - ymlConfigfile := resource_endpoints_service_servicemanagement.ConfigFile{ - FileContents: resource_endpoints_service_base64.StdEncoding.EncodeToString([]byte(serviceConfig)), - FileType: "SERVICE_CONFIG_YAML", - FilePath: "heredoc.yaml", - } - protoConfigfile := resource_endpoints_service_servicemanagement.ConfigFile{ - FileContents: protoConfig, - FileType: "FILE_DESCRIPTOR_SET_PROTO", - FilePath: "api_def.pb", - } - return &resource_endpoints_service_servicemanagement.ConfigSource{ - Files: []*resource_endpoints_service_servicemanagement.ConfigFile{&ymlConfigfile, &protoConfigfile}, - } -} - -func resourceEndpointsServiceCreate(d *resource_endpoints_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - serviceName := d.Get("service_name").(string) - resource_endpoints_service_log.Printf("[DEBUG] Create Endpoint Service %q", serviceName) - - resource_endpoints_service_log.Printf("[DEBUG] Checking for existing ManagedService %q", serviceName) - _, err = config.NewServiceManClient(userAgent).Services.Get(serviceName).Do() - if err != nil { - resource_endpoints_service_log.Printf("[DEBUG] Creating new ServiceManagement ManagedService %q", serviceName) - op, err := config.NewServiceManClient(userAgent).Services.Create( - &resource_endpoints_service_servicemanagement.ManagedService{ - ProducerProjectId: project, - ServiceName: serviceName, - }).Do() - if err != nil { - return err - } - - _, err = serviceManagementOperationWaitTime(config, op, "Creating new ManagedService.", 
userAgent, d.Timeout(resource_endpoints_service_schema.TimeoutCreate)) - if err != nil { - return err - } - } - - err = resourceEndpointsServiceUpdate(d, meta) - if err != nil { - return err - } - - d.SetId(serviceName) - return resourceEndpointsServiceRead(d, meta) -} - -func expandEndpointServiceConfigSource(d *resource_endpoints_service_schema.ResourceData, meta interface{}) (*resource_endpoints_service_servicemanagement.ConfigSource, error) { - if openapiConfig, ok := d.GetOk("openapi_config"); ok { - return getEndpointServiceOpenAPIConfigSource(openapiConfig.(string)), nil - } - - grpcConfig, gok := d.GetOk("grpc_config") - protocOutput, pok := d.GetOk("protoc_output_base64") - if gok && pok { - return getEndpointServiceGRPCConfigSource(grpcConfig.(string), protocOutput.(string)), nil - } - - return nil, resource_endpoints_service_errors.New("Could not parse config - either openapi_config or both grpc_config and protoc_output_base64 must be set.") -} - -func resourceEndpointsServiceUpdate(d *resource_endpoints_service_schema.ResourceData, meta interface{}) error { - - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - serviceName := d.Get("service_name").(string) - - resource_endpoints_service_log.Printf("[DEBUG] Updating ManagedService %q", serviceName) - - cfgSource, err := expandEndpointServiceConfigSource(d, meta) - if err != nil { - return err - } - - resource_endpoints_service_log.Printf("[DEBUG] Updating ManagedService %q", serviceName) - - resource_endpoints_service_log.Printf("[DEBUG] Submitting config for ManagedService %q", serviceName) - op, err := config.NewServiceManClient(userAgent).Services.Configs.Submit( - serviceName, - &resource_endpoints_service_servicemanagement.SubmitConfigSourceRequest{ - ConfigSource: cfgSource, - }).Do() - if err != nil { - return err - } - s, err := serviceManagementOperationWaitTime(config, op, "Submitting service config.", userAgent, 
d.Timeout(resource_endpoints_service_schema.TimeoutUpdate)) - if err != nil { - return err - } - var serviceConfig resource_endpoints_service_servicemanagement.SubmitConfigSourceResponse - if err := resource_endpoints_service_json.Unmarshal(s, &serviceConfig); err != nil { - return err - } - - rollout := resource_endpoints_service_servicemanagement.Rollout{ - ServiceName: serviceName, - TrafficPercentStrategy: &resource_endpoints_service_servicemanagement.TrafficPercentStrategy{ - Percentages: map[string]float64{serviceConfig.ServiceConfig.Id: 100.0}, - }, - } - - resource_endpoints_service_log.Printf("[DEBUG] Creating new rollout for ManagedService %q", serviceName) - op, err = config.NewServiceManClient(userAgent).Services.Rollouts.Create(serviceName, &rollout).Do() - if err != nil { - return err - } - _, err = serviceManagementOperationWaitTime(config, op, "Performing service rollout.", userAgent, d.Timeout(resource_endpoints_service_schema.TimeoutUpdate)) - if err != nil { - return err - } - - return resourceEndpointsServiceRead(d, meta) -} - -func resourceEndpointsServiceDelete(d *resource_endpoints_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - resource_endpoints_service_log.Printf("[DEBUG] Deleting ManagedService %q", d.Id()) - - op, err := config.NewServiceManClient(userAgent).Services.Delete(d.Get("service_name").(string)).Do() - if err != nil { - return err - } - _, err = serviceManagementOperationWaitTime(config, op, "Deleting service.", userAgent, d.Timeout(resource_endpoints_service_schema.TimeoutDelete)) - d.SetId("") - return err -} - -func resourceEndpointsServiceRead(d *resource_endpoints_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - 
resource_endpoints_service_log.Printf("[DEBUG] Reading ManagedService %q", d.Id()) - - service, err := config.NewServiceManClient(userAgent).Services.GetConfig(d.Get("service_name").(string)).Do() - if err != nil { - return err - } - - if err := d.Set("config_id", service.Id); err != nil { - return resource_endpoints_service_fmt.Errorf("Error setting config_id: %s", err) - } - if err := d.Set("dns_address", service.Name); err != nil { - return resource_endpoints_service_fmt.Errorf("Error setting dns_address: %s", err) - } - if err := d.Set("apis", flattenServiceManagementAPIs(service.Apis)); err != nil { - return resource_endpoints_service_fmt.Errorf("Error setting apis: %s", err) - } - if err := d.Set("endpoints", flattenServiceManagementEndpoints(service.Endpoints)); err != nil { - return resource_endpoints_service_fmt.Errorf("Error setting endpoints: %s", err) - } - - return nil -} - -func flattenServiceManagementAPIs(apis []*resource_endpoints_service_servicemanagement.Api) []map[string]interface{} { - flattened := make([]map[string]interface{}, len(apis)) - for i, a := range apis { - flattened[i] = map[string]interface{}{ - "name": a.Name, - "version": a.Version, - "syntax": a.Syntax, - "methods": flattenServiceManagementMethods(a.Methods), - } - } - return flattened -} - -func flattenServiceManagementMethods(methods []*resource_endpoints_service_servicemanagement.Method) []map[string]interface{} { - flattened := make([]map[string]interface{}, len(methods)) - for i, m := range methods { - flattened[i] = map[string]interface{}{ - "name": m.Name, - "syntax": m.Syntax, - "request_type": m.RequestTypeUrl, - "response_type": m.ResponseTypeUrl, - } - } - return flattened -} - -func flattenServiceManagementEndpoints(endpoints []*resource_endpoints_service_servicemanagement.Endpoint) []map[string]interface{} { - flattened := make([]map[string]interface{}, len(endpoints)) - for i, e := range endpoints { - flattened[i] = map[string]interface{}{ - "name": e.Name, - 
"address": e.Target, - } - } - return flattened -} - -func migrateEndpointsService(v int, is *resource_endpoints_service_migration_terraform.InstanceState, meta interface{}) (*resource_endpoints_service_migration_terraform.InstanceState, error) { - switch v { - case 0: - if is.Attributes["protoc_output"] == "" { - resource_endpoints_service_migration_log.Println("[DEBUG] Nothing to migrate to V1.") - return is, nil - } - is.Attributes["protoc_output_base64"] = resource_endpoints_service_migration_base64.StdEncoding.EncodeToString([]byte(is.Attributes["protoc_output"])) - is.Attributes["protoc_output"] = "" - return is, nil - default: - return nil, resource_endpoints_service_migration_fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func resourceEssentialContactsContact() *resource_essential_contacts_contact_schema.Resource { - return &resource_essential_contacts_contact_schema.Resource{ - Create: resourceEssentialContactsContactCreate, - Read: resourceEssentialContactsContactRead, - Update: resourceEssentialContactsContactUpdate, - Delete: resourceEssentialContactsContactDelete, - - Importer: &resource_essential_contacts_contact_schema.ResourceImporter{ - State: resourceEssentialContactsContactImport, - }, - - Timeouts: &resource_essential_contacts_contact_schema.ResourceTimeout{ - Create: resource_essential_contacts_contact_schema.DefaultTimeout(4 * resource_essential_contacts_contact_time.Minute), - Update: resource_essential_contacts_contact_schema.DefaultTimeout(4 * resource_essential_contacts_contact_time.Minute), - Delete: resource_essential_contacts_contact_schema.DefaultTimeout(4 * resource_essential_contacts_contact_time.Minute), - }, - - Schema: map[string]*resource_essential_contacts_contact_schema.Schema{ - "email": { - Type: resource_essential_contacts_contact_schema.TypeString, - Required: true, - Description: `The email address to send notifications to. 
This does not need to be a Google account.`, - }, - "language_tag": { - Type: resource_essential_contacts_contact_schema.TypeString, - Required: true, - Description: `The preferred language for notifications, as a ISO 639-1 language code. See Supported languages for a list of supported languages.`, - }, - "notification_category_subscriptions": { - Type: resource_essential_contacts_contact_schema.TypeList, - Required: true, - Description: `The categories of notifications that the contact will receive communications for.`, - Elem: &resource_essential_contacts_contact_schema.Schema{ - Type: resource_essential_contacts_contact_schema.TypeString, - }, - }, - "parent": { - Type: resource_essential_contacts_contact_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource to save this contact for. Format: organizations/{organization_id}, folders/{folder_id} or projects/{project_id}`, - }, - "name": { - Type: resource_essential_contacts_contact_schema.TypeString, - Computed: true, - Description: `The identifier for the contact. 
Format: {resourceType}/{resource_id}/contacts/{contact_id}`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceEssentialContactsContactCreate(d *resource_essential_contacts_contact_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - emailProp, err := expandEssentialContactsContactEmail(d.Get("email"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("email"); !isEmptyValue(resource_essential_contacts_contact_reflect.ValueOf(emailProp)) && (ok || !resource_essential_contacts_contact_reflect.DeepEqual(v, emailProp)) { - obj["email"] = emailProp - } - notificationCategorySubscriptionsProp, err := expandEssentialContactsContactNotificationCategorySubscriptions(d.Get("notification_category_subscriptions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_category_subscriptions"); !isEmptyValue(resource_essential_contacts_contact_reflect.ValueOf(notificationCategorySubscriptionsProp)) && (ok || !resource_essential_contacts_contact_reflect.DeepEqual(v, notificationCategorySubscriptionsProp)) { - obj["notificationCategorySubscriptions"] = notificationCategorySubscriptionsProp - } - languageTagProp, err := expandEssentialContactsContactLanguageTag(d.Get("language_tag"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("language_tag"); !isEmptyValue(resource_essential_contacts_contact_reflect.ValueOf(languageTagProp)) && (ok || !resource_essential_contacts_contact_reflect.DeepEqual(v, languageTagProp)) { - obj["languageTag"] = languageTagProp - } - - url, err := replaceVars(d, config, "{{EssentialContactsBasePath}}{{parent}}/contacts") - if err != nil { - return err - } - - resource_essential_contacts_contact_log.Printf("[DEBUG] Creating new Contact: %#v", obj) - billingProject := "" - - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_essential_contacts_contact_schema.TimeoutCreate)) - if err != nil { - return resource_essential_contacts_contact_fmt.Errorf("Error creating Contact: %s", err) - } - if err := d.Set("name", flattenEssentialContactsContactName(res["name"], d, config)); err != nil { - return resource_essential_contacts_contact_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_essential_contacts_contact_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_essential_contacts_contact_log.Printf("[DEBUG] Finished creating Contact %q: %#v", d.Id(), res) - - return resourceEssentialContactsContactRead(d, meta) -} - -func resourceEssentialContactsContactRead(d *resource_essential_contacts_contact_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{EssentialContactsBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_essential_contacts_contact_fmt.Sprintf("EssentialContactsContact %q", d.Id())) - } - - if err := d.Set("name", flattenEssentialContactsContactName(res["name"], d, config)); err != nil { - return resource_essential_contacts_contact_fmt.Errorf("Error reading Contact: %s", err) - } - if err := d.Set("email", flattenEssentialContactsContactEmail(res["email"], d, config)); err != nil { - return resource_essential_contacts_contact_fmt.Errorf("Error reading Contact: 
%s", err) - } - if err := d.Set("notification_category_subscriptions", flattenEssentialContactsContactNotificationCategorySubscriptions(res["notificationCategorySubscriptions"], d, config)); err != nil { - return resource_essential_contacts_contact_fmt.Errorf("Error reading Contact: %s", err) - } - if err := d.Set("language_tag", flattenEssentialContactsContactLanguageTag(res["languageTag"], d, config)); err != nil { - return resource_essential_contacts_contact_fmt.Errorf("Error reading Contact: %s", err) - } - - return nil -} - -func resourceEssentialContactsContactUpdate(d *resource_essential_contacts_contact_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - emailProp, err := expandEssentialContactsContactEmail(d.Get("email"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("email"); !isEmptyValue(resource_essential_contacts_contact_reflect.ValueOf(v)) && (ok || !resource_essential_contacts_contact_reflect.DeepEqual(v, emailProp)) { - obj["email"] = emailProp - } - notificationCategorySubscriptionsProp, err := expandEssentialContactsContactNotificationCategorySubscriptions(d.Get("notification_category_subscriptions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_category_subscriptions"); !isEmptyValue(resource_essential_contacts_contact_reflect.ValueOf(v)) && (ok || !resource_essential_contacts_contact_reflect.DeepEqual(v, notificationCategorySubscriptionsProp)) { - obj["notificationCategorySubscriptions"] = notificationCategorySubscriptionsProp - } - languageTagProp, err := expandEssentialContactsContactLanguageTag(d.Get("language_tag"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("language_tag"); !isEmptyValue(resource_essential_contacts_contact_reflect.ValueOf(v)) && 
(ok || !resource_essential_contacts_contact_reflect.DeepEqual(v, languageTagProp)) { - obj["languageTag"] = languageTagProp - } - - url, err := replaceVars(d, config, "{{EssentialContactsBasePath}}{{name}}") - if err != nil { - return err - } - - resource_essential_contacts_contact_log.Printf("[DEBUG] Updating Contact %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("email") { - updateMask = append(updateMask, "email") - } - - if d.HasChange("notification_category_subscriptions") { - updateMask = append(updateMask, "notificationCategorySubscriptions") - } - - if d.HasChange("language_tag") { - updateMask = append(updateMask, "languageTag") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_essential_contacts_contact_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_essential_contacts_contact_schema.TimeoutUpdate)) - - if err != nil { - return resource_essential_contacts_contact_fmt.Errorf("Error updating Contact %q: %s", d.Id(), err) - } else { - resource_essential_contacts_contact_log.Printf("[DEBUG] Finished updating Contact %q: %#v", d.Id(), res) - } - - return resourceEssentialContactsContactRead(d, meta) -} - -func resourceEssentialContactsContactDelete(d *resource_essential_contacts_contact_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{EssentialContactsBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_essential_contacts_contact_log.Printf("[DEBUG] Deleting Contact %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = 
bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_essential_contacts_contact_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Contact") - } - - resource_essential_contacts_contact_log.Printf("[DEBUG] Finished deleting Contact %q: %#v", d.Id(), res) - return nil -} - -func resourceEssentialContactsContactImport(d *resource_essential_contacts_contact_schema.ResourceData, meta interface{}) ([]*resource_essential_contacts_contact_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P.+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return nil, resource_essential_contacts_contact_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_essential_contacts_contact_schema.ResourceData{d}, nil -} - -func flattenEssentialContactsContactName(v interface{}, d *resource_essential_contacts_contact_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenEssentialContactsContactEmail(v interface{}, d *resource_essential_contacts_contact_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenEssentialContactsContactNotificationCategorySubscriptions(v interface{}, d *resource_essential_contacts_contact_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenEssentialContactsContactLanguageTag(v interface{}, d *resource_essential_contacts_contact_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandEssentialContactsContactEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandEssentialContactsContactNotificationCategorySubscriptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandEssentialContactsContactLanguageTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceEventarcTrigger() *resource_eventarc_trigger_schema.Resource { - return &resource_eventarc_trigger_schema.Resource{ - Create: resourceEventarcTriggerCreate, - Read: resourceEventarcTriggerRead, - Update: resourceEventarcTriggerUpdate, - Delete: resourceEventarcTriggerDelete, - - Importer: &resource_eventarc_trigger_schema.ResourceImporter{ - State: resourceEventarcTriggerImport, - }, - - Timeouts: &resource_eventarc_trigger_schema.ResourceTimeout{ - Create: resource_eventarc_trigger_schema.DefaultTimeout(10 * resource_eventarc_trigger_time.Minute), - Update: resource_eventarc_trigger_schema.DefaultTimeout(10 * resource_eventarc_trigger_time.Minute), - Delete: resource_eventarc_trigger_schema.DefaultTimeout(10 * resource_eventarc_trigger_time.Minute), - }, - - Schema: map[string]*resource_eventarc_trigger_schema.Schema{ - "destination": { - Type: resource_eventarc_trigger_schema.TypeList, - Required: true, - Description: "Required. Destination specifies where the events should be sent to.", - MaxItems: 1, - Elem: EventarcTriggerDestinationSchema(), - }, - - "location": { - Type: resource_eventarc_trigger_schema.TypeString, - Required: true, - ForceNew: true, - Description: "The location for the resource", - }, - - "matching_criteria": { - Type: resource_eventarc_trigger_schema.TypeSet, - Required: true, - Description: "Required. null The list of filters that applies to event attributes. Only events that match all the provided filters will be sent to the destination.", - Elem: EventarcTriggerMatchingCriteriaSchema(), - Set: resource_eventarc_trigger_schema.HashResource(EventarcTriggerMatchingCriteriaSchema()), - }, - - "name": { - Type: resource_eventarc_trigger_schema.TypeString, - Required: true, - Description: "Required. The resource name of the trigger. 
Must be unique within the location on the project and must be in `projects/{project}/locations/{location}/triggers/{trigger}` format.", - }, - - "labels": { - Type: resource_eventarc_trigger_schema.TypeMap, - Optional: true, - Description: "Optional. User labels attached to the triggers that can be used to group resources.", - Elem: &resource_eventarc_trigger_schema.Schema{Type: resource_eventarc_trigger_schema.TypeString}, - }, - - "project": { - Type: resource_eventarc_trigger_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The project for the resource", - }, - - "service_account": { - Type: resource_eventarc_trigger_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "Optional. The IAM service account email associated with the trigger. The service account represents the identity of the trigger. The principal who calls this API must have `iam.serviceAccounts.actAs` permission in the service account. See https://cloud.google.com/iam/docs/understanding-service-accounts?hl=en#sa_common for more information. For Cloud Run destinations, this service account is used to generate identity tokens when invoking the service. See https://cloud.google.com/run/docs/triggering/pubsub-push#create-service-account for information on how to invoke authenticated Cloud Run services. In order to create Audit Log triggers, the service account should also have `roles/eventarc.eventReceiver` IAM role.", - }, - - "transport": { - Type: resource_eventarc_trigger_schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: "Optional. In order to deliver messages, Eventarc may use other GCP products as transport intermediary. This field contains a reference to that transport intermediary. 
This information can be used for debugging purposes.", - MaxItems: 1, - Elem: EventarcTriggerTransportSchema(), - }, - - "create_time": { - Type: resource_eventarc_trigger_schema.TypeString, - Computed: true, - Description: "Output only. The creation time.", - }, - - "etag": { - Type: resource_eventarc_trigger_schema.TypeString, - Computed: true, - Description: "Output only. This checksum is computed by the server based on the value of other fields, and may be sent only on create requests to ensure the client has an up-to-date value before proceeding.", - }, - - "uid": { - Type: resource_eventarc_trigger_schema.TypeString, - Computed: true, - Description: "Output only. Server assigned unique identifier for the trigger. The value is a UUID4 string and guaranteed to remain unchanged until the resource is deleted.", - }, - - "update_time": { - Type: resource_eventarc_trigger_schema.TypeString, - Computed: true, - Description: "Output only. The last-modified time.", - }, - }, - } -} - -func EventarcTriggerDestinationSchema() *resource_eventarc_trigger_schema.Resource { - return &resource_eventarc_trigger_schema.Resource{ - Schema: map[string]*resource_eventarc_trigger_schema.Schema{ - "cloud_function": { - Type: resource_eventarc_trigger_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The Cloud Function resource name. Only Cloud Functions V2 is supported. Format: projects/{project}/locations/{location}/functions/{function}", - }, - - "cloud_run_service": { - Type: resource_eventarc_trigger_schema.TypeList, - Optional: true, - Description: "Cloud Run fully-managed service that receives the events. 
The service should be running in the same project of the trigger.", - MaxItems: 1, - Elem: EventarcTriggerDestinationCloudRunServiceSchema(), - }, - }, - } -} - -func EventarcTriggerDestinationCloudRunServiceSchema() *resource_eventarc_trigger_schema.Resource { - return &resource_eventarc_trigger_schema.Resource{ - Schema: map[string]*resource_eventarc_trigger_schema.Schema{ - "service": { - Type: resource_eventarc_trigger_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "Required. The name of the Cloud Run service being addressed. See https://cloud.google.com/run/docs/reference/rest/v1/namespaces.services. Only services located in the same project of the trigger object can be addressed.", - }, - - "path": { - Type: resource_eventarc_trigger_schema.TypeString, - Optional: true, - Description: "Optional. The relative path on the Cloud Run service the events should be sent to. The value must conform to the definition of URI path segment (section 3.3 of RFC2396). Examples: \"/route\", \"route\", \"route/subroute\".", - }, - - "region": { - Type: resource_eventarc_trigger_schema.TypeString, - Computed: true, - Optional: true, - Description: "Required. The region the Cloud Run service is deployed in.", - }, - }, - } -} - -func EventarcTriggerMatchingCriteriaSchema() *resource_eventarc_trigger_schema.Resource { - return &resource_eventarc_trigger_schema.Resource{ - Schema: map[string]*resource_eventarc_trigger_schema.Schema{ - "attribute": { - Type: resource_eventarc_trigger_schema.TypeString, - Required: true, - Description: "Required. The name of a CloudEvents attribute. Currently, only a subset of attributes are supported for filtering. All triggers MUST provide a filter for the 'type' attribute.", - }, - - "value": { - Type: resource_eventarc_trigger_schema.TypeString, - Required: true, - Description: "Required. 
The value for the attribute.", - }, - }, - } -} - -func EventarcTriggerTransportSchema() *resource_eventarc_trigger_schema.Resource { - return &resource_eventarc_trigger_schema.Resource{ - Schema: map[string]*resource_eventarc_trigger_schema.Schema{ - "pubsub": { - Type: resource_eventarc_trigger_schema.TypeList, - Optional: true, - ForceNew: true, - Description: "The Pub/Sub topic and subscription used by Eventarc as delivery intermediary.", - MaxItems: 1, - Elem: EventarcTriggerTransportPubsubSchema(), - }, - }, - } -} - -func EventarcTriggerTransportPubsubSchema() *resource_eventarc_trigger_schema.Resource { - return &resource_eventarc_trigger_schema.Resource{ - Schema: map[string]*resource_eventarc_trigger_schema.Schema{ - "topic": { - Type: resource_eventarc_trigger_schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Optional. The name of the Pub/Sub topic created and managed by Eventarc system as a transport for the event delivery. Format: `projects/{PROJECT_ID}/topics/{TOPIC_NAME You may set an existing topic for triggers of the type google.cloud.pubsub.topic.v1.messagePublished` only. The topic you provide here will not be deleted by Eventarc at trigger deletion.", - }, - - "subscription": { - Type: resource_eventarc_trigger_schema.TypeString, - Computed: true, - Description: "Output only. The name of the Pub/Sub subscription created and managed by Eventarc system as a transport for the event delivery. 
Format: `projects/{PROJECT_ID}/subscriptions/{SUBSCRIPTION_NAME}`.", - }, - }, - } -} - -func resourceEventarcTriggerCreate(d *resource_eventarc_trigger_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &resource_eventarc_trigger_eventarceventarc.Trigger{ - Destination: expandEventarcTriggerDestination(d.Get("destination")), - Location: resource_eventarc_trigger_dcldcl.String(d.Get("location").(string)), - MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), - Name: resource_eventarc_trigger_dcldcl.String(d.Get("name").(string)), - Labels: checkStringMap(d.Get("labels")), - Project: resource_eventarc_trigger_dcldcl.String(project), - ServiceAccount: resource_eventarc_trigger_dcldcl.String(d.Get("service_account").(string)), - Transport: expandEventarcTriggerTransport(d.Get("transport")), - } - - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/triggers/{{name}}") - if err != nil { - return resource_eventarc_trigger_fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - createDirective := CreateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(resource_eventarc_trigger_schema.TimeoutCreate)) - res, err := client.ApplyTrigger(resource_eventarc_trigger_context.Background(), obj, createDirective...) 
- - if _, ok := err.(resource_eventarc_trigger_dcldcl.DiffAfterApplyError); ok { - resource_eventarc_trigger_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_eventarc_trigger_fmt.Errorf("Error creating Trigger: %s", err) - } - - resource_eventarc_trigger_log.Printf("[DEBUG] Finished creating Trigger %q: %#v", d.Id(), res) - - return resourceEventarcTriggerRead(d, meta) -} - -func resourceEventarcTriggerRead(d *resource_eventarc_trigger_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &resource_eventarc_trigger_eventarceventarc.Trigger{ - Destination: expandEventarcTriggerDestination(d.Get("destination")), - Location: resource_eventarc_trigger_dcldcl.String(d.Get("location").(string)), - MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), - Name: resource_eventarc_trigger_dcldcl.String(d.Get("name").(string)), - Labels: checkStringMap(d.Get("labels")), - Project: resource_eventarc_trigger_dcldcl.String(project), - ServiceAccount: resource_eventarc_trigger_dcldcl.String(d.Get("service_account").(string)), - Transport: expandEventarcTriggerTransport(d.Get("transport")), - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(resource_eventarc_trigger_schema.TimeoutRead)) - res, err := client.GetTrigger(resource_eventarc_trigger_context.Background(), obj) - if err != nil { - resourceName := resource_eventarc_trigger_fmt.Sprintf("EventarcTrigger %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("destination", flattenEventarcTriggerDestination(res.Destination)); err != 
nil { - return resource_eventarc_trigger_fmt.Errorf("error setting destination in state: %s", err) - } - if err = d.Set("location", res.Location); err != nil { - return resource_eventarc_trigger_fmt.Errorf("error setting location in state: %s", err) - } - if err = d.Set("matching_criteria", flattenEventarcTriggerMatchingCriteriaArray(res.MatchingCriteria)); err != nil { - return resource_eventarc_trigger_fmt.Errorf("error setting matching_criteria in state: %s", err) - } - if err = d.Set("name", res.Name); err != nil { - return resource_eventarc_trigger_fmt.Errorf("error setting name in state: %s", err) - } - if err = d.Set("labels", res.Labels); err != nil { - return resource_eventarc_trigger_fmt.Errorf("error setting labels in state: %s", err) - } - if err = d.Set("project", res.Project); err != nil { - return resource_eventarc_trigger_fmt.Errorf("error setting project in state: %s", err) - } - if err = d.Set("service_account", res.ServiceAccount); err != nil { - return resource_eventarc_trigger_fmt.Errorf("error setting service_account in state: %s", err) - } - if err = d.Set("transport", flattenEventarcTriggerTransport(res.Transport)); err != nil { - return resource_eventarc_trigger_fmt.Errorf("error setting transport in state: %s", err) - } - if err = d.Set("create_time", res.CreateTime); err != nil { - return resource_eventarc_trigger_fmt.Errorf("error setting create_time in state: %s", err) - } - if err = d.Set("etag", res.Etag); err != nil { - return resource_eventarc_trigger_fmt.Errorf("error setting etag in state: %s", err) - } - if err = d.Set("uid", res.Uid); err != nil { - return resource_eventarc_trigger_fmt.Errorf("error setting uid in state: %s", err) - } - if err = d.Set("update_time", res.UpdateTime); err != nil { - return resource_eventarc_trigger_fmt.Errorf("error setting update_time in state: %s", err) - } - - return nil -} - -func resourceEventarcTriggerUpdate(d *resource_eventarc_trigger_schema.ResourceData, meta interface{}) error { - config 
:= meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &resource_eventarc_trigger_eventarceventarc.Trigger{ - Destination: expandEventarcTriggerDestination(d.Get("destination")), - Location: resource_eventarc_trigger_dcldcl.String(d.Get("location").(string)), - MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), - Name: resource_eventarc_trigger_dcldcl.String(d.Get("name").(string)), - Labels: checkStringMap(d.Get("labels")), - Project: resource_eventarc_trigger_dcldcl.String(project), - ServiceAccount: resource_eventarc_trigger_dcldcl.String(d.Get("service_account").(string)), - Transport: expandEventarcTriggerTransport(d.Get("transport")), - } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(resource_eventarc_trigger_schema.TimeoutUpdate)) - res, err := client.ApplyTrigger(resource_eventarc_trigger_context.Background(), obj, directive...) 
- - if _, ok := err.(resource_eventarc_trigger_dcldcl.DiffAfterApplyError); ok { - resource_eventarc_trigger_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_eventarc_trigger_fmt.Errorf("Error updating Trigger: %s", err) - } - - resource_eventarc_trigger_log.Printf("[DEBUG] Finished creating Trigger %q: %#v", d.Id(), res) - - return resourceEventarcTriggerRead(d, meta) -} - -func resourceEventarcTriggerDelete(d *resource_eventarc_trigger_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &resource_eventarc_trigger_eventarceventarc.Trigger{ - Destination: expandEventarcTriggerDestination(d.Get("destination")), - Location: resource_eventarc_trigger_dcldcl.String(d.Get("location").(string)), - MatchingCriteria: expandEventarcTriggerMatchingCriteriaArray(d.Get("matching_criteria")), - Name: resource_eventarc_trigger_dcldcl.String(d.Get("name").(string)), - Labels: checkStringMap(d.Get("labels")), - Project: resource_eventarc_trigger_dcldcl.String(project), - ServiceAccount: resource_eventarc_trigger_dcldcl.String(d.Get("service_account").(string)), - Transport: expandEventarcTriggerTransport(d.Get("transport")), - } - - resource_eventarc_trigger_log.Printf("[DEBUG] Deleting Trigger %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLEventarcClient(config, userAgent, billingProject, d.Timeout(resource_eventarc_trigger_schema.TimeoutDelete)) - if err := client.DeleteTrigger(resource_eventarc_trigger_context.Background(), obj); err != nil { - return resource_eventarc_trigger_fmt.Errorf("Error deleting Trigger: %s", err) - } - - resource_eventarc_trigger_log.Printf("[DEBUG] Finished deleting 
Trigger %q", d.Id()) - return nil -} - -func resourceEventarcTriggerImport(d *resource_eventarc_trigger_schema.ResourceData, meta interface{}) ([]*resource_eventarc_trigger_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/triggers/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/triggers/{{name}}") - if err != nil { - return nil, resource_eventarc_trigger_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_eventarc_trigger_schema.ResourceData{d}, nil -} - -func expandEventarcTriggerDestination(o interface{}) *resource_eventarc_trigger_eventarceventarc.TriggerDestination { - if o == nil { - return resource_eventarc_trigger_eventarceventarc.EmptyTriggerDestination - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_eventarc_trigger_eventarceventarc.EmptyTriggerDestination - } - obj := objArr[0].(map[string]interface{}) - return &resource_eventarc_trigger_eventarceventarc.TriggerDestination{ - CloudFunction: resource_eventarc_trigger_dcldcl.String(obj["cloud_function"].(string)), - CloudRunService: expandEventarcTriggerDestinationCloudRunService(obj["cloud_run_service"]), - } -} - -func flattenEventarcTriggerDestination(obj *resource_eventarc_trigger_eventarceventarc.TriggerDestination) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "cloud_function": obj.CloudFunction, - "cloud_run_service": flattenEventarcTriggerDestinationCloudRunService(obj.CloudRunService), - } - - return []interface{}{transformed} - -} - -func expandEventarcTriggerDestinationCloudRunService(o interface{}) *resource_eventarc_trigger_eventarceventarc.TriggerDestinationCloudRunService { - if o == nil { - return 
resource_eventarc_trigger_eventarceventarc.EmptyTriggerDestinationCloudRunService - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_eventarc_trigger_eventarceventarc.EmptyTriggerDestinationCloudRunService - } - obj := objArr[0].(map[string]interface{}) - return &resource_eventarc_trigger_eventarceventarc.TriggerDestinationCloudRunService{ - Service: resource_eventarc_trigger_dcldcl.String(obj["service"].(string)), - Path: resource_eventarc_trigger_dcldcl.String(obj["path"].(string)), - Region: resource_eventarc_trigger_dcldcl.StringOrNil(obj["region"].(string)), - } -} - -func flattenEventarcTriggerDestinationCloudRunService(obj *resource_eventarc_trigger_eventarceventarc.TriggerDestinationCloudRunService) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "service": obj.Service, - "path": obj.Path, - "region": obj.Region, - } - - return []interface{}{transformed} - -} - -func expandEventarcTriggerMatchingCriteriaArray(o interface{}) []resource_eventarc_trigger_eventarceventarc.TriggerMatchingCriteria { - if o == nil { - return make([]resource_eventarc_trigger_eventarceventarc.TriggerMatchingCriteria, 0) - } - - o = o.(*resource_eventarc_trigger_schema.Set).List() - - objs := o.([]interface{}) - if len(objs) == 0 { - return make([]resource_eventarc_trigger_eventarceventarc.TriggerMatchingCriteria, 0) - } - - items := make([]resource_eventarc_trigger_eventarceventarc.TriggerMatchingCriteria, 0, len(objs)) - for _, item := range objs { - i := expandEventarcTriggerMatchingCriteria(item) - items = append(items, *i) - } - - return items -} - -func expandEventarcTriggerMatchingCriteria(o interface{}) *resource_eventarc_trigger_eventarceventarc.TriggerMatchingCriteria { - if o == nil { - return resource_eventarc_trigger_eventarceventarc.EmptyTriggerMatchingCriteria - } - - obj := o.(map[string]interface{}) - return &resource_eventarc_trigger_eventarceventarc.TriggerMatchingCriteria{ - 
Attribute: resource_eventarc_trigger_dcldcl.String(obj["attribute"].(string)), - Value: resource_eventarc_trigger_dcldcl.String(obj["value"].(string)), - } -} - -func flattenEventarcTriggerMatchingCriteriaArray(objs []resource_eventarc_trigger_eventarceventarc.TriggerMatchingCriteria) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenEventarcTriggerMatchingCriteria(&item) - items = append(items, i) - } - - return items -} - -func flattenEventarcTriggerMatchingCriteria(obj *resource_eventarc_trigger_eventarceventarc.TriggerMatchingCriteria) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "attribute": obj.Attribute, - "value": obj.Value, - } - - return transformed - -} - -func expandEventarcTriggerTransport(o interface{}) *resource_eventarc_trigger_eventarceventarc.TriggerTransport { - if o == nil { - return nil - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return nil - } - obj := objArr[0].(map[string]interface{}) - return &resource_eventarc_trigger_eventarceventarc.TriggerTransport{ - Pubsub: expandEventarcTriggerTransportPubsub(obj["pubsub"]), - } -} - -func flattenEventarcTriggerTransport(obj *resource_eventarc_trigger_eventarceventarc.TriggerTransport) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "pubsub": flattenEventarcTriggerTransportPubsub(obj.Pubsub), - } - - return []interface{}{transformed} - -} - -func expandEventarcTriggerTransportPubsub(o interface{}) *resource_eventarc_trigger_eventarceventarc.TriggerTransportPubsub { - if o == nil { - return resource_eventarc_trigger_eventarceventarc.EmptyTriggerTransportPubsub - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_eventarc_trigger_eventarceventarc.EmptyTriggerTransportPubsub - } - obj := objArr[0].(map[string]interface{}) - return 
&resource_eventarc_trigger_eventarceventarc.TriggerTransportPubsub{ - Topic: resource_eventarc_trigger_dcldcl.String(obj["topic"].(string)), - } -} - -func flattenEventarcTriggerTransportPubsub(obj *resource_eventarc_trigger_eventarceventarc.TriggerTransportPubsub) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "topic": obj.Topic, - "subscription": obj.Subscription, - } - - return []interface{}{transformed} - -} - -func resourceFilestoreInstance() *resource_filestore_instance_schema.Resource { - return &resource_filestore_instance_schema.Resource{ - Create: resourceFilestoreInstanceCreate, - Read: resourceFilestoreInstanceRead, - Update: resourceFilestoreInstanceUpdate, - Delete: resourceFilestoreInstanceDelete, - - Importer: &resource_filestore_instance_schema.ResourceImporter{ - State: resourceFilestoreInstanceImport, - }, - - Timeouts: &resource_filestore_instance_schema.ResourceTimeout{ - Create: resource_filestore_instance_schema.DefaultTimeout(6 * resource_filestore_instance_time.Minute), - Update: resource_filestore_instance_schema.DefaultTimeout(6 * resource_filestore_instance_time.Minute), - Delete: resource_filestore_instance_schema.DefaultTimeout(6 * resource_filestore_instance_time.Minute), - }, - - Schema: map[string]*resource_filestore_instance_schema.Schema{ - "file_shares": { - Type: resource_filestore_instance_schema.TypeList, - Required: true, - Description: `File system shares on the instance. For this version, only a -single file share is supported.`, - MaxItems: 1, - Elem: &resource_filestore_instance_schema.Resource{ - Schema: map[string]*resource_filestore_instance_schema.Schema{ - "capacity_gb": { - Type: resource_filestore_instance_schema.TypeInt, - Required: true, - Description: `File share capacity in GiB. 
This must be at least 1024 GiB -for the standard tier, or 2560 GiB for the premium tier.`, - }, - "name": { - Type: resource_filestore_instance_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the fileshare (16 characters or less)`, - }, - }, - }, - }, - "name": { - Type: resource_filestore_instance_schema.TypeString, - Required: true, - Description: `The resource name of the instance.`, - }, - "networks": { - Type: resource_filestore_instance_schema.TypeList, - Required: true, - ForceNew: true, - Description: `VPC networks to which the instance is connected. For this version, -only a single network is supported.`, - MinItems: 1, - Elem: &resource_filestore_instance_schema.Resource{ - Schema: map[string]*resource_filestore_instance_schema.Schema{ - "modes": { - Type: resource_filestore_instance_schema.TypeList, - Required: true, - ForceNew: true, - Description: `IP versions for which the instance has -IP addresses assigned. Possible values: ["ADDRESS_MODE_UNSPECIFIED", "MODE_IPV4", "MODE_IPV6"]`, - Elem: &resource_filestore_instance_schema.Schema{ - Type: resource_filestore_instance_schema.TypeString, - ValidateFunc: resource_filestore_instance_validation.StringInSlice([]string{"ADDRESS_MODE_UNSPECIFIED", "MODE_IPV4", "MODE_IPV6"}, false), - }, - }, - "network": { - Type: resource_filestore_instance_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the GCE VPC network to which the -instance is connected.`, - }, - "reserved_ip_range": { - Type: resource_filestore_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `A /29 CIDR block that identifies the range of IP -addresses reserved for this instance.`, - }, - "ip_addresses": { - Type: resource_filestore_instance_schema.TypeList, - Computed: true, - Description: `A list of IPv4 or IPv6 addresses.`, - Elem: &resource_filestore_instance_schema.Schema{ - Type: resource_filestore_instance_schema.TypeString, - 
}, - }, - }, - }, - }, - "tier": { - Type: resource_filestore_instance_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_filestore_instance_validation.StringInSlice([]string{"TIER_UNSPECIFIED", "STANDARD", "PREMIUM", "BASIC_HDD", "BASIC_SSD", "HIGH_SCALE_SSD"}, false), - Description: `The service tier of the instance. Possible values: ["TIER_UNSPECIFIED", "STANDARD", "PREMIUM", "BASIC_HDD", "BASIC_SSD", "HIGH_SCALE_SSD"]`, - }, - "zone": { - Type: resource_filestore_instance_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the Filestore zone of the instance.`, - }, - "description": { - Type: resource_filestore_instance_schema.TypeString, - Optional: true, - Description: `A description of the instance.`, - }, - "labels": { - Type: resource_filestore_instance_schema.TypeMap, - Optional: true, - Description: `Resource labels to represent user-provided metadata.`, - Elem: &resource_filestore_instance_schema.Schema{Type: resource_filestore_instance_schema.TypeString}, - }, - "create_time": { - Type: resource_filestore_instance_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "etag": { - Type: resource_filestore_instance_schema.TypeString, - Computed: true, - Description: `Server-specified ETag for the instance resource to prevent -simultaneous updates from overwriting each other.`, - }, - "project": { - Type: resource_filestore_instance_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceFilestoreInstanceCreate(d *resource_filestore_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandFilestoreInstanceDescription(d.Get("description"), d, config) - if err != nil { - return err - 
} else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_filestore_instance_reflect.ValueOf(descriptionProp)) && (ok || !resource_filestore_instance_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - tierProp, err := expandFilestoreInstanceTier(d.Get("tier"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tier"); !isEmptyValue(resource_filestore_instance_reflect.ValueOf(tierProp)) && (ok || !resource_filestore_instance_reflect.DeepEqual(v, tierProp)) { - obj["tier"] = tierProp - } - labelsProp, err := expandFilestoreInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_filestore_instance_reflect.ValueOf(labelsProp)) && (ok || !resource_filestore_instance_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - fileSharesProp, err := expandFilestoreInstanceFileShares(d.Get("file_shares"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("file_shares"); !isEmptyValue(resource_filestore_instance_reflect.ValueOf(fileSharesProp)) && (ok || !resource_filestore_instance_reflect.DeepEqual(v, fileSharesProp)) { - obj["fileShares"] = fileSharesProp - } - networksProp, err := expandFilestoreInstanceNetworks(d.Get("networks"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("networks"); !isEmptyValue(resource_filestore_instance_reflect.ValueOf(networksProp)) && (ok || !resource_filestore_instance_reflect.DeepEqual(v, networksProp)) { - obj["networks"] = networksProp - } - - lockName, err := replaceVars(d, config, "filestore/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{zone}}/instances?instanceId={{name}}") - if err != nil { - return err - } - - 
resource_filestore_instance_log.Printf("[DEBUG] Creating new Instance: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_filestore_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_filestore_instance_schema.TimeoutCreate), isNotFilestoreQuotaError) - if err != nil { - return resource_filestore_instance_fmt.Errorf("Error creating Instance: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/instances/{{name}}") - if err != nil { - return resource_filestore_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = filestoreOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Instance", userAgent, - d.Timeout(resource_filestore_instance_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_filestore_instance_fmt.Errorf("Error waiting to create Instance: %s", err) - } - - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/instances/{{name}}") - if err != nil { - return resource_filestore_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_filestore_instance_log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) - - return resourceFilestoreInstanceRead(d, meta) -} - -func resourceFilestoreInstanceRead(d *resource_filestore_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{zone}}/instances/{{name}}") - if err != nil { - return err - } - 
- billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_filestore_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isNotFilestoreQuotaError) - if err != nil { - return handleNotFoundError(err, d, resource_filestore_instance_fmt.Sprintf("FilestoreInstance %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_filestore_instance_fmt.Errorf("Error reading Instance: %s", err) - } - - if err := d.Set("description", flattenFilestoreInstanceDescription(res["description"], d, config)); err != nil { - return resource_filestore_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("create_time", flattenFilestoreInstanceCreateTime(res["createTime"], d, config)); err != nil { - return resource_filestore_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("tier", flattenFilestoreInstanceTier(res["tier"], d, config)); err != nil { - return resource_filestore_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("labels", flattenFilestoreInstanceLabels(res["labels"], d, config)); err != nil { - return resource_filestore_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("file_shares", flattenFilestoreInstanceFileShares(res["fileShares"], d, config)); err != nil { - return resource_filestore_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("networks", flattenFilestoreInstanceNetworks(res["networks"], d, config)); err != nil { - return resource_filestore_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("etag", flattenFilestoreInstanceEtag(res["etag"], d, config)); err != nil { - return resource_filestore_instance_fmt.Errorf("Error reading 
Instance: %s", err) - } - - return nil -} - -func resourceFilestoreInstanceUpdate(d *resource_filestore_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_filestore_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandFilestoreInstanceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_filestore_instance_reflect.ValueOf(v)) && (ok || !resource_filestore_instance_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandFilestoreInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_filestore_instance_reflect.ValueOf(v)) && (ok || !resource_filestore_instance_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - fileSharesProp, err := expandFilestoreInstanceFileShares(d.Get("file_shares"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("file_shares"); !isEmptyValue(resource_filestore_instance_reflect.ValueOf(v)) && (ok || !resource_filestore_instance_reflect.DeepEqual(v, fileSharesProp)) { - obj["fileShares"] = fileSharesProp - } - - lockName, err := replaceVars(d, config, "filestore/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{zone}}/instances/{{name}}") - if err != nil { - return err - } - - resource_filestore_instance_log.Printf("[DEBUG] Updating Instance %q: %#v", d.Id(), obj) - 
updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("file_shares") { - updateMask = append(updateMask, "fileShares") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_filestore_instance_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_filestore_instance_schema.TimeoutUpdate), isNotFilestoreQuotaError) - - if err != nil { - return resource_filestore_instance_fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) - } else { - resource_filestore_instance_log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) - } - - err = filestoreOperationWaitTime( - config, res, project, "Updating Instance", userAgent, - d.Timeout(resource_filestore_instance_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceFilestoreInstanceRead(d, meta) -} - -func resourceFilestoreInstanceDelete(d *resource_filestore_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_filestore_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "filestore/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{FilestoreBasePath}}projects/{{project}}/locations/{{zone}}/instances/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - 
resource_filestore_instance_log.Printf("[DEBUG] Deleting Instance %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_filestore_instance_schema.TimeoutDelete), isNotFilestoreQuotaError) - if err != nil { - return handleNotFoundError(err, d, "Instance") - } - - err = filestoreOperationWaitTime( - config, res, project, "Deleting Instance", userAgent, - d.Timeout(resource_filestore_instance_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_filestore_instance_log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) - return nil -} - -func resourceFilestoreInstanceImport(d *resource_filestore_instance_schema.ResourceData, meta interface{}) ([]*resource_filestore_instance_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/instances/{{name}}") - if err != nil { - return nil, resource_filestore_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_filestore_instance_schema.ResourceData{d}, nil -} - -func flattenFilestoreInstanceDescription(v interface{}, d *resource_filestore_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreInstanceCreateTime(v interface{}, d *resource_filestore_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreInstanceTier(v interface{}, d *resource_filestore_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreInstanceLabels(v interface{}, d 
*resource_filestore_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreInstanceFileShares(v interface{}, d *resource_filestore_instance_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenFilestoreInstanceFileSharesName(original["name"], d, config), - "capacity_gb": flattenFilestoreInstanceFileSharesCapacityGb(original["capacityGb"], d, config), - }) - } - return transformed -} - -func flattenFilestoreInstanceFileSharesName(v interface{}, d *resource_filestore_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreInstanceFileSharesCapacityGb(v interface{}, d *resource_filestore_instance_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_filestore_instance_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenFilestoreInstanceNetworks(v interface{}, d *resource_filestore_instance_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "network": flattenFilestoreInstanceNetworksNetwork(original["network"], d, config), - "modes": flattenFilestoreInstanceNetworksModes(original["modes"], d, config), - "reserved_ip_range": flattenFilestoreInstanceNetworksReservedIpRange(original["reservedIpRange"], d, config), - "ip_addresses": 
flattenFilestoreInstanceNetworksIpAddresses(original["ipAddresses"], d, config), - }) - } - return transformed -} - -func flattenFilestoreInstanceNetworksNetwork(v interface{}, d *resource_filestore_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreInstanceNetworksModes(v interface{}, d *resource_filestore_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreInstanceNetworksReservedIpRange(v interface{}, d *resource_filestore_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreInstanceNetworksIpAddresses(v interface{}, d *resource_filestore_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFilestoreInstanceEtag(v interface{}, d *resource_filestore_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandFilestoreInstanceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFilestoreInstanceTier(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFilestoreInstanceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandFilestoreInstanceFileShares(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandFilestoreInstanceFileSharesName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_filestore_instance_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedCapacityGb, err := expandFilestoreInstanceFileSharesCapacityGb(original["capacity_gb"], d, config) - if err != nil { - return nil, err - } else if val := resource_filestore_instance_reflect.ValueOf(transformedCapacityGb); val.IsValid() && !isEmptyValue(val) { - transformed["capacityGb"] = transformedCapacityGb - } - - req = append(req, transformed) - } - return req, nil -} - -func expandFilestoreInstanceFileSharesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFilestoreInstanceFileSharesCapacityGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFilestoreInstanceNetworks(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNetwork, err := expandFilestoreInstanceNetworksNetwork(original["network"], d, config) - if err != nil { - return nil, err - } else if val := resource_filestore_instance_reflect.ValueOf(transformedNetwork); val.IsValid() && !isEmptyValue(val) { - transformed["network"] = transformedNetwork - } - - transformedModes, err := expandFilestoreInstanceNetworksModes(original["modes"], d, config) - if err != nil { - return nil, err - } else if val := resource_filestore_instance_reflect.ValueOf(transformedModes); val.IsValid() && !isEmptyValue(val) { - transformed["modes"] = transformedModes - } - - transformedReservedIpRange, err := expandFilestoreInstanceNetworksReservedIpRange(original["reserved_ip_range"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_filestore_instance_reflect.ValueOf(transformedReservedIpRange); val.IsValid() && !isEmptyValue(val) { - transformed["reservedIpRange"] = transformedReservedIpRange - } - - transformedIpAddresses, err := expandFilestoreInstanceNetworksIpAddresses(original["ip_addresses"], d, config) - if err != nil { - return nil, err - } else if val := resource_filestore_instance_reflect.ValueOf(transformedIpAddresses); val.IsValid() && !isEmptyValue(val) { - transformed["ipAddresses"] = transformedIpAddresses - } - - req = append(req, transformed) - } - return req, nil -} - -func expandFilestoreInstanceNetworksNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFilestoreInstanceNetworksModes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFilestoreInstanceNetworksReservedIpRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFilestoreInstanceNetworksIpAddresses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceFirestoreDocument() *resource_firestore_document_schema.Resource { - return &resource_firestore_document_schema.Resource{ - Create: resourceFirestoreDocumentCreate, - Read: resourceFirestoreDocumentRead, - Update: resourceFirestoreDocumentUpdate, - Delete: resourceFirestoreDocumentDelete, - - Importer: &resource_firestore_document_schema.ResourceImporter{ - State: resourceFirestoreDocumentImport, - }, - - Timeouts: &resource_firestore_document_schema.ResourceTimeout{ - Create: resource_firestore_document_schema.DefaultTimeout(4 * resource_firestore_document_time.Minute), - Update: resource_firestore_document_schema.DefaultTimeout(4 * resource_firestore_document_time.Minute), - Delete: resource_firestore_document_schema.DefaultTimeout(4 * resource_firestore_document_time.Minute), - }, - - Schema: 
map[string]*resource_firestore_document_schema.Schema{ - "collection": { - Type: resource_firestore_document_schema.TypeString, - Required: true, - Description: `The collection ID, relative to database. For example: chatrooms or chatrooms/my-document/private-messages.`, - }, - "document_id": { - Type: resource_firestore_document_schema.TypeString, - Required: true, - Description: `The client-assigned document ID to use for this document during creation.`, - }, - "fields": { - Type: resource_firestore_document_schema.TypeString, - Required: true, - ValidateFunc: resource_firestore_document_validation.StringIsJSON, - StateFunc: func(v interface{}) string { - s, _ := resource_firestore_document_structure.NormalizeJsonString(v) - return s - }, - Description: `The document's [fields](https://cloud.google.com/firestore/docs/reference/rest/v1/projects.databases.documents) formated as a json string.`, - }, - "database": { - Type: resource_firestore_document_schema.TypeString, - Optional: true, - Description: `The Firestore database id. Defaults to '"(default)"'.`, - Default: "(default)", - }, - "create_time": { - Type: resource_firestore_document_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 format.`, - }, - "name": { - Type: resource_firestore_document_schema.TypeString, - Computed: true, - Description: `A server defined name for this index. 
Format: -'projects/{{project_id}}/databases/{{database_id}}/documents/{{path}}/{{document_id}}'`, - }, - "path": { - Type: resource_firestore_document_schema.TypeString, - Computed: true, - Description: `A relative path to the collection this document exists within`, - }, - "update_time": { - Type: resource_firestore_document_schema.TypeString, - Computed: true, - Description: `Last update timestamp in RFC3339 format.`, - }, - "project": { - Type: resource_firestore_document_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceFirestoreDocumentCreate(d *resource_firestore_document_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - fieldsProp, err := expandFirestoreDocumentFields(d.Get("fields"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fields"); !isEmptyValue(resource_firestore_document_reflect.ValueOf(fieldsProp)) && (ok || !resource_firestore_document_reflect.DeepEqual(v, fieldsProp)) { - obj["fields"] = fieldsProp - } - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}projects/{{project}}/databases/{{database}}/documents/{{collection}}?documentId={{document_id}}") - if err != nil { - return err - } - - resource_firestore_document_log.Printf("[DEBUG] Creating new Document: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_firestore_document_fmt.Errorf("Error fetching project for Document: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_firestore_document_schema.TimeoutCreate)) - if err != nil { - return 
resource_firestore_document_fmt.Errorf("Error creating Document: %s", err) - } - if err := d.Set("name", flattenFirestoreDocumentName(res["name"], d, config)); err != nil { - return resource_firestore_document_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_firestore_document_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_firestore_document_log.Printf("[DEBUG] Finished creating Document %q: %#v", d.Id(), res) - - return resourceFirestoreDocumentRead(d, meta) -} - -func resourceFirestoreDocumentRead(d *resource_firestore_document_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_firestore_document_fmt.Errorf("Error fetching project for Document: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_firestore_document_fmt.Sprintf("FirestoreDocument %q", d.Id())) - } - - res, err = resourceFirestoreDocumentDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_firestore_document_log.Printf("[DEBUG] Removing FirestoreDocument because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_firestore_document_fmt.Errorf("Error reading Document: %s", err) - } - - if err := d.Set("name", flattenFirestoreDocumentName(res["name"], d, config)); err != nil { - return 
resource_firestore_document_fmt.Errorf("Error reading Document: %s", err) - } - if err := d.Set("path", flattenFirestoreDocumentPath(res["path"], d, config)); err != nil { - return resource_firestore_document_fmt.Errorf("Error reading Document: %s", err) - } - if err := d.Set("fields", flattenFirestoreDocumentFields(res["fields"], d, config)); err != nil { - return resource_firestore_document_fmt.Errorf("Error reading Document: %s", err) - } - if err := d.Set("create_time", flattenFirestoreDocumentCreateTime(res["createTime"], d, config)); err != nil { - return resource_firestore_document_fmt.Errorf("Error reading Document: %s", err) - } - if err := d.Set("update_time", flattenFirestoreDocumentUpdateTime(res["updateTime"], d, config)); err != nil { - return resource_firestore_document_fmt.Errorf("Error reading Document: %s", err) - } - - return nil -} - -func resourceFirestoreDocumentUpdate(d *resource_firestore_document_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_firestore_document_fmt.Errorf("Error fetching project for Document: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - fieldsProp, err := expandFirestoreDocumentFields(d.Get("fields"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fields"); !isEmptyValue(resource_firestore_document_reflect.ValueOf(v)) && (ok || !resource_firestore_document_reflect.DeepEqual(v, fieldsProp)) { - obj["fields"] = fieldsProp - } - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}{{name}}") - if err != nil { - return err - } - - resource_firestore_document_log.Printf("[DEBUG] Updating Document %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_firestore_document_schema.TimeoutUpdate)) - - if err != nil { - return resource_firestore_document_fmt.Errorf("Error updating Document %q: %s", d.Id(), err) - } else { - resource_firestore_document_log.Printf("[DEBUG] Finished updating Document %q: %#v", d.Id(), res) - } - - return resourceFirestoreDocumentRead(d, meta) -} - -func resourceFirestoreDocumentDelete(d *resource_firestore_document_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_firestore_document_fmt.Errorf("Error fetching project for Document: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_firestore_document_log.Printf("[DEBUG] Deleting Document %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_firestore_document_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Document") - } - - resource_firestore_document_log.Printf("[DEBUG] Finished deleting Document %q: %#v", d.Id(), res) - return nil -} - -func resourceFirestoreDocumentImport(d *resource_firestore_document_schema.ResourceData, meta interface{}) ([]*resource_firestore_document_schema.ResourceData, error) { - - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - re := resource_firestore_document_regexp.MustCompile("^projects/([^/]+)/databases/([^/]+)/documents/(.+)/([^/]+)$") - match := 
re.FindStringSubmatch(d.Get("name").(string)) - if len(match) > 0 { - if err := d.Set("project", match[1]); err != nil { - return nil, resource_firestore_document_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("database", match[2]); err != nil { - return nil, resource_firestore_document_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("collection", match[3]); err != nil { - return nil, resource_firestore_document_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("document_id", match[4]); err != nil { - return nil, resource_firestore_document_fmt.Errorf("Error setting project: %s", err) - } - } else { - return nil, resource_firestore_document_fmt.Errorf("import did not match the regex ^projects/([^/]+)/databases/([^/]+)/documents/(.+)/([^/]+)$") - } - - return []*resource_firestore_document_schema.ResourceData{d}, nil -} - -func flattenFirestoreDocumentName(v interface{}, d *resource_firestore_document_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreDocumentPath(v interface{}, d *resource_firestore_document_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreDocumentFields(v interface{}, d *resource_firestore_document_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - b, err := resource_firestore_document_json.Marshal(v) - if err != nil { - - resource_firestore_document_log.Printf("[ERROR] failed to marshal schema to JSON: %v", err) - } - return string(b) -} - -func flattenFirestoreDocumentCreateTime(v interface{}, d *resource_firestore_document_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreDocumentUpdateTime(v interface{}, d *resource_firestore_document_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandFirestoreDocumentFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - b := []byte(v.(string)) 
- if len(b) == 0 { - return nil, nil - } - m := make(map[string]interface{}) - if err := resource_firestore_document_json.Unmarshal(b, &m); err != nil { - return nil, err - } - return m, nil -} - -func resourceFirestoreDocumentDecoder(d *resource_firestore_document_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - - if name, ok := res["name"]; ok { - re := resource_firestore_document_regexp.MustCompile("^projects/[^/]+/databases/[^/]+/documents/(.+)$") - match := re.FindStringSubmatch(name.(string)) - if len(match) > 0 { - res["path"] = match[1] - } - } - return res, nil -} - -func firestoreIFieldsDiffSuppressFunc(k, old, new string, d TerraformResourceDataChange) bool { - kLength := "fields.#" - oldLength, newLength := d.GetChange(kLength) - oldInt, ok := oldLength.(int) - if !ok { - return false - } - newInt, ok := newLength.(int) - if !ok { - return false - } - - if oldInt == newInt+1 { - kold := resource_firestore_index_fmt.Sprintf("fields.%v.field_path", oldInt-1) - knew := resource_firestore_index_fmt.Sprintf("fields.%v.field_path", newInt-1) - - oldLastIndexName, _ := d.GetChange(kold) - _, newLastIndexName := d.GetChange(knew) - if oldLastIndexName == "__name__" && newLastIndexName != "__name__" { - oldBase := resource_firestore_index_fmt.Sprintf("fields.%v", oldInt-1) - if resource_firestore_index_strings.HasPrefix(k, oldBase) || k == kLength { - return true - } - } - } - return false -} - -func firestoreIFieldsDiffSuppress(k, old, new string, d *resource_firestore_index_schema.ResourceData) bool { - return firestoreIFieldsDiffSuppressFunc(k, old, new, d) -} - -func resourceFirestoreIndex() *resource_firestore_index_schema.Resource { - return &resource_firestore_index_schema.Resource{ - Create: resourceFirestoreIndexCreate, - Read: resourceFirestoreIndexRead, - Delete: resourceFirestoreIndexDelete, - - Importer: &resource_firestore_index_schema.ResourceImporter{ - State: resourceFirestoreIndexImport, - 
}, - - Timeouts: &resource_firestore_index_schema.ResourceTimeout{ - Create: resource_firestore_index_schema.DefaultTimeout(10 * resource_firestore_index_time.Minute), - Delete: resource_firestore_index_schema.DefaultTimeout(10 * resource_firestore_index_time.Minute), - }, - - Schema: map[string]*resource_firestore_index_schema.Schema{ - "collection": { - Type: resource_firestore_index_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The collection being indexed.`, - }, - "fields": { - Type: resource_firestore_index_schema.TypeList, - Required: true, - ForceNew: true, - DiffSuppressFunc: firestoreIFieldsDiffSuppress, - Description: `The fields supported by this index. The last field entry is always for -the field path '__name__'. If, on creation, '__name__' was not -specified as the last field, it will be added automatically with the -same direction as that of the last field defined. If the final field -in a composite index is not directional, the '__name__' will be -ordered '"ASCENDING"' (unless explicitly specified otherwise).`, - MinItems: 2, - Elem: &resource_firestore_index_schema.Resource{ - Schema: map[string]*resource_firestore_index_schema.Schema{ - "array_config": { - Type: resource_firestore_index_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_firestore_index_validation.StringInSlice([]string{"CONTAINS", ""}, false), - Description: `Indicates that this field supports operations on arrayValues. Only one of 'order' and 'arrayConfig' can -be specified. 
Possible values: ["CONTAINS"]`, - }, - "field_path": { - Type: resource_firestore_index_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Name of the field.`, - }, - "order": { - Type: resource_firestore_index_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_firestore_index_validation.StringInSlice([]string{"ASCENDING", "DESCENDING", ""}, false), - Description: `Indicates that this field supports ordering by the specified order or comparing using =, <, <=, >, >=. -Only one of 'order' and 'arrayConfig' can be specified. Possible values: ["ASCENDING", "DESCENDING"]`, - }, - }, - }, - }, - "database": { - Type: resource_firestore_index_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The Firestore database id. Defaults to '"(default)"'.`, - Default: "(default)", - }, - "query_scope": { - Type: resource_firestore_index_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_firestore_index_validation.StringInSlice([]string{"COLLECTION", "COLLECTION_GROUP", ""}, false), - Description: `The scope at which a query is run. Default value: "COLLECTION" Possible values: ["COLLECTION", "COLLECTION_GROUP"]`, - Default: "COLLECTION", - }, - "name": { - Type: resource_firestore_index_schema.TypeString, - Computed: true, - Description: `A server defined name for this index. 
Format: -'projects/{{project}}/databases/{{database}}/collectionGroups/{{collection}}/indexes/{{server_generated_id}}'`, - }, - "project": { - Type: resource_firestore_index_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceFirestoreIndexCreate(d *resource_firestore_index_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - databaseProp, err := expandFirestoreIndexDatabase(d.Get("database"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("database"); !isEmptyValue(resource_firestore_index_reflect.ValueOf(databaseProp)) && (ok || !resource_firestore_index_reflect.DeepEqual(v, databaseProp)) { - obj["database"] = databaseProp - } - collectionProp, err := expandFirestoreIndexCollection(d.Get("collection"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("collection"); !isEmptyValue(resource_firestore_index_reflect.ValueOf(collectionProp)) && (ok || !resource_firestore_index_reflect.DeepEqual(v, collectionProp)) { - obj["collection"] = collectionProp - } - queryScopeProp, err := expandFirestoreIndexQueryScope(d.Get("query_scope"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("query_scope"); !isEmptyValue(resource_firestore_index_reflect.ValueOf(queryScopeProp)) && (ok || !resource_firestore_index_reflect.DeepEqual(v, queryScopeProp)) { - obj["queryScope"] = queryScopeProp - } - fieldsProp, err := expandFirestoreIndexFields(d.Get("fields"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fields"); !isEmptyValue(resource_firestore_index_reflect.ValueOf(fieldsProp)) && (ok || !resource_firestore_index_reflect.DeepEqual(v, fieldsProp)) { - obj["fields"] = fieldsProp - } - - obj, err = resourceFirestoreIndexEncoder(d, 
meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}projects/{{project}}/databases/{{database}}/collectionGroups/{{collection}}/indexes") - if err != nil { - return err - } - - resource_firestore_index_log.Printf("[DEBUG] Creating new Index: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_firestore_index_fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_firestore_index_schema.TimeoutCreate)) - if err != nil { - return resource_firestore_index_fmt.Errorf("Error creating Index: %s", err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_firestore_index_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = firestoreOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Index", userAgent, - d.Timeout(resource_firestore_index_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_firestore_index_fmt.Errorf("Error waiting to create Index: %s", err) - } - - if err := d.Set("name", flattenFirestoreIndexName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{name}}") - if err != nil { - return resource_firestore_index_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - metadata := res["metadata"].(map[string]interface{}) - name := metadata["index"].(string) - resource_firestore_index_log.Printf("[DEBUG] Setting Index name, id to %s", name) - if err := d.Set("name", name); err != nil { - return resource_firestore_index_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name) - - resource_firestore_index_log.Printf("[DEBUG] Finished 
creating Index %q: %#v", d.Id(), res) - - return resourceFirestoreIndexRead(d, meta) -} - -func resourceFirestoreIndexRead(d *resource_firestore_index_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_firestore_index_fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_firestore_index_fmt.Sprintf("FirestoreIndex %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_firestore_index_fmt.Errorf("Error reading Index: %s", err) - } - - if err := d.Set("name", flattenFirestoreIndexName(res["name"], d, config)); err != nil { - return resource_firestore_index_fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("query_scope", flattenFirestoreIndexQueryScope(res["queryScope"], d, config)); err != nil { - return resource_firestore_index_fmt.Errorf("Error reading Index: %s", err) - } - if err := d.Set("fields", flattenFirestoreIndexFields(res["fields"], d, config)); err != nil { - return resource_firestore_index_fmt.Errorf("Error reading Index: %s", err) - } - - return nil -} - -func resourceFirestoreIndexDelete(d *resource_firestore_index_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return 
resource_firestore_index_fmt.Errorf("Error fetching project for Index: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{FirestoreBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_firestore_index_log.Printf("[DEBUG] Deleting Index %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_firestore_index_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Index") - } - - err = firestoreOperationWaitTime( - config, res, project, "Deleting Index", userAgent, - d.Timeout(resource_firestore_index_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_firestore_index_log.Printf("[DEBUG] Finished deleting Index %q: %#v", d.Id(), res) - return nil -} - -func resourceFirestoreIndexImport(d *resource_firestore_index_schema.ResourceData, meta interface{}) ([]*resource_firestore_index_schema.ResourceData, error) { - - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - stringParts := resource_firestore_index_strings.Split(d.Get("name").(string), "/") - if len(stringParts) != 8 { - return nil, resource_firestore_index_fmt.Errorf( - "Saw %s when the name is expected to have shape %s", - d.Get("name"), - "projects/{{project}}/databases/{{database}}/collectionGroups/{{collection}}/indexes/{{server_generated_id}}", - ) - } - - if err := d.Set("project", stringParts[1]); err != nil { - return nil, resource_firestore_index_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("database", stringParts[3]); err != nil { - return nil, resource_firestore_index_fmt.Errorf("Error setting database: %s", err) - } - if err := d.Set("collection", stringParts[5]); err != nil { - return nil, resource_firestore_index_fmt.Errorf("Error 
setting collection: %s", err) - } - return []*resource_firestore_index_schema.ResourceData{d}, nil -} - -func flattenFirestoreIndexName(v interface{}, d *resource_firestore_index_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreIndexQueryScope(v interface{}, d *resource_firestore_index_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreIndexFields(v interface{}, d *resource_firestore_index_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "field_path": flattenFirestoreIndexFieldsFieldPath(original["fieldPath"], d, config), - "order": flattenFirestoreIndexFieldsOrder(original["order"], d, config), - "array_config": flattenFirestoreIndexFieldsArrayConfig(original["arrayConfig"], d, config), - }) - } - return transformed -} - -func flattenFirestoreIndexFieldsFieldPath(v interface{}, d *resource_firestore_index_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreIndexFieldsOrder(v interface{}, d *resource_firestore_index_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenFirestoreIndexFieldsArrayConfig(v interface{}, d *resource_firestore_index_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandFirestoreIndexDatabase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFirestoreIndexCollection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFirestoreIndexQueryScope(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFirestoreIndexFields(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFieldPath, err := expandFirestoreIndexFieldsFieldPath(original["field_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_firestore_index_reflect.ValueOf(transformedFieldPath); val.IsValid() && !isEmptyValue(val) { - transformed["fieldPath"] = transformedFieldPath - } - - transformedOrder, err := expandFirestoreIndexFieldsOrder(original["order"], d, config) - if err != nil { - return nil, err - } else if val := resource_firestore_index_reflect.ValueOf(transformedOrder); val.IsValid() && !isEmptyValue(val) { - transformed["order"] = transformedOrder - } - - transformedArrayConfig, err := expandFirestoreIndexFieldsArrayConfig(original["array_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_firestore_index_reflect.ValueOf(transformedArrayConfig); val.IsValid() && !isEmptyValue(val) { - transformed["arrayConfig"] = transformedArrayConfig - } - - req = append(req, transformed) - } - return req, nil -} - -func expandFirestoreIndexFieldsFieldPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFirestoreIndexFieldsOrder(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandFirestoreIndexFieldsArrayConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceFirestoreIndexEncoder(d *resource_firestore_index_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - delete(obj, "project") - delete(obj, "database") - delete(obj, "collection") - return obj, nil -} - -var 
accessApprovalCloudProductMapping = map[string]string{ - "appengine.googleapis.com": "App Engine", - "bigquery.googleapis.com": "BigQuery", - "bigtable.googleapis.com": "Cloud Bigtable", - "cloudkms.googleapis.com": "Cloud Key Management Service", - "compute.googleapis.com": "Compute Engine", - "dataflow.googleapis.com": "Cloud Dataflow", - "iam.googleapis.com": "Cloud Identity and Access Management", - "pubsub.googleapis.com": "Cloud Pub/Sub", - "storage.googleapis.com": "Cloud Storage", -} - -func accessApprovalEnrolledServicesHash(v interface{}) int { - var buf resource_folder_access_approval_settings_bytes.Buffer - m := v.(map[string]interface{}) - cp := m["cloud_product"].(string) - if n, ok := accessApprovalCloudProductMapping[cp]; ok { - cp = n - } - buf.WriteString(resource_folder_access_approval_settings_fmt.Sprintf("%s-", resource_folder_access_approval_settings_strings.ToLower(cp))) - buf.WriteString(resource_folder_access_approval_settings_fmt.Sprintf("%s-", resource_folder_access_approval_settings_strings.ToLower(m["enrollment_level"].(string)))) - return hashcode(buf.String()) -} - -func resourceAccessApprovalFolderSettings() *resource_folder_access_approval_settings_schema.Resource { - return &resource_folder_access_approval_settings_schema.Resource{ - Create: resourceAccessApprovalFolderSettingsCreate, - Read: resourceAccessApprovalFolderSettingsRead, - Update: resourceAccessApprovalFolderSettingsUpdate, - Delete: resourceAccessApprovalFolderSettingsDelete, - - Importer: &resource_folder_access_approval_settings_schema.ResourceImporter{ - State: resourceAccessApprovalFolderSettingsImport, - }, - - Timeouts: &resource_folder_access_approval_settings_schema.ResourceTimeout{ - Create: resource_folder_access_approval_settings_schema.DefaultTimeout(4 * resource_folder_access_approval_settings_time.Minute), - Update: resource_folder_access_approval_settings_schema.DefaultTimeout(4 * resource_folder_access_approval_settings_time.Minute), - Delete: 
resource_folder_access_approval_settings_schema.DefaultTimeout(4 * resource_folder_access_approval_settings_time.Minute), - }, - - Schema: map[string]*resource_folder_access_approval_settings_schema.Schema{ - "enrolled_services": { - Type: resource_folder_access_approval_settings_schema.TypeSet, - Required: true, - Description: `A list of Google Cloud Services for which the given resource has Access Approval enrolled. -Access requests for the resource given by name against any of these services contained here will be required -to have explicit approval. Enrollment can only be done on an all or nothing basis. - -A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded.`, - Elem: accessapprovalFolderSettingsEnrolledServicesSchema(), - Set: accessApprovalEnrolledServicesHash, - }, - "folder_id": { - Type: resource_folder_access_approval_settings_schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the folder of the access approval settings.`, - }, - "notification_emails": { - Type: resource_folder_access_approval_settings_schema.TypeSet, - Computed: true, - Optional: true, - Description: `A list of email addresses to which notifications relating to approval requests should be sent. -Notifications relating to a resource will be sent to all emails in the settings of ancestor -resources of that resource. 
A maximum of 50 email addresses are allowed.`, - MaxItems: 50, - Elem: &resource_folder_access_approval_settings_schema.Schema{ - Type: resource_folder_access_approval_settings_schema.TypeString, - }, - Set: resource_folder_access_approval_settings_schema.HashString, - }, - "enrolled_ancestor": { - Type: resource_folder_access_approval_settings_schema.TypeBool, - Computed: true, - Description: `If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Folder.`, - }, - "name": { - Type: resource_folder_access_approval_settings_schema.TypeString, - Computed: true, - Description: `The resource name of the settings. Format is "folders/{folder_id}/accessApprovalSettings"`, - }, - }, - UseJSONNumber: true, - } -} - -func accessapprovalFolderSettingsEnrolledServicesSchema() *resource_folder_access_approval_settings_schema.Resource { - return &resource_folder_access_approval_settings_schema.Resource{ - Schema: map[string]*resource_folder_access_approval_settings_schema.Schema{ - "cloud_product": { - Type: resource_folder_access_approval_settings_schema.TypeString, - Required: true, - Description: `The product for which Access Approval will be enrolled. 
Allowed values are listed (case-sensitive): - * all - * App Engine - * BigQuery - * Cloud Bigtable - * Cloud Key Management Service - * Compute Engine - * Cloud Dataflow - * Cloud Identity and Access Management - * Cloud Pub/Sub - * Cloud Storage - * Persistent Disk - -Note: These values are supported as input, but considered a legacy format: - * all - * appengine.googleapis.com - * bigquery.googleapis.com - * bigtable.googleapis.com - * cloudkms.googleapis.com - * compute.googleapis.com - * dataflow.googleapis.com - * iam.googleapis.com - * pubsub.googleapis.com - * storage.googleapis.com`, - }, - "enrollment_level": { - Type: resource_folder_access_approval_settings_schema.TypeString, - Optional: true, - ValidateFunc: resource_folder_access_approval_settings_validation.StringInSlice([]string{"BLOCK_ALL", ""}, false), - Description: `The enrollment level of the service. Default value: "BLOCK_ALL" Possible values: ["BLOCK_ALL"]`, - Default: "BLOCK_ALL", - }, - }, - } -} - -func resourceAccessApprovalFolderSettingsCreate(d *resource_folder_access_approval_settings_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - notificationEmailsProp, err := expandAccessApprovalFolderSettingsNotificationEmails(d.Get("notification_emails"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_emails"); !isEmptyValue(resource_folder_access_approval_settings_reflect.ValueOf(notificationEmailsProp)) && (ok || !resource_folder_access_approval_settings_reflect.DeepEqual(v, notificationEmailsProp)) { - obj["notificationEmails"] = notificationEmailsProp - } - enrolledServicesProp, err := expandAccessApprovalFolderSettingsEnrolledServices(d.Get("enrolled_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enrolled_services"); 
!isEmptyValue(resource_folder_access_approval_settings_reflect.ValueOf(enrolledServicesProp)) && (ok || !resource_folder_access_approval_settings_reflect.DeepEqual(v, enrolledServicesProp)) { - obj["enrolledServices"] = enrolledServicesProp - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}folders/{{folder_id}}/accessApprovalSettings") - if err != nil { - return err - } - - resource_folder_access_approval_settings_log.Printf("[DEBUG] Creating new FolderSettings: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - updateMask := []string{} - - if d.HasChange("notification_emails") { - updateMask = append(updateMask, "notificationEmails") - } - - if d.HasChange("enrolled_services") { - updateMask = append(updateMask, "enrolledServices") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_folder_access_approval_settings_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_folder_access_approval_settings_schema.TimeoutCreate)) - if err != nil { - return resource_folder_access_approval_settings_fmt.Errorf("Error creating FolderSettings: %s", err) - } - if err := d.Set("name", flattenAccessApprovalFolderSettingsName(res["name"], d, config)); err != nil { - return resource_folder_access_approval_settings_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "folders/{{folder_id}}/accessApprovalSettings") - if err != nil { - return resource_folder_access_approval_settings_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_folder_access_approval_settings_log.Printf("[DEBUG] Finished creating FolderSettings %q: %#v", d.Id(), res) - - return resourceAccessApprovalFolderSettingsRead(d, meta) -} - -func resourceAccessApprovalFolderSettingsRead(d 
*resource_folder_access_approval_settings_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}folders/{{folder_id}}/accessApprovalSettings") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_folder_access_approval_settings_fmt.Sprintf("AccessApprovalFolderSettings %q", d.Id())) - } - - if err := d.Set("name", flattenAccessApprovalFolderSettingsName(res["name"], d, config)); err != nil { - return resource_folder_access_approval_settings_fmt.Errorf("Error reading FolderSettings: %s", err) - } - if err := d.Set("notification_emails", flattenAccessApprovalFolderSettingsNotificationEmails(res["notificationEmails"], d, config)); err != nil { - return resource_folder_access_approval_settings_fmt.Errorf("Error reading FolderSettings: %s", err) - } - if err := d.Set("enrolled_services", flattenAccessApprovalFolderSettingsEnrolledServices(res["enrolledServices"], d, config)); err != nil { - return resource_folder_access_approval_settings_fmt.Errorf("Error reading FolderSettings: %s", err) - } - if err := d.Set("enrolled_ancestor", flattenAccessApprovalFolderSettingsEnrolledAncestor(res["enrolledAncestor"], d, config)); err != nil { - return resource_folder_access_approval_settings_fmt.Errorf("Error reading FolderSettings: %s", err) - } - - return nil -} - -func resourceAccessApprovalFolderSettingsUpdate(d *resource_folder_access_approval_settings_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := 
make(map[string]interface{}) - notificationEmailsProp, err := expandAccessApprovalFolderSettingsNotificationEmails(d.Get("notification_emails"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_emails"); !isEmptyValue(resource_folder_access_approval_settings_reflect.ValueOf(v)) && (ok || !resource_folder_access_approval_settings_reflect.DeepEqual(v, notificationEmailsProp)) { - obj["notificationEmails"] = notificationEmailsProp - } - enrolledServicesProp, err := expandAccessApprovalFolderSettingsEnrolledServices(d.Get("enrolled_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enrolled_services"); !isEmptyValue(resource_folder_access_approval_settings_reflect.ValueOf(v)) && (ok || !resource_folder_access_approval_settings_reflect.DeepEqual(v, enrolledServicesProp)) { - obj["enrolledServices"] = enrolledServicesProp - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}folders/{{folder_id}}/accessApprovalSettings") - if err != nil { - return err - } - - resource_folder_access_approval_settings_log.Printf("[DEBUG] Updating FolderSettings %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("notification_emails") { - updateMask = append(updateMask, "notificationEmails") - } - - if d.HasChange("enrolled_services") { - updateMask = append(updateMask, "enrolledServices") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_folder_access_approval_settings_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_folder_access_approval_settings_schema.TimeoutUpdate)) - - if err != nil { - return resource_folder_access_approval_settings_fmt.Errorf("Error updating FolderSettings %q: %s", d.Id(), err) - } else { - 
resource_folder_access_approval_settings_log.Printf("[DEBUG] Finished updating FolderSettings %q: %#v", d.Id(), res) - } - - return resourceAccessApprovalFolderSettingsRead(d, meta) -} - -func resourceAccessApprovalFolderSettingsDelete(d *resource_folder_access_approval_settings_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["notificationEmails"] = []string{} - obj["enrolledServices"] = []string{} - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}folders/{{folder_id}}/accessApprovalSettings") - if err != nil { - return err - } - - resource_folder_access_approval_settings_log.Printf("[DEBUG] Emptying FolderSettings %q: %#v", d.Id(), obj) - updateMask := []string{} - - updateMask = append(updateMask, "notificationEmails") - updateMask = append(updateMask, "enrolledServices") - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_folder_access_approval_settings_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - res, err := sendRequestWithTimeout(config, "PATCH", "", url, userAgent, obj, d.Timeout(resource_folder_access_approval_settings_schema.TimeoutUpdate)) - - if err != nil { - return resource_folder_access_approval_settings_fmt.Errorf("Error emptying FolderSettings %q: %s", d.Id(), err) - } else { - resource_folder_access_approval_settings_log.Printf("[DEBUG] Finished emptying FolderSettings %q: %#v", d.Id(), res) - } - - return nil -} - -func resourceAccessApprovalFolderSettingsImport(d *resource_folder_access_approval_settings_schema.ResourceData, meta interface{}) ([]*resource_folder_access_approval_settings_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "folders/(?P[^/]+)/accessApprovalSettings", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := 
replaceVars(d, config, "folders/{{folder_id}}/accessApprovalSettings") - if err != nil { - return nil, resource_folder_access_approval_settings_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_folder_access_approval_settings_schema.ResourceData{d}, nil -} - -func flattenAccessApprovalFolderSettingsName(v interface{}, d *resource_folder_access_approval_settings_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalFolderSettingsNotificationEmails(v interface{}, d *resource_folder_access_approval_settings_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_folder_access_approval_settings_schema.NewSet(resource_folder_access_approval_settings_schema.HashString, v.([]interface{})) -} - -func flattenAccessApprovalFolderSettingsEnrolledServices(v interface{}, d *resource_folder_access_approval_settings_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_folder_access_approval_settings_schema.NewSet(accessApprovalEnrolledServicesHash, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "cloud_product": flattenAccessApprovalFolderSettingsEnrolledServicesCloudProduct(original["cloudProduct"], d, config), - "enrollment_level": flattenAccessApprovalFolderSettingsEnrolledServicesEnrollmentLevel(original["enrollmentLevel"], d, config), - }) - } - return transformed -} - -func flattenAccessApprovalFolderSettingsEnrolledServicesCloudProduct(v interface{}, d *resource_folder_access_approval_settings_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalFolderSettingsEnrolledServicesEnrollmentLevel(v interface{}, d *resource_folder_access_approval_settings_schema.ResourceData, config *Config) interface{} { - return v -} - 
-func flattenAccessApprovalFolderSettingsEnrolledAncestor(v interface{}, d *resource_folder_access_approval_settings_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAccessApprovalFolderSettingsNotificationEmails(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_folder_access_approval_settings_schema.Set).List() - return v, nil -} - -func expandAccessApprovalFolderSettingsEnrolledServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_folder_access_approval_settings_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCloudProduct, err := expandAccessApprovalFolderSettingsEnrolledServicesCloudProduct(original["cloud_product"], d, config) - if err != nil { - return nil, err - } else if val := resource_folder_access_approval_settings_reflect.ValueOf(transformedCloudProduct); val.IsValid() && !isEmptyValue(val) { - transformed["cloudProduct"] = transformedCloudProduct - } - - transformedEnrollmentLevel, err := expandAccessApprovalFolderSettingsEnrolledServicesEnrollmentLevel(original["enrollment_level"], d, config) - if err != nil { - return nil, err - } else if val := resource_folder_access_approval_settings_reflect.ValueOf(transformedEnrollmentLevel); val.IsValid() && !isEmptyValue(val) { - transformed["enrollmentLevel"] = transformedEnrollmentLevel - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessApprovalFolderSettingsEnrolledServicesCloudProduct(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessApprovalFolderSettingsEnrolledServicesEnrollmentLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - 
-func suppressSuffixDiff(_, old, new string, _ *resource_game_services_game_server_cluster_schema.ResourceData) bool { - if resource_game_services_game_server_cluster_strings.HasSuffix(old, new) { - resource_game_services_game_server_cluster_log.Printf("[INFO] suppressing diff as %s is the same as the full path of %s", new, old) - return true - } - - return false -} - -func resourceGameServicesGameServerCluster() *resource_game_services_game_server_cluster_schema.Resource { - return &resource_game_services_game_server_cluster_schema.Resource{ - Create: resourceGameServicesGameServerClusterCreate, - Read: resourceGameServicesGameServerClusterRead, - Update: resourceGameServicesGameServerClusterUpdate, - Delete: resourceGameServicesGameServerClusterDelete, - - Importer: &resource_game_services_game_server_cluster_schema.ResourceImporter{ - State: resourceGameServicesGameServerClusterImport, - }, - - Timeouts: &resource_game_services_game_server_cluster_schema.ResourceTimeout{ - Create: resource_game_services_game_server_cluster_schema.DefaultTimeout(4 * resource_game_services_game_server_cluster_time.Minute), - Update: resource_game_services_game_server_cluster_schema.DefaultTimeout(4 * resource_game_services_game_server_cluster_time.Minute), - Delete: resource_game_services_game_server_cluster_schema.DefaultTimeout(4 * resource_game_services_game_server_cluster_time.Minute), - }, - - Schema: map[string]*resource_game_services_game_server_cluster_schema.Schema{ - "cluster_id": { - Type: resource_game_services_game_server_cluster_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Required. The resource name of the game server cluster`, - }, - "connection_info": { - Type: resource_game_services_game_server_cluster_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Game server cluster connection information. 
This information is used to -manage game server clusters.`, - MaxItems: 1, - Elem: &resource_game_services_game_server_cluster_schema.Resource{ - Schema: map[string]*resource_game_services_game_server_cluster_schema.Schema{ - "gke_cluster_reference": { - Type: resource_game_services_game_server_cluster_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Reference of the GKE cluster where the game servers are installed.`, - MaxItems: 1, - Elem: &resource_game_services_game_server_cluster_schema.Resource{ - Schema: map[string]*resource_game_services_game_server_cluster_schema.Schema{ - "cluster": { - Type: resource_game_services_game_server_cluster_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: suppressSuffixDiff, - Description: `The full or partial name of a GKE cluster, using one of the following -forms: - -* 'projects/{project_id}/locations/{location}/clusters/{cluster_id}' -* 'locations/{location}/clusters/{cluster_id}' -* '{cluster_id}' - -If project and location are not specified, the project and location of the -GameServerCluster resource are used to generate the full name of the -GKE cluster.`, - }, - }, - }, - }, - "namespace": { - Type: resource_game_services_game_server_cluster_schema.TypeString, - Required: true, - Description: `Namespace designated on the game server cluster where the game server -instances will be created. 
The namespace existence will be validated -during creation.`, - }, - }, - }, - }, - "realm_id": { - Type: resource_game_services_game_server_cluster_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The realm id of the game server realm.`, - }, - "description": { - Type: resource_game_services_game_server_cluster_schema.TypeString, - Optional: true, - Description: `Human readable description of the cluster.`, - }, - "labels": { - Type: resource_game_services_game_server_cluster_schema.TypeMap, - Optional: true, - Description: `The labels associated with this game server cluster. Each label is a -key-value pair.`, - Elem: &resource_game_services_game_server_cluster_schema.Schema{Type: resource_game_services_game_server_cluster_schema.TypeString}, - }, - "location": { - Type: resource_game_services_game_server_cluster_schema.TypeString, - Optional: true, - Description: `Location of the Cluster.`, - Default: "global", - }, - "name": { - Type: resource_game_services_game_server_cluster_schema.TypeString, - Computed: true, - Description: `The resource id of the game server cluster, eg: - -'projects/{project_id}/locations/{location}/realms/{realm_id}/gameServerClusters/{cluster_id}'. 
-For example, - -'projects/my-project/locations/{location}/realms/zanzibar/gameServerClusters/my-onprem-cluster'.`, - }, - "project": { - Type: resource_game_services_game_server_cluster_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGameServicesGameServerClusterCreate(d *resource_game_services_game_server_cluster_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandGameServicesGameServerClusterLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_game_services_game_server_cluster_reflect.ValueOf(labelsProp)) && (ok || !resource_game_services_game_server_cluster_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - connectionInfoProp, err := expandGameServicesGameServerClusterConnectionInfo(d.Get("connection_info"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("connection_info"); !isEmptyValue(resource_game_services_game_server_cluster_reflect.ValueOf(connectionInfoProp)) && (ok || !resource_game_services_game_server_cluster_reflect.DeepEqual(v, connectionInfoProp)) { - obj["connectionInfo"] = connectionInfoProp - } - descriptionProp, err := expandGameServicesGameServerClusterDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_game_services_game_server_cluster_reflect.ValueOf(descriptionProp)) && (ok || !resource_game_services_game_server_cluster_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, 
"{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters?gameServerClusterId={{cluster_id}}") - if err != nil { - return err - } - - resource_game_services_game_server_cluster_log.Printf("[DEBUG] Creating new GameServerCluster: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_game_services_game_server_cluster_fmt.Errorf("Error fetching project for GameServerCluster: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_game_services_game_server_cluster_schema.TimeoutCreate)) - if err != nil { - return resource_game_services_game_server_cluster_fmt.Errorf("Error creating GameServerCluster: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") - if err != nil { - return resource_game_services_game_server_cluster_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = gameServicesOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating GameServerCluster", userAgent, - d.Timeout(resource_game_services_game_server_cluster_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_game_services_game_server_cluster_fmt.Errorf("Error waiting to create GameServerCluster: %s", err) - } - - if err := d.Set("name", flattenGameServicesGameServerClusterName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") - if err != nil { - return resource_game_services_game_server_cluster_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - 
resource_game_services_game_server_cluster_log.Printf("[DEBUG] Finished creating GameServerCluster %q: %#v", d.Id(), res) - - return resourceGameServicesGameServerClusterRead(d, meta) -} - -func resourceGameServicesGameServerClusterRead(d *resource_game_services_game_server_cluster_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_game_services_game_server_cluster_fmt.Errorf("Error fetching project for GameServerCluster: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_game_services_game_server_cluster_fmt.Sprintf("GameServicesGameServerCluster %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_game_services_game_server_cluster_fmt.Errorf("Error reading GameServerCluster: %s", err) - } - - if err := d.Set("name", flattenGameServicesGameServerClusterName(res["name"], d, config)); err != nil { - return resource_game_services_game_server_cluster_fmt.Errorf("Error reading GameServerCluster: %s", err) - } - if err := d.Set("labels", flattenGameServicesGameServerClusterLabels(res["labels"], d, config)); err != nil { - return resource_game_services_game_server_cluster_fmt.Errorf("Error reading GameServerCluster: %s", err) - } - if err := d.Set("connection_info", flattenGameServicesGameServerClusterConnectionInfo(res["connectionInfo"], d, config)); err != nil { - return 
resource_game_services_game_server_cluster_fmt.Errorf("Error reading GameServerCluster: %s", err) - } - if err := d.Set("description", flattenGameServicesGameServerClusterDescription(res["description"], d, config)); err != nil { - return resource_game_services_game_server_cluster_fmt.Errorf("Error reading GameServerCluster: %s", err) - } - - return nil -} - -func resourceGameServicesGameServerClusterUpdate(d *resource_game_services_game_server_cluster_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_game_services_game_server_cluster_fmt.Errorf("Error fetching project for GameServerCluster: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - labelsProp, err := expandGameServicesGameServerClusterLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_game_services_game_server_cluster_reflect.ValueOf(v)) && (ok || !resource_game_services_game_server_cluster_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - descriptionProp, err := expandGameServicesGameServerClusterDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_game_services_game_server_cluster_reflect.ValueOf(v)) && (ok || !resource_game_services_game_server_cluster_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") - if err != nil { - return err - } - - resource_game_services_game_server_cluster_log.Printf("[DEBUG] Updating GameServerCluster %q: %#v", d.Id(), obj) - 
updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_game_services_game_server_cluster_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_game_services_game_server_cluster_schema.TimeoutUpdate)) - - if err != nil { - return resource_game_services_game_server_cluster_fmt.Errorf("Error updating GameServerCluster %q: %s", d.Id(), err) - } else { - resource_game_services_game_server_cluster_log.Printf("[DEBUG] Finished updating GameServerCluster %q: %#v", d.Id(), res) - } - - err = gameServicesOperationWaitTime( - config, res, project, "Updating GameServerCluster", userAgent, - d.Timeout(resource_game_services_game_server_cluster_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceGameServicesGameServerClusterRead(d, meta) -} - -func resourceGameServicesGameServerClusterDelete(d *resource_game_services_game_server_cluster_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_game_services_game_server_cluster_fmt.Errorf("Error fetching project for GameServerCluster: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_game_services_game_server_cluster_log.Printf("[DEBUG] 
Deleting GameServerCluster %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_game_services_game_server_cluster_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GameServerCluster") - } - - err = gameServicesOperationWaitTime( - config, res, project, "Deleting GameServerCluster", userAgent, - d.Timeout(resource_game_services_game_server_cluster_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_game_services_game_server_cluster_log.Printf("[DEBUG] Finished deleting GameServerCluster %q: %#v", d.Id(), res) - return nil -} - -func resourceGameServicesGameServerClusterImport(d *resource_game_services_game_server_cluster_schema.ResourceData, meta interface{}) ([]*resource_game_services_game_server_cluster_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/realms/(?P[^/]+)/gameServerClusters/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}/gameServerClusters/{{cluster_id}}") - if err != nil { - return nil, resource_game_services_game_server_cluster_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_game_services_game_server_cluster_schema.ResourceData{d}, nil -} - -func flattenGameServicesGameServerClusterName(v interface{}, d *resource_game_services_game_server_cluster_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerClusterLabels(v interface{}, d *resource_game_services_game_server_cluster_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenGameServicesGameServerClusterConnectionInfo(v interface{}, d *resource_game_services_game_server_cluster_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["gke_cluster_reference"] = - flattenGameServicesGameServerClusterConnectionInfoGkeClusterReference(original["gkeClusterReference"], d, config) - transformed["namespace"] = - flattenGameServicesGameServerClusterConnectionInfoNamespace(original["namespace"], d, config) - return []interface{}{transformed} -} - -func flattenGameServicesGameServerClusterConnectionInfoGkeClusterReference(v interface{}, d *resource_game_services_game_server_cluster_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cluster"] = - flattenGameServicesGameServerClusterConnectionInfoGkeClusterReferenceCluster(original["cluster"], d, config) - return []interface{}{transformed} -} - -func flattenGameServicesGameServerClusterConnectionInfoGkeClusterReferenceCluster(v interface{}, d *resource_game_services_game_server_cluster_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerClusterConnectionInfoNamespace(v interface{}, d *resource_game_services_game_server_cluster_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerClusterDescription(v interface{}, d *resource_game_services_game_server_cluster_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandGameServicesGameServerClusterLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := 
range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandGameServicesGameServerClusterConnectionInfo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedGkeClusterReference, err := expandGameServicesGameServerClusterConnectionInfoGkeClusterReference(original["gke_cluster_reference"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_cluster_reflect.ValueOf(transformedGkeClusterReference); val.IsValid() && !isEmptyValue(val) { - transformed["gkeClusterReference"] = transformedGkeClusterReference - } - - transformedNamespace, err := expandGameServicesGameServerClusterConnectionInfoNamespace(original["namespace"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_cluster_reflect.ValueOf(transformedNamespace); val.IsValid() && !isEmptyValue(val) { - transformed["namespace"] = transformedNamespace - } - - return transformed, nil -} - -func expandGameServicesGameServerClusterConnectionInfoGkeClusterReference(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCluster, err := expandGameServicesGameServerClusterConnectionInfoGkeClusterReferenceCluster(original["cluster"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_cluster_reflect.ValueOf(transformedCluster); val.IsValid() && !isEmptyValue(val) { - transformed["cluster"] = transformedCluster - } - - return transformed, nil -} - -func 
expandGameServicesGameServerClusterConnectionInfoGkeClusterReferenceCluster(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerClusterConnectionInfoNamespace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerClusterDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceGameServicesGameServerConfig() *resource_game_services_game_server_config_schema.Resource { - return &resource_game_services_game_server_config_schema.Resource{ - Create: resourceGameServicesGameServerConfigCreate, - Read: resourceGameServicesGameServerConfigRead, - Delete: resourceGameServicesGameServerConfigDelete, - - Importer: &resource_game_services_game_server_config_schema.ResourceImporter{ - State: resourceGameServicesGameServerConfigImport, - }, - - Timeouts: &resource_game_services_game_server_config_schema.ResourceTimeout{ - Create: resource_game_services_game_server_config_schema.DefaultTimeout(4 * resource_game_services_game_server_config_time.Minute), - Delete: resource_game_services_game_server_config_schema.DefaultTimeout(4 * resource_game_services_game_server_config_time.Minute), - }, - - Schema: map[string]*resource_game_services_game_server_config_schema.Schema{ - "config_id": { - Type: resource_game_services_game_server_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `A unique id for the deployment config.`, - }, - "deployment_id": { - Type: resource_game_services_game_server_config_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A unique id for the deployment.`, - }, - "fleet_configs": { - Type: resource_game_services_game_server_config_schema.TypeList, - Required: true, - ForceNew: true, - Description: `The fleet config contains list of fleet 
specs. In the Single Cloud, there -will be only one.`, - Elem: &resource_game_services_game_server_config_schema.Resource{ - Schema: map[string]*resource_game_services_game_server_config_schema.Schema{ - "fleet_spec": { - Type: resource_game_services_game_server_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The fleet spec, which is sent to Agones to configure fleet. -The spec can be passed as inline json but it is recommended to use a file reference -instead. File references can contain the json or yaml format of the fleet spec. Eg: - -* fleet_spec = jsonencode(yamldecode(file("fleet_configs.yaml"))) -* fleet_spec = file("fleet_configs.json") - -The format of the spec can be found : -'https://agones.dev/site/docs/reference/fleet/'.`, - }, - "name": { - Type: resource_game_services_game_server_config_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The name of the FleetConfig.`, - }, - }, - }, - }, - "description": { - Type: resource_game_services_game_server_config_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The description of the game server config.`, - }, - "labels": { - Type: resource_game_services_game_server_config_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `The labels associated with this game server config. Each label is a -key-value pair.`, - Elem: &resource_game_services_game_server_config_schema.Schema{Type: resource_game_services_game_server_config_schema.TypeString}, - }, - "location": { - Type: resource_game_services_game_server_config_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Location of the Deployment.`, - Default: "global", - }, - "scaling_configs": { - Type: resource_game_services_game_server_config_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Optional. 
This contains the autoscaling settings.`, - Elem: &resource_game_services_game_server_config_schema.Resource{ - Schema: map[string]*resource_game_services_game_server_config_schema.Schema{ - "fleet_autoscaler_spec": { - Type: resource_game_services_game_server_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Fleet autoscaler spec, which is sent to Agones. -Example spec can be found : -https://agones.dev/site/docs/reference/fleetautoscaler/`, - }, - "name": { - Type: resource_game_services_game_server_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the ScalingConfig`, - }, - "schedules": { - Type: resource_game_services_game_server_config_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The schedules to which this scaling config applies.`, - Elem: &resource_game_services_game_server_config_schema.Resource{ - Schema: map[string]*resource_game_services_game_server_config_schema.Schema{ - "cron_job_duration": { - Type: resource_game_services_game_server_config_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The duration for the cron job event. The duration of the event is effective -after the cron job's start time. - -A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, - }, - "cron_spec": { - Type: resource_game_services_game_server_config_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The cron definition of the scheduled event. See -https://en.wikipedia.org/wiki/Cron. Cron spec specifies the local time as -defined by the realm.`, - }, - "end_time": { - Type: resource_game_services_game_server_config_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The end time of the event. - -A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. 
Example: "2014-10-02T15:01:23.045123456Z".`, - }, - "start_time": { - Type: resource_game_services_game_server_config_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The start time of the event. - -A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - }, - "selectors": { - Type: resource_game_services_game_server_config_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Labels used to identify the clusters to which this scaling config -applies. A cluster is subject to this scaling config if its labels match -any of the selector entries.`, - Elem: &resource_game_services_game_server_config_schema.Resource{ - Schema: map[string]*resource_game_services_game_server_config_schema.Schema{ - "labels": { - Type: resource_game_services_game_server_config_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Set of labels to group by.`, - Elem: &resource_game_services_game_server_config_schema.Schema{Type: resource_game_services_game_server_config_schema.TypeString}, - }, - }, - }, - }, - }, - }, - }, - "name": { - Type: resource_game_services_game_server_config_schema.TypeString, - Computed: true, - Description: `The resource name of the game server config, in the form: - -'projects/{project_id}/locations/{location}/gameServerDeployments/{deployment_id}/configs/{config_id}'.`, - }, - "project": { - Type: resource_game_services_game_server_config_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGameServicesGameServerConfigCreate(d *resource_game_services_game_server_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := 
expandGameServicesGameServerConfigDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_game_services_game_server_config_reflect.ValueOf(descriptionProp)) && (ok || !resource_game_services_game_server_config_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandGameServicesGameServerConfigLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_game_services_game_server_config_reflect.ValueOf(labelsProp)) && (ok || !resource_game_services_game_server_config_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - fleetConfigsProp, err := expandGameServicesGameServerConfigFleetConfigs(d.Get("fleet_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("fleet_configs"); !isEmptyValue(resource_game_services_game_server_config_reflect.ValueOf(fleetConfigsProp)) && (ok || !resource_game_services_game_server_config_reflect.DeepEqual(v, fleetConfigsProp)) { - obj["fleetConfigs"] = fleetConfigsProp - } - scalingConfigsProp, err := expandGameServicesGameServerConfigScalingConfigs(d.Get("scaling_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("scaling_configs"); !isEmptyValue(resource_game_services_game_server_config_reflect.ValueOf(scalingConfigsProp)) && (ok || !resource_game_services_game_server_config_reflect.DeepEqual(v, scalingConfigsProp)) { - obj["scalingConfigs"] = scalingConfigsProp - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs?configId={{config_id}}") - if err != nil { - return err - } - - resource_game_services_game_server_config_log.Printf("[DEBUG] Creating new GameServerConfig: %#v", obj) - billingProject := "" - - project, err := 
getProject(d, config) - if err != nil { - return resource_game_services_game_server_config_fmt.Errorf("Error fetching project for GameServerConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_game_services_game_server_config_schema.TimeoutCreate)) - if err != nil { - return resource_game_services_game_server_config_fmt.Errorf("Error creating GameServerConfig: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs/{{config_id}}") - if err != nil { - return resource_game_services_game_server_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = gameServicesOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating GameServerConfig", userAgent, - d.Timeout(resource_game_services_game_server_config_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_game_services_game_server_config_fmt.Errorf("Error waiting to create GameServerConfig: %s", err) - } - - if err := d.Set("name", flattenGameServicesGameServerConfigName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs/{{config_id}}") - if err != nil { - return resource_game_services_game_server_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_game_services_game_server_config_log.Printf("[DEBUG] Finished creating GameServerConfig %q: %#v", d.Id(), res) - - return resourceGameServicesGameServerConfigRead(d, meta) -} - -func resourceGameServicesGameServerConfigRead(d *resource_game_services_game_server_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) 
- userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs/{{config_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_game_services_game_server_config_fmt.Errorf("Error fetching project for GameServerConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_game_services_game_server_config_fmt.Sprintf("GameServicesGameServerConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_game_services_game_server_config_fmt.Errorf("Error reading GameServerConfig: %s", err) - } - - if err := d.Set("name", flattenGameServicesGameServerConfigName(res["name"], d, config)); err != nil { - return resource_game_services_game_server_config_fmt.Errorf("Error reading GameServerConfig: %s", err) - } - if err := d.Set("description", flattenGameServicesGameServerConfigDescription(res["description"], d, config)); err != nil { - return resource_game_services_game_server_config_fmt.Errorf("Error reading GameServerConfig: %s", err) - } - if err := d.Set("labels", flattenGameServicesGameServerConfigLabels(res["labels"], d, config)); err != nil { - return resource_game_services_game_server_config_fmt.Errorf("Error reading GameServerConfig: %s", err) - } - if err := d.Set("fleet_configs", flattenGameServicesGameServerConfigFleetConfigs(res["fleetConfigs"], d, config)); err != nil { - return resource_game_services_game_server_config_fmt.Errorf("Error reading GameServerConfig: %s", err) - } - if err := d.Set("scaling_configs", 
flattenGameServicesGameServerConfigScalingConfigs(res["scalingConfigs"], d, config)); err != nil { - return resource_game_services_game_server_config_fmt.Errorf("Error reading GameServerConfig: %s", err) - } - - return nil -} - -func resourceGameServicesGameServerConfigDelete(d *resource_game_services_game_server_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_game_services_game_server_config_fmt.Errorf("Error fetching project for GameServerConfig: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs/{{config_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_game_services_game_server_config_log.Printf("[DEBUG] Deleting GameServerConfig %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_game_services_game_server_config_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GameServerConfig") - } - - err = gameServicesOperationWaitTime( - config, res, project, "Deleting GameServerConfig", userAgent, - d.Timeout(resource_game_services_game_server_config_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_game_services_game_server_config_log.Printf("[DEBUG] Finished deleting GameServerConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceGameServicesGameServerConfigImport(d *resource_game_services_game_server_config_schema.ResourceData, meta interface{}) ([]*resource_game_services_game_server_config_schema.ResourceData, error) { - config := 
meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/gameServerDeployments/(?P[^/]+)/configs/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}/configs/{{config_id}}") - if err != nil { - return nil, resource_game_services_game_server_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_game_services_game_server_config_schema.ResourceData{d}, nil -} - -func flattenGameServicesGameServerConfigName(v interface{}, d *resource_game_services_game_server_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerConfigDescription(v interface{}, d *resource_game_services_game_server_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerConfigLabels(v interface{}, d *resource_game_services_game_server_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerConfigFleetConfigs(v interface{}, d *resource_game_services_game_server_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "fleet_spec": flattenGameServicesGameServerConfigFleetConfigsFleetSpec(original["fleetSpec"], d, config), - "name": flattenGameServicesGameServerConfigFleetConfigsName(original["name"], d, config), - }) - } - return transformed -} - -func flattenGameServicesGameServerConfigFleetConfigsFleetSpec(v interface{}, d *resource_game_services_game_server_config_schema.ResourceData, 
config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerConfigFleetConfigsName(v interface{}, d *resource_game_services_game_server_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerConfigScalingConfigs(v interface{}, d *resource_game_services_game_server_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenGameServicesGameServerConfigScalingConfigsName(original["name"], d, config), - "fleet_autoscaler_spec": flattenGameServicesGameServerConfigScalingConfigsFleetAutoscalerSpec(original["fleetAutoscalerSpec"], d, config), - "selectors": flattenGameServicesGameServerConfigScalingConfigsSelectors(original["selectors"], d, config), - "schedules": flattenGameServicesGameServerConfigScalingConfigsSchedules(original["schedules"], d, config), - }) - } - return transformed -} - -func flattenGameServicesGameServerConfigScalingConfigsName(v interface{}, d *resource_game_services_game_server_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerConfigScalingConfigsFleetAutoscalerSpec(v interface{}, d *resource_game_services_game_server_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerConfigScalingConfigsSelectors(v interface{}, d *resource_game_services_game_server_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, 
map[string]interface{}{ - "labels": flattenGameServicesGameServerConfigScalingConfigsSelectorsLabels(original["labels"], d, config), - }) - } - return transformed -} - -func flattenGameServicesGameServerConfigScalingConfigsSelectorsLabels(v interface{}, d *resource_game_services_game_server_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerConfigScalingConfigsSchedules(v interface{}, d *resource_game_services_game_server_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "start_time": flattenGameServicesGameServerConfigScalingConfigsSchedulesStartTime(original["startTime"], d, config), - "end_time": flattenGameServicesGameServerConfigScalingConfigsSchedulesEndTime(original["endTime"], d, config), - "cron_job_duration": flattenGameServicesGameServerConfigScalingConfigsSchedulesCronJobDuration(original["cronJobDuration"], d, config), - "cron_spec": flattenGameServicesGameServerConfigScalingConfigsSchedulesCronSpec(original["cronSpec"], d, config), - }) - } - return transformed -} - -func flattenGameServicesGameServerConfigScalingConfigsSchedulesStartTime(v interface{}, d *resource_game_services_game_server_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerConfigScalingConfigsSchedulesEndTime(v interface{}, d *resource_game_services_game_server_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerConfigScalingConfigsSchedulesCronJobDuration(v interface{}, d *resource_game_services_game_server_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenGameServicesGameServerConfigScalingConfigsSchedulesCronSpec(v interface{}, d *resource_game_services_game_server_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandGameServicesGameServerConfigDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerConfigLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandGameServicesGameServerConfigFleetConfigs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFleetSpec, err := expandGameServicesGameServerConfigFleetConfigsFleetSpec(original["fleet_spec"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_config_reflect.ValueOf(transformedFleetSpec); val.IsValid() && !isEmptyValue(val) { - transformed["fleetSpec"] = transformedFleetSpec - } - - transformedName, err := expandGameServicesGameServerConfigFleetConfigsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_config_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandGameServicesGameServerConfigFleetConfigsFleetSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerConfigFleetConfigsName(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerConfigScalingConfigs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandGameServicesGameServerConfigScalingConfigsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_config_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedFleetAutoscalerSpec, err := expandGameServicesGameServerConfigScalingConfigsFleetAutoscalerSpec(original["fleet_autoscaler_spec"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_config_reflect.ValueOf(transformedFleetAutoscalerSpec); val.IsValid() && !isEmptyValue(val) { - transformed["fleetAutoscalerSpec"] = transformedFleetAutoscalerSpec - } - - transformedSelectors, err := expandGameServicesGameServerConfigScalingConfigsSelectors(original["selectors"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_config_reflect.ValueOf(transformedSelectors); val.IsValid() && !isEmptyValue(val) { - transformed["selectors"] = transformedSelectors - } - - transformedSchedules, err := expandGameServicesGameServerConfigScalingConfigsSchedules(original["schedules"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_config_reflect.ValueOf(transformedSchedules); val.IsValid() && !isEmptyValue(val) { - transformed["schedules"] = transformedSchedules - } - - req = append(req, transformed) - } - return req, nil -} - -func expandGameServicesGameServerConfigScalingConfigsName(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerConfigScalingConfigsFleetAutoscalerSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerConfigScalingConfigsSelectors(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLabels, err := expandGameServicesGameServerConfigScalingConfigsSelectorsLabels(original["labels"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_config_reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - req = append(req, transformed) - } - return req, nil -} - -func expandGameServicesGameServerConfigScalingConfigsSelectorsLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandGameServicesGameServerConfigScalingConfigsSchedules(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedStartTime, err := expandGameServicesGameServerConfigScalingConfigsSchedulesStartTime(original["start_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_config_reflect.ValueOf(transformedStartTime); val.IsValid() 
&& !isEmptyValue(val) { - transformed["startTime"] = transformedStartTime - } - - transformedEndTime, err := expandGameServicesGameServerConfigScalingConfigsSchedulesEndTime(original["end_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_config_reflect.ValueOf(transformedEndTime); val.IsValid() && !isEmptyValue(val) { - transformed["endTime"] = transformedEndTime - } - - transformedCronJobDuration, err := expandGameServicesGameServerConfigScalingConfigsSchedulesCronJobDuration(original["cron_job_duration"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_config_reflect.ValueOf(transformedCronJobDuration); val.IsValid() && !isEmptyValue(val) { - transformed["cronJobDuration"] = transformedCronJobDuration - } - - transformedCronSpec, err := expandGameServicesGameServerConfigScalingConfigsSchedulesCronSpec(original["cron_spec"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_config_reflect.ValueOf(transformedCronSpec); val.IsValid() && !isEmptyValue(val) { - transformed["cronSpec"] = transformedCronSpec - } - - req = append(req, transformed) - } - return req, nil -} - -func expandGameServicesGameServerConfigScalingConfigsSchedulesStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerConfigScalingConfigsSchedulesEndTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerConfigScalingConfigsSchedulesCronJobDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerConfigScalingConfigsSchedulesCronSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceGameServicesGameServerDeployment() 
*resource_game_services_game_server_deployment_schema.Resource { - return &resource_game_services_game_server_deployment_schema.Resource{ - Create: resourceGameServicesGameServerDeploymentCreate, - Read: resourceGameServicesGameServerDeploymentRead, - Update: resourceGameServicesGameServerDeploymentUpdate, - Delete: resourceGameServicesGameServerDeploymentDelete, - - Importer: &resource_game_services_game_server_deployment_schema.ResourceImporter{ - State: resourceGameServicesGameServerDeploymentImport, - }, - - Timeouts: &resource_game_services_game_server_deployment_schema.ResourceTimeout{ - Create: resource_game_services_game_server_deployment_schema.DefaultTimeout(4 * resource_game_services_game_server_deployment_time.Minute), - Update: resource_game_services_game_server_deployment_schema.DefaultTimeout(4 * resource_game_services_game_server_deployment_time.Minute), - Delete: resource_game_services_game_server_deployment_schema.DefaultTimeout(4 * resource_game_services_game_server_deployment_time.Minute), - }, - - Schema: map[string]*resource_game_services_game_server_deployment_schema.Schema{ - "deployment_id": { - Type: resource_game_services_game_server_deployment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `A unique id for the deployment.`, - }, - "description": { - Type: resource_game_services_game_server_deployment_schema.TypeString, - Optional: true, - Description: `Human readable description of the game server deployment.`, - }, - "labels": { - Type: resource_game_services_game_server_deployment_schema.TypeMap, - Optional: true, - Description: `The labels associated with this game server deployment. 
Each label is a -key-value pair.`, - Elem: &resource_game_services_game_server_deployment_schema.Schema{Type: resource_game_services_game_server_deployment_schema.TypeString}, - }, - "location": { - Type: resource_game_services_game_server_deployment_schema.TypeString, - Optional: true, - Description: `Location of the Deployment.`, - Default: "global", - }, - "name": { - Type: resource_game_services_game_server_deployment_schema.TypeString, - Computed: true, - Description: `The resource id of the game server deployment, eg: - -'projects/{project_id}/locations/{location}/gameServerDeployments/{deployment_id}'. -For example, - -'projects/my-project/locations/{location}/gameServerDeployments/my-deployment'.`, - }, - "project": { - Type: resource_game_services_game_server_deployment_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGameServicesGameServerDeploymentCreate(d *resource_game_services_game_server_deployment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandGameServicesGameServerDeploymentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_game_services_game_server_deployment_reflect.ValueOf(descriptionProp)) && (ok || !resource_game_services_game_server_deployment_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandGameServicesGameServerDeploymentLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_game_services_game_server_deployment_reflect.ValueOf(labelsProp)) && (ok || !resource_game_services_game_server_deployment_reflect.DeepEqual(v, labelsProp)) { 
- obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments?deploymentId={{deployment_id}}") - if err != nil { - return err - } - - resource_game_services_game_server_deployment_log.Printf("[DEBUG] Creating new GameServerDeployment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_game_services_game_server_deployment_fmt.Errorf("Error fetching project for GameServerDeployment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_game_services_game_server_deployment_schema.TimeoutCreate)) - if err != nil { - return resource_game_services_game_server_deployment_fmt.Errorf("Error creating GameServerDeployment: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") - if err != nil { - return resource_game_services_game_server_deployment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = gameServicesOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating GameServerDeployment", userAgent, - d.Timeout(resource_game_services_game_server_deployment_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_game_services_game_server_deployment_fmt.Errorf("Error waiting to create GameServerDeployment: %s", err) - } - - if err := d.Set("name", flattenGameServicesGameServerDeploymentName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") - if err != nil { - return resource_game_services_game_server_deployment_fmt.Errorf("Error 
constructing id: %s", err) - } - d.SetId(id) - - resource_game_services_game_server_deployment_log.Printf("[DEBUG] Finished creating GameServerDeployment %q: %#v", d.Id(), res) - - return resourceGameServicesGameServerDeploymentRead(d, meta) -} - -func resourceGameServicesGameServerDeploymentRead(d *resource_game_services_game_server_deployment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_game_services_game_server_deployment_fmt.Errorf("Error fetching project for GameServerDeployment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_game_services_game_server_deployment_fmt.Sprintf("GameServicesGameServerDeployment %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_game_services_game_server_deployment_fmt.Errorf("Error reading GameServerDeployment: %s", err) - } - - if err := d.Set("name", flattenGameServicesGameServerDeploymentName(res["name"], d, config)); err != nil { - return resource_game_services_game_server_deployment_fmt.Errorf("Error reading GameServerDeployment: %s", err) - } - if err := d.Set("description", flattenGameServicesGameServerDeploymentDescription(res["description"], d, config)); err != nil { - return resource_game_services_game_server_deployment_fmt.Errorf("Error reading GameServerDeployment: %s", err) - } - if err := d.Set("labels", 
flattenGameServicesGameServerDeploymentLabels(res["labels"], d, config)); err != nil { - return resource_game_services_game_server_deployment_fmt.Errorf("Error reading GameServerDeployment: %s", err) - } - - return nil -} - -func resourceGameServicesGameServerDeploymentUpdate(d *resource_game_services_game_server_deployment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_game_services_game_server_deployment_fmt.Errorf("Error fetching project for GameServerDeployment: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandGameServicesGameServerDeploymentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_game_services_game_server_deployment_reflect.ValueOf(v)) && (ok || !resource_game_services_game_server_deployment_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandGameServicesGameServerDeploymentLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_game_services_game_server_deployment_reflect.ValueOf(v)) && (ok || !resource_game_services_game_server_deployment_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") - if err != nil { - return err - } - - resource_game_services_game_server_deployment_log.Printf("[DEBUG] Updating GameServerDeployment %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - 
if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_game_services_game_server_deployment_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_game_services_game_server_deployment_schema.TimeoutUpdate)) - - if err != nil { - return resource_game_services_game_server_deployment_fmt.Errorf("Error updating GameServerDeployment %q: %s", d.Id(), err) - } else { - resource_game_services_game_server_deployment_log.Printf("[DEBUG] Finished updating GameServerDeployment %q: %#v", d.Id(), res) - } - - err = gameServicesOperationWaitTime( - config, res, project, "Updating GameServerDeployment", userAgent, - d.Timeout(resource_game_services_game_server_deployment_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceGameServicesGameServerDeploymentRead(d, meta) -} - -func resourceGameServicesGameServerDeploymentDelete(d *resource_game_services_game_server_deployment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_game_services_game_server_deployment_fmt.Errorf("Error fetching project for GameServerDeployment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_game_services_game_server_deployment_log.Printf("[DEBUG] Deleting GameServerDeployment %q", d.Id()) - - if bp, err := getBillingProject(d, config); 
err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_game_services_game_server_deployment_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GameServerDeployment") - } - - err = gameServicesOperationWaitTime( - config, res, project, "Deleting GameServerDeployment", userAgent, - d.Timeout(resource_game_services_game_server_deployment_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_game_services_game_server_deployment_log.Printf("[DEBUG] Finished deleting GameServerDeployment %q: %#v", d.Id(), res) - return nil -} - -func resourceGameServicesGameServerDeploymentImport(d *resource_game_services_game_server_deployment_schema.ResourceData, meta interface{}) ([]*resource_game_services_game_server_deployment_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/gameServerDeployments/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/gameServerDeployments/{{deployment_id}}") - if err != nil { - return nil, resource_game_services_game_server_deployment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_game_services_game_server_deployment_schema.ResourceData{d}, nil -} - -func flattenGameServicesGameServerDeploymentName(v interface{}, d *resource_game_services_game_server_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerDeploymentDescription(v interface{}, d *resource_game_services_game_server_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerDeploymentLabels(v interface{}, d 
*resource_game_services_game_server_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandGameServicesGameServerDeploymentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerDeploymentLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceGameServicesGameServerDeploymentRollout() *resource_game_services_game_server_deployment_rollout_schema.Resource { - return &resource_game_services_game_server_deployment_rollout_schema.Resource{ - Create: resourceGameServicesGameServerDeploymentRolloutCreate, - Read: resourceGameServicesGameServerDeploymentRolloutRead, - Update: resourceGameServicesGameServerDeploymentRolloutUpdate, - Delete: resourceGameServicesGameServerDeploymentRolloutDelete, - - Importer: &resource_game_services_game_server_deployment_rollout_schema.ResourceImporter{ - State: resourceGameServicesGameServerDeploymentRolloutImport, - }, - - Timeouts: &resource_game_services_game_server_deployment_rollout_schema.ResourceTimeout{ - Create: resource_game_services_game_server_deployment_rollout_schema.DefaultTimeout(4 * resource_game_services_game_server_deployment_rollout_time.Minute), - Update: resource_game_services_game_server_deployment_rollout_schema.DefaultTimeout(4 * resource_game_services_game_server_deployment_rollout_time.Minute), - Delete: resource_game_services_game_server_deployment_rollout_schema.DefaultTimeout(4 * resource_game_services_game_server_deployment_rollout_time.Minute), - }, - - Schema: map[string]*resource_game_services_game_server_deployment_rollout_schema.Schema{ - "default_game_server_config": { - Type: resource_game_services_game_server_deployment_rollout_schema.TypeString, - 
Required: true, - Description: `This field points to the game server config that is -applied by default to all realms and clusters. For example, - -'projects/my-project/locations/global/gameServerDeployments/my-game/configs/my-config'.`, - }, - "deployment_id": { - Type: resource_game_services_game_server_deployment_rollout_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The deployment to rollout the new config to. Only 1 rollout must be associated with each deployment.`, - }, - "game_server_config_overrides": { - Type: resource_game_services_game_server_deployment_rollout_schema.TypeList, - Optional: true, - Description: `The game_server_config_overrides contains the per game server config -overrides. The overrides are processed in the order they are listed. As -soon as a match is found for a cluster, the rest of the list is not -processed.`, - Elem: &resource_game_services_game_server_deployment_rollout_schema.Resource{ - Schema: map[string]*resource_game_services_game_server_deployment_rollout_schema.Schema{ - "config_version": { - Type: resource_game_services_game_server_deployment_rollout_schema.TypeString, - Optional: true, - Description: `Version of the configuration.`, - }, - "realms_selector": { - Type: resource_game_services_game_server_deployment_rollout_schema.TypeList, - Optional: true, - Description: `Selection by realms.`, - MaxItems: 1, - Elem: &resource_game_services_game_server_deployment_rollout_schema.Resource{ - Schema: map[string]*resource_game_services_game_server_deployment_rollout_schema.Schema{ - "realms": { - Type: resource_game_services_game_server_deployment_rollout_schema.TypeList, - Optional: true, - Description: `List of realms to match against.`, - Elem: &resource_game_services_game_server_deployment_rollout_schema.Schema{ - Type: resource_game_services_game_server_deployment_rollout_schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "name": { - Type: 
resource_game_services_game_server_deployment_rollout_schema.TypeString, - Computed: true, - Description: `The resource id of the game server deployment - -eg: 'projects/my-project/locations/global/gameServerDeployments/my-deployment/rollout'.`, - }, - "project": { - Type: resource_game_services_game_server_deployment_rollout_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGameServicesGameServerDeploymentRolloutCreate(d *resource_game_services_game_server_deployment_rollout_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout") - if err != nil { - return resource_game_services_game_server_deployment_rollout_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_game_services_game_server_deployment_rollout_log.Printf("[DEBUG] Creating GameServerDeploymentRollout %q: ", d.Id()) - - err = resourceGameServicesGameServerDeploymentRolloutUpdate(d, meta) - if err != nil { - d.SetId("") - return resource_game_services_game_server_deployment_rollout_fmt.Errorf("Error trying to create GameServerDeploymentRollout: %s", err) - } - - return nil -} - -func resourceGameServicesGameServerDeploymentRolloutRead(d *resource_game_services_game_server_deployment_rollout_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_game_services_game_server_deployment_rollout_fmt.Errorf("Error fetching project for GameServerDeploymentRollout: %s", 
err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_game_services_game_server_deployment_rollout_fmt.Sprintf("GameServicesGameServerDeploymentRollout %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_game_services_game_server_deployment_rollout_fmt.Errorf("Error reading GameServerDeploymentRollout: %s", err) - } - - if err := d.Set("name", flattenGameServicesGameServerDeploymentRolloutName(res["name"], d, config)); err != nil { - return resource_game_services_game_server_deployment_rollout_fmt.Errorf("Error reading GameServerDeploymentRollout: %s", err) - } - if err := d.Set("default_game_server_config", flattenGameServicesGameServerDeploymentRolloutDefaultGameServerConfig(res["defaultGameServerConfig"], d, config)); err != nil { - return resource_game_services_game_server_deployment_rollout_fmt.Errorf("Error reading GameServerDeploymentRollout: %s", err) - } - if err := d.Set("game_server_config_overrides", flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverrides(res["gameServerConfigOverrides"], d, config)); err != nil { - return resource_game_services_game_server_deployment_rollout_fmt.Errorf("Error reading GameServerDeploymentRollout: %s", err) - } - - return nil -} - -func resourceGameServicesGameServerDeploymentRolloutUpdate(d *resource_game_services_game_server_deployment_rollout_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_game_services_game_server_deployment_rollout_fmt.Errorf("Error fetching project for GameServerDeploymentRollout: %s", err) - } - billingProject 
= project - - obj := make(map[string]interface{}) - defaultGameServerConfigProp, err := expandGameServicesGameServerDeploymentRolloutDefaultGameServerConfig(d.Get("default_game_server_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_game_server_config"); !isEmptyValue(resource_game_services_game_server_deployment_rollout_reflect.ValueOf(v)) && (ok || !resource_game_services_game_server_deployment_rollout_reflect.DeepEqual(v, defaultGameServerConfigProp)) { - obj["defaultGameServerConfig"] = defaultGameServerConfigProp - } - gameServerConfigOverridesProp, err := expandGameServicesGameServerDeploymentRolloutGameServerConfigOverrides(d.Get("game_server_config_overrides"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("game_server_config_overrides"); !isEmptyValue(resource_game_services_game_server_deployment_rollout_reflect.ValueOf(v)) && (ok || !resource_game_services_game_server_deployment_rollout_reflect.DeepEqual(v, gameServerConfigOverridesProp)) { - obj["gameServerConfigOverrides"] = gameServerConfigOverridesProp - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout") - if err != nil { - return err - } - - resource_game_services_game_server_deployment_rollout_log.Printf("[DEBUG] Updating GameServerDeploymentRollout %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("default_game_server_config") { - updateMask = append(updateMask, "defaultGameServerConfig") - } - - if d.HasChange("game_server_config_overrides") { - updateMask = append(updateMask, "gameServerConfigOverrides") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_game_services_game_server_deployment_rollout_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_game_services_game_server_deployment_rollout_schema.TimeoutUpdate)) - - if err != nil { - return resource_game_services_game_server_deployment_rollout_fmt.Errorf("Error updating GameServerDeploymentRollout %q: %s", d.Id(), err) - } else { - resource_game_services_game_server_deployment_rollout_log.Printf("[DEBUG] Finished updating GameServerDeploymentRollout %q: %#v", d.Id(), res) - } - - err = gameServicesOperationWaitTime( - config, res, project, "Updating GameServerDeploymentRollout", userAgent, - d.Timeout(resource_game_services_game_server_deployment_rollout_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceGameServicesGameServerDeploymentRolloutRead(d, meta) -} - -func resourceGameServicesGameServerDeploymentRolloutDelete(d *resource_game_services_game_server_deployment_rollout_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_game_services_game_server_deployment_rollout_fmt.Errorf("Error fetching project for GameServerDeploymentRollout: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout?updateMask=defaultGameServerConfig") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_game_services_game_server_deployment_rollout_log.Printf("[DEBUG] Deleting GameServerDeploymentRollout %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, 
d.Timeout(resource_game_services_game_server_deployment_rollout_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "GameServerDeploymentRollout") - } - - err = gameServicesOperationWaitTime( - config, res, project, "Deleting GameServerDeploymentRollout", userAgent, - d.Timeout(resource_game_services_game_server_deployment_rollout_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_game_services_game_server_deployment_rollout_log.Printf("[DEBUG] Finished deleting GameServerDeploymentRollout %q: %#v", d.Id(), res) - return nil -} - -func resourceGameServicesGameServerDeploymentRolloutImport(d *resource_game_services_game_server_deployment_rollout_schema.ResourceData, meta interface{}) ([]*resource_game_services_game_server_deployment_rollout_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/gameServerDeployments/(?P[^/]+)/rollout", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/gameServerDeployments/{{deployment_id}}/rollout") - if err != nil { - return nil, resource_game_services_game_server_deployment_rollout_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_game_services_game_server_deployment_rollout_schema.ResourceData{d}, nil -} - -func flattenGameServicesGameServerDeploymentRolloutName(v interface{}, d *resource_game_services_game_server_deployment_rollout_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerDeploymentRolloutDefaultGameServerConfig(v interface{}, d *resource_game_services_game_server_deployment_rollout_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverrides(v interface{}, d 
*resource_game_services_game_server_deployment_rollout_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "realms_selector": flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelector(original["realmsSelector"], d, config), - "config_version": flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesConfigVersion(original["configVersion"], d, config), - }) - } - return transformed -} - -func flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelector(v interface{}, d *resource_game_services_game_server_deployment_rollout_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["realms"] = - flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelectorRealms(original["realms"], d, config) - return []interface{}{transformed} -} - -func flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelectorRealms(v interface{}, d *resource_game_services_game_server_deployment_rollout_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesGameServerDeploymentRolloutGameServerConfigOverridesConfigVersion(v interface{}, d *resource_game_services_game_server_deployment_rollout_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandGameServicesGameServerDeploymentRolloutDefaultGameServerConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerDeploymentRolloutGameServerConfigOverrides(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRealmsSelector, err := expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelector(original["realms_selector"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_deployment_rollout_reflect.ValueOf(transformedRealmsSelector); val.IsValid() && !isEmptyValue(val) { - transformed["realmsSelector"] = transformedRealmsSelector - } - - transformedConfigVersion, err := expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesConfigVersion(original["config_version"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_deployment_rollout_reflect.ValueOf(transformedConfigVersion); val.IsValid() && !isEmptyValue(val) { - transformed["configVersion"] = transformedConfigVersion - } - - req = append(req, transformed) - } - return req, nil -} - -func expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelector(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRealms, err := expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelectorRealms(original["realms"], d, config) - if err != nil { - return nil, err - } else if val := resource_game_services_game_server_deployment_rollout_reflect.ValueOf(transformedRealms); val.IsValid() && !isEmptyValue(val) { - transformed["realms"] = transformedRealms - } - - return transformed, nil -} - -func 
expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesRealmsSelectorRealms(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesGameServerDeploymentRolloutGameServerConfigOverridesConfigVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceGameServicesRealm() *resource_game_services_realm_schema.Resource { - return &resource_game_services_realm_schema.Resource{ - Create: resourceGameServicesRealmCreate, - Read: resourceGameServicesRealmRead, - Update: resourceGameServicesRealmUpdate, - Delete: resourceGameServicesRealmDelete, - - Importer: &resource_game_services_realm_schema.ResourceImporter{ - State: resourceGameServicesRealmImport, - }, - - Timeouts: &resource_game_services_realm_schema.ResourceTimeout{ - Create: resource_game_services_realm_schema.DefaultTimeout(4 * resource_game_services_realm_time.Minute), - Update: resource_game_services_realm_schema.DefaultTimeout(4 * resource_game_services_realm_time.Minute), - Delete: resource_game_services_realm_schema.DefaultTimeout(4 * resource_game_services_realm_time.Minute), - }, - - Schema: map[string]*resource_game_services_realm_schema.Schema{ - "realm_id": { - Type: resource_game_services_realm_schema.TypeString, - Required: true, - ForceNew: true, - Description: `GCP region of the Realm.`, - }, - "time_zone": { - Type: resource_game_services_realm_schema.TypeString, - Required: true, - Description: `Required. Time zone where all realm-specific policies are evaluated. 
The value of -this field must be from the IANA time zone database: -https://www.iana.org/time-zones.`, - }, - "description": { - Type: resource_game_services_realm_schema.TypeString, - Optional: true, - Description: `Human readable description of the realm.`, - }, - "labels": { - Type: resource_game_services_realm_schema.TypeMap, - Optional: true, - Description: `The labels associated with this realm. Each label is a key-value pair.`, - Elem: &resource_game_services_realm_schema.Schema{Type: resource_game_services_realm_schema.TypeString}, - }, - "location": { - Type: resource_game_services_realm_schema.TypeString, - Optional: true, - Description: `Location of the Realm.`, - Default: "global", - }, - "etag": { - Type: resource_game_services_realm_schema.TypeString, - Computed: true, - Description: `ETag of the resource.`, - }, - "name": { - Type: resource_game_services_realm_schema.TypeString, - Computed: true, - Description: `The resource id of the realm, of the form: -'projects/{project_id}/locations/{location}/realms/{realm_id}'. 
For -example, 'projects/my-project/locations/{location}/realms/my-realm'.`, - }, - "project": { - Type: resource_game_services_realm_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGameServicesRealmCreate(d *resource_game_services_realm_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandGameServicesRealmLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_game_services_realm_reflect.ValueOf(labelsProp)) && (ok || !resource_game_services_realm_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - timeZoneProp, err := expandGameServicesRealmTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(resource_game_services_realm_reflect.ValueOf(timeZoneProp)) && (ok || !resource_game_services_realm_reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - descriptionProp, err := expandGameServicesRealmDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_game_services_realm_reflect.ValueOf(descriptionProp)) && (ok || !resource_game_services_realm_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms?realmId={{realm_id}}") - if err != nil { - return err - } - - resource_game_services_realm_log.Printf("[DEBUG] Creating new Realm: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_game_services_realm_fmt.Errorf("Error 
fetching project for Realm: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_game_services_realm_schema.TimeoutCreate)) - if err != nil { - return resource_game_services_realm_fmt.Errorf("Error creating Realm: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") - if err != nil { - return resource_game_services_realm_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = gameServicesOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Realm", userAgent, - d.Timeout(resource_game_services_realm_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_game_services_realm_fmt.Errorf("Error waiting to create Realm: %s", err) - } - - if err := d.Set("name", flattenGameServicesRealmName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") - if err != nil { - return resource_game_services_realm_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_game_services_realm_log.Printf("[DEBUG] Finished creating Realm %q: %#v", d.Id(), res) - - return resourceGameServicesRealmRead(d, meta) -} - -func resourceGameServicesRealmRead(d *resource_game_services_realm_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return 
resource_game_services_realm_fmt.Errorf("Error fetching project for Realm: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_game_services_realm_fmt.Sprintf("GameServicesRealm %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_game_services_realm_fmt.Errorf("Error reading Realm: %s", err) - } - - if err := d.Set("name", flattenGameServicesRealmName(res["name"], d, config)); err != nil { - return resource_game_services_realm_fmt.Errorf("Error reading Realm: %s", err) - } - if err := d.Set("labels", flattenGameServicesRealmLabels(res["labels"], d, config)); err != nil { - return resource_game_services_realm_fmt.Errorf("Error reading Realm: %s", err) - } - if err := d.Set("time_zone", flattenGameServicesRealmTimeZone(res["timeZone"], d, config)); err != nil { - return resource_game_services_realm_fmt.Errorf("Error reading Realm: %s", err) - } - if err := d.Set("etag", flattenGameServicesRealmEtag(res["etag"], d, config)); err != nil { - return resource_game_services_realm_fmt.Errorf("Error reading Realm: %s", err) - } - if err := d.Set("description", flattenGameServicesRealmDescription(res["description"], d, config)); err != nil { - return resource_game_services_realm_fmt.Errorf("Error reading Realm: %s", err) - } - - return nil -} - -func resourceGameServicesRealmUpdate(d *resource_game_services_realm_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_game_services_realm_fmt.Errorf("Error fetching project for Realm: %s", err) - } - billingProject = project - - obj := 
make(map[string]interface{}) - labelsProp, err := expandGameServicesRealmLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_game_services_realm_reflect.ValueOf(v)) && (ok || !resource_game_services_realm_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - timeZoneProp, err := expandGameServicesRealmTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(resource_game_services_realm_reflect.ValueOf(v)) && (ok || !resource_game_services_realm_reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - descriptionProp, err := expandGameServicesRealmDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_game_services_realm_reflect.ValueOf(v)) && (ok || !resource_game_services_realm_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") - if err != nil { - return err - } - - resource_game_services_realm_log.Printf("[DEBUG] Updating Realm %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("time_zone") { - updateMask = append(updateMask, "timeZone") - } - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_game_services_realm_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, 
d.Timeout(resource_game_services_realm_schema.TimeoutUpdate)) - - if err != nil { - return resource_game_services_realm_fmt.Errorf("Error updating Realm %q: %s", d.Id(), err) - } else { - resource_game_services_realm_log.Printf("[DEBUG] Finished updating Realm %q: %#v", d.Id(), res) - } - - err = gameServicesOperationWaitTime( - config, res, project, "Updating Realm", userAgent, - d.Timeout(resource_game_services_realm_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceGameServicesRealmRead(d, meta) -} - -func resourceGameServicesRealmDelete(d *resource_game_services_realm_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_game_services_realm_fmt.Errorf("Error fetching project for Realm: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{GameServicesBasePath}}projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_game_services_realm_log.Printf("[DEBUG] Deleting Realm %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_game_services_realm_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Realm") - } - - err = gameServicesOperationWaitTime( - config, res, project, "Deleting Realm", userAgent, - d.Timeout(resource_game_services_realm_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_game_services_realm_log.Printf("[DEBUG] Finished deleting Realm %q: %#v", d.Id(), res) - return nil -} - -func resourceGameServicesRealmImport(d *resource_game_services_realm_schema.ResourceData, meta 
interface{}) ([]*resource_game_services_realm_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/realms/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/realms/{{realm_id}}") - if err != nil { - return nil, resource_game_services_realm_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_game_services_realm_schema.ResourceData{d}, nil -} - -func flattenGameServicesRealmName(v interface{}, d *resource_game_services_realm_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesRealmLabels(v interface{}, d *resource_game_services_realm_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesRealmTimeZone(v interface{}, d *resource_game_services_realm_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesRealmEtag(v interface{}, d *resource_game_services_realm_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGameServicesRealmDescription(v interface{}, d *resource_game_services_realm_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandGameServicesRealmLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandGameServicesRealmTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandGameServicesRealmDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
suppressGkeHubEndpointSelfLinkDiff(_, old, new string, _ *resource_gke_hub_membership_schema.ResourceData) bool { - - selfLink := resource_gke_hub_membership_strings.TrimPrefix(old, "//container.googleapis.com/") - if selfLink == new { - return true - } - - return false -} - -func resourceGKEHubMembership() *resource_gke_hub_membership_schema.Resource { - return &resource_gke_hub_membership_schema.Resource{ - Create: resourceGKEHubMembershipCreate, - Read: resourceGKEHubMembershipRead, - Update: resourceGKEHubMembershipUpdate, - Delete: resourceGKEHubMembershipDelete, - - Importer: &resource_gke_hub_membership_schema.ResourceImporter{ - State: resourceGKEHubMembershipImport, - }, - - Timeouts: &resource_gke_hub_membership_schema.ResourceTimeout{ - Create: resource_gke_hub_membership_schema.DefaultTimeout(4 * resource_gke_hub_membership_time.Minute), - Update: resource_gke_hub_membership_schema.DefaultTimeout(4 * resource_gke_hub_membership_time.Minute), - Delete: resource_gke_hub_membership_schema.DefaultTimeout(4 * resource_gke_hub_membership_time.Minute), - }, - - Schema: map[string]*resource_gke_hub_membership_schema.Schema{ - "membership_id": { - Type: resource_gke_hub_membership_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The client-provided identifier of the membership.`, - }, - "authority": { - Type: resource_gke_hub_membership_schema.TypeList, - Optional: true, - Description: `Authority encodes how Google will recognize identities from this Membership. -See the workload identity documentation for more details: -https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity`, - MaxItems: 1, - Elem: &resource_gke_hub_membership_schema.Resource{ - Schema: map[string]*resource_gke_hub_membership_schema.Schema{ - "issuer": { - Type: resource_gke_hub_membership_schema.TypeString, - Required: true, - Description: `A JSON Web Token (JWT) issuer URI. 
'issuer' must start with 'https://' and // be a valid -with length <2000 characters. For example: 'https://container.googleapis.com/v1/projects/my-project/locations/us-west1/clusters/my-cluster' (must be 'locations' rather than 'zones'). If the cluster is provisioned with Terraform, this is '"https://container.googleapis.com/v1/${google_container_cluster.my-cluster.id}"'.`, - }, - }, - }, - }, - "endpoint": { - Type: resource_gke_hub_membership_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `If this Membership is a Kubernetes API server hosted on GKE, this is a self link to its GCP resource.`, - MaxItems: 1, - Elem: &resource_gke_hub_membership_schema.Resource{ - Schema: map[string]*resource_gke_hub_membership_schema.Schema{ - "gke_cluster": { - Type: resource_gke_hub_membership_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `If this Membership is a Kubernetes API server hosted on GKE, this is a self link to its GCP resource.`, - MaxItems: 1, - Elem: &resource_gke_hub_membership_schema.Resource{ - Schema: map[string]*resource_gke_hub_membership_schema.Schema{ - "resource_link": { - Type: resource_gke_hub_membership_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: suppressGkeHubEndpointSelfLinkDiff, - Description: `Self-link of the GCP resource for the GKE cluster. -For example: '//container.googleapis.com/projects/my-project/zones/us-west1-a/clusters/my-cluster'. -It can be at the most 1000 characters in length. 
If the cluster is provisioned with Terraform, -this can be '"//container.googleapis.com/${google_container_cluster.my-cluster.id}"' or -'google_container_cluster.my-cluster.id'.`, - }, - }, - }, - }, - }, - }, - }, - "labels": { - Type: resource_gke_hub_membership_schema.TypeMap, - Optional: true, - Description: `Labels to apply to this membership.`, - Elem: &resource_gke_hub_membership_schema.Schema{Type: resource_gke_hub_membership_schema.TypeString}, - }, - "name": { - Type: resource_gke_hub_membership_schema.TypeString, - Computed: true, - Description: `The unique identifier of the membership.`, - }, - "project": { - Type: resource_gke_hub_membership_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGKEHubMembershipCreate(d *resource_gke_hub_membership_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandGKEHubMembershipLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_gke_hub_membership_reflect.ValueOf(labelsProp)) && (ok || !resource_gke_hub_membership_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - endpointProp, err := expandGKEHubMembershipEndpoint(d.Get("endpoint"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("endpoint"); !isEmptyValue(resource_gke_hub_membership_reflect.ValueOf(endpointProp)) && (ok || !resource_gke_hub_membership_reflect.DeepEqual(v, endpointProp)) { - obj["endpoint"] = endpointProp - } - authorityProp, err := expandGKEHubMembershipAuthority(d.Get("authority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("authority"); !isEmptyValue(resource_gke_hub_membership_reflect.ValueOf(authorityProp)) && (ok || 
!resource_gke_hub_membership_reflect.DeepEqual(v, authorityProp)) { - obj["authority"] = authorityProp - } - - url, err := replaceVars(d, config, "{{GKEHubBasePath}}projects/{{project}}/locations/global/memberships?membershipId={{membership_id}}") - if err != nil { - return err - } - - resource_gke_hub_membership_log.Printf("[DEBUG] Creating new Membership: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_gke_hub_membership_fmt.Errorf("Error fetching project for Membership: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_gke_hub_membership_schema.TimeoutCreate)) - if err != nil { - return resource_gke_hub_membership_fmt.Errorf("Error creating Membership: %s", err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_gke_hub_membership_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = gKEHubOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Membership", userAgent, - d.Timeout(resource_gke_hub_membership_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_gke_hub_membership_fmt.Errorf("Error waiting to create Membership: %s", err) - } - - if err := d.Set("name", flattenGKEHubMembershipName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{name}}") - if err != nil { - return resource_gke_hub_membership_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_gke_hub_membership_log.Printf("[DEBUG] Finished creating Membership %q: %#v", d.Id(), res) - - return resourceGKEHubMembershipRead(d, meta) -} - -func resourceGKEHubMembershipRead(d *resource_gke_hub_membership_schema.ResourceData, meta interface{}) error { - 
config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{GKEHubBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_gke_hub_membership_fmt.Errorf("Error fetching project for Membership: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_gke_hub_membership_fmt.Sprintf("GKEHubMembership %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_gke_hub_membership_fmt.Errorf("Error reading Membership: %s", err) - } - - if err := d.Set("name", flattenGKEHubMembershipName(res["name"], d, config)); err != nil { - return resource_gke_hub_membership_fmt.Errorf("Error reading Membership: %s", err) - } - if err := d.Set("labels", flattenGKEHubMembershipLabels(res["labels"], d, config)); err != nil { - return resource_gke_hub_membership_fmt.Errorf("Error reading Membership: %s", err) - } - if err := d.Set("endpoint", flattenGKEHubMembershipEndpoint(res["endpoint"], d, config)); err != nil { - return resource_gke_hub_membership_fmt.Errorf("Error reading Membership: %s", err) - } - if err := d.Set("authority", flattenGKEHubMembershipAuthority(res["authority"], d, config)); err != nil { - return resource_gke_hub_membership_fmt.Errorf("Error reading Membership: %s", err) - } - - return nil -} - -func resourceGKEHubMembershipUpdate(d *resource_gke_hub_membership_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - 
return resource_gke_hub_membership_fmt.Errorf("Error fetching project for Membership: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - labelsProp, err := expandGKEHubMembershipLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_gke_hub_membership_reflect.ValueOf(v)) && (ok || !resource_gke_hub_membership_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - authorityProp, err := expandGKEHubMembershipAuthority(d.Get("authority"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("authority"); !isEmptyValue(resource_gke_hub_membership_reflect.ValueOf(v)) && (ok || !resource_gke_hub_membership_reflect.DeepEqual(v, authorityProp)) { - obj["authority"] = authorityProp - } - - url, err := replaceVars(d, config, "{{GKEHubBasePath}}projects/{{project}}/locations/global/memberships/{{membership_id}}") - if err != nil { - return err - } - - resource_gke_hub_membership_log.Printf("[DEBUG] Updating Membership %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("authority") { - updateMask = append(updateMask, "authority") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_gke_hub_membership_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_gke_hub_membership_schema.TimeoutUpdate)) - - if err != nil { - return resource_gke_hub_membership_fmt.Errorf("Error updating Membership %q: %s", d.Id(), err) - } else { - resource_gke_hub_membership_log.Printf("[DEBUG] Finished updating Membership %q: %#v", d.Id(), res) - } - - err = gKEHubOperationWaitTime( - config, res, project, "Updating 
Membership", userAgent, - d.Timeout(resource_gke_hub_membership_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceGKEHubMembershipRead(d, meta) -} - -func resourceGKEHubMembershipDelete(d *resource_gke_hub_membership_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_gke_hub_membership_fmt.Errorf("Error fetching project for Membership: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{GKEHubBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_gke_hub_membership_log.Printf("[DEBUG] Deleting Membership %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_gke_hub_membership_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Membership") - } - - err = gKEHubOperationWaitTime( - config, res, project, "Deleting Membership", userAgent, - d.Timeout(resource_gke_hub_membership_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_gke_hub_membership_log.Printf("[DEBUG] Finished deleting Membership %q: %#v", d.Id(), res) - return nil -} - -func resourceGKEHubMembershipImport(d *resource_gke_hub_membership_schema.ResourceData, meta interface{}) ([]*resource_gke_hub_membership_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P.+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return nil, resource_gke_hub_membership_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return 
[]*resource_gke_hub_membership_schema.ResourceData{d}, nil -} - -func flattenGKEHubMembershipName(v interface{}, d *resource_gke_hub_membership_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEHubMembershipLabels(v interface{}, d *resource_gke_hub_membership_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEHubMembershipEndpoint(v interface{}, d *resource_gke_hub_membership_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["gke_cluster"] = - flattenGKEHubMembershipEndpointGkeCluster(original["gkeCluster"], d, config) - return []interface{}{transformed} -} - -func flattenGKEHubMembershipEndpointGkeCluster(v interface{}, d *resource_gke_hub_membership_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resource_link"] = - flattenGKEHubMembershipEndpointGkeClusterResourceLink(original["resourceLink"], d, config) - return []interface{}{transformed} -} - -func flattenGKEHubMembershipEndpointGkeClusterResourceLink(v interface{}, d *resource_gke_hub_membership_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenGKEHubMembershipAuthority(v interface{}, d *resource_gke_hub_membership_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["issuer"] = - flattenGKEHubMembershipAuthorityIssuer(original["issuer"], d, config) - return []interface{}{transformed} -} - -func flattenGKEHubMembershipAuthorityIssuer(v interface{}, d 
*resource_gke_hub_membership_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandGKEHubMembershipLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandGKEHubMembershipEndpoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedGkeCluster, err := expandGKEHubMembershipEndpointGkeCluster(original["gke_cluster"], d, config) - if err != nil { - return nil, err - } else if val := resource_gke_hub_membership_reflect.ValueOf(transformedGkeCluster); val.IsValid() && !isEmptyValue(val) { - transformed["gkeCluster"] = transformedGkeCluster - } - - return transformed, nil -} - -func expandGKEHubMembershipEndpointGkeCluster(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResourceLink, err := expandGKEHubMembershipEndpointGkeClusterResourceLink(original["resource_link"], d, config) - if err != nil { - return nil, err - } else if val := resource_gke_hub_membership_reflect.ValueOf(transformedResourceLink); val.IsValid() && !isEmptyValue(val) { - transformed["resourceLink"] = transformedResourceLink - } - - return transformed, nil -} - -func expandGKEHubMembershipEndpointGkeClusterResourceLink(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if resource_gke_hub_membership_strings.HasPrefix(v.(string), "//") { - return v, nil - } else { - v = 
"//container.googleapis.com/" + v.(string) - return v, nil - } -} - -func expandGKEHubMembershipAuthority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIssuer, err := expandGKEHubMembershipAuthorityIssuer(original["issuer"], d, config) - if err != nil { - return nil, err - } else if val := resource_gke_hub_membership_reflect.ValueOf(transformedIssuer); val.IsValid() && !isEmptyValue(val) { - transformed["issuer"] = transformedIssuer - } - - return transformed, nil -} - -func expandGKEHubMembershipAuthorityIssuer(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceBillingSubaccount() *resource_google_billing_subaccount_schema.Resource { - return &resource_google_billing_subaccount_schema.Resource{ - Create: resourceBillingSubaccountCreate, - Read: resourceBillingSubaccountRead, - Delete: resourceBillingSubaccountDelete, - Update: resourceBillingSubaccountUpdate, - - Importer: &resource_google_billing_subaccount_schema.ResourceImporter{ - State: resource_google_billing_subaccount_schema.ImportStatePassthrough, - }, - - Schema: map[string]*resource_google_billing_subaccount_schema.Schema{ - "display_name": { - Type: resource_google_billing_subaccount_schema.TypeString, - Required: true, - }, - "master_billing_account": { - Type: resource_google_billing_subaccount_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - }, - "deletion_policy": { - Type: resource_google_billing_subaccount_schema.TypeString, - Optional: true, - Default: "", - ValidateFunc: resource_google_billing_subaccount_validation.StringInSlice([]string{"RENAME_ON_DESTROY", ""}, false), - }, - "billing_account_id": { - Type: 
resource_google_billing_subaccount_schema.TypeString, - Computed: true, - }, - "name": { - Type: resource_google_billing_subaccount_schema.TypeString, - Computed: true, - }, - "open": { - Type: resource_google_billing_subaccount_schema.TypeBool, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceBillingSubaccountCreate(d *resource_google_billing_subaccount_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - displayName := d.Get("display_name").(string) - masterBillingAccount := d.Get("master_billing_account").(string) - - billingAccount := &resource_google_billing_subaccount_cloudbilling.BillingAccount{ - DisplayName: displayName, - MasterBillingAccount: canonicalBillingAccountName(masterBillingAccount), - } - - res, err := config.NewBillingClient(userAgent).BillingAccounts.Create(billingAccount).Do() - if err != nil { - return resource_google_billing_subaccount_fmt.Errorf("Error creating billing subaccount '%s' in master account '%s': %s", displayName, masterBillingAccount, err) - } - - d.SetId(res.Name) - - return resourceBillingSubaccountRead(d, meta) -} - -func resourceBillingSubaccountRead(d *resource_google_billing_subaccount_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - id := d.Id() - - billingAccount, err := config.NewBillingClient(userAgent).BillingAccounts.Get(d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, resource_google_billing_subaccount_fmt.Sprintf("Billing Subaccount Not Found : %s", id)) - } - - if err := d.Set("name", billingAccount.Name); err != nil { - return resource_google_billing_subaccount_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("display_name", billingAccount.DisplayName); err != nil { - return 
resource_google_billing_subaccount_fmt.Errorf("Error setting display_name: %s", err)
	}
	if err := d.Set("open", billingAccount.Open); err != nil {
		return resource_google_billing_subaccount_fmt.Errorf("Error setting open: %s", err)
	}
	if err := d.Set("master_billing_account", billingAccount.MasterBillingAccount); err != nil {
		return resource_google_billing_subaccount_fmt.Errorf("Error setting master_billing_account: %s", err)
	}
	if err := d.Set("billing_account_id", resource_google_billing_subaccount_strings.TrimPrefix(d.Get("name").(string), "billingAccounts/")); err != nil {
		return resource_google_billing_subaccount_fmt.Errorf("Error setting billing_account_id: %s", err)
	}

	return nil
}

// resourceBillingSubaccountUpdate patches the subaccount's display_name when it changed,
// then re-reads state.
func resourceBillingSubaccountUpdate(d *resource_google_billing_subaccount_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	if ok := d.HasChange("display_name"); ok {
		billingAccount := &resource_google_billing_subaccount_cloudbilling.BillingAccount{
			DisplayName: d.Get("display_name").(string),
		}
		_, err := config.NewBillingClient(userAgent).BillingAccounts.Patch(d.Id(), billingAccount).UpdateMask("display_name").Do()
		if err != nil {
			return handleNotFoundError(err, d, resource_google_billing_subaccount_fmt.Sprintf("Error updating billing account : %s", d.Id()))
		}
	}
	return resourceBillingSubaccountRead(d, meta)
}

// resourceBillingSubaccountDelete removes the resource from state. Billing subaccounts
// cannot actually be deleted; with deletion_policy == "RENAME_ON_DESTROY" the account is
// renamed with a timestamp so it is visibly retired.
func resourceBillingSubaccountDelete(d *resource_google_billing_subaccount_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	deletionPolicy := d.Get("deletion_policy").(string)

	if deletionPolicy == "RENAME_ON_DESTROY" {
		t := resource_google_billing_subaccount_time.Now()
		billingAccount := &resource_google_billing_subaccount_cloudbilling.BillingAccount{
			DisplayName: "Terraform Destroyed " + t.Format("20060102150405"),
		}
		_, err := config.NewBillingClient(userAgent).BillingAccounts.Patch(d.Id(), billingAccount).UpdateMask("display_name").Do()
		if err != nil {
			return handleNotFoundError(err, d, resource_google_billing_subaccount_fmt.Sprintf("Error updating billing account : %s", d.Id()))
		}
	}

	d.SetId("")

	return nil
}

// resourceGoogleFolder returns the schema for the google_folder resource.
func resourceGoogleFolder() *resource_google_folder_schema.Resource {
	return &resource_google_folder_schema.Resource{
		Create: resourceGoogleFolderCreate,
		Read:   resourceGoogleFolderRead,
		Update: resourceGoogleFolderUpdate,
		Delete: resourceGoogleFolderDelete,

		Importer: &resource_google_folder_schema.ResourceImporter{
			State: resourceGoogleFolderImportState,
		},

		Timeouts: &resource_google_folder_schema.ResourceTimeout{
			Create: resource_google_folder_schema.DefaultTimeout(4 * resource_google_folder_time.Minute),
			Update: resource_google_folder_schema.DefaultTimeout(4 * resource_google_folder_time.Minute),
			Read:   resource_google_folder_schema.DefaultTimeout(4 * resource_google_folder_time.Minute),
			Delete: resource_google_folder_schema.DefaultTimeout(4 * resource_google_folder_time.Minute),
		},

		Schema: map[string]*resource_google_folder_schema.Schema{

			"parent": {
				Type:        resource_google_folder_schema.TypeString,
				Required:    true,
				Description: `The resource name of the parent Folder or Organization. Must be of the form folders/{folder_id} or organizations/{org_id}.`,
			},

			"display_name": {
				Type:        resource_google_folder_schema.TypeString,
				Required:    true,
				Description: `The folder's display name. A folder's display name must be unique amongst its siblings, e.g. no two folders with the same parent can share the same display name. The display name must start and end with a letter or digit, may contain letters, digits, spaces, hyphens and underscores and can be no longer than 30 characters.`,
			},
			"folder_id": {
				Type:        resource_google_folder_schema.TypeString,
				Computed:    true,
				Description: `The folder id from the name "folders/{folder_id}"`,
			},

			"name": {
				Type:        resource_google_folder_schema.TypeString,
				Computed:    true,
				Description: `The resource name of the Folder. Its format is folders/{folder_id}.`,
			},
			"lifecycle_state": {
				Type:        resource_google_folder_schema.TypeString,
				Computed:    true,
				Description: `The lifecycle state of the folder such as ACTIVE or DELETE_REQUESTED.`,
			},
			"create_time": {
				Type:        resource_google_folder_schema.TypeString,
				Computed:    true,
				Description: `Timestamp when the Folder was created. Assigned by the server. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".`,
			},
		},
		UseJSONNumber: true,
	}
}

// resourceGoogleFolderCreate creates the folder, waits on the long-running operation,
// then resolves the new folder's name from the operation response to use as the ID.
func resourceGoogleFolderCreate(d *resource_google_folder_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	displayName := d.Get("display_name").(string)
	parent := d.Get("parent").(string)

	var op *resource_google_folder_cloudresourcemanagerresourceManagerV2.Operation
	err = retryTimeDuration(func() error {
		var reqErr error
		op, reqErr = config.NewResourceManagerV2Client(userAgent).Folders.Create(&resource_google_folder_cloudresourcemanagerresourceManagerV2.Folder{
			DisplayName: displayName,
		}).Parent(parent).Do()
		return reqErr
	}, d.Timeout(resource_google_folder_schema.TimeoutCreate))
	if err != nil {
		return resource_google_folder_fmt.Errorf("Error creating folder '%s' in '%s': %s", displayName, parent, err)
	}

	opAsMap, err := ConvertToMap(op)
	if err != nil {
		return err
	}

	err = resourceManagerOperationWaitTime(config, opAsMap, "creating folder", userAgent, d.Timeout(resource_google_folder_schema.TimeoutCreate))
	if err != nil {
		return resource_google_folder_fmt.Errorf("Error creating folder '%s' in '%s': %s", displayName, parent, err)
	}

	// The operation has completed; fetch it again to read the folder name out of the response.
	waitOp, err := config.NewResourceManagerClient(userAgent).Operations.Get(op.Name).Do()
	if err != nil {
		return resource_google_folder_fmt.Errorf("The folder '%s' has been created but we could not retrieve its id. Delete the folder manually and retry or use 'terraform import': %s", displayName, err)
	}

	var responseMap map[string]interface{}
	if err := resource_google_folder_json.Unmarshal(waitOp.Response, &responseMap); err == nil {
		if val, ok := responseMap["name"]; ok {
			if name, ok := val.(string); ok {
				d.SetId(name)
				return resourceGoogleFolderRead(d, meta)
			}
		}
	}
	return resource_google_folder_fmt.Errorf("The folder '%s' has been created but we could not retrieve its id. Delete the folder manually and retry or use 'terraform import'", displayName)
}

// resourceGoogleFolderRead refreshes all folder attributes from the API.
func resourceGoogleFolderRead(d *resource_google_folder_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	folder, err := getGoogleFolder(d.Id(), userAgent, d, config)
	if err != nil {
		return handleNotFoundError(err, d, resource_google_folder_fmt.Sprintf("Folder Not Found : %s", d.Id()))
	}

	if err := d.Set("name", folder.Name); err != nil {
		return resource_google_folder_fmt.Errorf("Error setting name: %s", err)
	}
	folderId := resource_google_folder_strings.TrimPrefix(folder.Name, "folders/")
	if err := d.Set("folder_id", folderId); err != nil {
		return resource_google_folder_fmt.Errorf("Error setting folder_id: %s", err)
	}
	if err := d.Set("parent", folder.Parent); err != nil {
		return resource_google_folder_fmt.Errorf("Error setting parent: %s", err)
	}
	if err := d.Set("display_name", folder.DisplayName); err != nil {
		return resource_google_folder_fmt.Errorf("Error setting display_name: %s", err)
	}
	if err := d.Set("lifecycle_state", folder.LifecycleState); err != nil {
		return resource_google_folder_fmt.Errorf("Error setting lifecycle_state: %s", err)
	}
	if err := d.Set("create_time", folder.CreateTime); err != nil {
		return resource_google_folder_fmt.Errorf("Error setting create_time: %s", err)
	}

	return nil
}

// resourceGoogleFolderUpdate renames the folder and/or moves it to a new parent,
// waiting on the move's long-running operation.
func resourceGoogleFolderUpdate(d *resource_google_folder_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}
	displayName := d.Get("display_name").(string)

	d.Partial(true)
	if d.HasChange("display_name") {
		err := retry(func() error {
			_, reqErr := config.NewResourceManagerV2Client(userAgent).Folders.Patch(d.Id(), &resource_google_folder_cloudresourcemanagerresourceManagerV2.Folder{
				DisplayName: displayName,
			}).Do()
			return reqErr
		})
		if err != nil {
			return resource_google_folder_fmt.Errorf("Error updating display_name to '%s': %s", displayName, err)
		}
	}

	if d.HasChange("parent") {
		newParent := d.Get("parent").(string)

		var op *resource_google_folder_cloudresourcemanagerresourceManagerV2.Operation
		err := retry(func() error {
			var reqErr error
			op, reqErr = config.NewResourceManagerV2Client(userAgent).Folders.Move(d.Id(), &resource_google_folder_cloudresourcemanagerresourceManagerV2.MoveFolderRequest{
				DestinationParent: newParent,
			}).Do()
			return reqErr
		})
		if err != nil {
			return resource_google_folder_fmt.Errorf("Error moving folder '%s' to '%s': %s", displayName, newParent, err)
		}

		opAsMap, err := ConvertToMap(op)
		if err != nil {
			return err
		}

		err = resourceManagerOperationWaitTime(config, opAsMap, "move folder", userAgent, d.Timeout(resource_google_folder_schema.TimeoutUpdate))
		if err != nil {
			return resource_google_folder_fmt.Errorf("Error moving folder '%s' to '%s': %s", displayName, newParent, err)
		}
	}

	d.Partial(false)

	return nil
}

// resourceGoogleFolderDelete deletes the folder with retries up to the configured timeout.
func resourceGoogleFolderDelete(d *resource_google_folder_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}
	displayName := d.Get("display_name").(string)

	err = retryTimeDuration(func() error {
		_, reqErr := config.NewResourceManagerV2Client(userAgent).Folders.Delete(d.Id()).Do()
		return reqErr
	}, d.Timeout(resource_google_folder_schema.TimeoutDelete))
	if err != nil {
		return resource_google_folder_fmt.Errorf("Error deleting folder '%s': %s", displayName, err)
	}
	return nil
}

// resourceGoogleFolderImportState normalizes an imported ID to the "folders/{id}" form.
func resourceGoogleFolderImportState(d *resource_google_folder_schema.ResourceData, m interface{}) ([]*resource_google_folder_schema.ResourceData, error) {
	id := d.Id()

	if !resource_google_folder_strings.HasPrefix(d.Id(), "folders/") {
		id = resource_google_folder_fmt.Sprintf("folders/%s", id)
	}

	d.SetId(id)

	return []*resource_google_folder_schema.ResourceData{d}, nil
}

// getGoogleFolder fetches a folder by resource name, retrying within the read timeout.
func getGoogleFolder(folderName, userAgent string, d *resource_google_folder_schema.ResourceData, config *Config) (*resource_google_folder_cloudresourcemanagerresourceManagerV2.Folder, error) {
	var folder *resource_google_folder_cloudresourcemanagerresourceManagerV2.Folder
	err := retryTimeDuration(func() error {
		var reqErr error
		folder, reqErr = config.NewResourceManagerV2Client(userAgent).Folders.Get(folderName).Do()
		return reqErr
	}, d.Timeout(resource_google_folder_schema.TimeoutRead))
	if err != nil {
		return nil, err
	}
	return folder, nil
}

// resourceGoogleFolderOrganizationPolicy returns the schema for the
// google_folder_organization_policy resource (shared org-policy schema plus "folder").
func resourceGoogleFolderOrganizationPolicy() *resource_google_folder_organization_policy_schema.Resource {
	return &resource_google_folder_organization_policy_schema.Resource{
		Create: resourceGoogleFolderOrganizationPolicyCreate,
		Read:   resourceGoogleFolderOrganizationPolicyRead,
		Update: resourceGoogleFolderOrganizationPolicyUpdate,
		Delete: resourceGoogleFolderOrganizationPolicyDelete,

		Importer: &resource_google_folder_organization_policy_schema.ResourceImporter{
			State: resourceFolderOrgPolicyImporter,
		},

		Timeouts: &resource_google_folder_organization_policy_schema.ResourceTimeout{
			Create: resource_google_folder_organization_policy_schema.DefaultTimeout(4 * resource_google_folder_organization_policy_time.Minute),
			Update: resource_google_folder_organization_policy_schema.DefaultTimeout(4 * resource_google_folder_organization_policy_time.Minute),
			Read:   resource_google_folder_organization_policy_schema.DefaultTimeout(4 * resource_google_folder_organization_policy_time.Minute),
			Delete: resource_google_folder_organization_policy_schema.DefaultTimeout(4 * resource_google_folder_organization_policy_time.Minute),
		},

		Schema: mergeSchemas(
			schemaOrganizationPolicy,
			map[string]*resource_google_folder_organization_policy_schema.Schema{
				"folder": {
					Type:        resource_google_folder_organization_policy_schema.TypeString,
					Required:    true,
					ForceNew:    true,
					Description: `The resource name of the folder to set the policy for. Its format is folders/{folder_id}.`,
				},
			},
		),
		UseJSONNumber: true,
	}
}

// resourceFolderOrgPolicyImporter parses "{folder}/{constraint}" import IDs.
// NOTE(review): the named capture groups (?P<folder>…)/(?P<constraint>…) are required
// here — without them the regexes are invalid and d.Get("folder")/d.Get("constraint")
// below would never be populated.
func resourceFolderOrgPolicyImporter(d *resource_google_folder_organization_policy_schema.ResourceData, meta interface{}) ([]*resource_google_folder_organization_policy_schema.ResourceData, error) {
	config := meta.(*Config)

	if err := parseImportId([]string{
		"folders/(?P<folder>[^/]+)/constraints/(?P<constraint>[^/]+)",
		"folders/(?P<folder>[^/]+)/(?P<constraint>[^/]+)",
		"(?P<folder>[^/]+)/(?P<constraint>[^/]+)"},
		d, config); err != nil {
		return nil, err
	}

	if d.Get("folder") == "" || d.Get("constraint") == "" {
		return nil, resource_google_folder_organization_policy_fmt.Errorf("unable to parse folder or constraint. Check import formats")
	}

	if err := d.Set("folder", "folders/"+d.Get("folder").(string)); err != nil {
		return nil, resource_google_folder_organization_policy_fmt.Errorf("Error setting folder: %s", err)
	}

	return []*resource_google_folder_organization_policy_schema.ResourceData{d}, nil
}

// resourceGoogleFolderOrganizationPolicyCreate sets the policy; an "unset" policy is
// treated as a delete so applying an empty block clears the constraint.
func resourceGoogleFolderOrganizationPolicyCreate(d *resource_google_folder_organization_policy_schema.ResourceData, meta interface{}) error {
	d.SetId(resource_google_folder_organization_policy_fmt.Sprintf("%s/%s", d.Get("folder"), d.Get("constraint")))

	if isOrganizationPolicyUnset(d) {
		return resourceGoogleFolderOrganizationPolicyDelete(d, meta)
	}

	if err := setFolderOrganizationPolicy(d, meta); err != nil {
		return err
	}

	return resourceGoogleFolderOrganizationPolicyRead(d, meta)
}

// resourceGoogleFolderOrganizationPolicyRead refreshes the folder's org policy state.
func resourceGoogleFolderOrganizationPolicyRead(d *resource_google_folder_organization_policy_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}
	folder := canonicalFolderId(d.Get("folder").(string))

	var policy *resource_google_folder_organization_policy_cloudresourcemanager.OrgPolicy
	err = retryTimeDuration(func() (getErr error) {
		policy, getErr = config.NewResourceManagerClient(userAgent).Folders.GetOrgPolicy(folder, &resource_google_folder_organization_policy_cloudresourcemanager.GetOrgPolicyRequest{
			Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)),
		}).Do()
		return getErr
	}, d.Timeout(resource_google_folder_organization_policy_schema.TimeoutRead))
	if err != nil {
		return handleNotFoundError(err, d, resource_google_folder_organization_policy_fmt.Sprintf("Organization policy for %s", folder))
	}

	if err := d.Set("constraint", policy.Constraint); err != nil {
		return resource_google_folder_organization_policy_fmt.Errorf("Error setting constraint: %s", err)
	}
	if err := d.Set("boolean_policy", flattenBooleanOrganizationPolicy(policy.BooleanPolicy)); err != nil {
		return resource_google_folder_organization_policy_fmt.Errorf("Error setting boolean_policy: %s", err)
	}
	if err := d.Set("list_policy", flattenListOrganizationPolicy(policy.ListPolicy)); err != nil {
		return resource_google_folder_organization_policy_fmt.Errorf("Error setting list_policy: %s", err)
	}
	if err := d.Set("restore_policy", flattenRestoreOrganizationPolicy(policy.RestoreDefault)); err != nil {
		return resource_google_folder_organization_policy_fmt.Errorf("Error setting restore_policy: %s", err)
	}
	if err := d.Set("version", policy.Version); err != nil {
		return resource_google_folder_organization_policy_fmt.Errorf("Error setting version: %s", err)
	}
	if err := d.Set("etag", policy.Etag); err != nil {
		return resource_google_folder_organization_policy_fmt.Errorf("Error setting etag: %s", err)
	}
	if err := d.Set("update_time", policy.UpdateTime); err != nil {
		return resource_google_folder_organization_policy_fmt.Errorf("Error setting update_time: %s", err)
	}

	return nil
}

// resourceGoogleFolderOrganizationPolicyUpdate mirrors Create: an unset policy deletes,
// otherwise the policy is re-set and re-read.
func resourceGoogleFolderOrganizationPolicyUpdate(d *resource_google_folder_organization_policy_schema.ResourceData, meta interface{}) error {
	if isOrganizationPolicyUnset(d) {
		return resourceGoogleFolderOrganizationPolicyDelete(d, meta)
	}

	if err := setFolderOrganizationPolicy(d, meta); err != nil {
		return err
	}

	return resourceGoogleFolderOrganizationPolicyRead(d, meta)
}

// resourceGoogleFolderOrganizationPolicyDelete clears the constraint's policy on the folder.
func resourceGoogleFolderOrganizationPolicyDelete(d *resource_google_folder_organization_policy_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}
	folder := canonicalFolderId(d.Get("folder").(string))

	return retryTimeDuration(func() (delErr error) {
		_, delErr = config.NewResourceManagerClient(userAgent).Folders.ClearOrgPolicy(folder, &resource_google_folder_organization_policy_cloudresourcemanager.ClearOrgPolicyRequest{
			Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)),
		}).Do()
		return delErr
	}, d.Timeout(resource_google_folder_organization_policy_schema.TimeoutDelete))
}

// setFolderOrganizationPolicy expands the configured boolean/list/restore policies and
// writes them via SetOrgPolicy, passing the stored etag for optimistic concurrency.
func setFolderOrganizationPolicy(d *resource_google_folder_organization_policy_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	folder := canonicalFolderId(d.Get("folder").(string))

	listPolicy, err := expandListOrganizationPolicy(d.Get("list_policy").([]interface{}))
	if err != nil {
		return err
	}

	restoreDefault, err := expandRestoreOrganizationPolicy(d.Get("restore_policy").([]interface{}))
	if err != nil {
		return err
	}

	return retryTimeDuration(func() (setErr error) {
		_, setErr = config.NewResourceManagerClient(userAgent).Folders.SetOrgPolicy(folder, &resource_google_folder_organization_policy_cloudresourcemanager.SetOrgPolicyRequest{
			Policy: &resource_google_folder_organization_policy_cloudresourcemanager.OrgPolicy{
				Constraint:     canonicalOrgPolicyConstraint(d.Get("constraint").(string)),
				BooleanPolicy:  expandBooleanOrganizationPolicy(d.Get("boolean_policy").([]interface{})),
				ListPolicy:     listPolicy,
				RestoreDefault: restoreDefault,
				Version:        int64(d.Get("version").(int)),
				Etag:           d.Get("etag").(string),
			},
		}).Do()
		return setErr
	}, d.Timeout(resource_google_folder_organization_policy_schema.TimeoutCreate))
}

// resourceGoogleOrganizationIamCustomRole returns the schema for the
// google_organization_iam_custom_role resource.
func resourceGoogleOrganizationIamCustomRole() *resource_google_organization_iam_custom_role_schema.Resource {
	return &resource_google_organization_iam_custom_role_schema.Resource{
		Create: resourceGoogleOrganizationIamCustomRoleCreate,
		Read:   resourceGoogleOrganizationIamCustomRoleRead,
		Update: resourceGoogleOrganizationIamCustomRoleUpdate,
		Delete: resourceGoogleOrganizationIamCustomRoleDelete,

		Importer: &resource_google_organization_iam_custom_role_schema.ResourceImporter{
			State: resource_google_organization_iam_custom_role_schema.ImportStatePassthrough,
		},

		Schema: map[string]*resource_google_organization_iam_custom_role_schema.Schema{
			"role_id": {
				Type:        resource_google_organization_iam_custom_role_schema.TypeString,
				Required:    true,
				ForceNew:    true,
				Description: `The role id to use for this role.`,
			},
			"org_id": {
				Type:        resource_google_organization_iam_custom_role_schema.TypeString,
				Required:    true,
				ForceNew:    true,
				Description: `The numeric ID of the organization in which you want to create a custom role.`,
			},
			"title": {
				Type:        resource_google_organization_iam_custom_role_schema.TypeString,
				Required:    true,
				Description: `A human-readable title for the role.`,
			},
			"permissions": {
				Type:        resource_google_organization_iam_custom_role_schema.TypeSet,
				Required:    true,
				MinItems:    1,
				Description: `The names of the permissions this role grants when bound in an IAM policy. At least one permission must be specified.`,
				Elem:        &resource_google_organization_iam_custom_role_schema.Schema{Type: resource_google_organization_iam_custom_role_schema.TypeString},
			},
			"stage": {
				Type:             resource_google_organization_iam_custom_role_schema.TypeString,
				Optional:         true,
				Default:          "GA",
				Description:      `The current launch stage of the role. Defaults to GA.`,
				ValidateFunc:     resource_google_organization_iam_custom_role_validation.StringInSlice([]string{"ALPHA", "BETA", "GA", "DEPRECATED", "DISABLED", "EAP"}, false),
				DiffSuppressFunc: emptyOrDefaultStringSuppress("ALPHA"),
			},
			"description": {
				Type:        resource_google_organization_iam_custom_role_schema.TypeString,
				Optional:    true,
				Description: `A human-readable description for the role.`,
			},
			"deleted": {
				Type:        resource_google_organization_iam_custom_role_schema.TypeBool,
				Computed:    true,
				Description: `The current deleted state of the role.`,
			},
			"name": {
				Type:        resource_google_organization_iam_custom_role_schema.TypeString,
				Computed:    true,
				Description: `The name of the role in the format organizations/{{org_id}}/roles/{{role_id}}. Like id, this field can be used as a reference in other resources such as IAM role bindings.`,
			},
		},
		UseJSONNumber: true,
	}
}

// resourceGoogleOrganizationIamCustomRoleCreate creates the custom role. If a role with
// the same id exists in a soft-deleted state it is undeleted (via Update) instead; if it
// exists live, creation fails and the role must be imported.
func resourceGoogleOrganizationIamCustomRoleCreate(d *resource_google_organization_iam_custom_role_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	org := d.Get("org_id").(string)
	roleId := resource_google_organization_iam_custom_role_fmt.Sprintf("organizations/%s/roles/%s", org, d.Get("role_id").(string))
	orgId := resource_google_organization_iam_custom_role_fmt.Sprintf("organizations/%s", org)

	r, err := config.NewIamClient(userAgent).Organizations.Roles.Get(roleId).Do()
	if err == nil {
		if r.Deleted {
			// Role exists but is soft-deleted: undelete it through the update path.
			d.SetId(r.Name)
			if err := resourceGoogleOrganizationIamCustomRoleUpdate(d, meta); err != nil {
				// Undelete failed; clear the speculative ID so no state is kept.
				d.SetId("")
				return err
			}
		} else {
			// Live role with the same id: refuse to clobber it.
			return resource_google_organization_iam_custom_role_fmt.Errorf("Custom project role %s already exists and must be imported", roleId)
		}
	} else if err := handleNotFoundError(err, d, resource_google_organization_iam_custom_role_fmt.Sprintf("Custom Organization Role %q", roleId)); err == nil {
		// Not found: create it fresh.
		role, err := config.NewIamClient(userAgent).Organizations.Roles.Create(orgId, &resource_google_organization_iam_custom_role_iam.CreateRoleRequest{
			RoleId: d.Get("role_id").(string),
			Role: &resource_google_organization_iam_custom_role_iam.Role{
				Title:               d.Get("title").(string),
				Description:         d.Get("description").(string),
				Stage:               d.Get("stage").(string),
				IncludedPermissions: convertStringSet(d.Get("permissions").(*resource_google_organization_iam_custom_role_schema.Set)),
			},
		}).Do()
		if err != nil {
			return resource_google_organization_iam_custom_role_fmt.Errorf("Error creating the custom organization role %s: %s", d.Get("title").(string), err)
		}

		d.SetId(role.Name)
	} else {
		return resource_google_organization_iam_custom_role_fmt.Errorf("Unable to verify whether custom org role %s already exists and must be undeleted: %v", roleId, err)
	}

	return resourceGoogleOrganizationIamCustomRoleRead(d, meta)
}

// resourceGoogleOrganizationIamCustomRoleRead refreshes all custom-role attributes.
func resourceGoogleOrganizationIamCustomRoleRead(d *resource_google_organization_iam_custom_role_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	role, err := config.NewIamClient(userAgent).Organizations.Roles.Get(d.Id()).Do()
	if err != nil {
		return handleNotFoundError(err, d, d.Id())
	}

	parsedRoleName, err := ParseOrganizationCustomRoleName(role.Name)
	if err != nil {
		return err
	}

	if err := d.Set("role_id", parsedRoleName.Name); err != nil {
		return resource_google_organization_iam_custom_role_fmt.Errorf("Error setting role_id: %s", err)
	}
	if err := d.Set("org_id", parsedRoleName.OrgId); err != nil {
		return resource_google_organization_iam_custom_role_fmt.Errorf("Error setting org_id: %s", err)
	}
	if err := d.Set("title", role.Title); err != nil {
		return resource_google_organization_iam_custom_role_fmt.Errorf("Error setting title: %s", err)
	}
	if err := d.Set("name", role.Name); err != nil {
		return resource_google_organization_iam_custom_role_fmt.Errorf("Error setting name: %s", err)
	}
	if err := d.Set("description", role.Description); err != nil {
		return resource_google_organization_iam_custom_role_fmt.Errorf("Error setting description: %s", err)
	}
	if err := d.Set("permissions", role.IncludedPermissions); err != nil {
		return resource_google_organization_iam_custom_role_fmt.Errorf("Error setting permissions: %s", err)
	}
	if err := d.Set("stage", role.Stage); err != nil {
		return resource_google_organization_iam_custom_role_fmt.Errorf("Error setting stage: %s", err)
	}
	if err := d.Set("deleted", role.Deleted); err != nil {
		return resource_google_organization_iam_custom_role_fmt.Errorf("Error setting deleted: %s", err)
	}

	return nil
}

// resourceGoogleOrganizationIamCustomRoleUpdate undeletes the role if needed, then
// patches title/description/stage/permissions when any of them changed.
func resourceGoogleOrganizationIamCustomRoleUpdate(d *resource_google_organization_iam_custom_role_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	d.Partial(true)

	r, err := config.NewIamClient(userAgent).Organizations.Roles.Get(d.Id()).Do()
	if err != nil {
		return resource_google_organization_iam_custom_role_fmt.Errorf("unable to find custom project role %s to update: %v", d.Id(), err)
	}

	if r.Deleted {
		_, err := config.NewIamClient(userAgent).Organizations.Roles.Undelete(d.Id(), &resource_google_organization_iam_custom_role_iam.UndeleteRoleRequest{}).Do()
		if err != nil {
			return resource_google_organization_iam_custom_role_fmt.Errorf("Error undeleting the custom organization role %s: %s", d.Get("title").(string), err)
		}
	}

	if d.HasChange("title") || d.HasChange("description") || d.HasChange("stage") || d.HasChange("permissions") {
		_, err := config.NewIamClient(userAgent).Organizations.Roles.Patch(d.Id(), &resource_google_organization_iam_custom_role_iam.Role{
			Title:               d.Get("title").(string),
			Description:         d.Get("description").(string),
			Stage:               d.Get("stage").(string),
			IncludedPermissions: convertStringSet(d.Get("permissions").(*resource_google_organization_iam_custom_role_schema.Set)),
		}).Do()

		if err != nil {
			return resource_google_organization_iam_custom_role_fmt.Errorf("Error updating the custom organization role %s: %s", d.Get("title").(string), err)
		}
	}

	d.Partial(false)
	return nil
}

// resourceGoogleOrganizationIamCustomRoleDelete soft-deletes the role; if it is already
// deleted (and state agrees) the call is skipped.
func resourceGoogleOrganizationIamCustomRoleDelete(d *resource_google_organization_iam_custom_role_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	r, err := config.NewIamClient(userAgent).Organizations.Roles.Get(d.Id()).Do()
	if err == nil && r != nil && r.Deleted && d.Get("deleted").(bool) {
		// Already deleted remotely and reflected in state: nothing to do.
		return nil
	}

	_, err = config.NewIamClient(userAgent).Organizations.Roles.Delete(d.Id()).Do()
	if err != nil {
		return resource_google_organization_iam_custom_role_fmt.Errorf("Error deleting the custom organization role %s: %s", d.Get("title").(string), err)
	}

	return nil
}

// schemaOrganizationPolicy is the schema shared by the org-, folder- and project-level
// organization policy resources (merged with a scope field by each resource).
var schemaOrganizationPolicy = map[string]*resource_google_organization_policy_schema.Schema{

	"constraint": {
		Type:             resource_google_organization_policy_schema.TypeString,
		Required:         true,
		ForceNew:         true,
		DiffSuppressFunc: compareSelfLinkOrResourceName,
		Description:      `The name of the Constraint the Policy is configuring, for example, serviceuser.services.`,
	},
	"boolean_policy": {
		Type:        resource_google_organization_policy_schema.TypeList,
		Optional:    true,
		MaxItems:    1,
		Description: `A boolean policy is a constraint that is either enforced or not.`,
		Elem: &resource_google_organization_policy_schema.Resource{
			Schema: map[string]*resource_google_organization_policy_schema.Schema{
				"enforced": {
					Type:        resource_google_organization_policy_schema.TypeBool,
					Required:    true,
					Description: `If true, then the Policy is enforced. If false, then any configuration is acceptable.`,
				},
			},
		},
	},
	"list_policy": {
		Type:        resource_google_organization_policy_schema.TypeList,
		Optional:    true,
		MaxItems:    1,
		Description: `A policy that can define specific values that are allowed or denied for the given constraint. It can also be used to allow or deny all values. `,
		Elem: &resource_google_organization_policy_schema.Resource{
			Schema: map[string]*resource_google_organization_policy_schema.Schema{
				"allow": {
					Type:         resource_google_organization_policy_schema.TypeList,
					Optional:     true,
					MaxItems:     1,
					Description:  `One or the other must be set.`,
					ExactlyOneOf: []string{"list_policy.0.allow", "list_policy.0.deny"},
					Elem: &resource_google_organization_policy_schema.Resource{
						Schema: map[string]*resource_google_organization_policy_schema.Schema{
							"all": {
								Type:         resource_google_organization_policy_schema.TypeBool,
								Optional:     true,
								Default:      false,
								Description:  `The policy allows or denies all values.`,
								ExactlyOneOf: []string{"list_policy.0.allow.0.all", "list_policy.0.allow.0.values"},
							},
							"values": {
								Type:         resource_google_organization_policy_schema.TypeSet,
								Optional:     true,
								Description:  `The policy can define specific values that are allowed or denied.`,
								ExactlyOneOf: []string{"list_policy.0.allow.0.all", "list_policy.0.allow.0.values"},
								Elem:         &resource_google_organization_policy_schema.Schema{Type: resource_google_organization_policy_schema.TypeString},
								Set:          resource_google_organization_policy_schema.HashString,
							},
						},
					},
				},
				"deny": {
					Type:         resource_google_organization_policy_schema.TypeList,
					Optional:     true,
					MaxItems:     1,
					Description:  `One or the other must be set.`,
					ExactlyOneOf: []string{"list_policy.0.allow", "list_policy.0.deny"},
					Elem: &resource_google_organization_policy_schema.Resource{
						Schema: map[string]*resource_google_organization_policy_schema.Schema{
							"all": {
								Type:         resource_google_organization_policy_schema.TypeBool,
								Optional:     true,
								Default:      false,
								Description:  `The policy allows or denies all values.`,
								ExactlyOneOf: []string{"list_policy.0.deny.0.all", "list_policy.0.deny.0.values"},
							},
							"values": {
								Type:         resource_google_organization_policy_schema.TypeSet,
								Optional:     true,
								Description:  `The policy can define specific values that are allowed or denied.`,
								ExactlyOneOf: []string{"list_policy.0.deny.0.all", "list_policy.0.deny.0.values"},
								Elem:         &resource_google_organization_policy_schema.Schema{Type: resource_google_organization_policy_schema.TypeString},
								Set:          resource_google_organization_policy_schema.HashString,
							},
						},
					},
				},
				"suggested_value": {
					Type:        resource_google_organization_policy_schema.TypeString,
					Optional:    true,
					Computed:    true,
					Description: `The Google Cloud Console will try to default to a configuration that matches the value specified in this field.`,
				},
				"inherit_from_parent": {
					Type:        resource_google_organization_policy_schema.TypeBool,
					Optional:    true,
					Description: `If set to true, the values from the effective Policy of the parent resource are inherited, meaning the values set in this Policy are added to the values inherited up the hierarchy.`,
				},
			},
		},
	},
	"version": {
		Type:        resource_google_organization_policy_schema.TypeInt,
		Optional:    true,
		Computed:    true,
		Description: `Version of the Policy. Default version is 0.`,
	},
	"etag": {
		Type:        resource_google_organization_policy_schema.TypeString,
		Computed:    true,
		Description: `The etag of the organization policy. etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other.`,
	},
	"update_time": {
		Type:        resource_google_organization_policy_schema.TypeString,
		Computed:    true,
		Description: `The timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds, representing when the variable was last updated. Example: "2016-10-09T12:33:37.578138407Z".`,
	},
	"restore_policy": {
		Type:        resource_google_organization_policy_schema.TypeList,
		Optional:    true,
		MaxItems:    1,
		Description: `A restore policy is a constraint to restore the default policy.`,
		Elem: &resource_google_organization_policy_schema.Resource{
			Schema: map[string]*resource_google_organization_policy_schema.Schema{
				"default": {
					Type:        resource_google_organization_policy_schema.TypeBool,
					Required:    true,
					Description: `May only be set to true. If set, then the default Policy is restored.`,
				},
			},
		},
	},
}

// resourceGoogleOrganizationPolicy returns the schema for the
// google_organization_policy resource (shared org-policy schema plus "org_id").
func resourceGoogleOrganizationPolicy() *resource_google_organization_policy_schema.Resource {
	return &resource_google_organization_policy_schema.Resource{
		Create: resourceGoogleOrganizationPolicyCreate,
		Read:   resourceGoogleOrganizationPolicyRead,
		Update: resourceGoogleOrganizationPolicyUpdate,
		Delete: resourceGoogleOrganizationPolicyDelete,

		Importer: &resource_google_organization_policy_schema.ResourceImporter{
			State: resourceGoogleOrganizationPolicyImportState,
		},

		Timeouts: &resource_google_organization_policy_schema.ResourceTimeout{
			Create: resource_google_organization_policy_schema.DefaultTimeout(4 * resource_google_organization_policy_time.Minute),
			Update: resource_google_organization_policy_schema.DefaultTimeout(4 * resource_google_organization_policy_time.Minute),
			Read:   resource_google_organization_policy_schema.DefaultTimeout(4 * resource_google_organization_policy_time.Minute),
			Delete: resource_google_organization_policy_schema.DefaultTimeout(4 * resource_google_organization_policy_time.Minute),
		},

		Schema: mergeSchemas(
			schemaOrganizationPolicy,
			map[string]*resource_google_organization_policy_schema.Schema{
				"org_id": {
					Type:     resource_google_organization_policy_schema.TypeString,
					Required: true,
					ForceNew: true,
				},
			}),
		UseJSONNumber: true,
	}
}

// resourceGoogleOrganizationPolicyCreate sets the policy at the organization level;
// an "unset" policy is treated as a delete.
func resourceGoogleOrganizationPolicyCreate(d *resource_google_organization_policy_schema.ResourceData, meta interface{}) error {
	if isOrganizationPolicyUnset(d) {
		return resourceGoogleOrganizationPolicyDelete(d, meta)
	}

	if err := setOrganizationPolicy(d, meta); err != nil {
		return err
	}

	d.SetId(resource_google_organization_policy_fmt.Sprintf("%s/%s", d.Get("org_id"), d.Get("constraint").(string)))
	return resourceGoogleOrganizationPolicyRead(d, meta)
}

// resourceGoogleOrganizationPolicyRead refreshes the organization's policy state.
func resourceGoogleOrganizationPolicyRead(d *resource_google_organization_policy_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}
	org := "organizations/" + d.Get("org_id").(string)

	var policy *resource_google_organization_policy_cloudresourcemanager.OrgPolicy
	err = retryTimeDuration(func() (readErr error) {
		policy, readErr = config.NewResourceManagerClient(userAgent).Organizations.GetOrgPolicy(org, &resource_google_organization_policy_cloudresourcemanager.GetOrgPolicyRequest{
			Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)),
		}).Do()
		return readErr
	}, d.Timeout(resource_google_organization_policy_schema.TimeoutRead))
	if err != nil {
		return handleNotFoundError(err, d, resource_google_organization_policy_fmt.Sprintf("Organization policy for %s", org))
	}

	if err := d.Set("constraint", policy.Constraint); err != nil {
		return resource_google_organization_policy_fmt.Errorf("Error setting constraint: %s", err)
	}
	if err := d.Set("boolean_policy", flattenBooleanOrganizationPolicy(policy.BooleanPolicy)); err != nil {
		return resource_google_organization_policy_fmt.Errorf("Error setting boolean_policy: %s", err)
	}
	if err := d.Set("list_policy", flattenListOrganizationPolicy(policy.ListPolicy)); err != nil {
		return resource_google_organization_policy_fmt.Errorf("Error setting list_policy: %s", err)
	}
	if err := d.Set("version", policy.Version); err != nil {
		return resource_google_organization_policy_fmt.Errorf("Error setting version: %s", err)
	}
	if err := d.Set("etag", policy.Etag); err != nil {
		return resource_google_organization_policy_fmt.Errorf("Error setting etag: %s", err)
	}
	if err := d.Set("update_time", policy.UpdateTime); err != nil {
		return resource_google_organization_policy_fmt.Errorf("Error setting update_time: %s", err)
	}
	if err := d.Set("restore_policy", flattenRestoreOrganizationPolicy(policy.RestoreDefault)); err != nil {
		return resource_google_organization_policy_fmt.Errorf("Error setting restore_policy: %s", err)
	}

	return nil
}

// resourceGoogleOrganizationPolicyUpdate mirrors Create: unset deletes, otherwise re-set
// and re-read.
func resourceGoogleOrganizationPolicyUpdate(d *resource_google_organization_policy_schema.ResourceData, meta interface{}) error {
	if isOrganizationPolicyUnset(d) {
		return resourceGoogleOrganizationPolicyDelete(d, meta)
	}

	if err := setOrganizationPolicy(d, meta); err != nil {
		return err
	}

	return resourceGoogleOrganizationPolicyRead(d, meta)
}

// resourceGoogleOrganizationPolicyDelete clears the constraint's policy on the organization.
func resourceGoogleOrganizationPolicyDelete(d *resource_google_organization_policy_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}
	org := "organizations/" + d.Get("org_id").(string)

	err = retryTimeDuration(func() error {
		_, dErr := config.NewResourceManagerClient(userAgent).Organizations.ClearOrgPolicy(org, &resource_google_organization_policy_cloudresourcemanager.ClearOrgPolicyRequest{
			Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)),
		}).Do()
		return dErr
	}, d.Timeout(resource_google_organization_policy_schema.TimeoutDelete))
	if err != nil {
		return err
	}

	return nil
}

// resourceGoogleOrganizationPolicyImportState parses "{org_id}/{constraint}" import IDs.
func resourceGoogleOrganizationPolicyImportState(d *resource_google_organization_policy_schema.ResourceData, meta interface{}) ([]*resource_google_organization_policy_schema.ResourceData, error) {
	parts := resource_google_organization_policy_strings.SplitN(d.Id(), "/", 2)
	if len(parts) != 2 {
		return nil, resource_google_organization_policy_fmt.Errorf("Invalid id format. Expecting {org_id}/{constraint}, got '%s' instead.", d.Id())
	}

	if err := d.Set("org_id", parts[0]); err != nil {
		return nil, resource_google_organization_policy_fmt.Errorf("Error setting org_id: %s", err)
	}
	if err := d.Set("constraint", parts[1]); err != nil {
		return nil, resource_google_organization_policy_fmt.Errorf("Error setting constraint: %s", err)
	}

	return []*resource_google_organization_policy_schema.ResourceData{d}, nil
}

// isOrganizationPolicyUnset reports whether none of the three policy blocks is configured,
// which the create/update paths translate into a delete.
func isOrganizationPolicyUnset(d *resource_google_organization_policy_schema.ResourceData) bool {
	listPolicy := d.Get("list_policy").([]interface{})
	booleanPolicy := d.Get("boolean_policy").([]interface{})
	restorePolicy := d.Get("restore_policy").([]interface{})

	return len(listPolicy)+len(booleanPolicy)+len(restorePolicy) == 0
}

// setOrganizationPolicy expands the configured policies and writes them via SetOrgPolicy
// at the organization level, passing the stored etag for optimistic concurrency.
func setOrganizationPolicy(d *resource_google_organization_policy_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	org := "organizations/" + d.Get("org_id").(string)

	listPolicy, err := expandListOrganizationPolicy(d.Get("list_policy").([]interface{}))
	if err != nil {
		return err
	}

	restoreDefault, err := expandRestoreOrganizationPolicy(d.Get("restore_policy").([]interface{}))
	if err != nil {
		return err
	}

	err = retryTimeDuration(func() (setErr error) {
		_, setErr = config.NewResourceManagerClient(userAgent).Organizations.SetOrgPolicy(org, &resource_google_organization_policy_cloudresourcemanager.SetOrgPolicyRequest{
			Policy: &resource_google_organization_policy_cloudresourcemanager.OrgPolicy{
				Constraint:    canonicalOrgPolicyConstraint(d.Get("constraint").(string)),
				BooleanPolicy: expandBooleanOrganizationPolicy(d.Get("boolean_policy").([]interface{})),
ListPolicy: listPolicy, - RestoreDefault: restoreDefault, - Version: int64(d.Get("version").(int)), - Etag: d.Get("etag").(string), - }, - }).Do() - return setErr - }, d.Timeout(resource_google_organization_policy_schema.TimeoutCreate)) - return err -} - -func flattenBooleanOrganizationPolicy(policy *resource_google_organization_policy_cloudresourcemanager.BooleanPolicy) []map[string]interface{} { - bPolicies := make([]map[string]interface{}, 0, 1) - - if policy == nil { - return bPolicies - } - - bPolicies = append(bPolicies, map[string]interface{}{ - "enforced": policy.Enforced, - }) - - return bPolicies -} - -func flattenRestoreOrganizationPolicy(restore_policy *resource_google_organization_policy_cloudresourcemanager.RestoreDefault) []map[string]interface{} { - rp := make([]map[string]interface{}, 0, 1) - - if restore_policy == nil { - return rp - } - - rp = append(rp, map[string]interface{}{ - "default": true, - }) - - return rp -} - -func expandBooleanOrganizationPolicy(configured []interface{}) *resource_google_organization_policy_cloudresourcemanager.BooleanPolicy { - if len(configured) == 0 || configured[0] == nil { - return nil - } - - booleanPolicy := configured[0].(map[string]interface{}) - return &resource_google_organization_policy_cloudresourcemanager.BooleanPolicy{ - Enforced: booleanPolicy["enforced"].(bool), - } -} - -func expandRestoreOrganizationPolicy(configured []interface{}) (*resource_google_organization_policy_cloudresourcemanager.RestoreDefault, error) { - if len(configured) == 0 || configured[0] == nil { - return nil, nil - } - - restoreDefaultMap := configured[0].(map[string]interface{}) - default_value := restoreDefaultMap["default"].(bool) - - if default_value { - return &resource_google_organization_policy_cloudresourcemanager.RestoreDefault{}, nil - } - - return nil, resource_google_organization_policy_fmt.Errorf("Invalid value for restore_policy. 
Expecting default = true") -} - -func flattenListOrganizationPolicy(policy *resource_google_organization_policy_cloudresourcemanager.ListPolicy) []map[string]interface{} { - lPolicies := make([]map[string]interface{}, 0, 1) - - if policy == nil { - return lPolicies - } - - listPolicy := map[string]interface{}{ - "suggested_value": policy.SuggestedValue, - "inherit_from_parent": policy.InheritFromParent, - } - switch { - case policy.AllValues == "ALLOW": - listPolicy["allow"] = []interface{}{map[string]interface{}{ - "all": true, - }} - case policy.AllValues == "DENY": - listPolicy["deny"] = []interface{}{map[string]interface{}{ - "all": true, - }} - case len(policy.AllowedValues) > 0: - listPolicy["allow"] = []interface{}{map[string]interface{}{ - "values": resource_google_organization_policy_schema.NewSet(resource_google_organization_policy_schema.HashString, convertStringArrToInterface(policy.AllowedValues)), - }} - case len(policy.DeniedValues) > 0: - listPolicy["deny"] = []interface{}{map[string]interface{}{ - "values": resource_google_organization_policy_schema.NewSet(resource_google_organization_policy_schema.HashString, convertStringArrToInterface(policy.DeniedValues)), - }} - } - - lPolicies = append(lPolicies, listPolicy) - - return lPolicies -} - -func expandListOrganizationPolicy(configured []interface{}) (*resource_google_organization_policy_cloudresourcemanager.ListPolicy, error) { - if len(configured) == 0 || configured[0] == nil { - return nil, nil - } - - listPolicyMap := configured[0].(map[string]interface{}) - - allow := listPolicyMap["allow"].([]interface{}) - deny := listPolicyMap["deny"].([]interface{}) - - var allValues string - var allowedValues []string - var deniedValues []string - if len(allow) > 0 { - allowMap := allow[0].(map[string]interface{}) - all := allowMap["all"].(bool) - values := allowMap["values"].(*resource_google_organization_policy_schema.Set) - - if all { - allValues = "ALLOW" - } else { - allowedValues = 
convertStringArr(values.List()) - } - } - - if len(deny) > 0 { - denyMap := deny[0].(map[string]interface{}) - all := denyMap["all"].(bool) - values := denyMap["values"].(*resource_google_organization_policy_schema.Set) - - if all { - allValues = "DENY" - } else { - deniedValues = convertStringArr(values.List()) - } - } - - listPolicy := configured[0].(map[string]interface{}) - return &resource_google_organization_policy_cloudresourcemanager.ListPolicy{ - AllValues: allValues, - AllowedValues: allowedValues, - DeniedValues: deniedValues, - SuggestedValue: listPolicy["suggested_value"].(string), - InheritFromParent: listPolicy["inherit_from_parent"].(bool), - ForceSendFields: []string{"InheritFromParent"}, - }, nil -} - -func canonicalOrgPolicyConstraint(constraint string) string { - if resource_google_organization_policy_strings.HasPrefix(constraint, "constraints/") { - return constraint - } - return "constraints/" + constraint -} - -type ServicesCall interface { - Header() resource_google_project_http.Header - Do(opts ...resource_google_project_googleapi.CallOption) (*resource_google_project_serviceusage.Operation, error) -} - -func resourceGoogleProject() *resource_google_project_schema.Resource { - return &resource_google_project_schema.Resource{ - SchemaVersion: 1, - - Create: resourceGoogleProjectCreate, - Read: resourceGoogleProjectRead, - Update: resourceGoogleProjectUpdate, - Delete: resourceGoogleProjectDelete, - - Importer: &resource_google_project_schema.ResourceImporter{ - State: resourceProjectImportState, - }, - - Timeouts: &resource_google_project_schema.ResourceTimeout{ - Create: resource_google_project_schema.DefaultTimeout(10 * resource_google_project_time.Minute), - Update: resource_google_project_schema.DefaultTimeout(10 * resource_google_project_time.Minute), - Read: resource_google_project_schema.DefaultTimeout(10 * resource_google_project_time.Minute), - Delete: resource_google_project_schema.DefaultTimeout(10 * 
resource_google_project_time.Minute), - }, - - MigrateState: resourceGoogleProjectMigrateState, - - Schema: map[string]*resource_google_project_schema.Schema{ - "project_id": { - Type: resource_google_project_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateProjectID(), - Description: `The project ID. Changing this forces a new project to be created.`, - }, - "skip_delete": { - Type: resource_google_project_schema.TypeBool, - Optional: true, - Computed: true, - Description: `If true, the Terraform resource can be deleted without deleting the Project via the Google API.`, - }, - "auto_create_network": { - Type: resource_google_project_schema.TypeBool, - Optional: true, - Default: true, - Description: `Create the 'default' network automatically. Default true. If set to false, the default network will be deleted. Note that, for quota purposes, you will still need to have 1 network slot available to create the project successfully, even if you set auto_create_network to false, since the network will exist momentarily.`, - }, - "name": { - Type: resource_google_project_schema.TypeString, - Required: true, - ValidateFunc: validateProjectName(), - Description: `The display name of the project.`, - }, - "org_id": { - Type: resource_google_project_schema.TypeString, - Optional: true, - ConflictsWith: []string{"folder_id"}, - Description: `The numeric ID of the organization this project belongs to. Changing this forces a new project to be created. Only one of org_id or folder_id may be specified. If the org_id is specified then the project is created at the top level. Changing this forces the project to be migrated to the newly specified organization.`, - }, - "folder_id": { - Type: resource_google_project_schema.TypeString, - Optional: true, - StateFunc: parseFolderId, - ConflictsWith: []string{"org_id"}, - Description: `The numeric ID of the folder this project should be created under. Only one of org_id or folder_id may be specified. 
If the folder_id is specified, then the project is created under the specified folder. Changing this forces the project to be migrated to the newly specified folder.`, - }, - "number": { - Type: resource_google_project_schema.TypeString, - Computed: true, - Description: `The numeric identifier of the project.`, - }, - "billing_account": { - Type: resource_google_project_schema.TypeString, - Optional: true, - Description: `The alphanumeric ID of the billing account this project belongs to. The user or service account performing this operation with Terraform must have Billing Account Administrator privileges (roles/billing.admin) in the organization. See Google Cloud Billing API Access Control for more details.`, - }, - "labels": { - Type: resource_google_project_schema.TypeMap, - Optional: true, - Elem: &resource_google_project_schema.Schema{Type: resource_google_project_schema.TypeString}, - Description: `A set of key/value label pairs to assign to the project.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGoogleProjectCreate(d *resource_google_project_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - if err = resourceGoogleProjectCheckPreRequisites(config, d, userAgent); err != nil { - return resource_google_project_fmt.Errorf("failed pre-requisites: %v", err) - } - - var pid string - pid = d.Get("project_id").(string) - - resource_google_project_log.Printf("[DEBUG]: Creating new project %q", pid) - project := &resource_google_project_cloudresourcemanager.Project{ - ProjectId: pid, - Name: d.Get("name").(string), - } - - if err = getParentResourceId(d, project); err != nil { - return err - } - - if _, ok := d.GetOk("labels"); ok { - project.Labels = expandLabels(d) - } - - var op *resource_google_project_cloudresourcemanager.Operation - err = retryTimeDuration(func() (reqErr error) { - op, reqErr = 
config.NewResourceManagerClient(userAgent).Projects.Create(project).Do() - return reqErr - }, d.Timeout(resource_google_project_schema.TimeoutCreate)) - if err != nil { - return resource_google_project_fmt.Errorf("error creating project %s (%s): %s. "+ - "If you received a 403 error, make sure you have the"+ - " `roles/resourcemanager.projectCreator` permission", - project.ProjectId, project.Name, err) - } - - d.SetId(resource_google_project_fmt.Sprintf("projects/%s", pid)) - - opAsMap, err := ConvertToMap(op) - if err != nil { - return err - } - - waitErr := resourceManagerOperationWaitTime(config, opAsMap, "creating folder", userAgent, d.Timeout(resource_google_project_schema.TimeoutCreate)) - if waitErr != nil { - - d.SetId("") - return waitErr - } - - if _, ok := d.GetOk("billing_account"); ok { - err = updateProjectBillingAccount(d, config, userAgent) - if err != nil { - return err - } - } - - resource_google_project_time.Sleep(10 * resource_google_project_time.Second) - - err = resourceGoogleProjectRead(d, meta) - if err != nil { - return err - } - - if !d.Get("auto_create_network").(bool) { - - billingProject := project.ProjectId - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - if err = enableServiceUsageProjectServices([]string{"compute.googleapis.com"}, project.ProjectId, billingProject, userAgent, config, d.Timeout(resource_google_project_schema.TimeoutCreate)); err != nil { - return resource_google_project_errwrap.Wrapf("Error enabling the Compute Engine API required to delete the default network: {{err}} ", err) - } - - if err = forceDeleteComputeNetwork(d, config, project.ProjectId, "default"); err != nil { - if isGoogleApiErrorWithCode(err, 404) { - resource_google_project_log.Printf("[DEBUG] Default network not found for project %q, no need to delete it", project.ProjectId) - } else { - return resource_google_project_errwrap.Wrapf(resource_google_project_fmt.Sprintf("Error deleting default network in project 
%s: {{err}}", project.ProjectId), err) - } - } - } - return nil -} - -func resourceGoogleProjectCheckPreRequisites(config *Config, d *resource_google_project_schema.ResourceData, userAgent string) error { - ib, ok := d.GetOk("billing_account") - if !ok { - return nil - } - ba := "billingAccounts/" + ib.(string) - const perm = "billing.resourceAssociations.create" - req := &resource_google_project_cloudbilling.TestIamPermissionsRequest{ - Permissions: []string{perm}, - } - resp, err := config.NewBillingClient(userAgent).BillingAccounts.TestIamPermissions(ba, req).Do() - if err != nil { - return resource_google_project_fmt.Errorf("failed to check permissions on billing account %q: %v", ba, err) - } - if !stringInSlice(resp.Permissions, perm) { - return resource_google_project_fmt.Errorf("missing permission on %q: %v", ba, perm) - } - if !d.Get("auto_create_network").(bool) { - call := config.NewServiceUsageClient(userAgent).Services.Get("projects/00000000000/services/serviceusage.googleapis.com") - if config.UserProjectOverride { - if billingProject, err := getBillingProject(d, config); err == nil { - call.Header().Add("X-Goog-User-Project", billingProject) - } - } - _, err := call.Do() - switch { - - case err.Error() == "googleapi: Error 403: Project '00000000000' not found or permission denied., forbidden": - return nil - case resource_google_project_strings.Contains(err.Error(), "accessNotConfigured"): - return resource_google_project_fmt.Errorf("API serviceusage not enabled.\nFound error: %v", err) - } - } - return nil -} - -func resourceGoogleProjectRead(d *resource_google_project_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - parts := resource_google_project_strings.Split(d.Id(), "/") - pid := parts[len(parts)-1] - - p, err := readGoogleProject(d, config, userAgent) - if err != nil { - if gerr, ok := 
err.(*resource_google_project_googleapi.Error); ok && gerr.Code == 403 && resource_google_project_strings.Contains(gerr.Message, "caller does not have permission") { - return resource_google_project_fmt.Errorf("the user does not have permission to access Project %q or it may not exist", pid) - } - return handleNotFoundError(err, d, resource_google_project_fmt.Sprintf("Project %q", pid)) - } - - if p.LifecycleState != "ACTIVE" { - resource_google_project_log.Printf("[WARN] Removing project '%s' because its state is '%s' (requires 'ACTIVE').", pid, p.LifecycleState) - d.SetId("") - return nil - } - - if err := d.Set("project_id", pid); err != nil { - return resource_google_project_fmt.Errorf("Error setting project_id: %s", err) - } - if err := d.Set("number", resource_google_project_strconv.FormatInt(p.ProjectNumber, 10)); err != nil { - return resource_google_project_fmt.Errorf("Error setting number: %s", err) - } - if err := d.Set("name", p.Name); err != nil { - return resource_google_project_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("labels", p.Labels); err != nil { - return resource_google_project_fmt.Errorf("Error setting labels: %s", err) - } - - if p.Parent != nil { - switch p.Parent.Type { - case "organization": - if err := d.Set("org_id", p.Parent.Id); err != nil { - return resource_google_project_fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("folder_id", ""); err != nil { - return resource_google_project_fmt.Errorf("Error setting folder_id: %s", err) - } - case "folder": - if err := d.Set("folder_id", p.Parent.Id); err != nil { - return resource_google_project_fmt.Errorf("Error setting folder_id: %s", err) - } - if err := d.Set("org_id", ""); err != nil { - return resource_google_project_fmt.Errorf("Error setting org_id: %s", err) - } - } - } - - var ba *resource_google_project_cloudbilling.ProjectBillingInfo - err = retryTimeDuration(func() (reqErr error) { - ba, reqErr = 
config.NewBillingClient(userAgent).Projects.GetBillingInfo(prefixedProject(pid)).Do() - return reqErr - }, d.Timeout(resource_google_project_schema.TimeoutRead)) - - if err != nil && !isApiNotEnabledError(err) { - return resource_google_project_fmt.Errorf("Error reading billing account for project %q: %v", prefixedProject(pid), err) - } else if isApiNotEnabledError(err) { - resource_google_project_log.Printf("[WARN] Billing info API not enabled, please enable it to read billing info about project %q: %s", pid, err.Error()) - } else if ba.BillingAccountName != "" { - - _ba := resource_google_project_strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/") - if ba.BillingAccountName == _ba { - return resource_google_project_fmt.Errorf("Error parsing billing account for project %q. Expected value to begin with 'billingAccounts/' but got %s", prefixedProject(pid), ba.BillingAccountName) - } - if err := d.Set("billing_account", _ba); err != nil { - return resource_google_project_fmt.Errorf("Error setting billing_account: %s", err) - } - } - - return nil -} - -func prefixedProject(pid string) string { - return "projects/" + pid -} - -func getParentResourceId(d *resource_google_project_schema.ResourceData, p *resource_google_project_cloudresourcemanager.Project) error { - orgId := d.Get("org_id").(string) - folderId := d.Get("folder_id").(string) - - if orgId != "" && folderId != "" { - return resource_google_project_fmt.Errorf("'org_id' and 'folder_id' cannot be both set.") - } - - if orgId != "" { - p.Parent = &resource_google_project_cloudresourcemanager.ResourceId{ - Id: orgId, - Type: "organization", - } - } - - if folderId != "" { - p.Parent = &resource_google_project_cloudresourcemanager.ResourceId{ - Id: parseFolderId(folderId), - Type: "folder", - } - } - - return nil -} - -func parseFolderId(v interface{}) string { - folderId := v.(string) - if resource_google_project_strings.HasPrefix(folderId, "folders/") { - return folderId[8:] - } - return folderId -} - 
-func resourceGoogleProjectUpdate(d *resource_google_project_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - parts := resource_google_project_strings.Split(d.Id(), "/") - pid := parts[len(parts)-1] - project_name := d.Get("name").(string) - - p, err := readGoogleProject(d, config, userAgent) - if err != nil { - if isGoogleApiErrorWithCode(err, 404) { - return resource_google_project_fmt.Errorf("Project %q does not exist.", pid) - } - return resource_google_project_fmt.Errorf("Error checking project %q: %s", pid, err) - } - - d.Partial(true) - - if ok := d.HasChange("name"); ok { - p.Name = project_name - - if p, err = updateProject(config, d, project_name, userAgent, p); err != nil { - return err - } - } - - if d.HasChange("org_id") || d.HasChange("folder_id") { - if err := getParentResourceId(d, p); err != nil { - return err - } - - if p, err = updateProject(config, d, project_name, userAgent, p); err != nil { - return err - } - } - - if ok := d.HasChange("billing_account"); ok { - err = updateProjectBillingAccount(d, config, userAgent) - if err != nil { - return err - } - } - - if ok := d.HasChange("labels"); ok { - p.Labels = expandLabels(d) - - if p, err = updateProject(config, d, project_name, userAgent, p); err != nil { - return err - } - } - - d.Partial(false) - return resourceGoogleProjectRead(d, meta) -} - -func updateProject(config *Config, d *resource_google_project_schema.ResourceData, projectName, userAgent string, desiredProject *resource_google_project_cloudresourcemanager.Project) (*resource_google_project_cloudresourcemanager.Project, error) { - var newProj *resource_google_project_cloudresourcemanager.Project - if err := retryTimeDuration(func() (updateErr error) { - newProj, updateErr = config.NewResourceManagerClient(userAgent).Projects.Update(desiredProject.ProjectId, desiredProject).Do() - return updateErr - }, 
d.Timeout(resource_google_project_schema.TimeoutUpdate)); err != nil { - return nil, resource_google_project_fmt.Errorf("Error updating project %q: %s", projectName, err) - } - return newProj, nil -} - -func resourceGoogleProjectDelete(d *resource_google_project_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - if !d.Get("skip_delete").(bool) { - parts := resource_google_project_strings.Split(d.Id(), "/") - pid := parts[len(parts)-1] - if err := retryTimeDuration(func() error { - _, delErr := config.NewResourceManagerClient(userAgent).Projects.Delete(pid).Do() - return delErr - }, d.Timeout(resource_google_project_schema.TimeoutDelete)); err != nil { - return handleNotFoundError(err, d, resource_google_project_fmt.Sprintf("Project %s", pid)) - } - } - d.SetId("") - return nil -} - -func resourceProjectImportState(d *resource_google_project_schema.ResourceData, meta interface{}) ([]*resource_google_project_schema.ResourceData, error) { - parts := resource_google_project_strings.Split(d.Id(), "/") - pid := parts[len(parts)-1] - - matched, err := resource_google_project_regexp.MatchString("^\\d+$", pid) - if err != nil { - return nil, resource_google_project_fmt.Errorf("Error matching project %q: %s", pid, err) - } - - if matched { - return nil, resource_google_project_fmt.Errorf("Error importing project %q, please use project_id", pid) - } - - d.SetId(resource_google_project_fmt.Sprintf("projects/%s", pid)) - - if err := d.Set("auto_create_network", true); err != nil { - return nil, resource_google_project_fmt.Errorf("Error setting auto_create_network: %s", err) - } - return []*resource_google_project_schema.ResourceData{d}, nil -} - -func forceDeleteComputeNetwork(d *resource_google_project_schema.ResourceData, config *Config, projectId, networkName string) error { - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != 
nil { - return err - } - - net, err := config.NewComputeClient(userAgent).Networks.Get(projectId, networkName).Do() - if err != nil { - return err - } - - token := "" - for paginate := true; paginate; { - filter := resource_google_project_fmt.Sprintf("network eq %s", net.SelfLink) - resp, err := config.NewComputeClient(userAgent).Firewalls.List(projectId).Filter(filter).Do() - if err != nil { - return resource_google_project_errwrap.Wrapf("Error listing firewall rules in proj: {{err}}", err) - } - - resource_google_project_log.Printf("[DEBUG] Found %d firewall rules in %q network", len(resp.Items), networkName) - - for _, firewall := range resp.Items { - op, err := config.NewComputeClient(userAgent).Firewalls.Delete(projectId, firewall.Name).Do() - if err != nil { - return resource_google_project_errwrap.Wrapf("Error deleting firewall: {{err}}", err) - } - err = computeOperationWaitTime(config, op, projectId, "Deleting Firewall", userAgent, d.Timeout(resource_google_project_schema.TimeoutCreate)) - if err != nil { - return err - } - } - - token = resp.NextPageToken - paginate = token != "" - } - - return deleteComputeNetwork(projectId, networkName, userAgent, config) -} - -func updateProjectBillingAccount(d *resource_google_project_schema.ResourceData, config *Config, userAgent string) error { - parts := resource_google_project_strings.Split(d.Id(), "/") - pid := parts[len(parts)-1] - name := d.Get("billing_account").(string) - ba := &resource_google_project_cloudbilling.ProjectBillingInfo{} - - if name != "" { - ba.BillingAccountName = "billingAccounts/" + name - } - updateBillingInfoFunc := func() error { - _, err := config.NewBillingClient(userAgent).Projects.UpdateBillingInfo(prefixedProject(pid), ba).Do() - return err - } - err := retryTimeDuration(updateBillingInfoFunc, d.Timeout(resource_google_project_schema.TimeoutUpdate)) - if err != nil { - if err := d.Set("billing_account", ""); err != nil { - return resource_google_project_fmt.Errorf("Error setting 
billing_account: %s", err) - } - if _err, ok := err.(*resource_google_project_googleapi.Error); ok { - return resource_google_project_fmt.Errorf("Error setting billing account %q for project %q: %v", name, prefixedProject(pid), _err) - } - return resource_google_project_fmt.Errorf("Error setting billing account %q for project %q: %v", name, prefixedProject(pid), err) - } - for retries := 0; retries < 3; retries++ { - var ba *resource_google_project_cloudbilling.ProjectBillingInfo - err = retryTimeDuration(func() (reqErr error) { - ba, reqErr = config.NewBillingClient(userAgent).Projects.GetBillingInfo(prefixedProject(pid)).Do() - return reqErr - }, d.Timeout(resource_google_project_schema.TimeoutRead)) - if err != nil { - return resource_google_project_fmt.Errorf("Error getting billing info for project %q: %v", prefixedProject(pid), err) - } - baName := resource_google_project_strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/") - if baName == name { - return nil - } - resource_google_project_time.Sleep(3 * resource_google_project_time.Second) - } - return resource_google_project_fmt.Errorf("Timed out waiting for billing account to return correct value. 
Waiting for %s, got %s.", - name, resource_google_project_strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/")) -} - -func deleteComputeNetwork(project, network, userAgent string, config *Config) error { - op, err := config.NewComputeClient(userAgent).Networks.Delete( - project, network).Do() - if err != nil { - return resource_google_project_errwrap.Wrapf("Error deleting network: {{err}}", err) - } - - err = computeOperationWaitTime(config, op, project, "Deleting Network", userAgent, 10*resource_google_project_time.Minute) - if err != nil { - return err - } - return nil -} - -func readGoogleProject(d *resource_google_project_schema.ResourceData, config *Config, userAgent string) (*resource_google_project_cloudresourcemanager.Project, error) { - var p *resource_google_project_cloudresourcemanager.Project - - parts := resource_google_project_strings.Split(d.Id(), "/") - pid := parts[len(parts)-1] - err := retryTimeDuration(func() (reqErr error) { - p, reqErr = config.NewResourceManagerClient(userAgent).Projects.Get(pid).Do() - return reqErr - }, d.Timeout(resource_google_project_schema.TimeoutRead)) - return p, err -} - -func enableServiceUsageProjectServices(services []string, project, billingProject, userAgent string, config *Config, timeout resource_google_project_time.Duration) error { - - for i := 0; i < len(services); i += maxServiceUsageBatchSize { - j := i + maxServiceUsageBatchSize - if j > len(services) { - j = len(services) - } - nextBatch := services[i:j] - if len(nextBatch) == 0 { - - return nil - } - - if err := doEnableServicesRequest(nextBatch, project, billingProject, userAgent, config, timeout); err != nil { - return err - } - resource_google_project_log.Printf("[DEBUG] Finished enabling next batch of %d project services: %+v", len(nextBatch), nextBatch) - } - - resource_google_project_log.Printf("[DEBUG] Verifying that all services are enabled") - return waitForServiceUsageEnabledServices(services, project, billingProject, userAgent, 
config, timeout) -} - -func doEnableServicesRequest(services []string, project, billingProject, userAgent string, config *Config, timeout resource_google_project_time.Duration) error { - var op *resource_google_project_serviceusage.Operation - var call ServicesCall - err := retryTimeDuration(func() error { - var rerr error - if len(services) == 1 { - - name := resource_google_project_fmt.Sprintf("projects/%s/services/%s", project, services[0]) - req := &resource_google_project_serviceusage.EnableServiceRequest{} - call = config.NewServiceUsageClient(userAgent).Services.Enable(name, req) - } else { - - name := resource_google_project_fmt.Sprintf("projects/%s", project) - req := &resource_google_project_serviceusage.BatchEnableServicesRequest{ServiceIds: services} - call = config.NewServiceUsageClient(userAgent).Services.BatchEnable(name, req) - } - if config.UserProjectOverride && billingProject != "" { - call.Header().Add("X-Goog-User-Project", billingProject) - } - op, rerr = call.Do() - return handleServiceUsageRetryableError(rerr) - }, - timeout, - serviceUsageServiceBeingActivated, - ) - if err != nil { - return resource_google_project_errwrap.Wrapf("failed to send enable services request: {{err}}", err) - } - - waitErr := serviceUsageOperationWait(config, op, billingProject, resource_google_project_fmt.Sprintf("Enable Project %q Services: %+v", project, services), userAgent, timeout) - if waitErr != nil { - return waitErr - } - return nil -} - -func listCurrentlyEnabledServices(project, billingProject, userAgent string, config *Config, timeout resource_google_project_time.Duration) (map[string]struct{}, error) { - resource_google_project_log.Printf("[DEBUG] Listing enabled services for project %s", project) - apiServices := make(map[string]struct{}) - err := retryTimeDuration(func() error { - ctx := resource_google_project_context.Background() - call := config.NewServiceUsageClient(userAgent).Services.List(resource_google_project_fmt.Sprintf("projects/%s", 
project)) - if config.UserProjectOverride && billingProject != "" { - call.Header().Add("X-Goog-User-Project", billingProject) - } - return call.Fields("services/name,nextPageToken").Filter("state:ENABLED"). - Pages(ctx, func(r *resource_google_project_serviceusage.ListServicesResponse) error { - for _, v := range r.Services { - - name := GetResourceNameFromSelfLink(v.Name) - - if _, ok := ignoredProjectServicesSet[name]; !ok { - apiServices[name] = struct{}{} - - if v, ok := renamedServicesByOldAndNewServiceNames[name]; ok { - resource_google_project_log.Printf("[DEBUG] Adding service alias for %s to enabled services: %s", name, v) - apiServices[v] = struct{}{} - } - } - } - return nil - }) - }, timeout) - if err != nil { - return nil, resource_google_project_errwrap.Wrapf(resource_google_project_fmt.Sprintf("Failed to list enabled services for project %s: {{err}}", project), err) - } - return apiServices, nil -} - -func waitForServiceUsageEnabledServices(services []string, project, billingProject, userAgent string, config *Config, timeout resource_google_project_time.Duration) error { - missing := make([]string, 0, len(services)) - delay := resource_google_project_time.Duration(0) - interval := resource_google_project_time.Second - err := retryTimeDuration(func() error { - - enabledServices, err := listCurrentlyEnabledServices(project, billingProject, userAgent, config, timeout) - if err != nil { - return err - } - - missing := make([]string, 0, len(services)) - for _, s := range services { - if _, ok := enabledServices[s]; !ok { - missing = append(missing, s) - } - } - if len(missing) > 0 { - resource_google_project_log.Printf("[DEBUG] waiting %v before reading project %s services...", delay, project) - resource_google_project_time.Sleep(delay) - delay += interval - interval += delay - - return &resource_google_project_googleapi.Error{ - Code: 503, - Message: resource_google_project_fmt.Sprintf("The service(s) %q are still being enabled for project %s. 
This isn't a real API error, this is just eventual consistency.", missing, project), - } - } - return nil - }, timeout) - if err != nil { - return resource_google_project_errwrap.Wrap(err, resource_google_project_fmt.Errorf("failed to enable some service(s) %q for project %s", missing, project)) - } - return nil -} - -func resourceGoogleProjectDefaultServiceAccounts() *resource_google_project_default_service_accounts_schema.Resource { - return &resource_google_project_default_service_accounts_schema.Resource{ - Create: resourceGoogleProjectDefaultServiceAccountsCreate, - Read: resource_google_project_default_service_accounts_schema.Noop, - Update: resource_google_project_default_service_accounts_schema.Noop, - Delete: resourceGoogleProjectDefaultServiceAccountsDelete, - - Timeouts: &resource_google_project_default_service_accounts_schema.ResourceTimeout{ - Create: resource_google_project_default_service_accounts_schema.DefaultTimeout(10 * resource_google_project_default_service_accounts_time.Minute), - Read: resource_google_project_default_service_accounts_schema.DefaultTimeout(10 * resource_google_project_default_service_accounts_time.Minute), - Delete: resource_google_project_default_service_accounts_schema.DefaultTimeout(10 * resource_google_project_default_service_accounts_time.Minute), - }, - - Schema: map[string]*resource_google_project_default_service_accounts_schema.Schema{ - "project": { - Type: resource_google_project_default_service_accounts_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateProjectID(), - Description: `The project ID where service accounts are created.`, - }, - "action": { - Type: resource_google_project_default_service_accounts_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_google_project_default_service_accounts_validation.StringInSlice([]string{"DEPRIVILEGE", "DELETE", "DISABLE"}, false), - Description: `The action to be performed in the default service accounts. 
Valid values are: DEPRIVILEGE, DELETE, DISABLE. - Note that DEPRIVILEGE action will ignore the REVERT configuration in the restore_policy.`, - }, - "restore_policy": { - Type: resource_google_project_default_service_accounts_schema.TypeString, - Optional: true, - Default: "REVERT", - ValidateFunc: resource_google_project_default_service_accounts_validation.StringInSlice([]string{"NONE", "REVERT", "REVERT_AND_IGNORE_FAILURE"}, false), - Description: `The action to be performed in the default service accounts on the resource destroy. - Valid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.`, - }, - "service_accounts": { - Type: resource_google_project_default_service_accounts_schema.TypeMap, - Computed: true, - Description: `The Service Accounts changed by this resource. It is used for revert the action on the destroy.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGoogleProjectDefaultServiceAccountsDoAction(d *resource_google_project_default_service_accounts_schema.ResourceData, meta interface{}, action, uniqueID, email, project string) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - restorePolicy := d.Get("restore_policy").(string) - serviceAccountSelfLink := resource_google_project_default_service_accounts_fmt.Sprintf("projects/%s/serviceAccounts/%s", project, uniqueID) - switch action { - case "DELETE": - _, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Delete(serviceAccountSelfLink).Do() - if err != nil { - return resource_google_project_default_service_accounts_fmt.Errorf("cannot delete service account %s: %v", serviceAccountSelfLink, err) - } - case "UNDELETE": - _, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Undelete(serviceAccountSelfLink, &resource_google_project_default_service_accounts_iam.UndeleteServiceAccountRequest{}).Do() - errExpected := restorePolicy == 
"REVERT_AND_IGNORE_FAILURE" - errReceived := err != nil - if errReceived { - if !errExpected { - return resource_google_project_default_service_accounts_fmt.Errorf("cannot undelete service account %s: %v", serviceAccountSelfLink, err) - } - resource_google_project_default_service_accounts_log.Printf("cannot undelete service account %s: %v", serviceAccountSelfLink, err) - resource_google_project_default_service_accounts_log.Printf("restore policy is %s... ignoring error", restorePolicy) - } - case "DISABLE": - _, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Disable(serviceAccountSelfLink, &resource_google_project_default_service_accounts_iam.DisableServiceAccountRequest{}).Do() - if err != nil { - return resource_google_project_default_service_accounts_fmt.Errorf("cannot disable service account %s: %v", serviceAccountSelfLink, err) - } - case "ENABLE": - _, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Enable(serviceAccountSelfLink, &resource_google_project_default_service_accounts_iam.EnableServiceAccountRequest{}).Do() - errReceived := err != nil - errExpected := restorePolicy == "REVERT_AND_IGNORE_FAILURE" - if errReceived { - if !errExpected { - return resource_google_project_default_service_accounts_fmt.Errorf("cannot enable service account %s: %v", serviceAccountSelfLink, err) - } - resource_google_project_default_service_accounts_log.Printf("cannot enable service account %s: %v", serviceAccountSelfLink, err) - resource_google_project_default_service_accounts_log.Printf("restore policy is %s... 
ignoring error", restorePolicy) - } - case "DEPRIVILEGE": - iamPolicy, err := config.NewResourceManagerClient(userAgent).Projects.GetIamPolicy(project, &resource_google_project_default_service_accounts_cloudresourcemanagercloudresourcemanager.GetIamPolicyRequest{}).Do() - if err != nil { - return resource_google_project_default_service_accounts_fmt.Errorf("cannot get IAM policy on project %s: %v", project, err) - } - - for _, bind := range iamPolicy.Bindings { - newMembers := []string{} - for _, member := range bind.Members { - if member != resource_google_project_default_service_accounts_fmt.Sprintf("serviceAccount:%s", email) { - newMembers = append(newMembers, member) - } - } - bind.Members = newMembers - } - updateRequest := &resource_google_project_default_service_accounts_cloudresourcemanagercloudresourcemanager.SetIamPolicyRequest{ - Policy: iamPolicy, - UpdateMask: "bindings,etag,auditConfigs", - } - _, err = config.NewResourceManagerClient(userAgent).Projects.SetIamPolicy(project, updateRequest).Do() - if err != nil { - return resource_google_project_default_service_accounts_fmt.Errorf("cannot update IAM policy on project %s: %v", project, err) - } - default: - return resource_google_project_default_service_accounts_fmt.Errorf("action %s is not a valid action", action) - } - - return nil -} - -func resourceGoogleProjectDefaultServiceAccountsCreate(d *resource_google_project_default_service_accounts_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - pid := d.Get("project").(string) - action := d.Get("action").(string) - - serviceAccounts, err := listServiceAccounts(config, d, userAgent) - if err != nil { - return resource_google_project_default_service_accounts_fmt.Errorf("error listing service accounts on project %s: %v", pid, err) - } - changedServiceAccounts := make(map[string]interface{}) - for _, sa := range serviceAccounts { - - 
if isDefaultServiceAccount(sa.DisplayName) { - err := resourceGoogleProjectDefaultServiceAccountsDoAction(d, meta, action, sa.UniqueId, sa.Email, pid) - if err != nil { - return resource_google_project_default_service_accounts_fmt.Errorf("error doing action %s on Service Account %s: %v", action, sa.Email, err) - } - changedServiceAccounts[sa.UniqueId] = sa.Email - } - } - if err := d.Set("service_accounts", changedServiceAccounts); err != nil { - return resource_google_project_default_service_accounts_fmt.Errorf("error setting service_accounts: %s", err) - } - d.SetId(prefixedProject(pid)) - - return nil -} - -func listServiceAccounts(config *Config, d *resource_google_project_default_service_accounts_schema.ResourceData, userAgent string) ([]*resource_google_project_default_service_accounts_iam.ServiceAccount, error) { - pid := d.Get("project").(string) - response, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.List(prefixedProject(pid)).Do() - if err != nil { - return nil, resource_google_project_default_service_accounts_fmt.Errorf("failed to list service accounts on project %q: %v", pid, err) - } - return response.Accounts, nil -} - -func resourceGoogleProjectDefaultServiceAccountsDelete(d *resource_google_project_default_service_accounts_schema.ResourceData, meta interface{}) error { - if d.Get("restore_policy").(string) == "NONE" { - d.SetId("") - return nil - } - - pid := d.Get("project").(string) - for saUniqueID, saEmail := range d.Get("service_accounts").(map[string]interface{}) { - origAction := d.Get("action").(string) - newAction := "" - - if origAction == "DISABLE" { - newAction = "ENABLE" - } else if origAction == "DELETE" { - newAction = "UNDELETE" - } - if newAction != "" { - err := resourceGoogleProjectDefaultServiceAccountsDoAction(d, meta, newAction, saUniqueID, saEmail.(string), pid) - if err != nil { - return resource_google_project_default_service_accounts_fmt.Errorf("error doing action %s on Service Account %s: %v", newAction, 
saUniqueID, err) - } - } - } - - d.SetId("") - - return nil -} - -func isDefaultServiceAccount(displayName string) bool { - gceDefaultSA := "compute engine default service account" - appEngineDefaultSA := "app engine default service account" - saDisplayName := resource_google_project_default_service_accounts_strings.ToLower(displayName) - if saDisplayName == gceDefaultSA || saDisplayName == appEngineDefaultSA { - return true - } - - return false -} - -func resourceGoogleProjectIamCustomRole() *resource_google_project_iam_custom_role_schema.Resource { - return &resource_google_project_iam_custom_role_schema.Resource{ - Create: resourceGoogleProjectIamCustomRoleCreate, - Read: resourceGoogleProjectIamCustomRoleRead, - Update: resourceGoogleProjectIamCustomRoleUpdate, - Delete: resourceGoogleProjectIamCustomRoleDelete, - - Importer: &resource_google_project_iam_custom_role_schema.ResourceImporter{ - State: resourceGoogleProjectIamCustomRoleImport, - }, - - Schema: map[string]*resource_google_project_iam_custom_role_schema.Schema{ - "role_id": { - Type: resource_google_project_iam_custom_role_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The camel case role id to use for this role. Cannot contain - characters.`, - ValidateFunc: validateIAMCustomRoleID, - }, - "title": { - Type: resource_google_project_iam_custom_role_schema.TypeString, - Required: true, - Description: `A human-readable title for the role.`, - }, - "permissions": { - Type: resource_google_project_iam_custom_role_schema.TypeSet, - Required: true, - MinItems: 1, - Description: `The names of the permissions this role grants when bound in an IAM policy. 
At least one permission must be specified.`, - Elem: &resource_google_project_iam_custom_role_schema.Schema{Type: resource_google_project_iam_custom_role_schema.TypeString}, - }, - "project": { - Type: resource_google_project_iam_custom_role_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The project that the service account will be created in. Defaults to the provider project configuration.`, - }, - "stage": { - Type: resource_google_project_iam_custom_role_schema.TypeString, - Optional: true, - Default: "GA", - Description: `The current launch stage of the role. Defaults to GA.`, - ValidateFunc: resource_google_project_iam_custom_role_validation.StringInSlice([]string{"ALPHA", "BETA", "GA", "DEPRECATED", "DISABLED", "EAP"}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("ALPHA"), - }, - "description": { - Type: resource_google_project_iam_custom_role_schema.TypeString, - Optional: true, - Description: `A human-readable description for the role.`, - }, - "deleted": { - Type: resource_google_project_iam_custom_role_schema.TypeBool, - Computed: true, - Description: `The current deleted state of the role.`, - }, - "name": { - Type: resource_google_project_iam_custom_role_schema.TypeString, - Computed: true, - Description: `The name of the role in the format projects/{{project}}/roles/{{role_id}}. 
Like id, this field can be used as a reference in other resources such as IAM role bindings.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGoogleProjectIamCustomRoleCreate(d *resource_google_project_iam_custom_role_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - roleId := resource_google_project_iam_custom_role_fmt.Sprintf("projects/%s/roles/%s", project, d.Get("role_id").(string)) - r, err := config.NewIamClient(userAgent).Projects.Roles.Get(roleId).Do() - if err == nil { - if r.Deleted { - - d.SetId(r.Name) - if err := resourceGoogleProjectIamCustomRoleUpdate(d, meta); err != nil { - - d.SetId("") - return err - } - } else { - - return resource_google_project_iam_custom_role_fmt.Errorf("Custom project role %s already exists and must be imported", roleId) - } - } else if err := handleNotFoundError(err, d, resource_google_project_iam_custom_role_fmt.Sprintf("Custom Project Role %q", roleId)); err == nil { - - role, err := config.NewIamClient(userAgent).Projects.Roles.Create("projects/"+project, &resource_google_project_iam_custom_role_iam.CreateRoleRequest{ - RoleId: d.Get("role_id").(string), - Role: &resource_google_project_iam_custom_role_iam.Role{ - Title: d.Get("title").(string), - Description: d.Get("description").(string), - Stage: d.Get("stage").(string), - IncludedPermissions: convertStringSet(d.Get("permissions").(*resource_google_project_iam_custom_role_schema.Set)), - }, - }).Do() - if err != nil { - return resource_google_project_iam_custom_role_fmt.Errorf("Error creating the custom project role %s: %v", roleId, err) - } - - d.SetId(role.Name) - } else { - return resource_google_project_iam_custom_role_fmt.Errorf("Unable to verify whether custom project role %s already exists and must be undeleted: %v", roleId, err) - } - - return 
resourceGoogleProjectIamCustomRoleRead(d, meta) -} - -func extractProjectFromProjectIamCustomRoleID(id string) string { - parts := resource_google_project_iam_custom_role_strings.Split(id, "/") - - return parts[1] -} - -func resourceGoogleProjectIamCustomRoleRead(d *resource_google_project_iam_custom_role_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project := extractProjectFromProjectIamCustomRoleID(d.Id()) - - role, err := config.NewIamClient(userAgent).Projects.Roles.Get(d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, d.Id()) - } - - if err := d.Set("role_id", GetResourceNameFromSelfLink(role.Name)); err != nil { - return resource_google_project_iam_custom_role_fmt.Errorf("Error setting role_id: %s", err) - } - if err := d.Set("title", role.Title); err != nil { - return resource_google_project_iam_custom_role_fmt.Errorf("Error setting title: %s", err) - } - if err := d.Set("name", role.Name); err != nil { - return resource_google_project_iam_custom_role_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("description", role.Description); err != nil { - return resource_google_project_iam_custom_role_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("permissions", role.IncludedPermissions); err != nil { - return resource_google_project_iam_custom_role_fmt.Errorf("Error setting permissions: %s", err) - } - if err := d.Set("stage", role.Stage); err != nil { - return resource_google_project_iam_custom_role_fmt.Errorf("Error setting stage: %s", err) - } - if err := d.Set("deleted", role.Deleted); err != nil { - return resource_google_project_iam_custom_role_fmt.Errorf("Error setting deleted: %s", err) - } - if err := d.Set("project", project); err != nil { - return resource_google_project_iam_custom_role_fmt.Errorf("Error setting project: %s", err) - } - - return nil -} - -func 
resourceGoogleProjectIamCustomRoleUpdate(d *resource_google_project_iam_custom_role_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - d.Partial(true) - - r, err := config.NewIamClient(userAgent).Projects.Roles.Get(d.Id()).Do() - if err != nil { - return resource_google_project_iam_custom_role_fmt.Errorf("unable to find custom project role %s to update: %v", d.Id(), err) - } - if r.Deleted { - _, err := config.NewIamClient(userAgent).Projects.Roles.Undelete(d.Id(), &resource_google_project_iam_custom_role_iam.UndeleteRoleRequest{}).Do() - if err != nil { - return resource_google_project_iam_custom_role_fmt.Errorf("Error undeleting the custom project role %s: %s", d.Get("title").(string), err) - } - } - - if d.HasChange("title") || d.HasChange("description") || d.HasChange("stage") || d.HasChange("permissions") { - _, err := config.NewIamClient(userAgent).Projects.Roles.Patch(d.Id(), &resource_google_project_iam_custom_role_iam.Role{ - Title: d.Get("title").(string), - Description: d.Get("description").(string), - Stage: d.Get("stage").(string), - IncludedPermissions: convertStringSet(d.Get("permissions").(*resource_google_project_iam_custom_role_schema.Set)), - }).Do() - - if err != nil { - return resource_google_project_iam_custom_role_fmt.Errorf("Error updating the custom project role %s: %s", d.Get("title").(string), err) - } - } - - d.Partial(false) - return nil -} - -func resourceGoogleProjectIamCustomRoleDelete(d *resource_google_project_iam_custom_role_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - _, err = config.NewIamClient(userAgent).Projects.Roles.Delete(d.Id()).Do() - if err != nil { - return resource_google_project_iam_custom_role_fmt.Errorf("Error deleting the custom project role %s: %s", 
d.Get("title").(string), err) - } - - return nil -} - -func resourceGoogleProjectIamCustomRoleImport(d *resource_google_project_iam_custom_role_schema.ResourceData, meta interface{}) ([]*resource_google_project_iam_custom_role_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/roles/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/roles/{{role_id}}") - if err != nil { - return nil, resource_google_project_iam_custom_role_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_google_project_iam_custom_role_schema.ResourceData{d}, nil -} - -func resourceGoogleProjectMigrateState(v int, s *resource_google_project_migrate_terraform.InstanceState, meta interface{}) (*resource_google_project_migrate_terraform.InstanceState, error) { - if s.Empty() { - resource_google_project_migrate_log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return s, nil - } - - switch v { - case 0: - resource_google_project_migrate_log.Println("[INFO] Found Google Project State v0; migrating to v1") - s, err := migrateGoogleProjectStateV0toV1(s, meta.(*Config)) - if err != nil { - return s, err - } - return s, nil - default: - return s, resource_google_project_migrate_fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateGoogleProjectStateV0toV1(s *resource_google_project_migrate_terraform.InstanceState, config *Config) (*resource_google_project_migrate_terraform.InstanceState, error) { - resource_google_project_migrate_log.Printf("[DEBUG] Attributes before migration: %#v", s.Attributes) - - s.Attributes["skip_delete"] = "true" - s.Attributes["project_id"] = s.ID - - if s.Attributes["policy_data"] != "" { - p, err := getProjectIamPolicy(s.ID, config) - if err != nil { - return s, resource_google_project_migrate_fmt.Errorf("Could not retrieve project's IAM 
policy while attempting to migrate state from V0 to V1: %v", err) - } - s.Attributes["policy_etag"] = p.Etag - } - - resource_google_project_migrate_log.Printf("[DEBUG] Attributes after migration: %#v", s.Attributes) - return s, nil -} - -func getProjectIamPolicy(project string, config *Config) (*resource_google_project_migrate_cloudresourcemanager.Policy, error) { - p, err := config.NewResourceManagerClient(config.userAgent).Projects.GetIamPolicy(project, - &resource_google_project_migrate_cloudresourcemanager.GetIamPolicyRequest{ - Options: &resource_google_project_migrate_cloudresourcemanager.GetPolicyOptions{ - RequestedPolicyVersion: iamPolicyVersion, - }, - }).Do() - - if err != nil { - return nil, resource_google_project_migrate_fmt.Errorf("Error retrieving IAM policy for project %q: %s", project, err) - } - return p, nil -} - -func resourceGoogleProjectOrganizationPolicy() *resource_google_project_organization_policy_schema.Resource { - return &resource_google_project_organization_policy_schema.Resource{ - Create: resourceGoogleProjectOrganizationPolicyCreate, - Read: resourceGoogleProjectOrganizationPolicyRead, - Update: resourceGoogleProjectOrganizationPolicyUpdate, - Delete: resourceGoogleProjectOrganizationPolicyDelete, - - Importer: &resource_google_project_organization_policy_schema.ResourceImporter{ - State: resourceProjectOrgPolicyImporter, - }, - - Timeouts: &resource_google_project_organization_policy_schema.ResourceTimeout{ - Create: resource_google_project_organization_policy_schema.DefaultTimeout(4 * resource_google_project_organization_policy_time.Minute), - Update: resource_google_project_organization_policy_schema.DefaultTimeout(4 * resource_google_project_organization_policy_time.Minute), - Read: resource_google_project_organization_policy_schema.DefaultTimeout(4 * resource_google_project_organization_policy_time.Minute), - Delete: resource_google_project_organization_policy_schema.DefaultTimeout(4 * 
resource_google_project_organization_policy_time.Minute), - }, - - Schema: mergeSchemas( - schemaOrganizationPolicy, - map[string]*resource_google_project_organization_policy_schema.Schema{ - "project": { - Type: resource_google_project_organization_policy_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The project ID.`, - }, - }, - ), - UseJSONNumber: true, - } -} - -func resourceProjectOrgPolicyImporter(d *resource_google_project_organization_policy_schema.ResourceData, meta interface{}) ([]*resource_google_project_organization_policy_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "projects/(?P[^/]+):constraints/(?P[^/]+)", - "(?P[^/]+):constraints/(?P[^/]+)", - "(?P[^/]+):(?P[^/]+)"}, - d, config); err != nil { - return nil, err - } - - if d.Get("project") == "" || d.Get("constraint") == "" { - return nil, resource_google_project_organization_policy_fmt.Errorf("unable to parse project or constraint. Check import formats") - } - - return []*resource_google_project_organization_policy_schema.ResourceData{d}, nil -} - -func resourceGoogleProjectOrganizationPolicyCreate(d *resource_google_project_organization_policy_schema.ResourceData, meta interface{}) error { - d.SetId(resource_google_project_organization_policy_fmt.Sprintf("%s:%s", d.Get("project"), d.Get("constraint"))) - - if isOrganizationPolicyUnset(d) { - return resourceGoogleProjectOrganizationPolicyDelete(d, meta) - } - - if err := setProjectOrganizationPolicy(d, meta); err != nil { - return err - } - - return resourceGoogleProjectOrganizationPolicyRead(d, meta) -} - -func resourceGoogleProjectOrganizationPolicyRead(d *resource_google_project_organization_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - project := prefixedProject(d.Get("project").(string)) - - var policy 
*resource_google_project_organization_policy_cloudresourcemanager.OrgPolicy - err = retryTimeDuration(func() (readErr error) { - policy, readErr = config.NewResourceManagerClient(userAgent).Projects.GetOrgPolicy(project, &resource_google_project_organization_policy_cloudresourcemanager.GetOrgPolicyRequest{ - Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), - }).Do() - return readErr - }, d.Timeout(resource_google_project_organization_policy_schema.TimeoutRead)) - if err != nil { - return handleNotFoundError(err, d, resource_google_project_organization_policy_fmt.Sprintf("Organization policy for %s", project)) - } - - if err := d.Set("constraint", policy.Constraint); err != nil { - return resource_google_project_organization_policy_fmt.Errorf("Error setting constraint: %s", err) - } - if err := d.Set("boolean_policy", flattenBooleanOrganizationPolicy(policy.BooleanPolicy)); err != nil { - return resource_google_project_organization_policy_fmt.Errorf("Error setting boolean_policy: %s", err) - } - if err := d.Set("list_policy", flattenListOrganizationPolicy(policy.ListPolicy)); err != nil { - return resource_google_project_organization_policy_fmt.Errorf("Error setting list_policy: %s", err) - } - if err := d.Set("restore_policy", flattenRestoreOrganizationPolicy(policy.RestoreDefault)); err != nil { - return resource_google_project_organization_policy_fmt.Errorf("Error setting restore_policy: %s", err) - } - if err := d.Set("version", policy.Version); err != nil { - return resource_google_project_organization_policy_fmt.Errorf("Error setting version: %s", err) - } - if err := d.Set("etag", policy.Etag); err != nil { - return resource_google_project_organization_policy_fmt.Errorf("Error setting etag: %s", err) - } - if err := d.Set("update_time", policy.UpdateTime); err != nil { - return resource_google_project_organization_policy_fmt.Errorf("Error setting update_time: %s", err) - } - - return nil -} - -func 
resourceGoogleProjectOrganizationPolicyUpdate(d *resource_google_project_organization_policy_schema.ResourceData, meta interface{}) error { - if isOrganizationPolicyUnset(d) { - return resourceGoogleProjectOrganizationPolicyDelete(d, meta) - } - - if err := setProjectOrganizationPolicy(d, meta); err != nil { - return err - } - - return resourceGoogleProjectOrganizationPolicyRead(d, meta) -} - -func resourceGoogleProjectOrganizationPolicyDelete(d *resource_google_project_organization_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - project := prefixedProject(d.Get("project").(string)) - - return retryTimeDuration(func() error { - _, err := config.NewResourceManagerClient(userAgent).Projects.ClearOrgPolicy(project, &resource_google_project_organization_policy_cloudresourcemanager.ClearOrgPolicyRequest{ - Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), - }).Do() - return err - }, d.Timeout(resource_google_project_organization_policy_schema.TimeoutDelete)) -} - -func setProjectOrganizationPolicy(d *resource_google_project_organization_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project := prefixedProject(d.Get("project").(string)) - - listPolicy, err := expandListOrganizationPolicy(d.Get("list_policy").([]interface{})) - if err != nil { - return err - } - - restore_default, err := expandRestoreOrganizationPolicy(d.Get("restore_policy").([]interface{})) - if err != nil { - return err - } - - return retryTimeDuration(func() error { - _, err := config.NewResourceManagerClient(userAgent).Projects.SetOrgPolicy(project, &resource_google_project_organization_policy_cloudresourcemanager.SetOrgPolicyRequest{ - Policy: 
&resource_google_project_organization_policy_cloudresourcemanager.OrgPolicy{ - Constraint: canonicalOrgPolicyConstraint(d.Get("constraint").(string)), - BooleanPolicy: expandBooleanOrganizationPolicy(d.Get("boolean_policy").([]interface{})), - ListPolicy: listPolicy, - RestoreDefault: restore_default, - Version: int64(d.Get("version").(int)), - Etag: d.Get("etag").(string), - }, - }).Do() - return err - }, d.Timeout(resource_google_project_organization_policy_schema.TimeoutCreate)) -} - -var ignoredProjectServices = []string{"dataproc-control.googleapis.com", "source.googleapis.com", "stackdriverprovisioning.googleapis.com"} - -var ignoredProjectServicesSet = golangSetFromStringSlice(ignoredProjectServices) - -var bannedProjectServices = []string{"bigquery-json.googleapis.com"} - -var renamedServices = map[string]string{} - -var renamedServicesByNewServiceNames = reverseStringMap(renamedServices) - -var renamedServicesByOldAndNewServiceNames = mergeStringMaps(renamedServices, renamedServicesByNewServiceNames) - -const maxServiceUsageBatchSize = 20 - -func validateProjectServiceService(val interface{}, key string) (warns []string, errs []error) { - bannedServicesFunc := StringNotInSlice(append(ignoredProjectServices, bannedProjectServices...), false) - warns, errs = bannedServicesFunc(val, key) - if len(errs) > 0 { - return - } - - v, _ := val.(string) - if !resource_google_project_service_strings.Contains(v, ".") { - errs = append(errs, resource_google_project_service_fmt.Errorf("expected %s to be a domain like serviceusage.googleapis.com", v)) - } - return -} - -func resourceGoogleProjectService() *resource_google_project_service_schema.Resource { - return &resource_google_project_service_schema.Resource{ - Create: resourceGoogleProjectServiceCreate, - Read: resourceGoogleProjectServiceRead, - Delete: resourceGoogleProjectServiceDelete, - Update: resourceGoogleProjectServiceUpdate, - - Importer: &resource_google_project_service_schema.ResourceImporter{ - State: 
resourceGoogleProjectServiceImport, - }, - - Timeouts: &resource_google_project_service_schema.ResourceTimeout{ - Create: resource_google_project_service_schema.DefaultTimeout(20 * resource_google_project_service_time.Minute), - Update: resource_google_project_service_schema.DefaultTimeout(20 * resource_google_project_service_time.Minute), - Read: resource_google_project_service_schema.DefaultTimeout(10 * resource_google_project_service_time.Minute), - Delete: resource_google_project_service_schema.DefaultTimeout(20 * resource_google_project_service_time.Minute), - }, - - Schema: map[string]*resource_google_project_service_schema.Schema{ - "service": { - Type: resource_google_project_service_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateProjectServiceService, - }, - "project": { - Type: resource_google_project_service_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - DiffSuppressFunc: compareResourceNames, - }, - - "disable_dependent_services": { - Type: resource_google_project_service_schema.TypeBool, - Optional: true, - }, - - "disable_on_destroy": { - Type: resource_google_project_service_schema.TypeBool, - Optional: true, - Default: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGoogleProjectServiceImport(d *resource_google_project_service_schema.ResourceData, m interface{}) ([]*resource_google_project_service_schema.ResourceData, error) { - parts := resource_google_project_service_strings.Split(d.Id(), "/") - if len(parts) != 2 { - return nil, resource_google_project_service_fmt.Errorf("Invalid google_project_service id format for import, expecting `{project}/{service}`, found %s", d.Id()) - } - if err := d.Set("project", parts[0]); err != nil { - return nil, resource_google_project_service_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("service", parts[1]); err != nil { - return nil, resource_google_project_service_fmt.Errorf("Error setting service: %s", err) - } - 
return []*resource_google_project_service_schema.ResourceData{d}, nil -} - -func resourceGoogleProjectServiceCreate(d *resource_google_project_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - project, err := getProject(d, config) - if err != nil { - return err - } - project = GetResourceNameFromSelfLink(project) - - srv := d.Get("service").(string) - id := project + "/" + srv - - servicesRaw, err := BatchRequestReadServices(project, d, config) - if err != nil { - return handleNotFoundError(err, d, resource_google_project_service_fmt.Sprintf("Project Service %s", d.Id())) - } - servicesList := servicesRaw.(map[string]struct{}) - if _, ok := servicesList[srv]; ok { - resource_google_project_service_log.Printf("[DEBUG] service %s was already found to be enabled in project %s", srv, project) - d.SetId(id) - if err := d.Set("project", project); err != nil { - return resource_google_project_service_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("service", srv); err != nil { - return resource_google_project_service_fmt.Errorf("Error setting service: %s", err) - } - return nil - } - - err = BatchRequestEnableService(srv, project, d, config) - if err != nil { - return err - } - d.SetId(id) - return resourceGoogleProjectServiceRead(d, meta) -} - -func resourceGoogleProjectServiceRead(d *resource_google_project_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - project = GetResourceNameFromSelfLink(project) - - projectGetCall := config.NewResourceManagerClient(userAgent).Projects.Get(project) - if config.UserProjectOverride { - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - projectGetCall.Header().Add("X-Goog-User-Project", billingProject) - } - 
p, err := projectGetCall.Do() - - if err == nil && p.LifecycleState == "DELETE_REQUESTED" { - - err = &resource_google_project_service_googleapi.Error{ - Code: 404, - Message: "Project deletion was requested", - } - } - if err != nil { - return handleNotFoundError(err, d, resource_google_project_service_fmt.Sprintf("Project Service %s", d.Id())) - } - - servicesRaw, err := BatchRequestReadServices(project, d, config) - if err != nil { - return handleNotFoundError(err, d, resource_google_project_service_fmt.Sprintf("Project Service %s", d.Id())) - } - servicesList := servicesRaw.(map[string]struct{}) - - srv := d.Get("service").(string) - if _, ok := servicesList[srv]; ok { - if err := d.Set("project", project); err != nil { - return resource_google_project_service_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("service", srv); err != nil { - return resource_google_project_service_fmt.Errorf("Error setting service: %s", err) - } - return nil - } - - resource_google_project_service_log.Printf("[DEBUG] service %s not in enabled services for project %s, removing from state", srv, project) - d.SetId("") - return nil -} - -func resourceGoogleProjectServiceDelete(d *resource_google_project_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - if disable := d.Get("disable_on_destroy"); !(disable.(bool)) { - resource_google_project_service_log.Printf("[WARN] Project service %q disable_on_destroy is false, skip disabling service", d.Id()) - d.SetId("") - return nil - } - - project, err := getProject(d, config) - if err != nil { - return err - } - project = GetResourceNameFromSelfLink(project) - - service := d.Get("service").(string) - disableDependencies := d.Get("disable_dependent_services").(bool) - if err = disableServiceUsageProjectService(service, project, d, config, disableDependencies); err != nil { - return handleNotFoundError(err, d, resource_google_project_service_fmt.Sprintf("Project Service %s", d.Id())) - } - - 
d.SetId("") - return nil -} - -func resourceGoogleProjectServiceUpdate(d *resource_google_project_service_schema.ResourceData, meta interface{}) error { - - return nil -} - -func disableServiceUsageProjectService(service, project string, d *resource_google_project_service_schema.ResourceData, config *Config, disableDependentServices bool) error { - err := retryTimeDuration(func() error { - billingProject := project - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - name := resource_google_project_service_fmt.Sprintf("projects/%s/services/%s", project, service) - servicesDisableCall := config.NewServiceUsageClient(userAgent).Services.Disable(name, &resource_google_project_service_serviceusage.DisableServiceRequest{ - DisableDependentServices: disableDependentServices, - }) - if config.UserProjectOverride { - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - servicesDisableCall.Header().Add("X-Goog-User-Project", billingProject) - } - sop, err := servicesDisableCall.Do() - if err != nil { - return err - } - - waitErr := serviceUsageOperationWait(config, sop, billingProject, "api to disable", userAgent, d.Timeout(resource_google_project_service_schema.TimeoutDelete)) - if waitErr != nil { - return waitErr - } - return nil - }, d.Timeout(resource_google_project_service_schema.TimeoutDelete), serviceUsageServiceBeingActivated) - if err != nil { - return resource_google_project_service_fmt.Errorf("Error disabling service %q for project %q: %v", service, project, err) - } - return nil -} - -func resourceGoogleServiceAccount() *resource_google_service_account_schema.Resource { - return &resource_google_service_account_schema.Resource{ - Create: resourceGoogleServiceAccountCreate, - Read: resourceGoogleServiceAccountRead, - Delete: resourceGoogleServiceAccountDelete, - Update: resourceGoogleServiceAccountUpdate, - Importer: &resource_google_service_account_schema.ResourceImporter{ - 
State: resourceGoogleServiceAccountImport, - }, - Timeouts: &resource_google_service_account_schema.ResourceTimeout{ - Create: resource_google_service_account_schema.DefaultTimeout(5 * resource_google_service_account_time.Minute), - }, - Schema: map[string]*resource_google_service_account_schema.Schema{ - "email": { - Type: resource_google_service_account_schema.TypeString, - Computed: true, - Description: `The e-mail address of the service account. This value should be referenced from any google_iam_policy data sources that would grant the service account privileges.`, - }, - "unique_id": { - Type: resource_google_service_account_schema.TypeString, - Computed: true, - Description: `The unique id of the service account.`, - }, - "name": { - Type: resource_google_service_account_schema.TypeString, - Computed: true, - Description: `The fully-qualified name of the service account.`, - }, - "account_id": { - Type: resource_google_service_account_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRFC1035Name(6, 30), - Description: `The account id that is used to generate the service account email address and a stable unique id. It is unique within a project, must be 6-30 characters long, and match the regular expression [a-z]([-a-z0-9]*[a-z0-9]) to comply with RFC1035. Changing this forces a new service account to be created.`, - }, - "display_name": { - Type: resource_google_service_account_schema.TypeString, - Optional: true, - Description: `The display name for the service account. Can be updated without creating a new resource.`, - }, - "disabled": { - Type: resource_google_service_account_schema.TypeBool, - Optional: true, - Default: false, - Description: `Whether the service account is disabled. 
Defaults to false`, - }, - "description": { - Type: resource_google_service_account_schema.TypeString, - Optional: true, - ValidateFunc: resource_google_service_account_validation.StringLenBetween(0, 256), - Description: `A text description of the service account. Must be less than or equal to 256 UTF-8 bytes.`, - }, - "project": { - Type: resource_google_service_account_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The ID of the project that the service account will be created in. Defaults to the provider project configuration.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGoogleServiceAccountCreate(d *resource_google_service_account_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - aid := d.Get("account_id").(string) - displayName := d.Get("display_name").(string) - description := d.Get("description").(string) - - sa := &resource_google_service_account_iam.ServiceAccount{ - DisplayName: displayName, - Description: description, - } - - r := &resource_google_service_account_iam.CreateServiceAccountRequest{ - AccountId: aid, - ServiceAccount: sa, - } - - sa, err = config.NewIamClient(userAgent).Projects.ServiceAccounts.Create("projects/"+project, r).Do() - if err != nil { - return resource_google_service_account_fmt.Errorf("Error creating service account: %s", err) - } - - d.SetId(sa.Name) - - err = retryTimeDuration(func() (operr error) { - _, saerr := config.NewIamClient(userAgent).Projects.ServiceAccounts.Get(d.Id()).Do() - return saerr - }, d.Timeout(resource_google_service_account_schema.TimeoutCreate), isNotFoundRetryableError("service account creation")) - - if err != nil { - return resource_google_service_account_fmt.Errorf("Error reading service account after creation: %s", err) - } - - return 
resourceGoogleServiceAccountRead(d, meta) -} - -func resourceGoogleServiceAccountRead(d *resource_google_service_account_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - sa, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Get(d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, resource_google_service_account_fmt.Sprintf("Service Account %q", d.Id())) - } - - if err := d.Set("email", sa.Email); err != nil { - return resource_google_service_account_fmt.Errorf("Error setting email: %s", err) - } - if err := d.Set("unique_id", sa.UniqueId); err != nil { - return resource_google_service_account_fmt.Errorf("Error setting unique_id: %s", err) - } - if err := d.Set("project", sa.ProjectId); err != nil { - return resource_google_service_account_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("account_id", resource_google_service_account_strings.Split(sa.Email, "@")[0]); err != nil { - return resource_google_service_account_fmt.Errorf("Error setting account_id: %s", err) - } - if err := d.Set("name", sa.Name); err != nil { - return resource_google_service_account_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("display_name", sa.DisplayName); err != nil { - return resource_google_service_account_fmt.Errorf("Error setting display_name: %s", err) - } - if err := d.Set("description", sa.Description); err != nil { - return resource_google_service_account_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("disabled", sa.Disabled); err != nil { - return resource_google_service_account_fmt.Errorf("Error setting disabled: %s", err) - } - return nil -} - -func resourceGoogleServiceAccountDelete(d *resource_google_service_account_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if 
err != nil { - return err - } - name := d.Id() - _, err = config.NewIamClient(userAgent).Projects.ServiceAccounts.Delete(name).Do() - if err != nil { - return err - } - d.SetId("") - return nil -} - -func resourceGoogleServiceAccountUpdate(d *resource_google_service_account_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - sa, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Get(d.Id()).Do() - if err != nil { - return resource_google_service_account_fmt.Errorf("Error retrieving service account %q: %s", d.Id(), err) - } - updateMask := make([]string, 0) - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - if d.HasChange("display_name") { - updateMask = append(updateMask, "display_name") - } - - if d.HasChange("disabled") && !d.Get("disabled").(bool) { - _, err = config.NewIamClient(userAgent).Projects.ServiceAccounts.Enable(d.Id(), - &resource_google_service_account_iam.EnableServiceAccountRequest{}).Do() - if err != nil { - return err - } - - if len(updateMask) == 0 { - return nil - } - - } else if d.HasChange("disabled") && d.Get("disabled").(bool) { - _, err = config.NewIamClient(userAgent).Projects.ServiceAccounts.Disable(d.Id(), - &resource_google_service_account_iam.DisableServiceAccountRequest{}).Do() - if err != nil { - return err - } - - if len(updateMask) == 0 { - return nil - } - } - - _, err = config.NewIamClient(userAgent).Projects.ServiceAccounts.Patch(d.Id(), - &resource_google_service_account_iam.PatchServiceAccountRequest{ - UpdateMask: resource_google_service_account_strings.Join(updateMask, ","), - ServiceAccount: &resource_google_service_account_iam.ServiceAccount{ - DisplayName: d.Get("display_name").(string), - Description: d.Get("description").(string), - Etag: sa.Etag, - }, - }).Do() - if err != nil { - return err - } - - 
resource_google_service_account_time.Sleep(resource_google_service_account_time.Second * 5) - - return nil -} - -func resourceGoogleServiceAccountImport(d *resource_google_service_account_schema.ResourceData, meta interface{}) ([]*resource_google_service_account_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/serviceAccounts/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)"}, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/serviceAccounts/{{email}}") - if err != nil { - return nil, resource_google_service_account_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_google_service_account_schema.ResourceData{d}, nil -} - -func resourceGoogleServiceAccountKey() *resource_google_service_account_key_schema.Resource { - return &resource_google_service_account_key_schema.Resource{ - Create: resourceGoogleServiceAccountKeyCreate, - Read: resourceGoogleServiceAccountKeyRead, - Delete: resourceGoogleServiceAccountKeyDelete, - Schema: map[string]*resource_google_service_account_key_schema.Schema{ - - "service_account_id": { - Type: resource_google_service_account_key_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The ID of the parent service account of the key. This can be a string in the format {ACCOUNT} or projects/{PROJECT_ID}/serviceAccounts/{ACCOUNT}, where {ACCOUNT} is the email address or unique id of the service account. 
If the {ACCOUNT} syntax is used, the project will be inferred from the provider's configuration.`, - }, - - "key_algorithm": { - Type: resource_google_service_account_key_schema.TypeString, - Default: "KEY_ALG_RSA_2048", - Optional: true, - ForceNew: true, - ValidateFunc: resource_google_service_account_key_validation.StringInSlice([]string{"KEY_ALG_UNSPECIFIED", "KEY_ALG_RSA_1024", "KEY_ALG_RSA_2048"}, false), - Description: `The algorithm used to generate the key, used only on create. KEY_ALG_RSA_2048 is the default algorithm. Valid values are: "KEY_ALG_RSA_1024", "KEY_ALG_RSA_2048".`, - }, - "private_key_type": { - Type: resource_google_service_account_key_schema.TypeString, - Default: "TYPE_GOOGLE_CREDENTIALS_FILE", - Optional: true, - ForceNew: true, - ValidateFunc: resource_google_service_account_key_validation.StringInSlice([]string{"TYPE_UNSPECIFIED", "TYPE_PKCS12_FILE", "TYPE_GOOGLE_CREDENTIALS_FILE"}, false), - }, - "public_key_type": { - Type: resource_google_service_account_key_schema.TypeString, - Default: "TYPE_X509_PEM_FILE", - Optional: true, - ForceNew: true, - ValidateFunc: resource_google_service_account_key_validation.StringInSlice([]string{"TYPE_NONE", "TYPE_X509_PEM_FILE", "TYPE_RAW_PUBLIC_KEY"}, false), - }, - "public_key_data": { - Type: resource_google_service_account_key_schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"key_algorithm", "private_key_type"}, - Description: `A field that allows clients to upload their own public key. If set, use this public key data to create a service account key for given service account. 
Please note, the expected format for this field is a base64 encoded X509_PEM.`, - }, - "keepers": { - Description: "Arbitrary map of values that, when changed, will trigger recreation of resource.", - Type: resource_google_service_account_key_schema.TypeMap, - Optional: true, - ForceNew: true, - }, - - "name": { - Type: resource_google_service_account_key_schema.TypeString, - Computed: true, - ForceNew: true, - Description: `The name used for this key pair`, - }, - "public_key": { - Type: resource_google_service_account_key_schema.TypeString, - Computed: true, - ForceNew: true, - Description: `The public key, base64 encoded`, - }, - "private_key": { - Type: resource_google_service_account_key_schema.TypeString, - Computed: true, - Sensitive: true, - Description: `The private key in JSON format, base64 encoded. This is what you normally get as a file when creating service account keys through the CLI or web console. This is only populated when creating a new key.`, - }, - "valid_after": { - Type: resource_google_service_account_key_schema.TypeString, - Computed: true, - Description: `The key can be used after this timestamp. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".`, - }, - "valid_before": { - Type: resource_google_service_account_key_schema.TypeString, - Computed: true, - Description: `The key can be used before this timestamp. A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. 
Example: "2014-10-02T15:01:23.045123456Z".`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGoogleServiceAccountKeyCreate(d *resource_google_service_account_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - serviceAccountName, err := serviceAccountFQN(d.Get("service_account_id").(string), d, config) - if err != nil { - return err - } - - var sak *resource_google_service_account_key_iam.ServiceAccountKey - - if d.Get("public_key_data").(string) != "" { - ru := &resource_google_service_account_key_iam.UploadServiceAccountKeyRequest{ - PublicKeyData: d.Get("public_key_data").(string), - } - sak, err = config.NewIamClient(userAgent).Projects.ServiceAccounts.Keys.Upload(serviceAccountName, ru).Do() - if err != nil { - return resource_google_service_account_key_fmt.Errorf("Error creating service account key: %s", err) - } - } else { - rc := &resource_google_service_account_key_iam.CreateServiceAccountKeyRequest{ - KeyAlgorithm: d.Get("key_algorithm").(string), - PrivateKeyType: d.Get("private_key_type").(string), - } - sak, err = config.NewIamClient(userAgent).Projects.ServiceAccounts.Keys.Create(serviceAccountName, rc).Do() - if err != nil { - return resource_google_service_account_key_fmt.Errorf("Error creating service account key: %s", err) - } - } - - d.SetId(sak.Name) - - if err := d.Set("valid_after", sak.ValidAfterTime); err != nil { - return resource_google_service_account_key_fmt.Errorf("Error setting valid_after: %s", err) - } - if err := d.Set("valid_before", sak.ValidBeforeTime); err != nil { - return resource_google_service_account_key_fmt.Errorf("Error setting valid_before: %s", err) - } - if err := d.Set("private_key", sak.PrivateKeyData); err != nil { - return resource_google_service_account_key_fmt.Errorf("Error setting private_key: %s", err) - } - - err = 
serviceAccountKeyWaitTime(config.NewIamClient(userAgent).Projects.ServiceAccounts.Keys, d.Id(), d.Get("public_key_type").(string), "Creating Service account key", 4*resource_google_service_account_key_time.Minute) - if err != nil { - return err - } - return resourceGoogleServiceAccountKeyRead(d, meta) -} - -func resourceGoogleServiceAccountKeyRead(d *resource_google_service_account_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - publicKeyType := d.Get("public_key_type").(string) - - sak, err := config.NewIamClient(userAgent).Projects.ServiceAccounts.Keys.Get(d.Id()).PublicKeyType(publicKeyType).Do() - if err != nil { - if err = handleNotFoundError(err, d, resource_google_service_account_key_fmt.Sprintf("Service Account Key %q", d.Id())); err == nil { - return nil - } else { - - if isGoogleApiErrorWithCode(err, 403) { - resource_google_service_account_key_log.Printf("[DEBUG] Got a 403 error trying to read service account key %s, assuming it's gone.", d.Id()) - d.SetId("") - return nil - } else { - return err - } - } - } - - if err := d.Set("name", sak.Name); err != nil { - return resource_google_service_account_key_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("key_algorithm", sak.KeyAlgorithm); err != nil { - return resource_google_service_account_key_fmt.Errorf("Error setting key_algorithm: %s", err) - } - if err := d.Set("public_key", sak.PublicKeyData); err != nil { - return resource_google_service_account_key_fmt.Errorf("Error setting public_key: %s", err) - } - return nil -} - -func resourceGoogleServiceAccountKeyDelete(d *resource_google_service_account_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - _, err = 
config.NewIamClient(userAgent).Projects.ServiceAccounts.Keys.Delete(d.Id()).Do() - - if err != nil { - if err = handleNotFoundError(err, d, resource_google_service_account_key_fmt.Sprintf("Service Account Key %q", d.Id())); err == nil { - return nil - } else { - - if isGoogleApiErrorWithCode(err, 403) { - resource_google_service_account_key_log.Printf("[DEBUG] Got a 403 error trying to read service account key %s, assuming it's gone.", d.Id()) - d.SetId("") - return nil - } else { - return err - } - } - } - - d.SetId("") - return nil -} - -func resourceGoogleServiceNetworkingPeeredDNSDomain() *resource_google_service_networking_peered_dns_domain_schema.Resource { - return &resource_google_service_networking_peered_dns_domain_schema.Resource{ - Create: resourceGoogleServiceNetworkingPeeredDNSDomainCreate, - Read: resourceGoogleServiceNetworkingPeeredDNSDomainRead, - Delete: resourceGoogleServiceNetworkingPeeredDNSDomainDelete, - - Importer: &resource_google_service_networking_peered_dns_domain_schema.ResourceImporter{ - State: resourceGoogleServiceNetworkingPeeredDNSDomainImport, - }, - - Timeouts: &resource_google_service_networking_peered_dns_domain_schema.ResourceTimeout{ - Create: resource_google_service_networking_peered_dns_domain_schema.DefaultTimeout(20 * resource_google_service_networking_peered_dns_domain_time.Minute), - Read: resource_google_service_networking_peered_dns_domain_schema.DefaultTimeout(10 * resource_google_service_networking_peered_dns_domain_time.Minute), - Delete: resource_google_service_networking_peered_dns_domain_schema.DefaultTimeout(20 * resource_google_service_networking_peered_dns_domain_time.Minute), - }, - - Schema: map[string]*resource_google_service_networking_peered_dns_domain_schema.Schema{ - "project": { - Type: resource_google_service_networking_peered_dns_domain_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The ID of the project that the service account will be created in. 
Defaults to the provider project configuration.`, - }, - "name": { - Type: resource_google_service_networking_peered_dns_domain_schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the peered DNS domain.", - }, - "dns_suffix": { - Type: resource_google_service_networking_peered_dns_domain_schema.TypeString, - Required: true, - ForceNew: true, - Description: "The DNS domain name suffix of the peered DNS domain.", - }, - "service": { - Type: resource_google_service_networking_peered_dns_domain_schema.TypeString, - Optional: true, - ForceNew: true, - Default: "servicenetworking.googleapis.com", - Description: "The name of the service to create a peered DNS domain for, e.g. servicenetworking.googleapis.com", - }, - "network": { - Type: resource_google_service_networking_peered_dns_domain_schema.TypeString, - Required: true, - ForceNew: true, - Description: "Network in the consumer project to peer with.", - }, - "parent": { - Type: resource_google_service_networking_peered_dns_domain_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceGoogleServiceNetworkingPeeredDNSDomainImport(d *resource_google_service_networking_peered_dns_domain_schema.ResourceData, m interface{}) ([]*resource_google_service_networking_peered_dns_domain_schema.ResourceData, error) { - parts := resource_google_service_networking_peered_dns_domain_strings.Split(d.Id(), "/") - if len(parts) != 9 { - return nil, resource_google_service_networking_peered_dns_domain_fmt.Errorf("Invalid google_project_service_peered_dns_domain id format for import, expecting `services/{service}/projects/{project}/global/networks/{network}/peeredDnsDomains/{name}`, found %s", d.Id()) - } - if err := d.Set("service", parts[1]); err != nil { - return nil, resource_google_service_networking_peered_dns_domain_fmt.Errorf("Error setting service: %s", err) - } - if err := d.Set("project", parts[3]); err != nil { - return nil, 
resource_google_service_networking_peered_dns_domain_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("network", parts[6]); err != nil { - return nil, resource_google_service_networking_peered_dns_domain_fmt.Errorf("Error setting network: %s", err) - } - if err := d.Set("name", parts[8]); err != nil { - return nil, resource_google_service_networking_peered_dns_domain_fmt.Errorf("Error setting name: %s", err) - } - return []*resource_google_service_networking_peered_dns_domain_schema.ResourceData{d}, nil -} - -func resourceGoogleServiceNetworkingPeeredDNSDomainCreate(d *resource_google_service_networking_peered_dns_domain_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - projectNumber, err := getProjectNumber(d, config, project, userAgent) - if err != nil { - return err - } - - service := d.Get("service").(string) - network := d.Get("network").(string) - parent := resource_google_service_networking_peered_dns_domain_fmt.Sprintf("services/%s/projects/%s/global/networks/%s", service, projectNumber, network) - - name := d.Get("name").(string) - dnsSuffix := d.Get("dns_suffix").(string) - r := &resource_google_service_networking_peered_dns_domain_servicenetworking.PeeredDnsDomain{ - DnsSuffix: dnsSuffix, - Name: name, - } - - apiService := config.NewServiceNetworkingClient(userAgent) - peeredDnsDomainsService := resource_google_service_networking_peered_dns_domain_servicenetworking.NewServicesProjectsGlobalNetworksPeeredDnsDomainsService(apiService) - createCall := peeredDnsDomainsService.Create(parent, r) - if config.UserProjectOverride { - createCall.Header().Add("X-Goog-User-Project", project) - } - op, err := createCall.Do() - if err != nil { - return err - } - - if err := serviceNetworkingOperationWaitTime(config, op, "Create Service Networking 
Peered DNS Domain", userAgent, project, d.Timeout(resource_google_service_networking_peered_dns_domain_schema.TimeoutCreate)); err != nil { - return err - } - - if err := d.Set("parent", parent); err != nil { - return resource_google_service_networking_peered_dns_domain_fmt.Errorf("Error setting parent: %s", err) - } - id := resource_google_service_networking_peered_dns_domain_fmt.Sprintf("%s/peeredDnsDomains/%s", parent, name) - d.SetId(id) - return resourceGoogleServiceNetworkingPeeredDNSDomainRead(d, meta) -} - -func resourceGoogleServiceNetworkingPeeredDNSDomainRead(d *resource_google_service_networking_peered_dns_domain_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - projectNumber, err := getProjectNumber(d, config, project, userAgent) - if err != nil { - return err - } - - service := d.Get("service").(string) - network := d.Get("network").(string) - parent := resource_google_service_networking_peered_dns_domain_fmt.Sprintf("services/%s/projects/%s/global/networks/%s", service, projectNumber, network) - - apiService := config.NewServiceNetworkingClient(userAgent) - peeredDnsDomainsService := resource_google_service_networking_peered_dns_domain_servicenetworking.NewServicesProjectsGlobalNetworksPeeredDnsDomainsService(apiService) - readCall := peeredDnsDomainsService.List(parent) - if config.UserProjectOverride { - readCall.Header().Add("X-Goog-User-Project", project) - } - response, err := readCall.Do() - if err != nil { - return err - } - - name := d.Get("name").(string) - id := resource_google_service_networking_peered_dns_domain_fmt.Sprintf("%s/peeredDnsDomains/%s", parent, name) - d.SetId(id) - - var peeredDnsDomain *resource_google_service_networking_peered_dns_domain_servicenetworking.PeeredDnsDomain - for _, c := range response.PeeredDnsDomains { - if 
c.Name == name { - peeredDnsDomain = c - break - } - } - - if peeredDnsDomain == nil { - d.SetId("") - resource_google_service_networking_peered_dns_domain_log.Printf("[WARNING] Failed to find Service Peered DNS Domain, service: %s, project: %s, network: %s, name: %s", service, project, network, name) - return nil - } - - if err := d.Set("network", network); err != nil { - return resource_google_service_networking_peered_dns_domain_fmt.Errorf("Error setting network: %s", err) - } - if err := d.Set("name", peeredDnsDomain.Name); err != nil { - return resource_google_service_networking_peered_dns_domain_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("dns_suffix", peeredDnsDomain.DnsSuffix); err != nil { - return resource_google_service_networking_peered_dns_domain_fmt.Errorf("Error setting peering: %s", err) - } - if err := d.Set("project", project); err != nil { - return resource_google_service_networking_peered_dns_domain_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("parent", parent); err != nil { - return resource_google_service_networking_peered_dns_domain_fmt.Errorf("Error setting parent: %s", err) - } - - return nil -} - -func resourceGoogleServiceNetworkingPeeredDNSDomainDelete(d *resource_google_service_networking_peered_dns_domain_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - name := d.Get("name").(string) - apiService := config.NewServiceNetworkingClient(userAgent) - peeredDnsDomainsService := resource_google_service_networking_peered_dns_domain_servicenetworking.NewServicesProjectsGlobalNetworksPeeredDnsDomainsService(apiService) - - if err := retryTimeDuration(func() error { - _, delErr := peeredDnsDomainsService.Delete(d.Id()).Do() - return delErr - }, d.Timeout(resource_google_service_networking_peered_dns_domain_schema.TimeoutDelete)); err != nil { - return handleNotFoundError(err, d, 
resource_google_service_networking_peered_dns_domain_fmt.Sprintf("Peered DNS domain %s", name)) - } - - d.SetId("") - return nil -} - -func getProjectNumber(d *resource_google_service_networking_peered_dns_domain_schema.ResourceData, config *Config, project, userAgent string) (string, error) { - resource_google_service_networking_peered_dns_domain_log.Printf("[DEBUG] Retrieving project number by doing a GET with the project id, as required by service networking") - - billingProject := project - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - getProjectCall := config.NewResourceManagerClient(userAgent).Projects.Get(project) - if config.UserProjectOverride { - getProjectCall.Header().Add("X-Goog-User-Project", billingProject) - } - projectCall, err := getProjectCall.Do() - if err != nil { - - return "", resource_google_service_networking_peered_dns_domain_fmt.Errorf("Failed to retrieve project, project: %s, err: %w", project, err) - } - - return resource_google_service_networking_peered_dns_domain_strconv.FormatInt(projectCall.ProjectNumber, 10), nil -} - -func resourceHealthcareConsentStore() *resource_healthcare_consent_store_schema.Resource { - return &resource_healthcare_consent_store_schema.Resource{ - Create: resourceHealthcareConsentStoreCreate, - Read: resourceHealthcareConsentStoreRead, - Update: resourceHealthcareConsentStoreUpdate, - Delete: resourceHealthcareConsentStoreDelete, - - Importer: &resource_healthcare_consent_store_schema.ResourceImporter{ - State: resourceHealthcareConsentStoreImport, - }, - - Timeouts: &resource_healthcare_consent_store_schema.ResourceTimeout{ - Create: resource_healthcare_consent_store_schema.DefaultTimeout(4 * resource_healthcare_consent_store_time.Minute), - Update: resource_healthcare_consent_store_schema.DefaultTimeout(4 * resource_healthcare_consent_store_time.Minute), - Delete: resource_healthcare_consent_store_schema.DefaultTimeout(4 * 
resource_healthcare_consent_store_time.Minute), - }, - - Schema: map[string]*resource_healthcare_consent_store_schema.Schema{ - "dataset": { - Type: resource_healthcare_consent_store_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Identifies the dataset addressed by this request. Must be in the format -'projects/{project}/locations/{location}/datasets/{dataset}'`, - }, - "name": { - Type: resource_healthcare_consent_store_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of this ConsentStore, for example: -"consent1"`, - }, - "default_consent_ttl": { - Type: resource_healthcare_consent_store_schema.TypeString, - Optional: true, - Description: `Default time to live for consents in this store. Must be at least 24 hours. Updating this field will not affect the expiration time of existing consents. - -A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, - }, - "enable_consent_create_on_update": { - Type: resource_healthcare_consent_store_schema.TypeBool, - Optional: true, - Description: `If true, [consents.patch] [google.cloud.healthcare.v1.consent.UpdateConsent] creates the consent if it does not already exist.`, - }, - "labels": { - Type: resource_healthcare_consent_store_schema.TypeMap, - Optional: true, - Description: `User-supplied key-value pairs used to organize Consent stores. - -Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must -conform to the following PCRE regular expression: '[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}' - -Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 -bytes, and must conform to the following PCRE regular expression: '[\p{Ll}\p{Lo}\p{N}_-]{0,63}' - -No more than 64 labels can be associated with a given store. - -An object containing a list of "key": value pairs. 
-Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &resource_healthcare_consent_store_schema.Schema{Type: resource_healthcare_consent_store_schema.TypeString}, - }, - }, - UseJSONNumber: true, - } -} - -func resourceHealthcareConsentStoreCreate(d *resource_healthcare_consent_store_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - defaultConsentTtlProp, err := expandHealthcareConsentStoreDefaultConsentTtl(d.Get("default_consent_ttl"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_consent_ttl"); !isEmptyValue(resource_healthcare_consent_store_reflect.ValueOf(defaultConsentTtlProp)) && (ok || !resource_healthcare_consent_store_reflect.DeepEqual(v, defaultConsentTtlProp)) { - obj["defaultConsentTtl"] = defaultConsentTtlProp - } - enableConsentCreateOnUpdateProp, err := expandHealthcareConsentStoreEnableConsentCreateOnUpdate(d.Get("enable_consent_create_on_update"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_consent_create_on_update"); !isEmptyValue(resource_healthcare_consent_store_reflect.ValueOf(enableConsentCreateOnUpdateProp)) && (ok || !resource_healthcare_consent_store_reflect.DeepEqual(v, enableConsentCreateOnUpdateProp)) { - obj["enableConsentCreateOnUpdate"] = enableConsentCreateOnUpdateProp - } - labelsProp, err := expandHealthcareConsentStoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_healthcare_consent_store_reflect.ValueOf(labelsProp)) && (ok || !resource_healthcare_consent_store_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/consentStores?consentStoreId={{name}}") - if err != nil { - return err - } - - 
resource_healthcare_consent_store_log.Printf("[DEBUG] Creating new ConsentStore: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_healthcare_consent_store_schema.TimeoutCreate)) - if err != nil { - return resource_healthcare_consent_store_fmt.Errorf("Error creating ConsentStore: %s", err) - } - - id, err := replaceVars(d, config, "{{dataset}}/consentStores/{{name}}") - if err != nil { - return resource_healthcare_consent_store_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_healthcare_consent_store_log.Printf("[DEBUG] Finished creating ConsentStore %q: %#v", d.Id(), res) - - return resourceHealthcareConsentStoreRead(d, meta) -} - -func resourceHealthcareConsentStoreRead(d *resource_healthcare_consent_store_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/consentStores/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_healthcare_consent_store_fmt.Sprintf("HealthcareConsentStore %q", d.Id())) - } - - if err := d.Set("default_consent_ttl", flattenHealthcareConsentStoreDefaultConsentTtl(res["defaultConsentTtl"], d, config)); err != nil { - return resource_healthcare_consent_store_fmt.Errorf("Error reading ConsentStore: %s", err) - } - if err := d.Set("enable_consent_create_on_update", flattenHealthcareConsentStoreEnableConsentCreateOnUpdate(res["enableConsentCreateOnUpdate"], d, config)); err != nil { - return 
resource_healthcare_consent_store_fmt.Errorf("Error reading ConsentStore: %s", err) - } - if err := d.Set("labels", flattenHealthcareConsentStoreLabels(res["labels"], d, config)); err != nil { - return resource_healthcare_consent_store_fmt.Errorf("Error reading ConsentStore: %s", err) - } - - return nil -} - -func resourceHealthcareConsentStoreUpdate(d *resource_healthcare_consent_store_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - defaultConsentTtlProp, err := expandHealthcareConsentStoreDefaultConsentTtl(d.Get("default_consent_ttl"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_consent_ttl"); !isEmptyValue(resource_healthcare_consent_store_reflect.ValueOf(v)) && (ok || !resource_healthcare_consent_store_reflect.DeepEqual(v, defaultConsentTtlProp)) { - obj["defaultConsentTtl"] = defaultConsentTtlProp - } - enableConsentCreateOnUpdateProp, err := expandHealthcareConsentStoreEnableConsentCreateOnUpdate(d.Get("enable_consent_create_on_update"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_consent_create_on_update"); !isEmptyValue(resource_healthcare_consent_store_reflect.ValueOf(v)) && (ok || !resource_healthcare_consent_store_reflect.DeepEqual(v, enableConsentCreateOnUpdateProp)) { - obj["enableConsentCreateOnUpdate"] = enableConsentCreateOnUpdateProp - } - labelsProp, err := expandHealthcareConsentStoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_healthcare_consent_store_reflect.ValueOf(v)) && (ok || !resource_healthcare_consent_store_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/consentStores/{{name}}") - if 
err != nil { - return err - } - - resource_healthcare_consent_store_log.Printf("[DEBUG] Updating ConsentStore %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("default_consent_ttl") { - updateMask = append(updateMask, "defaultConsentTtl") - } - - if d.HasChange("enable_consent_create_on_update") { - updateMask = append(updateMask, "enableConsentCreateOnUpdate") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_healthcare_consent_store_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_healthcare_consent_store_schema.TimeoutUpdate)) - - if err != nil { - return resource_healthcare_consent_store_fmt.Errorf("Error updating ConsentStore %q: %s", d.Id(), err) - } else { - resource_healthcare_consent_store_log.Printf("[DEBUG] Finished updating ConsentStore %q: %#v", d.Id(), res) - } - - return resourceHealthcareConsentStoreRead(d, meta) -} - -func resourceHealthcareConsentStoreDelete(d *resource_healthcare_consent_store_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/consentStores/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_healthcare_consent_store_log.Printf("[DEBUG] Deleting ConsentStore %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_healthcare_consent_store_schema.TimeoutDelete)) - if err != nil { - 
return handleNotFoundError(err, d, "ConsentStore") - } - - resource_healthcare_consent_store_log.Printf("[DEBUG] Finished deleting ConsentStore %q: %#v", d.Id(), res) - return nil -} - -func resourceHealthcareConsentStoreImport(d *resource_healthcare_consent_store_schema.ResourceData, meta interface{}) ([]*resource_healthcare_consent_store_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P.+)/consentStores/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{dataset}}/consentStores/{{name}}") - if err != nil { - return nil, resource_healthcare_consent_store_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_healthcare_consent_store_schema.ResourceData{d}, nil -} - -func flattenHealthcareConsentStoreDefaultConsentTtl(v interface{}, d *resource_healthcare_consent_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareConsentStoreEnableConsentCreateOnUpdate(v interface{}, d *resource_healthcare_consent_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareConsentStoreLabels(v interface{}, d *resource_healthcare_consent_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandHealthcareConsentStoreDefaultConsentTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareConsentStoreEnableConsentCreateOnUpdate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareConsentStoreLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceHealthcareDataset() 
*resource_healthcare_dataset_schema.Resource { - return &resource_healthcare_dataset_schema.Resource{ - Create: resourceHealthcareDatasetCreate, - Read: resourceHealthcareDatasetRead, - Update: resourceHealthcareDatasetUpdate, - Delete: resourceHealthcareDatasetDelete, - - Importer: &resource_healthcare_dataset_schema.ResourceImporter{ - State: resourceHealthcareDatasetImport, - }, - - Timeouts: &resource_healthcare_dataset_schema.ResourceTimeout{ - Create: resource_healthcare_dataset_schema.DefaultTimeout(4 * resource_healthcare_dataset_time.Minute), - Update: resource_healthcare_dataset_schema.DefaultTimeout(4 * resource_healthcare_dataset_time.Minute), - Delete: resource_healthcare_dataset_schema.DefaultTimeout(4 * resource_healthcare_dataset_time.Minute), - }, - - Schema: map[string]*resource_healthcare_dataset_schema.Schema{ - "location": { - Type: resource_healthcare_dataset_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The location for the Dataset.`, - }, - "name": { - Type: resource_healthcare_dataset_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name for the Dataset.`, - }, - "time_zone": { - Type: resource_healthcare_dataset_schema.TypeString, - Computed: true, - Optional: true, - Description: `The default timezone used by this dataset. Must be a either a valid IANA time zone name such as -"America/New_York" or empty, which defaults to UTC. 
This is used for parsing times in resources -(e.g., HL7 messages) where no explicit timezone is specified.`, - }, - "self_link": { - Type: resource_healthcare_dataset_schema.TypeString, - Computed: true, - Description: `The fully qualified name of this dataset`, - }, - "project": { - Type: resource_healthcare_dataset_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceHealthcareDatasetCreate(d *resource_healthcare_dataset_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandHealthcareDatasetName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_healthcare_dataset_reflect.ValueOf(nameProp)) && (ok || !resource_healthcare_dataset_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - timeZoneProp, err := expandHealthcareDatasetTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(resource_healthcare_dataset_reflect.ValueOf(timeZoneProp)) && (ok || !resource_healthcare_dataset_reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}projects/{{project}}/locations/{{location}}/datasets?datasetId={{name}}") - if err != nil { - return err - } - - resource_healthcare_dataset_log.Printf("[DEBUG] Creating new Dataset: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_healthcare_dataset_fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", 
billingProject, url, userAgent, obj, d.Timeout(resource_healthcare_dataset_schema.TimeoutCreate), healthcareDatasetNotInitialized) - if err != nil { - return resource_healthcare_dataset_fmt.Errorf("Error creating Dataset: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/datasets/{{name}}") - if err != nil { - return resource_healthcare_dataset_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_healthcare_dataset_log.Printf("[DEBUG] Finished creating Dataset %q: %#v", d.Id(), res) - - return resourceHealthcareDatasetRead(d, meta) -} - -func resourceHealthcareDatasetRead(d *resource_healthcare_dataset_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}projects/{{project}}/locations/{{location}}/datasets/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_healthcare_dataset_fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, healthcareDatasetNotInitialized) - if err != nil { - return handleNotFoundError(err, d, resource_healthcare_dataset_fmt.Sprintf("HealthcareDataset %q", d.Id())) - } - - res, err = resourceHealthcareDatasetDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_healthcare_dataset_log.Printf("[DEBUG] Removing HealthcareDataset because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_healthcare_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - - if err := d.Set("name", 
flattenHealthcareDatasetName(res["name"], d, config)); err != nil { - return resource_healthcare_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("time_zone", flattenHealthcareDatasetTimeZone(res["timeZone"], d, config)); err != nil { - return resource_healthcare_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - - return nil -} - -func resourceHealthcareDatasetUpdate(d *resource_healthcare_dataset_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_healthcare_dataset_fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - timeZoneProp, err := expandHealthcareDatasetTimeZone(d.Get("time_zone"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("time_zone"); !isEmptyValue(resource_healthcare_dataset_reflect.ValueOf(v)) && (ok || !resource_healthcare_dataset_reflect.DeepEqual(v, timeZoneProp)) { - obj["timeZone"] = timeZoneProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}projects/{{project}}/locations/{{location}}/datasets/{{name}}") - if err != nil { - return err - } - - resource_healthcare_dataset_log.Printf("[DEBUG] Updating Dataset %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("time_zone") { - updateMask = append(updateMask, "timeZone") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_healthcare_dataset_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_healthcare_dataset_schema.TimeoutUpdate), 
healthcareDatasetNotInitialized) - - if err != nil { - return resource_healthcare_dataset_fmt.Errorf("Error updating Dataset %q: %s", d.Id(), err) - } else { - resource_healthcare_dataset_log.Printf("[DEBUG] Finished updating Dataset %q: %#v", d.Id(), res) - } - - return resourceHealthcareDatasetRead(d, meta) -} - -func resourceHealthcareDatasetDelete(d *resource_healthcare_dataset_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_healthcare_dataset_fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}projects/{{project}}/locations/{{location}}/datasets/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_healthcare_dataset_log.Printf("[DEBUG] Deleting Dataset %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_healthcare_dataset_schema.TimeoutDelete), healthcareDatasetNotInitialized) - if err != nil { - return handleNotFoundError(err, d, "Dataset") - } - - resource_healthcare_dataset_log.Printf("[DEBUG] Finished deleting Dataset %q: %#v", d.Id(), res) - return nil -} - -func resourceHealthcareDatasetImport(d *resource_healthcare_dataset_schema.ResourceData, meta interface{}) ([]*resource_healthcare_dataset_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/datasets/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, 
"projects/{{project}}/locations/{{location}}/datasets/{{name}}") - if err != nil { - return nil, resource_healthcare_dataset_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_healthcare_dataset_schema.ResourceData{d}, nil -} - -func flattenHealthcareDatasetName(v interface{}, d *resource_healthcare_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareDatasetTimeZone(v interface{}, d *resource_healthcare_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandHealthcareDatasetName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareDatasetTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceHealthcareDatasetDecoder(d *resource_healthcare_dataset_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - - res["name"] = d.Get("name").(string) - return res, nil -} - -func resourceHealthcareDicomStore() *resource_healthcare_dicom_store_schema.Resource { - return &resource_healthcare_dicom_store_schema.Resource{ - Create: resourceHealthcareDicomStoreCreate, - Read: resourceHealthcareDicomStoreRead, - Update: resourceHealthcareDicomStoreUpdate, - Delete: resourceHealthcareDicomStoreDelete, - - Importer: &resource_healthcare_dicom_store_schema.ResourceImporter{ - State: resourceHealthcareDicomStoreImport, - }, - - Timeouts: &resource_healthcare_dicom_store_schema.ResourceTimeout{ - Create: resource_healthcare_dicom_store_schema.DefaultTimeout(4 * resource_healthcare_dicom_store_time.Minute), - Update: resource_healthcare_dicom_store_schema.DefaultTimeout(4 * resource_healthcare_dicom_store_time.Minute), - Delete: resource_healthcare_dicom_store_schema.DefaultTimeout(4 * resource_healthcare_dicom_store_time.Minute), - }, - - Schema: 
map[string]*resource_healthcare_dicom_store_schema.Schema{ - "dataset": { - Type: resource_healthcare_dicom_store_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Identifies the dataset addressed by this request. Must be in the format -'projects/{project}/locations/{location}/datasets/{dataset}'`, - }, - "name": { - Type: resource_healthcare_dicom_store_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name for the DicomStore. - -** Changing this property may recreate the Dicom store (removing all data) **`, - }, - "labels": { - Type: resource_healthcare_dicom_store_schema.TypeMap, - Optional: true, - Description: `User-supplied key-value pairs used to organize DICOM stores. - -Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must -conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} - -Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 -bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} - -No more than 64 labels can be associated with a given store. - -An object containing a list of "key": value pairs. -Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &resource_healthcare_dicom_store_schema.Schema{Type: resource_healthcare_dicom_store_schema.TypeString}, - }, - "notification_config": { - Type: resource_healthcare_dicom_store_schema.TypeList, - Optional: true, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_healthcare_dicom_store_schema.Resource{ - Schema: map[string]*resource_healthcare_dicom_store_schema.Schema{ - "pubsub_topic": { - Type: resource_healthcare_dicom_store_schema.TypeString, - Required: true, - Description: `The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. 
-PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. -It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message -was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a -project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given -Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail.`, - }, - }, - }, - }, - "self_link": { - Type: resource_healthcare_dicom_store_schema.TypeString, - Computed: true, - Description: `The fully qualified name of this dataset`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceHealthcareDicomStoreCreate(d *resource_healthcare_dicom_store_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandHealthcareDicomStoreName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_healthcare_dicom_store_reflect.ValueOf(nameProp)) && (ok || !resource_healthcare_dicom_store_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - labelsProp, err := expandHealthcareDicomStoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_healthcare_dicom_store_reflect.ValueOf(labelsProp)) && (ok || !resource_healthcare_dicom_store_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - notificationConfigProp, err := expandHealthcareDicomStoreNotificationConfig(d.Get("notification_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_config"); 
!isEmptyValue(resource_healthcare_dicom_store_reflect.ValueOf(notificationConfigProp)) && (ok || !resource_healthcare_dicom_store_reflect.DeepEqual(v, notificationConfigProp)) { - obj["notificationConfig"] = notificationConfigProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/dicomStores?dicomStoreId={{name}}") - if err != nil { - return err - } - - resource_healthcare_dicom_store_log.Printf("[DEBUG] Creating new DicomStore: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_healthcare_dicom_store_schema.TimeoutCreate)) - if err != nil { - return resource_healthcare_dicom_store_fmt.Errorf("Error creating DicomStore: %s", err) - } - - id, err := replaceVars(d, config, "{{dataset}}/dicomStores/{{name}}") - if err != nil { - return resource_healthcare_dicom_store_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_healthcare_dicom_store_log.Printf("[DEBUG] Finished creating DicomStore %q: %#v", d.Id(), res) - - return resourceHealthcareDicomStoreRead(d, meta) -} - -func resourceHealthcareDicomStoreRead(d *resource_healthcare_dicom_store_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/dicomStores/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_healthcare_dicom_store_fmt.Sprintf("HealthcareDicomStore %q", d.Id())) - } - - res, err = resourceHealthcareDicomStoreDecoder(d, meta, res) - if err 
!= nil { - return err - } - - if res == nil { - - resource_healthcare_dicom_store_log.Printf("[DEBUG] Removing HealthcareDicomStore because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("name", flattenHealthcareDicomStoreName(res["name"], d, config)); err != nil { - return resource_healthcare_dicom_store_fmt.Errorf("Error reading DicomStore: %s", err) - } - if err := d.Set("labels", flattenHealthcareDicomStoreLabels(res["labels"], d, config)); err != nil { - return resource_healthcare_dicom_store_fmt.Errorf("Error reading DicomStore: %s", err) - } - if err := d.Set("notification_config", flattenHealthcareDicomStoreNotificationConfig(res["notificationConfig"], d, config)); err != nil { - return resource_healthcare_dicom_store_fmt.Errorf("Error reading DicomStore: %s", err) - } - - return nil -} - -func resourceHealthcareDicomStoreUpdate(d *resource_healthcare_dicom_store_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - labelsProp, err := expandHealthcareDicomStoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_healthcare_dicom_store_reflect.ValueOf(v)) && (ok || !resource_healthcare_dicom_store_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - notificationConfigProp, err := expandHealthcareDicomStoreNotificationConfig(d.Get("notification_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_config"); !isEmptyValue(resource_healthcare_dicom_store_reflect.ValueOf(v)) && (ok || !resource_healthcare_dicom_store_reflect.DeepEqual(v, notificationConfigProp)) { - obj["notificationConfig"] = notificationConfigProp - } - - url, err := replaceVars(d, config, 
"{{HealthcareBasePath}}{{dataset}}/dicomStores/{{name}}") - if err != nil { - return err - } - - resource_healthcare_dicom_store_log.Printf("[DEBUG] Updating DicomStore %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("notification_config") { - updateMask = append(updateMask, "notificationConfig") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_healthcare_dicom_store_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_healthcare_dicom_store_schema.TimeoutUpdate)) - - if err != nil { - return resource_healthcare_dicom_store_fmt.Errorf("Error updating DicomStore %q: %s", d.Id(), err) - } else { - resource_healthcare_dicom_store_log.Printf("[DEBUG] Finished updating DicomStore %q: %#v", d.Id(), res) - } - - return resourceHealthcareDicomStoreRead(d, meta) -} - -func resourceHealthcareDicomStoreDelete(d *resource_healthcare_dicom_store_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/dicomStores/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_healthcare_dicom_store_log.Printf("[DEBUG] Deleting DicomStore %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_healthcare_dicom_store_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DicomStore") - } - - 
resource_healthcare_dicom_store_log.Printf("[DEBUG] Finished deleting DicomStore %q: %#v", d.Id(), res) - return nil -} - -func resourceHealthcareDicomStoreImport(d *resource_healthcare_dicom_store_schema.ResourceData, meta interface{}) ([]*resource_healthcare_dicom_store_schema.ResourceData, error) { - - config := meta.(*Config) - - dicomStoreId, err := parseHealthcareDicomStoreId(d.Id(), config) - if err != nil { - return nil, err - } - - if err := d.Set("dataset", dicomStoreId.DatasetId.datasetId()); err != nil { - return nil, resource_healthcare_dicom_store_fmt.Errorf("Error setting dataset: %s", err) - } - if err := d.Set("name", dicomStoreId.Name); err != nil { - return nil, resource_healthcare_dicom_store_fmt.Errorf("Error setting name: %s", err) - } - - return []*resource_healthcare_dicom_store_schema.ResourceData{d}, nil -} - -func flattenHealthcareDicomStoreName(v interface{}, d *resource_healthcare_dicom_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareDicomStoreLabels(v interface{}, d *resource_healthcare_dicom_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareDicomStoreNotificationConfig(v interface{}, d *resource_healthcare_dicom_store_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pubsub_topic"] = - flattenHealthcareDicomStoreNotificationConfigPubsubTopic(original["pubsubTopic"], d, config) - return []interface{}{transformed} -} - -func flattenHealthcareDicomStoreNotificationConfigPubsubTopic(v interface{}, d *resource_healthcare_dicom_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandHealthcareDicomStoreName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandHealthcareDicomStoreLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandHealthcareDicomStoreNotificationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubTopic, err := expandHealthcareDicomStoreNotificationConfigPubsubTopic(original["pubsub_topic"], d, config) - if err != nil { - return nil, err - } else if val := resource_healthcare_dicom_store_reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubTopic"] = transformedPubsubTopic - } - - return transformed, nil -} - -func expandHealthcareDicomStoreNotificationConfigPubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceHealthcareDicomStoreDecoder(d *resource_healthcare_dicom_store_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - - res["name"] = d.Get("name").(string) - return res, nil -} - -func resourceHealthcareFhirStore() *resource_healthcare_fhir_store_schema.Resource { - return &resource_healthcare_fhir_store_schema.Resource{ - Create: resourceHealthcareFhirStoreCreate, - Read: resourceHealthcareFhirStoreRead, - Update: resourceHealthcareFhirStoreUpdate, - Delete: resourceHealthcareFhirStoreDelete, - - Importer: &resource_healthcare_fhir_store_schema.ResourceImporter{ - State: resourceHealthcareFhirStoreImport, - }, - - Timeouts: &resource_healthcare_fhir_store_schema.ResourceTimeout{ - Create: resource_healthcare_fhir_store_schema.DefaultTimeout(4 * 
resource_healthcare_fhir_store_time.Minute), - Update: resource_healthcare_fhir_store_schema.DefaultTimeout(4 * resource_healthcare_fhir_store_time.Minute), - Delete: resource_healthcare_fhir_store_schema.DefaultTimeout(4 * resource_healthcare_fhir_store_time.Minute), - }, - - Schema: map[string]*resource_healthcare_fhir_store_schema.Schema{ - "dataset": { - Type: resource_healthcare_fhir_store_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Identifies the dataset addressed by this request. Must be in the format -'projects/{project}/locations/{location}/datasets/{dataset}'`, - }, - "name": { - Type: resource_healthcare_fhir_store_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name for the FhirStore. - -** Changing this property may recreate the FHIR store (removing all data) **`, - }, - "version": { - Type: resource_healthcare_fhir_store_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_healthcare_fhir_store_validation.StringInSlice([]string{"DSTU2", "STU3", "R4"}, false), - Description: `The FHIR specification version. Possible values: ["DSTU2", "STU3", "R4"]`, - }, - "disable_referential_integrity": { - Type: resource_healthcare_fhir_store_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether to disable referential integrity in this FHIR store. This field is immutable after FHIR store -creation. The default value is false, meaning that the API will enforce referential integrity and fail the -requests that will result in inconsistent state in the FHIR store. When this field is set to true, the API -will skip referential integrity check. Consequently, operations that rely on references, such as -Patient.get$everything, will not return all the results if broken references exist. 
- -** Changing this property may recreate the FHIR store (removing all data) **`, - }, - "disable_resource_versioning": { - Type: resource_healthcare_fhir_store_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether to disable resource versioning for this FHIR store. This field can not be changed after the creation -of FHIR store. If set to false, which is the default behavior, all write operations will cause historical -versions to be recorded automatically. The historical versions can be fetched through the history APIs, but -cannot be updated. If set to true, no historical versions will be kept. The server will send back errors for -attempts to read the historical versions. - -** Changing this property may recreate the FHIR store (removing all data) **`, - }, - "enable_history_import": { - Type: resource_healthcare_fhir_store_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether to allow the bulk import API to accept history bundles and directly insert historical resource -versions into the FHIR store. Importing resource histories creates resource interactions that appear to have -occurred in the past, which clients may not want to allow. If set to false, history bundles within an import -will fail with an error. - -** Changing this property may recreate the FHIR store (removing all data) ** - -** This property can be changed manually in the Google Cloud Healthcare admin console without recreating the FHIR store **`, - }, - "enable_update_create": { - Type: resource_healthcare_fhir_store_schema.TypeBool, - Optional: true, - Description: `Whether this FHIR store has the updateCreate capability. This determines if the client can use an Update -operation to create a new resource with a client-specified ID. If false, all IDs are server-assigned through -the Create operation and attempts to Update a non-existent resource will return errors. 
Please treat the audit -logs with appropriate levels of care if client-specified resource IDs contain sensitive data such as patient -identifiers, those IDs will be part of the FHIR resource path recorded in Cloud audit logs and Cloud Pub/Sub -notifications.`, - }, - "labels": { - Type: resource_healthcare_fhir_store_schema.TypeMap, - Optional: true, - Description: `User-supplied key-value pairs used to organize FHIR stores. - -Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must -conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} - -Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 -bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} - -No more than 64 labels can be associated with a given store. - -An object containing a list of "key": value pairs. -Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &resource_healthcare_fhir_store_schema.Schema{Type: resource_healthcare_fhir_store_schema.TypeString}, - }, - "notification_config": { - Type: resource_healthcare_fhir_store_schema.TypeList, - Optional: true, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_healthcare_fhir_store_schema.Resource{ - Schema: map[string]*resource_healthcare_fhir_store_schema.Schema{ - "pubsub_topic": { - Type: resource_healthcare_fhir_store_schema.TypeString, - Required: true, - Description: `The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. -PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. -It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message -was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a -project. 
service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given -Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail.`, - }, - }, - }, - }, - "stream_configs": { - Type: resource_healthcare_fhir_store_schema.TypeList, - Optional: true, - Description: `A list of streaming configs that configure the destinations of streaming export for every resource mutation in -this FHIR store. Each store is allowed to have up to 10 streaming configs. After a new config is added, the next -resource mutation is streamed to the new location in addition to the existing ones. When a location is removed -from the list, the server stops streaming to that location. Before adding a new config, you must add the required -bigquery.dataEditor role to your project's Cloud Healthcare Service Agent service account. Some lag (typically on -the order of dozens of seconds) is expected before the results show up in the streaming destination.`, - Elem: &resource_healthcare_fhir_store_schema.Resource{ - Schema: map[string]*resource_healthcare_fhir_store_schema.Schema{ - "bigquery_destination": { - Type: resource_healthcare_fhir_store_schema.TypeList, - Required: true, - Description: `The destination BigQuery structure that contains both the dataset location and corresponding schema config. -The output is organized in one table per resource type. The server reuses the existing tables (if any) that -are named after the resource types, e.g. "Patient", "Observation". When there is no existing table for a given -resource type, the server attempts to create one. 
-See the [streaming config reference](https://cloud.google.com/healthcare/docs/reference/rest/v1beta1/projects.locations.datasets.fhirStores#streamconfig) for more details.`, - MaxItems: 1, - Elem: &resource_healthcare_fhir_store_schema.Resource{ - Schema: map[string]*resource_healthcare_fhir_store_schema.Schema{ - "dataset_uri": { - Type: resource_healthcare_fhir_store_schema.TypeString, - Required: true, - Description: `BigQuery URI to a dataset, up to 2000 characters long, in the format bq://projectId.bqDatasetId`, - }, - "schema_config": { - Type: resource_healthcare_fhir_store_schema.TypeList, - Required: true, - Description: `The configuration for the exported BigQuery schema.`, - MaxItems: 1, - Elem: &resource_healthcare_fhir_store_schema.Resource{ - Schema: map[string]*resource_healthcare_fhir_store_schema.Schema{ - "recursive_structure_depth": { - Type: resource_healthcare_fhir_store_schema.TypeInt, - Required: true, - Description: `The depth for all recursive structures in the output analytics schema. For example, concept in the CodeSystem -resource is a recursive structure; when the depth is 2, the CodeSystem table will have a column called -concept.concept but not concept.concept.concept. If not specified or set to 0, the server will use the default -value 2. The maximum depth allowed is 5.`, - }, - "schema_type": { - Type: resource_healthcare_fhir_store_schema.TypeString, - Optional: true, - ValidateFunc: resource_healthcare_fhir_store_validation.StringInSlice([]string{"ANALYTICS", ""}, false), - Description: `Specifies the output schema type. Only ANALYTICS is supported at this time. - * ANALYTICS: Analytics schema defined by the FHIR community. - See https://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md. 
Default value: "ANALYTICS" Possible values: ["ANALYTICS"]`, - Default: "ANALYTICS", - }, - }, - }, - }, - }, - }, - }, - "resource_types": { - Type: resource_healthcare_fhir_store_schema.TypeList, - Optional: true, - Description: `Supply a FHIR resource type (such as "Patient" or "Observation"). See -https://www.hl7.org/fhir/valueset-resource-types.html for a list of all FHIR resource types. The server treats -an empty list as an intent to stream all the supported resource types in this FHIR store.`, - Elem: &resource_healthcare_fhir_store_schema.Schema{ - Type: resource_healthcare_fhir_store_schema.TypeString, - }, - }, - }, - }, - }, - "self_link": { - Type: resource_healthcare_fhir_store_schema.TypeString, - Computed: true, - Description: `The fully qualified name of this dataset`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceHealthcareFhirStoreCreate(d *resource_healthcare_fhir_store_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandHealthcareFhirStoreName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_healthcare_fhir_store_reflect.ValueOf(nameProp)) && (ok || !resource_healthcare_fhir_store_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - versionProp, err := expandHealthcareFhirStoreVersion(d.Get("version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version"); !isEmptyValue(resource_healthcare_fhir_store_reflect.ValueOf(versionProp)) && (ok || !resource_healthcare_fhir_store_reflect.DeepEqual(v, versionProp)) { - obj["version"] = versionProp - } - enableUpdateCreateProp, err := expandHealthcareFhirStoreEnableUpdateCreate(d.Get("enable_update_create"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("enable_update_create"); !isEmptyValue(resource_healthcare_fhir_store_reflect.ValueOf(enableUpdateCreateProp)) && (ok || !resource_healthcare_fhir_store_reflect.DeepEqual(v, enableUpdateCreateProp)) { - obj["enableUpdateCreate"] = enableUpdateCreateProp - } - disableReferentialIntegrityProp, err := expandHealthcareFhirStoreDisableReferentialIntegrity(d.Get("disable_referential_integrity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disable_referential_integrity"); !isEmptyValue(resource_healthcare_fhir_store_reflect.ValueOf(disableReferentialIntegrityProp)) && (ok || !resource_healthcare_fhir_store_reflect.DeepEqual(v, disableReferentialIntegrityProp)) { - obj["disableReferentialIntegrity"] = disableReferentialIntegrityProp - } - disableResourceVersioningProp, err := expandHealthcareFhirStoreDisableResourceVersioning(d.Get("disable_resource_versioning"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disable_resource_versioning"); !isEmptyValue(resource_healthcare_fhir_store_reflect.ValueOf(disableResourceVersioningProp)) && (ok || !resource_healthcare_fhir_store_reflect.DeepEqual(v, disableResourceVersioningProp)) { - obj["disableResourceVersioning"] = disableResourceVersioningProp - } - enableHistoryImportProp, err := expandHealthcareFhirStoreEnableHistoryImport(d.Get("enable_history_import"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_history_import"); !isEmptyValue(resource_healthcare_fhir_store_reflect.ValueOf(enableHistoryImportProp)) && (ok || !resource_healthcare_fhir_store_reflect.DeepEqual(v, enableHistoryImportProp)) { - obj["enableHistoryImport"] = enableHistoryImportProp - } - labelsProp, err := expandHealthcareFhirStoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_healthcare_fhir_store_reflect.ValueOf(labelsProp)) && (ok || 
!resource_healthcare_fhir_store_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - notificationConfigProp, err := expandHealthcareFhirStoreNotificationConfig(d.Get("notification_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_config"); !isEmptyValue(resource_healthcare_fhir_store_reflect.ValueOf(notificationConfigProp)) && (ok || !resource_healthcare_fhir_store_reflect.DeepEqual(v, notificationConfigProp)) { - obj["notificationConfig"] = notificationConfigProp - } - streamConfigsProp, err := expandHealthcareFhirStoreStreamConfigs(d.Get("stream_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("stream_configs"); !isEmptyValue(resource_healthcare_fhir_store_reflect.ValueOf(streamConfigsProp)) && (ok || !resource_healthcare_fhir_store_reflect.DeepEqual(v, streamConfigsProp)) { - obj["streamConfigs"] = streamConfigsProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/fhirStores?fhirStoreId={{name}}") - if err != nil { - return err - } - - resource_healthcare_fhir_store_log.Printf("[DEBUG] Creating new FhirStore: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_healthcare_fhir_store_schema.TimeoutCreate)) - if err != nil { - return resource_healthcare_fhir_store_fmt.Errorf("Error creating FhirStore: %s", err) - } - - id, err := replaceVars(d, config, "{{dataset}}/fhirStores/{{name}}") - if err != nil { - return resource_healthcare_fhir_store_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_healthcare_fhir_store_log.Printf("[DEBUG] Finished creating FhirStore %q: %#v", d.Id(), res) - - return resourceHealthcareFhirStoreRead(d, meta) -} - -func resourceHealthcareFhirStoreRead(d 
*resource_healthcare_fhir_store_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/fhirStores/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_healthcare_fhir_store_fmt.Sprintf("HealthcareFhirStore %q", d.Id())) - } - - res, err = resourceHealthcareFhirStoreDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_healthcare_fhir_store_log.Printf("[DEBUG] Removing HealthcareFhirStore because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("name", flattenHealthcareFhirStoreName(res["name"], d, config)); err != nil { - return resource_healthcare_fhir_store_fmt.Errorf("Error reading FhirStore: %s", err) - } - if err := d.Set("version", flattenHealthcareFhirStoreVersion(res["version"], d, config)); err != nil { - return resource_healthcare_fhir_store_fmt.Errorf("Error reading FhirStore: %s", err) - } - if err := d.Set("enable_update_create", flattenHealthcareFhirStoreEnableUpdateCreate(res["enableUpdateCreate"], d, config)); err != nil { - return resource_healthcare_fhir_store_fmt.Errorf("Error reading FhirStore: %s", err) - } - if err := d.Set("disable_referential_integrity", flattenHealthcareFhirStoreDisableReferentialIntegrity(res["disableReferentialIntegrity"], d, config)); err != nil { - return resource_healthcare_fhir_store_fmt.Errorf("Error reading FhirStore: %s", err) - } - if err := d.Set("disable_resource_versioning", flattenHealthcareFhirStoreDisableResourceVersioning(res["disableResourceVersioning"], d, config)); err != nil { - return 
resource_healthcare_fhir_store_fmt.Errorf("Error reading FhirStore: %s", err) - } - if err := d.Set("enable_history_import", flattenHealthcareFhirStoreEnableHistoryImport(res["enableHistoryImport"], d, config)); err != nil { - return resource_healthcare_fhir_store_fmt.Errorf("Error reading FhirStore: %s", err) - } - if err := d.Set("labels", flattenHealthcareFhirStoreLabels(res["labels"], d, config)); err != nil { - return resource_healthcare_fhir_store_fmt.Errorf("Error reading FhirStore: %s", err) - } - if err := d.Set("notification_config", flattenHealthcareFhirStoreNotificationConfig(res["notificationConfig"], d, config)); err != nil { - return resource_healthcare_fhir_store_fmt.Errorf("Error reading FhirStore: %s", err) - } - if err := d.Set("stream_configs", flattenHealthcareFhirStoreStreamConfigs(res["streamConfigs"], d, config)); err != nil { - return resource_healthcare_fhir_store_fmt.Errorf("Error reading FhirStore: %s", err) - } - - return nil -} - -func resourceHealthcareFhirStoreUpdate(d *resource_healthcare_fhir_store_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - enableUpdateCreateProp, err := expandHealthcareFhirStoreEnableUpdateCreate(d.Get("enable_update_create"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_update_create"); !isEmptyValue(resource_healthcare_fhir_store_reflect.ValueOf(v)) && (ok || !resource_healthcare_fhir_store_reflect.DeepEqual(v, enableUpdateCreateProp)) { - obj["enableUpdateCreate"] = enableUpdateCreateProp - } - labelsProp, err := expandHealthcareFhirStoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_healthcare_fhir_store_reflect.ValueOf(v)) && (ok || !resource_healthcare_fhir_store_reflect.DeepEqual(v, 
labelsProp)) { - obj["labels"] = labelsProp - } - notificationConfigProp, err := expandHealthcareFhirStoreNotificationConfig(d.Get("notification_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_config"); !isEmptyValue(resource_healthcare_fhir_store_reflect.ValueOf(v)) && (ok || !resource_healthcare_fhir_store_reflect.DeepEqual(v, notificationConfigProp)) { - obj["notificationConfig"] = notificationConfigProp - } - streamConfigsProp, err := expandHealthcareFhirStoreStreamConfigs(d.Get("stream_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("stream_configs"); !isEmptyValue(resource_healthcare_fhir_store_reflect.ValueOf(v)) && (ok || !resource_healthcare_fhir_store_reflect.DeepEqual(v, streamConfigsProp)) { - obj["streamConfigs"] = streamConfigsProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/fhirStores/{{name}}") - if err != nil { - return err - } - - resource_healthcare_fhir_store_log.Printf("[DEBUG] Updating FhirStore %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("enable_update_create") { - updateMask = append(updateMask, "enableUpdateCreate") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("notification_config") { - updateMask = append(updateMask, "notificationConfig") - } - - if d.HasChange("stream_configs") { - updateMask = append(updateMask, "streamConfigs") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_healthcare_fhir_store_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_healthcare_fhir_store_schema.TimeoutUpdate)) - - if err != nil { - return resource_healthcare_fhir_store_fmt.Errorf("Error updating FhirStore %q: 
%s", d.Id(), err) - } else { - resource_healthcare_fhir_store_log.Printf("[DEBUG] Finished updating FhirStore %q: %#v", d.Id(), res) - } - - return resourceHealthcareFhirStoreRead(d, meta) -} - -func resourceHealthcareFhirStoreDelete(d *resource_healthcare_fhir_store_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/fhirStores/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_healthcare_fhir_store_log.Printf("[DEBUG] Deleting FhirStore %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_healthcare_fhir_store_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "FhirStore") - } - - resource_healthcare_fhir_store_log.Printf("[DEBUG] Finished deleting FhirStore %q: %#v", d.Id(), res) - return nil -} - -func resourceHealthcareFhirStoreImport(d *resource_healthcare_fhir_store_schema.ResourceData, meta interface{}) ([]*resource_healthcare_fhir_store_schema.ResourceData, error) { - - config := meta.(*Config) - - fhirStoreId, err := parseHealthcareFhirStoreId(d.Id(), config) - if err != nil { - return nil, err - } - - if err := d.Set("dataset", fhirStoreId.DatasetId.datasetId()); err != nil { - return nil, resource_healthcare_fhir_store_fmt.Errorf("Error setting dataset: %s", err) - } - if err := d.Set("name", fhirStoreId.Name); err != nil { - return nil, resource_healthcare_fhir_store_fmt.Errorf("Error setting name: %s", err) - } - - return []*resource_healthcare_fhir_store_schema.ResourceData{d}, nil -} - -func flattenHealthcareFhirStoreName(v interface{}, d *resource_healthcare_fhir_store_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenHealthcareFhirStoreVersion(v interface{}, d *resource_healthcare_fhir_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreEnableUpdateCreate(v interface{}, d *resource_healthcare_fhir_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreDisableReferentialIntegrity(v interface{}, d *resource_healthcare_fhir_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreDisableResourceVersioning(v interface{}, d *resource_healthcare_fhir_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreEnableHistoryImport(v interface{}, d *resource_healthcare_fhir_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreLabels(v interface{}, d *resource_healthcare_fhir_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreNotificationConfig(v interface{}, d *resource_healthcare_fhir_store_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pubsub_topic"] = - flattenHealthcareFhirStoreNotificationConfigPubsubTopic(original["pubsubTopic"], d, config) - return []interface{}{transformed} -} - -func flattenHealthcareFhirStoreNotificationConfigPubsubTopic(v interface{}, d *resource_healthcare_fhir_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreStreamConfigs(v interface{}, d *resource_healthcare_fhir_store_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := 
raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "resource_types": flattenHealthcareFhirStoreStreamConfigsResourceTypes(original["resourceTypes"], d, config), - "bigquery_destination": flattenHealthcareFhirStoreStreamConfigsBigqueryDestination(original["bigqueryDestination"], d, config), - }) - } - return transformed -} - -func flattenHealthcareFhirStoreStreamConfigsResourceTypes(v interface{}, d *resource_healthcare_fhir_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreStreamConfigsBigqueryDestination(v interface{}, d *resource_healthcare_fhir_store_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dataset_uri"] = - flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationDatasetUri(original["datasetUri"], d, config) - transformed["schema_config"] = - flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfig(original["schemaConfig"], d, config) - return []interface{}{transformed} -} - -func flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationDatasetUri(v interface{}, d *resource_healthcare_fhir_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfig(v interface{}, d *resource_healthcare_fhir_store_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["schema_type"] = - flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaType(original["schemaType"], d, config) - transformed["recursive_structure_depth"] = - 
flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigRecursiveStructureDepth(original["recursiveStructureDepth"], d, config) - return []interface{}{transformed} -} - -func flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaType(v interface{}, d *resource_healthcare_fhir_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigRecursiveStructureDepth(v interface{}, d *resource_healthcare_fhir_store_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_healthcare_fhir_store_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func expandHealthcareFhirStoreName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreEnableUpdateCreate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreDisableReferentialIntegrity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreDisableResourceVersioning(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreEnableHistoryImport(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) 
{ - m[k] = val.(string) - } - return m, nil -} - -func expandHealthcareFhirStoreNotificationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubTopic, err := expandHealthcareFhirStoreNotificationConfigPubsubTopic(original["pubsub_topic"], d, config) - if err != nil { - return nil, err - } else if val := resource_healthcare_fhir_store_reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubTopic"] = transformedPubsubTopic - } - - return transformed, nil -} - -func expandHealthcareFhirStoreNotificationConfigPubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreStreamConfigs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResourceTypes, err := expandHealthcareFhirStoreStreamConfigsResourceTypes(original["resource_types"], d, config) - if err != nil { - return nil, err - } else if val := resource_healthcare_fhir_store_reflect.ValueOf(transformedResourceTypes); val.IsValid() && !isEmptyValue(val) { - transformed["resourceTypes"] = transformedResourceTypes - } - - transformedBigqueryDestination, err := expandHealthcareFhirStoreStreamConfigsBigqueryDestination(original["bigquery_destination"], d, config) - if err != nil { - return nil, err - } else if val := resource_healthcare_fhir_store_reflect.ValueOf(transformedBigqueryDestination); val.IsValid() && !isEmptyValue(val) { - transformed["bigqueryDestination"] = transformedBigqueryDestination - } - - 
req = append(req, transformed) - } - return req, nil -} - -func expandHealthcareFhirStoreStreamConfigsResourceTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreStreamConfigsBigqueryDestination(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDatasetUri, err := expandHealthcareFhirStoreStreamConfigsBigqueryDestinationDatasetUri(original["dataset_uri"], d, config) - if err != nil { - return nil, err - } else if val := resource_healthcare_fhir_store_reflect.ValueOf(transformedDatasetUri); val.IsValid() && !isEmptyValue(val) { - transformed["datasetUri"] = transformedDatasetUri - } - - transformedSchemaConfig, err := expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfig(original["schema_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_healthcare_fhir_store_reflect.ValueOf(transformedSchemaConfig); val.IsValid() && !isEmptyValue(val) { - transformed["schemaConfig"] = transformedSchemaConfig - } - - return transformed, nil -} - -func expandHealthcareFhirStoreStreamConfigsBigqueryDestinationDatasetUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSchemaType, err := expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaType(original["schema_type"], d, config) - if err != nil { - return nil, 
err - } else if val := resource_healthcare_fhir_store_reflect.ValueOf(transformedSchemaType); val.IsValid() && !isEmptyValue(val) { - transformed["schemaType"] = transformedSchemaType - } - - transformedRecursiveStructureDepth, err := expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigRecursiveStructureDepth(original["recursive_structure_depth"], d, config) - if err != nil { - return nil, err - } else if val := resource_healthcare_fhir_store_reflect.ValueOf(transformedRecursiveStructureDepth); val.IsValid() && !isEmptyValue(val) { - transformed["recursiveStructureDepth"] = transformedRecursiveStructureDepth - } - - return transformed, nil -} - -func expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigSchemaType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareFhirStoreStreamConfigsBigqueryDestinationSchemaConfigRecursiveStructureDepth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceHealthcareFhirStoreDecoder(d *resource_healthcare_fhir_store_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - - res["name"] = d.Get("name").(string) - return res, nil -} - -func resourceHealthcareHl7V2Store() *resource_healthcare_hl7_v2_store_schema.Resource { - return &resource_healthcare_hl7_v2_store_schema.Resource{ - Create: resourceHealthcareHl7V2StoreCreate, - Read: resourceHealthcareHl7V2StoreRead, - Update: resourceHealthcareHl7V2StoreUpdate, - Delete: resourceHealthcareHl7V2StoreDelete, - - Importer: &resource_healthcare_hl7_v2_store_schema.ResourceImporter{ - State: resourceHealthcareHl7V2StoreImport, - }, - - Timeouts: &resource_healthcare_hl7_v2_store_schema.ResourceTimeout{ - Create: resource_healthcare_hl7_v2_store_schema.DefaultTimeout(4 * resource_healthcare_hl7_v2_store_time.Minute), - Update: 
resource_healthcare_hl7_v2_store_schema.DefaultTimeout(4 * resource_healthcare_hl7_v2_store_time.Minute), - Delete: resource_healthcare_hl7_v2_store_schema.DefaultTimeout(4 * resource_healthcare_hl7_v2_store_time.Minute), - }, - - Schema: map[string]*resource_healthcare_hl7_v2_store_schema.Schema{ - "dataset": { - Type: resource_healthcare_hl7_v2_store_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Identifies the dataset addressed by this request. Must be in the format -'projects/{project}/locations/{location}/datasets/{dataset}'`, - }, - "name": { - Type: resource_healthcare_hl7_v2_store_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name for the Hl7V2Store. - -** Changing this property may recreate the Hl7v2 store (removing all data) **`, - }, - "labels": { - Type: resource_healthcare_hl7_v2_store_schema.TypeMap, - Optional: true, - Description: `User-supplied key-value pairs used to organize HL7v2 stores. - -Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must -conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} - -Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 -bytes, and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} - -No more than 64 labels can be associated with a given store. - -An object containing a list of "key": value pairs. 
-Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &resource_healthcare_hl7_v2_store_schema.Schema{Type: resource_healthcare_hl7_v2_store_schema.TypeString}, - }, - "notification_config": { - Type: resource_healthcare_hl7_v2_store_schema.TypeList, - Optional: true, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_healthcare_hl7_v2_store_schema.Resource{ - Schema: map[string]*resource_healthcare_hl7_v2_store_schema.Schema{ - "pubsub_topic": { - Type: resource_healthcare_hl7_v2_store_schema.TypeString, - Required: true, - Description: `The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. -PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. -It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message -was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a -project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given -Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail.`, - }, - }, - }, - }, - "notification_configs": { - Type: resource_healthcare_hl7_v2_store_schema.TypeList, - Optional: true, - Description: `A list of notification configs. Each configuration uses a filter to determine whether to publish a -message (both Ingest & Create) on the corresponding notification destination. Only the message name -is sent as part of the notification. Supplied by the client.`, - Elem: &resource_healthcare_hl7_v2_store_schema.Resource{ - Schema: map[string]*resource_healthcare_hl7_v2_store_schema.Schema{ - "pubsub_topic": { - Type: resource_healthcare_hl7_v2_store_schema.TypeString, - Required: true, - Description: `The Cloud Pub/Sub topic that notifications of changes are published on. Supplied by the client. 
-PubsubMessage.Data will contain the resource name. PubsubMessage.MessageId is the ID of this message. -It is guaranteed to be unique within the topic. PubsubMessage.PublishTime is the time at which the message -was published. Notifications are only sent if the topic is non-empty. Topic names must be scoped to a -project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given -Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail. - -If a notification cannot be published to Cloud Pub/Sub, errors will be logged to Stackdriver`, - }, - "filter": { - Type: resource_healthcare_hl7_v2_store_schema.TypeString, - Optional: true, - Description: `Restricts notifications sent for messages matching a filter. If this is empty, all messages -are matched. Syntax: https://cloud.google.com/appengine/docs/standard/python/search/query_strings - -Fields/functions available for filtering are: - -* messageType, from the MSH-9.1 field. For example, NOT messageType = "ADT". -* send_date or sendDate, the YYYY-MM-DD date the message was sent in the dataset's timeZone, from the MSH-7 segment. For example, send_date < "2017-01-02". -* sendTime, the timestamp when the message was sent, using the RFC3339 time format for comparisons, from the MSH-7 segment. For example, sendTime < "2017-01-02T00:00:00-05:00". -* sendFacility, the care center that the message came from, from the MSH-4 segment. For example, sendFacility = "ABC". -* PatientId(value, type), which matches if the message lists a patient having an ID of the given value and type in the PID-2, PID-3, or PID-4 segments. For example, PatientId("123456", "MRN"). -* labels.x, a string value of the label with key x as set using the Message.labels map. For example, labels."priority"="high". The operator :* can be used to assert the existence of a label. 
For example, labels."priority":*.`, - }, - }, - }, - }, - "parser_config": { - Type: resource_healthcare_hl7_v2_store_schema.TypeList, - Computed: true, - Optional: true, - Description: `A nested object resource`, - MaxItems: 1, - Elem: &resource_healthcare_hl7_v2_store_schema.Resource{ - Schema: map[string]*resource_healthcare_hl7_v2_store_schema.Schema{ - "allow_null_header": { - Type: resource_healthcare_hl7_v2_store_schema.TypeBool, - Optional: true, - Description: `Determines whether messages with no header are allowed.`, - AtLeastOneOf: []string{"parser_config.0.allow_null_header", "parser_config.0.segment_terminator", "parser_config.0.schema"}, - }, - "schema": { - Type: resource_healthcare_hl7_v2_store_schema.TypeString, - Optional: true, - ValidateFunc: resource_healthcare_hl7_v2_store_validation.StringIsJSON, - StateFunc: func(v interface{}) string { - s, _ := resource_healthcare_hl7_v2_store_structure.NormalizeJsonString(v) - return s - }, - Description: `JSON encoded string for schemas used to parse messages in this -store if schematized parsing is desired.`, - AtLeastOneOf: []string{"parser_config.0.allow_null_header", "parser_config.0.segment_terminator", "parser_config.0.schema", "parser_config.0.version"}, - }, - "segment_terminator": { - Type: resource_healthcare_hl7_v2_store_schema.TypeString, - Optional: true, - ValidateFunc: validateBase64String, - Description: `Byte(s) to be used as the segment terminator. If this is unset, '\r' will be used as segment terminator. - -A base64-encoded string.`, - AtLeastOneOf: []string{"parser_config.0.allow_null_header", "parser_config.0.segment_terminator", "parser_config.0.schema"}, - }, - "version": { - Type: resource_healthcare_hl7_v2_store_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_healthcare_hl7_v2_store_validation.StringInSlice([]string{"V1", "V2", ""}, false), - Description: `The version of the unschematized parser to be used when a custom 'schema' is not set. 
Default value: "V1" Possible values: ["V1", "V2"]`, - Default: "V1", - }, - }, - }, - }, - "self_link": { - Type: resource_healthcare_hl7_v2_store_schema.TypeString, - Computed: true, - Description: `The fully qualified name of this dataset`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceHealthcareHl7V2StoreCreate(d *resource_healthcare_hl7_v2_store_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandHealthcareHl7V2StoreName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_healthcare_hl7_v2_store_reflect.ValueOf(nameProp)) && (ok || !resource_healthcare_hl7_v2_store_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - parserConfigProp, err := expandHealthcareHl7V2StoreParserConfig(d.Get("parser_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parser_config"); !isEmptyValue(resource_healthcare_hl7_v2_store_reflect.ValueOf(parserConfigProp)) && (ok || !resource_healthcare_hl7_v2_store_reflect.DeepEqual(v, parserConfigProp)) { - obj["parserConfig"] = parserConfigProp - } - labelsProp, err := expandHealthcareHl7V2StoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_healthcare_hl7_v2_store_reflect.ValueOf(labelsProp)) && (ok || !resource_healthcare_hl7_v2_store_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - notificationConfigsProp, err := expandHealthcareHl7V2StoreNotificationConfigs(d.Get("notification_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_configs"); !isEmptyValue(resource_healthcare_hl7_v2_store_reflect.ValueOf(notificationConfigsProp)) && (ok || 
!resource_healthcare_hl7_v2_store_reflect.DeepEqual(v, notificationConfigsProp)) { - obj["notificationConfigs"] = notificationConfigsProp - } - notificationConfigProp, err := expandHealthcareHl7V2StoreNotificationConfig(d.Get("notification_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_config"); !isEmptyValue(resource_healthcare_hl7_v2_store_reflect.ValueOf(notificationConfigProp)) && (ok || !resource_healthcare_hl7_v2_store_reflect.DeepEqual(v, notificationConfigProp)) { - obj["notificationConfig"] = notificationConfigProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/hl7V2Stores?hl7V2StoreId={{name}}") - if err != nil { - return err - } - - resource_healthcare_hl7_v2_store_log.Printf("[DEBUG] Creating new Hl7V2Store: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_healthcare_hl7_v2_store_schema.TimeoutCreate)) - if err != nil { - return resource_healthcare_hl7_v2_store_fmt.Errorf("Error creating Hl7V2Store: %s", err) - } - - id, err := replaceVars(d, config, "{{dataset}}/hl7V2Stores/{{name}}") - if err != nil { - return resource_healthcare_hl7_v2_store_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_healthcare_hl7_v2_store_log.Printf("[DEBUG] Finished creating Hl7V2Store %q: %#v", d.Id(), res) - - return resourceHealthcareHl7V2StoreRead(d, meta) -} - -func resourceHealthcareHl7V2StoreRead(d *resource_healthcare_hl7_v2_store_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/hl7V2Stores/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_healthcare_hl7_v2_store_fmt.Sprintf("HealthcareHl7V2Store %q", d.Id())) - } - - res, err = resourceHealthcareHl7V2StoreDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_healthcare_hl7_v2_store_log.Printf("[DEBUG] Removing HealthcareHl7V2Store because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("name", flattenHealthcareHl7V2StoreName(res["name"], d, config)); err != nil { - return resource_healthcare_hl7_v2_store_fmt.Errorf("Error reading Hl7V2Store: %s", err) - } - if err := d.Set("parser_config", flattenHealthcareHl7V2StoreParserConfig(res["parserConfig"], d, config)); err != nil { - return resource_healthcare_hl7_v2_store_fmt.Errorf("Error reading Hl7V2Store: %s", err) - } - if err := d.Set("labels", flattenHealthcareHl7V2StoreLabels(res["labels"], d, config)); err != nil { - return resource_healthcare_hl7_v2_store_fmt.Errorf("Error reading Hl7V2Store: %s", err) - } - if err := d.Set("notification_configs", flattenHealthcareHl7V2StoreNotificationConfigs(res["notificationConfigs"], d, config)); err != nil { - return resource_healthcare_hl7_v2_store_fmt.Errorf("Error reading Hl7V2Store: %s", err) - } - if err := d.Set("notification_config", flattenHealthcareHl7V2StoreNotificationConfig(res["notificationConfig"], d, config)); err != nil { - return resource_healthcare_hl7_v2_store_fmt.Errorf("Error reading Hl7V2Store: %s", err) - } - - return nil -} - -func resourceHealthcareHl7V2StoreUpdate(d *resource_healthcare_hl7_v2_store_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - parserConfigProp, err := 
expandHealthcareHl7V2StoreParserConfig(d.Get("parser_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parser_config"); !isEmptyValue(resource_healthcare_hl7_v2_store_reflect.ValueOf(v)) && (ok || !resource_healthcare_hl7_v2_store_reflect.DeepEqual(v, parserConfigProp)) { - obj["parserConfig"] = parserConfigProp - } - labelsProp, err := expandHealthcareHl7V2StoreLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_healthcare_hl7_v2_store_reflect.ValueOf(v)) && (ok || !resource_healthcare_hl7_v2_store_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - notificationConfigsProp, err := expandHealthcareHl7V2StoreNotificationConfigs(d.Get("notification_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_configs"); !isEmptyValue(resource_healthcare_hl7_v2_store_reflect.ValueOf(v)) && (ok || !resource_healthcare_hl7_v2_store_reflect.DeepEqual(v, notificationConfigsProp)) { - obj["notificationConfigs"] = notificationConfigsProp - } - notificationConfigProp, err := expandHealthcareHl7V2StoreNotificationConfig(d.Get("notification_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_config"); !isEmptyValue(resource_healthcare_hl7_v2_store_reflect.ValueOf(v)) && (ok || !resource_healthcare_hl7_v2_store_reflect.DeepEqual(v, notificationConfigProp)) { - obj["notificationConfig"] = notificationConfigProp - } - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/hl7V2Stores/{{name}}") - if err != nil { - return err - } - - resource_healthcare_hl7_v2_store_log.Printf("[DEBUG] Updating Hl7V2Store %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("parser_config") { - updateMask = append(updateMask, "parser_config.allow_null_header", - "parser_config.segment_terminator", - "parser_config.schema") - } - - if 
d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("notification_configs") { - updateMask = append(updateMask, "notificationConfigs") - } - - if d.HasChange("notification_config") { - updateMask = append(updateMask, "notificationConfig") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_healthcare_hl7_v2_store_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_healthcare_hl7_v2_store_schema.TimeoutUpdate)) - - if err != nil { - return resource_healthcare_hl7_v2_store_fmt.Errorf("Error updating Hl7V2Store %q: %s", d.Id(), err) - } else { - resource_healthcare_hl7_v2_store_log.Printf("[DEBUG] Finished updating Hl7V2Store %q: %#v", d.Id(), res) - } - - return resourceHealthcareHl7V2StoreRead(d, meta) -} - -func resourceHealthcareHl7V2StoreDelete(d *resource_healthcare_hl7_v2_store_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{HealthcareBasePath}}{{dataset}}/hl7V2Stores/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_healthcare_hl7_v2_store_log.Printf("[DEBUG] Deleting Hl7V2Store %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_healthcare_hl7_v2_store_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Hl7V2Store") - } - - resource_healthcare_hl7_v2_store_log.Printf("[DEBUG] Finished deleting Hl7V2Store %q: %#v", d.Id(), res) - return nil -} - -func 
resourceHealthcareHl7V2StoreImport(d *resource_healthcare_hl7_v2_store_schema.ResourceData, meta interface{}) ([]*resource_healthcare_hl7_v2_store_schema.ResourceData, error) { - - config := meta.(*Config) - - hl7v2StoreId, err := parseHealthcareHl7V2StoreId(d.Id(), config) - if err != nil { - return nil, err - } - - if err := d.Set("dataset", hl7v2StoreId.DatasetId.datasetId()); err != nil { - return nil, resource_healthcare_hl7_v2_store_fmt.Errorf("Error setting dataset: %s", err) - } - if err := d.Set("name", hl7v2StoreId.Name); err != nil { - return nil, resource_healthcare_hl7_v2_store_fmt.Errorf("Error setting name: %s", err) - } - - return []*resource_healthcare_hl7_v2_store_schema.ResourceData{d}, nil -} - -func flattenHealthcareHl7V2StoreName(v interface{}, d *resource_healthcare_hl7_v2_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareHl7V2StoreParserConfig(v interface{}, d *resource_healthcare_hl7_v2_store_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allow_null_header"] = - flattenHealthcareHl7V2StoreParserConfigAllowNullHeader(original["allowNullHeader"], d, config) - transformed["segment_terminator"] = - flattenHealthcareHl7V2StoreParserConfigSegmentTerminator(original["segmentTerminator"], d, config) - transformed["schema"] = - flattenHealthcareHl7V2StoreParserConfigSchema(original["schema"], d, config) - transformed["version"] = - flattenHealthcareHl7V2StoreParserConfigVersion(original["version"], d, config) - return []interface{}{transformed} -} - -func flattenHealthcareHl7V2StoreParserConfigAllowNullHeader(v interface{}, d *resource_healthcare_hl7_v2_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareHl7V2StoreParserConfigSegmentTerminator(v interface{}, d 
*resource_healthcare_hl7_v2_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareHl7V2StoreParserConfigSchema(v interface{}, d *resource_healthcare_hl7_v2_store_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - b, err := resource_healthcare_hl7_v2_store_json.Marshal(v) - if err != nil { - - resource_healthcare_hl7_v2_store_log.Printf("[ERROR] failed to marshal schema to JSON: %v", err) - } - return string(b) -} - -func flattenHealthcareHl7V2StoreParserConfigVersion(v interface{}, d *resource_healthcare_hl7_v2_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareHl7V2StoreLabels(v interface{}, d *resource_healthcare_hl7_v2_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareHl7V2StoreNotificationConfigs(v interface{}, d *resource_healthcare_hl7_v2_store_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "pubsub_topic": flattenHealthcareHl7V2StoreNotificationConfigsPubsubTopic(original["pubsubTopic"], d, config), - "filter": flattenHealthcareHl7V2StoreNotificationConfigsFilter(original["filter"], d, config), - }) - } - return transformed -} - -func flattenHealthcareHl7V2StoreNotificationConfigsPubsubTopic(v interface{}, d *resource_healthcare_hl7_v2_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareHl7V2StoreNotificationConfigsFilter(v interface{}, d *resource_healthcare_hl7_v2_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenHealthcareHl7V2StoreNotificationConfig(v interface{}, d *resource_healthcare_hl7_v2_store_schema.ResourceData, config *Config) 
interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["pubsub_topic"] = - flattenHealthcareHl7V2StoreNotificationConfigPubsubTopic(original["pubsubTopic"], d, config) - return []interface{}{transformed} -} - -func flattenHealthcareHl7V2StoreNotificationConfigPubsubTopic(v interface{}, d *resource_healthcare_hl7_v2_store_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandHealthcareHl7V2StoreName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareHl7V2StoreParserConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowNullHeader, err := expandHealthcareHl7V2StoreParserConfigAllowNullHeader(original["allow_null_header"], d, config) - if err != nil { - return nil, err - } else if val := resource_healthcare_hl7_v2_store_reflect.ValueOf(transformedAllowNullHeader); val.IsValid() && !isEmptyValue(val) { - transformed["allowNullHeader"] = transformedAllowNullHeader - } - - transformedSegmentTerminator, err := expandHealthcareHl7V2StoreParserConfigSegmentTerminator(original["segment_terminator"], d, config) - if err != nil { - return nil, err - } else if val := resource_healthcare_hl7_v2_store_reflect.ValueOf(transformedSegmentTerminator); val.IsValid() && !isEmptyValue(val) { - transformed["segmentTerminator"] = transformedSegmentTerminator - } - - transformedSchema, err := expandHealthcareHl7V2StoreParserConfigSchema(original["schema"], d, config) - if err != nil { - return nil, err - } else if val := resource_healthcare_hl7_v2_store_reflect.ValueOf(transformedSchema); val.IsValid() && !isEmptyValue(val) { - 
transformed["schema"] = transformedSchema - } - - transformedVersion, err := expandHealthcareHl7V2StoreParserConfigVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := resource_healthcare_hl7_v2_store_reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - return transformed, nil -} - -func expandHealthcareHl7V2StoreParserConfigAllowNullHeader(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareHl7V2StoreParserConfigSegmentTerminator(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareHl7V2StoreParserConfigSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - b := []byte(v.(string)) - if len(b) == 0 { - return nil, nil - } - m := make(map[string]interface{}) - if err := resource_healthcare_hl7_v2_store_json.Unmarshal(b, &m); err != nil { - return nil, err - } - return m, nil -} - -func expandHealthcareHl7V2StoreParserConfigVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareHl7V2StoreLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandHealthcareHl7V2StoreNotificationConfigs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubTopic, err := 
expandHealthcareHl7V2StoreNotificationConfigsPubsubTopic(original["pubsub_topic"], d, config) - if err != nil { - return nil, err - } else if val := resource_healthcare_hl7_v2_store_reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubTopic"] = transformedPubsubTopic - } - - transformedFilter, err := expandHealthcareHl7V2StoreNotificationConfigsFilter(original["filter"], d, config) - if err != nil { - return nil, err - } else if val := resource_healthcare_hl7_v2_store_reflect.ValueOf(transformedFilter); val.IsValid() && !isEmptyValue(val) { - transformed["filter"] = transformedFilter - } - - req = append(req, transformed) - } - return req, nil -} - -func expandHealthcareHl7V2StoreNotificationConfigsPubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareHl7V2StoreNotificationConfigsFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandHealthcareHl7V2StoreNotificationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPubsubTopic, err := expandHealthcareHl7V2StoreNotificationConfigPubsubTopic(original["pubsub_topic"], d, config) - if err != nil { - return nil, err - } else if val := resource_healthcare_hl7_v2_store_reflect.ValueOf(transformedPubsubTopic); val.IsValid() && !isEmptyValue(val) { - transformed["pubsubTopic"] = transformedPubsubTopic - } - - return transformed, nil -} - -func expandHealthcareHl7V2StoreNotificationConfigPubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceHealthcareHl7V2StoreDecoder(d *resource_healthcare_hl7_v2_store_schema.ResourceData, meta 
interface{}, res map[string]interface{}) (map[string]interface{}, error) { - - res["name"] = d.Get("name").(string) - return res, nil -} - -var iamAuditConfigSchema = map[string]*resource_iam_audit_config_schema.Schema{ - "service": { - Type: resource_iam_audit_config_schema.TypeString, - Required: true, - Description: `Service which will be enabled for audit logging. The special value allServices covers all services.`, - }, - "audit_log_config": { - Type: resource_iam_audit_config_schema.TypeSet, - Required: true, - Description: `The configuration for logging of each type of permission. This can be specified multiple times.`, - Elem: &resource_iam_audit_config_schema.Resource{ - Schema: map[string]*resource_iam_audit_config_schema.Schema{ - "log_type": { - Type: resource_iam_audit_config_schema.TypeString, - Required: true, - Description: `Permission type for which logging is to be configured. Must be one of DATA_READ, DATA_WRITE, or ADMIN_READ.`, - }, - "exempted_members": { - Type: resource_iam_audit_config_schema.TypeSet, - Elem: &resource_iam_audit_config_schema.Schema{Type: resource_iam_audit_config_schema.TypeString}, - Optional: true, - Description: `Identities that do not cause logging for this type of permission. Each entry can have one of the following values:user:{emailid}: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com. serviceAccount:{emailid}: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com. group:{emailid}: An email address that represents a Google group. For example, admins@example.com. domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. 
For example, google.com or example.com.`, - }, - }, - }, - }, - "etag": { - Type: resource_iam_audit_config_schema.TypeString, - Computed: true, - Description: `The etag of iam policy`, - }, -} - -func ResourceIamAuditConfig(parentSpecificSchema map[string]*resource_iam_audit_config_schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc) *resource_iam_audit_config_schema.Resource { - return ResourceIamAuditConfigWithBatching(parentSpecificSchema, newUpdaterFunc, resourceIdParser, IamBatchingDisabled) -} - -func ResourceIamAuditConfigWithBatching(parentSpecificSchema map[string]*resource_iam_audit_config_schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc, enableBatching bool) *resource_iam_audit_config_schema.Resource { - return &resource_iam_audit_config_schema.Resource{ - Create: resourceIamAuditConfigCreateUpdate(newUpdaterFunc, enableBatching), - Read: resourceIamAuditConfigRead(newUpdaterFunc), - Update: resourceIamAuditConfigCreateUpdate(newUpdaterFunc, enableBatching), - Delete: resourceIamAuditConfigDelete(newUpdaterFunc, enableBatching), - Schema: mergeSchemas(iamAuditConfigSchema, parentSpecificSchema), - Importer: &resource_iam_audit_config_schema.ResourceImporter{ - State: iamAuditConfigImport(resourceIdParser), - }, - UseJSONNumber: true, - } -} - -func resourceIamAuditConfigRead(newUpdaterFunc newResourceIamUpdaterFunc) resource_iam_audit_config_schema.ReadFunc { - return func(d *resource_iam_audit_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - eAuditConfig := getResourceIamAuditConfig(d) - p, err := iamPolicyReadWithRetry(updater) - if err != nil { - return handleNotFoundError(err, d, resource_iam_audit_config_fmt.Sprintf("AuditConfig for %s on %q", eAuditConfig.Service, updater.DescribeResource())) - } - resource_iam_audit_config_log.Printf("[DEBUG]: 
Retrieved policy for %s: %+v", updater.DescribeResource(), p) - - var ac *resource_iam_audit_config_cloudresourcemanager.AuditConfig - for _, b := range p.AuditConfigs { - if b.Service != eAuditConfig.Service { - continue - } - ac = b - break - } - if ac == nil { - resource_iam_audit_config_log.Printf("[DEBUG]: AuditConfig for service %q not found in policy for %s, removing from state file.", eAuditConfig.Service, updater.DescribeResource()) - d.SetId("") - return nil - } - - if err := d.Set("etag", p.Etag); err != nil { - return resource_iam_audit_config_fmt.Errorf("Error setting etag: %s", err) - } - err = d.Set("audit_log_config", flattenAuditLogConfigs(ac.AuditLogConfigs)) - if err != nil { - return resource_iam_audit_config_fmt.Errorf("Error flattening audit log config: %s", err) - } - if err := d.Set("service", ac.Service); err != nil { - return resource_iam_audit_config_fmt.Errorf("Error setting service: %s", err) - } - return nil - } -} - -func iamAuditConfigImport(resourceIdParser resourceIdParserFunc) resource_iam_audit_config_schema.StateFunc { - return func(d *resource_iam_audit_config_schema.ResourceData, m interface{}) ([]*resource_iam_audit_config_schema.ResourceData, error) { - if resourceIdParser == nil { - return nil, resource_iam_audit_config_errors.New("Import not supported for this IAM resource.") - } - config := m.(*Config) - s := resource_iam_audit_config_strings.Fields(d.Id()) - if len(s) != 2 { - d.SetId("") - return nil, resource_iam_audit_config_fmt.Errorf("Wrong number of parts to AuditConfig id %s; expected 'resource_name service'.", s) - } - id, service := s[0], s[1] - - d.SetId(id) - if err := d.Set("service", service); err != nil { - return nil, resource_iam_audit_config_fmt.Errorf("Error setting service: %s", err) - } - err := resourceIdParser(d, config) - if err != nil { - return nil, err - } - - d.SetId(d.Id() + "/audit_config/" + service) - return []*resource_iam_audit_config_schema.ResourceData{d}, nil - } -} - -func 
resourceIamAuditConfigCreateUpdate(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) func(*resource_iam_audit_config_schema.ResourceData, interface{}) error { - return func(d *resource_iam_audit_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - ac := getResourceIamAuditConfig(d) - modifyF := func(ep *resource_iam_audit_config_cloudresourcemanager.Policy) error { - cleaned := removeAllAuditConfigsWithService(ep.AuditConfigs, ac.Service) - ep.AuditConfigs = append(cleaned, ac) - return nil - } - if enableBatching { - err = BatchRequestModifyIamPolicy(updater, modifyF, config, resource_iam_audit_config_fmt.Sprintf( - "Overwrite audit config for service %s on resource %q", ac.Service, updater.DescribeResource())) - } else { - err = iamPolicyReadModifyWrite(updater, modifyF) - } - if err != nil { - return err - } - d.SetId(updater.GetResourceId() + "/audit_config/" + ac.Service) - return resourceIamAuditConfigRead(newUpdaterFunc)(d, meta) - } -} - -func resourceIamAuditConfigDelete(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) resource_iam_audit_config_schema.DeleteFunc { - return func(d *resource_iam_audit_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - ac := getResourceIamAuditConfig(d) - modifyF := func(ep *resource_iam_audit_config_cloudresourcemanager.Policy) error { - ep.AuditConfigs = removeAllAuditConfigsWithService(ep.AuditConfigs, ac.Service) - return nil - } - if enableBatching { - err = BatchRequestModifyIamPolicy(updater, modifyF, config, resource_iam_audit_config_fmt.Sprintf( - "Delete audit config for service %s on resource %q", ac.Service, updater.DescribeResource())) - } else { - err = iamPolicyReadModifyWrite(updater, modifyF) - } - if err != nil { - return handleNotFoundError(err, d, 
resource_iam_audit_config_fmt.Sprintf("Resource %s with IAM audit config %q", updater.DescribeResource(), d.Id())) - } - - return resourceIamAuditConfigRead(newUpdaterFunc)(d, meta) - } -} - -func getResourceIamAuditConfig(d *resource_iam_audit_config_schema.ResourceData) *resource_iam_audit_config_cloudresourcemanager.AuditConfig { - auditLogConfigSet := d.Get("audit_log_config").(*resource_iam_audit_config_schema.Set) - auditLogConfigs := make([]*resource_iam_audit_config_cloudresourcemanager.AuditLogConfig, auditLogConfigSet.Len()) - for x, y := range auditLogConfigSet.List() { - logConfig := y.(map[string]interface{}) - auditLogConfigs[x] = &resource_iam_audit_config_cloudresourcemanager.AuditLogConfig{ - LogType: logConfig["log_type"].(string), - ExemptedMembers: convertStringArr(logConfig["exempted_members"].(*resource_iam_audit_config_schema.Set).List()), - } - } - return &resource_iam_audit_config_cloudresourcemanager.AuditConfig{ - AuditLogConfigs: auditLogConfigs, - Service: d.Get("service").(string), - } -} - -func flattenAuditLogConfigs(configs []*resource_iam_audit_config_cloudresourcemanager.AuditLogConfig) *resource_iam_audit_config_schema.Set { - auditLogConfigSchema := iamAuditConfigSchema["audit_log_config"].Elem.(*resource_iam_audit_config_schema.Resource) - exemptedMemberSchema := auditLogConfigSchema.Schema["exempted_members"].Elem.(*resource_iam_audit_config_schema.Schema) - res := resource_iam_audit_config_schema.NewSet(resource_iam_audit_config_schema.HashResource(auditLogConfigSchema), []interface{}{}) - for _, conf := range configs { - res.Add(map[string]interface{}{ - "log_type": conf.LogType, - "exempted_members": resource_iam_audit_config_schema.NewSet(resource_iam_audit_config_schema.HashSchema(exemptedMemberSchema), convertStringArrToInterface(conf.ExemptedMembers)), - }) - } - return res -} - -var iamBindingSchema = map[string]*resource_iam_binding_schema.Schema{ - "role": { - Type: resource_iam_binding_schema.TypeString, - Required: 
true, - ForceNew: true, - }, - "members": { - Type: resource_iam_binding_schema.TypeSet, - Required: true, - Elem: &resource_iam_binding_schema.Schema{ - Type: resource_iam_binding_schema.TypeString, - DiffSuppressFunc: caseDiffSuppress, - ValidateFunc: resource_iam_binding_validation.StringDoesNotMatch(resource_iam_binding_regexp.MustCompile("^deleted:"), "Terraform does not support IAM bindings for deleted principals"), - }, - Set: func(v interface{}) int { - return resource_iam_binding_schema.HashString(resource_iam_binding_strings.ToLower(v.(string))) - }, - }, - "condition": { - Type: resource_iam_binding_schema.TypeList, - Optional: true, - MaxItems: 1, - ForceNew: true, - Elem: &resource_iam_binding_schema.Resource{ - Schema: map[string]*resource_iam_binding_schema.Schema{ - "expression": { - Type: resource_iam_binding_schema.TypeString, - Required: true, - ForceNew: true, - }, - "title": { - Type: resource_iam_binding_schema.TypeString, - Required: true, - ForceNew: true, - }, - "description": { - Type: resource_iam_binding_schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - "etag": { - Type: resource_iam_binding_schema.TypeString, - Computed: true, - }, -} - -func ResourceIamBinding(parentSpecificSchema map[string]*resource_iam_binding_schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc, options ...func(*IamSettings)) *resource_iam_binding_schema.Resource { - return ResourceIamBindingWithBatching(parentSpecificSchema, newUpdaterFunc, resourceIdParser, IamBatchingDisabled, options...) 
-} - -func ResourceIamBindingWithBatching(parentSpecificSchema map[string]*resource_iam_binding_schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc, enableBatching bool, options ...func(*IamSettings)) *resource_iam_binding_schema.Resource { - settings := &IamSettings{} - for _, o := range options { - o(settings) - } - - return &resource_iam_binding_schema.Resource{ - Create: resourceIamBindingCreateUpdate(newUpdaterFunc, enableBatching), - Read: resourceIamBindingRead(newUpdaterFunc), - Update: resourceIamBindingCreateUpdate(newUpdaterFunc, enableBatching), - Delete: resourceIamBindingDelete(newUpdaterFunc, enableBatching), - - DeprecationMessage: settings.DeprecationMessage, - - Schema: mergeSchemas(iamBindingSchema, parentSpecificSchema), - Importer: &resource_iam_binding_schema.ResourceImporter{ - State: iamBindingImport(newUpdaterFunc, resourceIdParser), - }, - UseJSONNumber: true, - } -} - -func resourceIamBindingCreateUpdate(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) func(*resource_iam_binding_schema.ResourceData, interface{}) error { - return func(d *resource_iam_binding_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - binding := getResourceIamBinding(d) - modifyF := func(ep *resource_iam_binding_cloudresourcemanager.Policy) error { - cleaned := filterBindingsWithRoleAndCondition(ep.Bindings, binding.Role, binding.Condition) - ep.Bindings = append(cleaned, binding) - ep.Version = iamPolicyVersion - return nil - } - - if enableBatching { - err = BatchRequestModifyIamPolicy(updater, modifyF, config, resource_iam_binding_fmt.Sprintf( - "Set IAM Binding for role %q on %q", binding.Role, updater.DescribeResource())) - } else { - err = iamPolicyReadModifyWrite(updater, modifyF) - } - if err != nil { - return err - } - - d.SetId(updater.GetResourceId() + "/" + binding.Role) - if k := 
conditionKeyFromCondition(binding.Condition); !k.Empty() { - d.SetId(d.Id() + "/" + k.String()) - } - return resourceIamBindingRead(newUpdaterFunc)(d, meta) - } -} - -func resourceIamBindingRead(newUpdaterFunc newResourceIamUpdaterFunc) resource_iam_binding_schema.ReadFunc { - return func(d *resource_iam_binding_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - eBinding := getResourceIamBinding(d) - eCondition := conditionKeyFromCondition(eBinding.Condition) - p, err := iamPolicyReadWithRetry(updater) - if err != nil { - return handleNotFoundError(err, d, resource_iam_binding_fmt.Sprintf("Resource %q with IAM Binding (Role %q)", updater.DescribeResource(), eBinding.Role)) - } - resource_iam_binding_log.Print(resource_iam_binding_spew.Sprintf("[DEBUG] Retrieved policy for %s: %#v", updater.DescribeResource(), p)) - resource_iam_binding_log.Printf("[DEBUG] Looking for binding with role %q and condition %#v", eBinding.Role, eCondition) - - var binding *resource_iam_binding_cloudresourcemanager.Binding - for _, b := range p.Bindings { - if b.Role == eBinding.Role && conditionKeyFromCondition(b.Condition) == eCondition { - binding = b - break - } - } - - if binding == nil { - resource_iam_binding_log.Printf("[WARNING] Binding for role %q not found, assuming it has no members. 
If you expected existing members bound for this role, make sure your role is correctly formatted.", eBinding.Role) - resource_iam_binding_log.Printf("[DEBUG] Binding for role %q and condition %#v not found in policy for %s, assuming it has no members.", eBinding.Role, eCondition, updater.DescribeResource()) - if err := d.Set("role", eBinding.Role); err != nil { - return resource_iam_binding_fmt.Errorf("Error setting role: %s", err) - } - if err := d.Set("members", nil); err != nil { - return resource_iam_binding_fmt.Errorf("Error setting members: %s", err) - } - return nil - } else { - if err := d.Set("role", binding.Role); err != nil { - return resource_iam_binding_fmt.Errorf("Error setting role: %s", err) - } - if err := d.Set("members", binding.Members); err != nil { - return resource_iam_binding_fmt.Errorf("Error setting members: %s", err) - } - if err := d.Set("condition", flattenIamCondition(binding.Condition)); err != nil { - return resource_iam_binding_fmt.Errorf("Error setting condition: %s", err) - } - } - if err := d.Set("etag", p.Etag); err != nil { - return resource_iam_binding_fmt.Errorf("Error setting etag: %s", err) - } - return nil - } -} - -func iamBindingImport(newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc) resource_iam_binding_schema.StateFunc { - return func(d *resource_iam_binding_schema.ResourceData, m interface{}) ([]*resource_iam_binding_schema.ResourceData, error) { - if resourceIdParser == nil { - return nil, resource_iam_binding_errors.New("Import not supported for this IAM resource.") - } - config := m.(*Config) - s := resource_iam_binding_strings.Fields(d.Id()) - var id, role string - if len(s) < 2 { - d.SetId("") - return nil, resource_iam_binding_fmt.Errorf("Wrong number of parts to Binding id %s; expected 'resource_name role [condition_title]'.", s) - } - - var conditionTitle string - if len(s) == 2 { - id, role = s[0], s[1] - } else { - - id, role, conditionTitle = s[0], s[1], 
resource_iam_binding_strings.Join(s[2:], " ") - } - - d.SetId(id) - if err := d.Set("role", role); err != nil { - return nil, resource_iam_binding_fmt.Errorf("Error setting role: %s", err) - } - err := resourceIdParser(d, config) - if err != nil { - return nil, err - } - - d.SetId(d.Id() + "/" + role) - - updater, err := newUpdaterFunc(d, config) - if err != nil { - return nil, err - } - p, err := iamPolicyReadWithRetry(updater) - if err != nil { - return nil, err - } - var binding *resource_iam_binding_cloudresourcemanager.Binding - for _, b := range p.Bindings { - if b.Role == role && conditionKeyFromCondition(b.Condition).Title == conditionTitle { - if binding != nil { - return nil, resource_iam_binding_fmt.Errorf("Cannot import IAM member with condition title %q, it matches multiple conditions", conditionTitle) - } - binding = b - } - } - if binding != nil { - if err := d.Set("condition", flattenIamCondition(binding.Condition)); err != nil { - return nil, resource_iam_binding_fmt.Errorf("Error setting condition: %s", err) - } - if k := conditionKeyFromCondition(binding.Condition); !k.Empty() { - d.SetId(d.Id() + "/" + k.String()) - } - } - - return []*resource_iam_binding_schema.ResourceData{d}, nil - } -} - -func resourceIamBindingDelete(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) resource_iam_binding_schema.DeleteFunc { - return func(d *resource_iam_binding_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - binding := getResourceIamBinding(d) - modifyF := func(p *resource_iam_binding_cloudresourcemanager.Policy) error { - p.Bindings = filterBindingsWithRoleAndCondition(p.Bindings, binding.Role, binding.Condition) - return nil - } - - if enableBatching { - err = BatchRequestModifyIamPolicy(updater, modifyF, config, resource_iam_binding_fmt.Sprintf( - "Delete IAM Binding for role %q on %q", binding.Role, updater.DescribeResource())) - } 
else { - err = iamPolicyReadModifyWrite(updater, modifyF) - } - if err != nil { - return handleNotFoundError(err, d, resource_iam_binding_fmt.Sprintf("Resource %q for IAM binding with role %q", updater.DescribeResource(), binding.Role)) - } - - return resourceIamBindingRead(newUpdaterFunc)(d, meta) - } -} - -func getResourceIamBinding(d *resource_iam_binding_schema.ResourceData) *resource_iam_binding_cloudresourcemanager.Binding { - members := d.Get("members").(*resource_iam_binding_schema.Set).List() - b := &resource_iam_binding_cloudresourcemanager.Binding{ - Members: convertStringArr(members), - Role: d.Get("role").(string), - } - if c := expandIamCondition(d.Get("condition")); c != nil { - b.Condition = c - } - return b -} - -func expandIamCondition(v interface{}) *resource_iam_binding_cloudresourcemanager.Expr { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil - } - original := l[0].(map[string]interface{}) - return &resource_iam_binding_cloudresourcemanager.Expr{ - Description: original["description"].(string), - Expression: original["expression"].(string), - Title: original["title"].(string), - ForceSendFields: []string{"Description", "Expression", "Title"}, - } -} - -func flattenIamCondition(condition *resource_iam_binding_cloudresourcemanager.Expr) []map[string]interface{} { - if conditionKeyFromCondition(condition).Empty() { - return nil - } - return []map[string]interface{}{ - { - "expression": condition.Expression, - "title": condition.Title, - "description": condition.Description, - }, - } -} - -func iamMemberCaseDiffSuppress(k, old, new string, d *resource_iam_member_schema.ResourceData) bool { - isCaseSensitive := iamMemberIsCaseSensitive(old) || iamMemberIsCaseSensitive(new) - if isCaseSensitive { - return old == new - } - return caseDiffSuppress(k, old, new, d) -} - -var IamMemberBaseSchema = map[string]*resource_iam_member_schema.Schema{ - "role": { - Type: resource_iam_member_schema.TypeString, - Required: true, - ForceNew: 
true, - }, - "member": { - Type: resource_iam_member_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: iamMemberCaseDiffSuppress, - ValidateFunc: resource_iam_member_validation.StringDoesNotMatch(resource_iam_member_regexp.MustCompile("^deleted:"), "Terraform does not support IAM members for deleted principals"), - }, - "condition": { - Type: resource_iam_member_schema.TypeList, - Optional: true, - MaxItems: 1, - ForceNew: true, - Elem: &resource_iam_member_schema.Resource{ - Schema: map[string]*resource_iam_member_schema.Schema{ - "expression": { - Type: resource_iam_member_schema.TypeString, - Required: true, - ForceNew: true, - }, - "title": { - Type: resource_iam_member_schema.TypeString, - Required: true, - ForceNew: true, - }, - "description": { - Type: resource_iam_member_schema.TypeString, - Optional: true, - ForceNew: true, - }, - }, - }, - }, - "etag": { - Type: resource_iam_member_schema.TypeString, - Computed: true, - }, -} - -func iamMemberImport(newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc) resource_iam_member_schema.StateFunc { - return func(d *resource_iam_member_schema.ResourceData, m interface{}) ([]*resource_iam_member_schema.ResourceData, error) { - if resourceIdParser == nil { - return nil, resource_iam_member_errors.New("Import not supported for this IAM resource.") - } - config := m.(*Config) - s := resource_iam_member_strings.Fields(d.Id()) - var id, role, member string - if len(s) < 3 { - d.SetId("") - return nil, resource_iam_member_fmt.Errorf("Wrong number of parts to Member id %s; expected 'resource_name role member [condition_title]'.", s) - } - - var conditionTitle string - if len(s) == 3 { - id, role, member = s[0], s[1], s[2] - } else { - - id, role, member, conditionTitle = s[0], s[1], s[2], resource_iam_member_strings.Join(s[3:], " ") - } - - d.SetId(id) - if err := d.Set("role", role); err != nil { - return nil, resource_iam_member_fmt.Errorf("Error setting role: %s", 
err) - } - if err := d.Set("member", normalizeIamMemberCasing(member)); err != nil { - return nil, resource_iam_member_fmt.Errorf("Error setting member: %s", err) - } - - err := resourceIdParser(d, config) - if err != nil { - return nil, err - } - - d.SetId(d.Id() + "/" + role + "/" + normalizeIamMemberCasing(member)) - - updater, err := newUpdaterFunc(d, config) - if err != nil { - return nil, err - } - p, err := iamPolicyReadWithRetry(updater) - if err != nil { - return nil, err - } - var binding *resource_iam_member_cloudresourcemanager.Binding - for _, b := range p.Bindings { - if b.Role == role && conditionKeyFromCondition(b.Condition).Title == conditionTitle { - containsMember := false - for _, m := range b.Members { - if resource_iam_member_strings.ToLower(m) == resource_iam_member_strings.ToLower(member) { - containsMember = true - } - } - if !containsMember { - continue - } - - if binding != nil { - return nil, resource_iam_member_fmt.Errorf("Cannot import IAM member with condition title %q, it matches multiple conditions", conditionTitle) - } - binding = b - } - } - if binding == nil { - return nil, resource_iam_member_fmt.Errorf("Cannot find binding for %q with role %q, member %q, and condition title %q", updater.DescribeResource(), role, member, conditionTitle) - } - - if err := d.Set("condition", flattenIamCondition(binding.Condition)); err != nil { - return nil, resource_iam_member_fmt.Errorf("Error setting condition: %s", err) - } - if k := conditionKeyFromCondition(binding.Condition); !k.Empty() { - d.SetId(d.Id() + "/" + k.String()) - } - - return []*resource_iam_member_schema.ResourceData{d}, nil - } -} - -func ResourceIamMember(parentSpecificSchema map[string]*resource_iam_member_schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc, options ...func(*IamSettings)) *resource_iam_member_schema.Resource { - return ResourceIamMemberWithBatching(parentSpecificSchema, newUpdaterFunc, resourceIdParser, 
IamBatchingDisabled, options...) -} - -func ResourceIamMemberWithBatching(parentSpecificSchema map[string]*resource_iam_member_schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc, enableBatching bool, options ...func(*IamSettings)) *resource_iam_member_schema.Resource { - settings := &IamSettings{} - for _, o := range options { - o(settings) - } - - return &resource_iam_member_schema.Resource{ - Create: resourceIamMemberCreate(newUpdaterFunc, enableBatching), - Read: resourceIamMemberRead(newUpdaterFunc), - Delete: resourceIamMemberDelete(newUpdaterFunc, enableBatching), - - DeprecationMessage: settings.DeprecationMessage, - - Schema: mergeSchemas(IamMemberBaseSchema, parentSpecificSchema), - Importer: &resource_iam_member_schema.ResourceImporter{ - State: iamMemberImport(newUpdaterFunc, resourceIdParser), - }, - UseJSONNumber: true, - } -} - -func getResourceIamMember(d *resource_iam_member_schema.ResourceData) *resource_iam_member_cloudresourcemanager.Binding { - b := &resource_iam_member_cloudresourcemanager.Binding{ - Members: []string{d.Get("member").(string)}, - Role: d.Get("role").(string), - } - if c := expandIamCondition(d.Get("condition")); c != nil { - b.Condition = c - } - return b -} - -func resourceIamMemberCreate(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) resource_iam_member_schema.CreateFunc { - return func(d *resource_iam_member_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - memberBind := getResourceIamMember(d) - modifyF := func(ep *resource_iam_member_cloudresourcemanager.Policy) error { - - ep.Bindings = mergeBindings(append(ep.Bindings, memberBind)) - ep.Version = iamPolicyVersion - return nil - } - if enableBatching { - err = BatchRequestModifyIamPolicy(updater, modifyF, config, - resource_iam_member_fmt.Sprintf("Create IAM Members %s %+v for %s", memberBind.Role, 
memberBind.Members[0], updater.DescribeResource())) - } else { - err = iamPolicyReadModifyWrite(updater, modifyF) - } - if err != nil { - return err - } - d.SetId(updater.GetResourceId() + "/" + memberBind.Role + "/" + normalizeIamMemberCasing(memberBind.Members[0])) - if k := conditionKeyFromCondition(memberBind.Condition); !k.Empty() { - d.SetId(d.Id() + "/" + k.String()) - } - return resourceIamMemberRead(newUpdaterFunc)(d, meta) - } -} - -func resourceIamMemberRead(newUpdaterFunc newResourceIamUpdaterFunc) resource_iam_member_schema.ReadFunc { - return func(d *resource_iam_member_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - eMember := getResourceIamMember(d) - eCondition := conditionKeyFromCondition(eMember.Condition) - p, err := iamPolicyReadWithRetry(updater) - if err != nil { - return handleNotFoundError(err, d, resource_iam_member_fmt.Sprintf("Resource %q with IAM Member: Role %q Member %q", updater.DescribeResource(), eMember.Role, eMember.Members[0])) - } - resource_iam_member_log.Print(resource_iam_member_spew.Sprintf("[DEBUG]: Retrieved policy for %s: %#v\n", updater.DescribeResource(), p)) - resource_iam_member_log.Printf("[DEBUG]: Looking for binding with role %q and condition %#v", eMember.Role, eCondition) - - var binding *resource_iam_member_cloudresourcemanager.Binding - for _, b := range p.Bindings { - if b.Role == eMember.Role && conditionKeyFromCondition(b.Condition) == eCondition { - binding = b - break - } - } - - if binding == nil { - resource_iam_member_log.Printf("[DEBUG]: Binding for role %q with condition %#v does not exist in policy of %s, removing member %q from state.", eMember.Role, eCondition, updater.DescribeResource(), eMember.Members[0]) - d.SetId("") - return nil - } - - resource_iam_member_log.Printf("[DEBUG]: Looking for member %q in found binding", eMember.Members[0]) - var member string - for _, m := range 
binding.Members { - if resource_iam_member_strings.ToLower(m) == resource_iam_member_strings.ToLower(eMember.Members[0]) { - member = m - } - } - - if member == "" { - resource_iam_member_log.Printf("[DEBUG]: Member %q for binding for role %q with condition %#v does not exist in policy of %s, removing from state.", eMember.Members[0], eMember.Role, eCondition, updater.DescribeResource()) - d.SetId("") - return nil - } - - if err := d.Set("etag", p.Etag); err != nil { - return resource_iam_member_fmt.Errorf("Error setting etag: %s", err) - } - if err := d.Set("member", member); err != nil { - return resource_iam_member_fmt.Errorf("Error setting member: %s", err) - } - if err := d.Set("role", binding.Role); err != nil { - return resource_iam_member_fmt.Errorf("Error setting role: %s", err) - } - if err := d.Set("condition", flattenIamCondition(binding.Condition)); err != nil { - return resource_iam_member_fmt.Errorf("Error setting condition: %s", err) - } - return nil - } -} - -func resourceIamMemberDelete(newUpdaterFunc newResourceIamUpdaterFunc, enableBatching bool) resource_iam_member_schema.DeleteFunc { - return func(d *resource_iam_member_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - memberBind := getResourceIamMember(d) - modifyF := func(ep *resource_iam_member_cloudresourcemanager.Policy) error { - - ep.Bindings = subtractFromBindings(ep.Bindings, memberBind) - return nil - } - if enableBatching { - err = BatchRequestModifyIamPolicy(updater, modifyF, config, - resource_iam_member_fmt.Sprintf("Delete IAM Members %s %s for %q", memberBind.Role, memberBind.Members[0], updater.DescribeResource())) - } else { - err = iamPolicyReadModifyWrite(updater, modifyF) - } - if err != nil { - return handleNotFoundError(err, d, resource_iam_member_fmt.Sprintf("Resource %s for IAM Member (role %q, %q)", updater.GetResourceId(), memberBind.Members[0], 
memberBind.Role)) - } - return resourceIamMemberRead(newUpdaterFunc)(d, meta) - } -} - -var IamPolicyBaseSchema = map[string]*resource_iam_policy_schema.Schema{ - "policy_data": { - Type: resource_iam_policy_schema.TypeString, - Required: true, - DiffSuppressFunc: jsonPolicyDiffSuppress, - ValidateFunc: validateIamPolicy, - }, - "etag": { - Type: resource_iam_policy_schema.TypeString, - Computed: true, - }, -} - -func iamPolicyImport(resourceIdParser resourceIdParserFunc) resource_iam_policy_schema.StateFunc { - return func(d *resource_iam_policy_schema.ResourceData, m interface{}) ([]*resource_iam_policy_schema.ResourceData, error) { - if resourceIdParser == nil { - return nil, resource_iam_policy_errors.New("Import not supported for this IAM resource.") - } - config := m.(*Config) - err := resourceIdParser(d, config) - if err != nil { - return nil, err - } - return []*resource_iam_policy_schema.ResourceData{d}, nil - } -} - -func ResourceIamPolicy(parentSpecificSchema map[string]*resource_iam_policy_schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc, resourceIdParser resourceIdParserFunc, options ...func(*IamSettings)) *resource_iam_policy_schema.Resource { - settings := &IamSettings{} - for _, o := range options { - o(settings) - } - - return &resource_iam_policy_schema.Resource{ - Create: ResourceIamPolicyCreate(newUpdaterFunc), - Read: ResourceIamPolicyRead(newUpdaterFunc), - Update: ResourceIamPolicyUpdate(newUpdaterFunc), - Delete: ResourceIamPolicyDelete(newUpdaterFunc), - - DeprecationMessage: settings.DeprecationMessage, - - Schema: mergeSchemas(IamPolicyBaseSchema, parentSpecificSchema), - Importer: &resource_iam_policy_schema.ResourceImporter{ - State: iamPolicyImport(resourceIdParser), - }, - UseJSONNumber: true, - } -} - -func ResourceIamPolicyCreate(newUpdaterFunc newResourceIamUpdaterFunc) resource_iam_policy_schema.CreateFunc { - return func(d *resource_iam_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - 
updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - if err = setIamPolicyData(d, updater); err != nil { - return err - } - - d.SetId(updater.GetResourceId()) - return ResourceIamPolicyRead(newUpdaterFunc)(d, meta) - } -} - -func ResourceIamPolicyRead(newUpdaterFunc newResourceIamUpdaterFunc) resource_iam_policy_schema.ReadFunc { - return func(d *resource_iam_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - policy, err := iamPolicyReadWithRetry(updater) - if err != nil { - return handleNotFoundError(err, d, resource_iam_policy_fmt.Sprintf("Resource %q with IAM Policy", updater.DescribeResource())) - } - - if err := d.Set("etag", policy.Etag); err != nil { - return resource_iam_policy_fmt.Errorf("Error setting etag: %s", err) - } - if err := d.Set("policy_data", marshalIamPolicy(policy)); err != nil { - return resource_iam_policy_fmt.Errorf("Error setting policy_data: %s", err) - } - - return nil - } -} - -func ResourceIamPolicyUpdate(newUpdaterFunc newResourceIamUpdaterFunc) resource_iam_policy_schema.UpdateFunc { - return func(d *resource_iam_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - if d.HasChange("policy_data") { - if err := setIamPolicyData(d, updater); err != nil { - return err - } - } - - return ResourceIamPolicyRead(newUpdaterFunc)(d, meta) - } -} - -func ResourceIamPolicyDelete(newUpdaterFunc newResourceIamUpdaterFunc) resource_iam_policy_schema.DeleteFunc { - return func(d *resource_iam_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - pol := &resource_iam_policy_cloudresourcemanager.Policy{} - if v, ok := d.GetOk("etag"); ok { - pol.Etag = v.(string) - } - pol.Version = 
iamPolicyVersion - err = updater.SetResourceIamPolicy(pol) - if err != nil { - return err - } - - return nil - } -} - -func setIamPolicyData(d *resource_iam_policy_schema.ResourceData, updater ResourceIamUpdater) error { - policy, err := unmarshalIamPolicy(d.Get("policy_data").(string)) - if err != nil { - return resource_iam_policy_fmt.Errorf("'policy_data' is not valid for %s: %s", updater.DescribeResource(), err) - } - policy.Version = iamPolicyVersion - - err = updater.SetResourceIamPolicy(policy) - if err != nil { - return err - } - - return nil -} - -func marshalIamPolicy(policy *resource_iam_policy_cloudresourcemanager.Policy) string { - pdBytes, _ := resource_iam_policy_json.Marshal(&resource_iam_policy_cloudresourcemanager.Policy{ - AuditConfigs: policy.AuditConfigs, - Bindings: policy.Bindings, - }) - return string(pdBytes) -} - -func unmarshalIamPolicy(policyData string) (*resource_iam_policy_cloudresourcemanager.Policy, error) { - policy := &resource_iam_policy_cloudresourcemanager.Policy{} - if err := resource_iam_policy_json.Unmarshal([]byte(policyData), policy); err != nil { - return nil, resource_iam_policy_fmt.Errorf("Could not unmarshal policy data %s:\n%s", policyData, err) - } - return policy, nil -} - -func validateIamPolicy(i interface{}, k string) (s []string, es []error) { - _, err := unmarshalIamPolicy(i.(string)) - if err != nil { - es = append(es, err) - } - return -} - -func resourceIapBrand() *resource_iap_brand_schema.Resource { - return &resource_iap_brand_schema.Resource{ - Create: resourceIapBrandCreate, - Read: resourceIapBrandRead, - Delete: resourceIapBrandDelete, - - Importer: &resource_iap_brand_schema.ResourceImporter{ - State: resourceIapBrandImport, - }, - - Timeouts: &resource_iap_brand_schema.ResourceTimeout{ - Create: resource_iap_brand_schema.DefaultTimeout(4 * resource_iap_brand_time.Minute), - Delete: resource_iap_brand_schema.DefaultTimeout(4 * resource_iap_brand_time.Minute), - }, - - Schema: 
map[string]*resource_iap_brand_schema.Schema{ - "application_title": { - Type: resource_iap_brand_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Application name displayed on OAuth consent screen.`, - }, - "support_email": { - Type: resource_iap_brand_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Support email displayed on the OAuth consent screen. Can be either a -user or group email. When a user email is specified, the caller must -be the user with the associated email address. When a group email is -specified, the caller can be either a user or a service account which -is an owner of the specified group in Cloud Identity.`, - }, - "name": { - Type: resource_iap_brand_schema.TypeString, - Computed: true, - Description: `Output only. Identifier of the brand, in the format -'projects/{project_number}/brands/{brand_id}'. NOTE: The brand -identification corresponds to the project number as only one -brand per project can be created.`, - }, - "org_internal_only": { - Type: resource_iap_brand_schema.TypeBool, - Computed: true, - Description: `Whether the brand is only intended for usage inside the GSuite organization only.`, - }, - "project": { - Type: resource_iap_brand_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIapBrandCreate(d *resource_iap_brand_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - supportEmailProp, err := expandIapBrandSupportEmail(d.Get("support_email"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("support_email"); !isEmptyValue(resource_iap_brand_reflect.ValueOf(supportEmailProp)) && (ok || !resource_iap_brand_reflect.DeepEqual(v, supportEmailProp)) { - obj["supportEmail"] = supportEmailProp - } - applicationTitleProp, 
err := expandIapBrandApplicationTitle(d.Get("application_title"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("application_title"); !isEmptyValue(resource_iap_brand_reflect.ValueOf(applicationTitleProp)) && (ok || !resource_iap_brand_reflect.DeepEqual(v, applicationTitleProp)) { - obj["applicationTitle"] = applicationTitleProp - } - - url, err := replaceVars(d, config, "{{IapBasePath}}projects/{{project}}/brands") - if err != nil { - return err - } - - resource_iap_brand_log.Printf("[DEBUG] Creating new Brand: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_iap_brand_fmt.Errorf("Error fetching project for Brand: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_iap_brand_schema.TimeoutCreate)) - if err != nil { - return resource_iap_brand_fmt.Errorf("Error creating Brand: %s", err) - } - if err := d.Set("name", flattenIapBrandName(res["name"], d, config)); err != nil { - return resource_iap_brand_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_iap_brand_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return resource_iap_brand_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return resource_iap_brand_fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return resource_iap_brand_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - err = PollingWaitTime(resourceIapBrandPollRead(d, meta), PollCheckForExistence, "Creating Brand", d.Timeout(resource_iap_brand_schema.TimeoutCreate), 5) - if err != nil { - return resource_iap_brand_fmt.Errorf("Error waiting to create Brand: %s", err) - } - - resource_iap_brand_log.Printf("[DEBUG] Finished creating Brand %q: %#v", d.Id(), res) - - return resourceIapBrandRead(d, meta) -} - -func resourceIapBrandPollRead(d *resource_iap_brand_schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{IapBasePath}}{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, resource_iap_brand_fmt.Errorf("Error fetching project for Brand: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return res, err - } - return res, nil - } -} - -func resourceIapBrandRead(d *resource_iap_brand_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IapBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_iap_brand_fmt.Errorf("Error fetching project for Brand: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); 
err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_iap_brand_fmt.Sprintf("IapBrand %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_iap_brand_fmt.Errorf("Error reading Brand: %s", err) - } - - if err := d.Set("support_email", flattenIapBrandSupportEmail(res["supportEmail"], d, config)); err != nil { - return resource_iap_brand_fmt.Errorf("Error reading Brand: %s", err) - } - if err := d.Set("application_title", flattenIapBrandApplicationTitle(res["applicationTitle"], d, config)); err != nil { - return resource_iap_brand_fmt.Errorf("Error reading Brand: %s", err) - } - if err := d.Set("org_internal_only", flattenIapBrandOrgInternalOnly(res["orgInternalOnly"], d, config)); err != nil { - return resource_iap_brand_fmt.Errorf("Error reading Brand: %s", err) - } - if err := d.Set("name", flattenIapBrandName(res["name"], d, config)); err != nil { - return resource_iap_brand_fmt.Errorf("Error reading Brand: %s", err) - } - - return nil -} - -func resourceIapBrandDelete(d *resource_iap_brand_schema.ResourceData, meta interface{}) error { - resource_iap_brand_log.Printf("[WARNING] Iap Brand resources"+ - " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceIapBrandImport(d *resource_iap_brand_schema.ResourceData, meta interface{}) ([]*resource_iap_brand_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - nameParts := resource_iap_brand_strings.Split(d.Get("name").(string), "/") - if len(nameParts) != 4 { - return nil, resource_iap_brand_fmt.Errorf( - "Saw %s when the name is expected to have shape %s", - d.Get("name"), - "projects/{{project}}/brands/{{name}}", - ) - } - - if err := d.Set("project", nameParts[1]); err != nil { - return nil, resource_iap_brand_fmt.Errorf("Error setting project: %s", err) - } - return []*resource_iap_brand_schema.ResourceData{d}, nil -} - -func flattenIapBrandSupportEmail(v interface{}, d *resource_iap_brand_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIapBrandApplicationTitle(v interface{}, d *resource_iap_brand_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIapBrandOrgInternalOnly(v interface{}, d *resource_iap_brand_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIapBrandName(v interface{}, d *resource_iap_brand_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIapBrandSupportEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIapBrandApplicationTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceIapClient() *resource_iap_client_schema.Resource { - return &resource_iap_client_schema.Resource{ - Create: resourceIapClientCreate, - Read: resourceIapClientRead, - Delete: resourceIapClientDelete, - - Importer: &resource_iap_client_schema.ResourceImporter{ - State: 
resourceIapClientImport, - }, - - Timeouts: &resource_iap_client_schema.ResourceTimeout{ - Create: resource_iap_client_schema.DefaultTimeout(4 * resource_iap_client_time.Minute), - Delete: resource_iap_client_schema.DefaultTimeout(4 * resource_iap_client_time.Minute), - }, - - Schema: map[string]*resource_iap_client_schema.Schema{ - "brand": { - Type: resource_iap_client_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Identifier of the brand to which this client -is attached to. The format is -'projects/{project_number}/brands/{brand_id}/identityAwareProxyClients/{client_id}'.`, - }, - "display_name": { - Type: resource_iap_client_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Human-friendly name given to the OAuth client.`, - }, - "client_id": { - Type: resource_iap_client_schema.TypeString, - Computed: true, - Description: `Output only. Unique identifier of the OAuth client.`, - }, - "secret": { - Type: resource_iap_client_schema.TypeString, - Computed: true, - Description: `Output only. 
Client secret of the OAuth client.`, - Sensitive: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIapClientCreate(d *resource_iap_client_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandIapClientDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_iap_client_reflect.ValueOf(displayNameProp)) && (ok || !resource_iap_client_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - - url, err := replaceVars(d, config, "{{IapBasePath}}{{brand}}/identityAwareProxyClients") - if err != nil { - return err - } - - resource_iap_client_log.Printf("[DEBUG] Creating new Client: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_iap_client_schema.TimeoutCreate), iapClient409Operation) - if err != nil { - return resource_iap_client_fmt.Errorf("Error creating Client: %s", err) - } - - id, err := replaceVars(d, config, "{{brand}}/identityAwareProxyClients/{{client_id}}") - if err != nil { - return resource_iap_client_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - brand := d.Get("brand") - clientId := flattenIapClientClientId(res["name"], d, config) - - if err := d.Set("client_id", clientId); err != nil { - return resource_iap_client_fmt.Errorf("Error setting client_id: %s", err) - } - d.SetId(resource_iap_client_fmt.Sprintf("%s/identityAwareProxyClients/%s", brand, clientId)) - - resource_iap_client_log.Printf("[DEBUG] Finished creating Client %q: %#v", d.Id(), res) - - return resourceIapClientRead(d, meta) -} - -func resourceIapClientRead(d 
*resource_iap_client_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IapBasePath}}{{brand}}/identityAwareProxyClients/{{client_id}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, iapClient409Operation) - if err != nil { - return handleNotFoundError(err, d, resource_iap_client_fmt.Sprintf("IapClient %q", d.Id())) - } - - if err := d.Set("secret", flattenIapClientSecret(res["secret"], d, config)); err != nil { - return resource_iap_client_fmt.Errorf("Error reading Client: %s", err) - } - if err := d.Set("display_name", flattenIapClientDisplayName(res["displayName"], d, config)); err != nil { - return resource_iap_client_fmt.Errorf("Error reading Client: %s", err) - } - if err := d.Set("client_id", flattenIapClientClientId(res["name"], d, config)); err != nil { - return resource_iap_client_fmt.Errorf("Error reading Client: %s", err) - } - - return nil -} - -func resourceIapClientDelete(d *resource_iap_client_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{IapBasePath}}{{brand}}/identityAwareProxyClients/{{client_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_iap_client_log.Printf("[DEBUG] Deleting Client %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_iap_client_schema.TimeoutDelete), iapClient409Operation) - if err != nil { - return 
handleNotFoundError(err, d, "Client") - } - - resource_iap_client_log.Printf("[DEBUG] Finished deleting Client %q: %#v", d.Id(), res) - return nil -} - -func resourceIapClientImport(d *resource_iap_client_schema.ResourceData, meta interface{}) ([]*resource_iap_client_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - nameParts := resource_iap_client_strings.Split(d.Get("brand").(string), "/") - if len(nameParts) != 6 { - return nil, resource_iap_client_fmt.Errorf( - "Saw %s when the name is expected to have shape %s", - d.Get("brand").(string), - "projects/{{project_number}}/brands/{{brand_id}}/identityAwareProxyClients/{{client_id}}", - ) - } - - if err := d.Set("brand", resource_iap_client_fmt.Sprintf("projects/%s/brands/%s", nameParts[1], nameParts[3])); err != nil { - return nil, resource_iap_client_fmt.Errorf("Error setting brand: %s", err) - } - if err := d.Set("client_id", nameParts[5]); err != nil { - return nil, resource_iap_client_fmt.Errorf("Error setting client_id: %s", err) - } - return []*resource_iap_client_schema.ResourceData{d}, nil -} - -func flattenIapClientSecret(v interface{}, d *resource_iap_client_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIapClientDisplayName(v interface{}, d *resource_iap_client_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIapClientClientId(v interface{}, d *resource_iap_client_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandIapClientDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceIdentityPlatformDefaultSupportedIdpConfig() *resource_identity_platform_default_supported_idp_config_schema.Resource { - return &resource_identity_platform_default_supported_idp_config_schema.Resource{ - 
Create: resourceIdentityPlatformDefaultSupportedIdpConfigCreate, - Read: resourceIdentityPlatformDefaultSupportedIdpConfigRead, - Update: resourceIdentityPlatformDefaultSupportedIdpConfigUpdate, - Delete: resourceIdentityPlatformDefaultSupportedIdpConfigDelete, - - Importer: &resource_identity_platform_default_supported_idp_config_schema.ResourceImporter{ - State: resourceIdentityPlatformDefaultSupportedIdpConfigImport, - }, - - Timeouts: &resource_identity_platform_default_supported_idp_config_schema.ResourceTimeout{ - Create: resource_identity_platform_default_supported_idp_config_schema.DefaultTimeout(4 * resource_identity_platform_default_supported_idp_config_time.Minute), - Update: resource_identity_platform_default_supported_idp_config_schema.DefaultTimeout(4 * resource_identity_platform_default_supported_idp_config_time.Minute), - Delete: resource_identity_platform_default_supported_idp_config_schema.DefaultTimeout(4 * resource_identity_platform_default_supported_idp_config_time.Minute), - }, - - Schema: map[string]*resource_identity_platform_default_supported_idp_config_schema.Schema{ - "client_id": { - Type: resource_identity_platform_default_supported_idp_config_schema.TypeString, - Required: true, - Description: `OAuth client ID`, - }, - "client_secret": { - Type: resource_identity_platform_default_supported_idp_config_schema.TypeString, - Required: true, - Description: `OAuth client secret`, - }, - "idp_id": { - Type: resource_identity_platform_default_supported_idp_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the IDP. 
Possible values include: - -* 'apple.com' - -* 'facebook.com' - -* 'gc.apple.com' - -* 'github.com' - -* 'google.com' - -* 'linkedin.com' - -* 'microsoft.com' - -* 'playgames.google.com' - -* 'twitter.com' - -* 'yahoo.com'`, - }, - "enabled": { - Type: resource_identity_platform_default_supported_idp_config_schema.TypeBool, - Optional: true, - Description: `If this IDP allows the user to sign in`, - }, - "name": { - Type: resource_identity_platform_default_supported_idp_config_schema.TypeString, - Computed: true, - Description: `The name of the DefaultSupportedIdpConfig resource`, - }, - "project": { - Type: resource_identity_platform_default_supported_idp_config_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformDefaultSupportedIdpConfigCreate(d *resource_identity_platform_default_supported_idp_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - clientIdProp, err := expandIdentityPlatformDefaultSupportedIdpConfigClientId(d.Get("client_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_id"); !isEmptyValue(resource_identity_platform_default_supported_idp_config_reflect.ValueOf(clientIdProp)) && (ok || !resource_identity_platform_default_supported_idp_config_reflect.DeepEqual(v, clientIdProp)) { - obj["clientId"] = clientIdProp - } - clientSecretProp, err := expandIdentityPlatformDefaultSupportedIdpConfigClientSecret(d.Get("client_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_secret"); !isEmptyValue(resource_identity_platform_default_supported_idp_config_reflect.ValueOf(clientSecretProp)) && (ok || !resource_identity_platform_default_supported_idp_config_reflect.DeepEqual(v, clientSecretProp)) { - 
obj["clientSecret"] = clientSecretProp - } - enabledProp, err := expandIdentityPlatformDefaultSupportedIdpConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(resource_identity_platform_default_supported_idp_config_reflect.ValueOf(enabledProp)) && (ok || !resource_identity_platform_default_supported_idp_config_reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/defaultSupportedIdpConfigs?idpId={{idp_id}}") - if err != nil { - return err - } - - resource_identity_platform_default_supported_idp_config_log.Printf("[DEBUG] Creating new DefaultSupportedIdpConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_default_supported_idp_config_fmt.Errorf("Error fetching project for DefaultSupportedIdpConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_default_supported_idp_config_schema.TimeoutCreate)) - if err != nil { - return resource_identity_platform_default_supported_idp_config_fmt.Errorf("Error creating DefaultSupportedIdpConfig: %s", err) - } - if err := d.Set("name", flattenIdentityPlatformDefaultSupportedIdpConfigName(res["name"], d, config)); err != nil { - return resource_identity_platform_default_supported_idp_config_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return resource_identity_platform_default_supported_idp_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - 
resource_identity_platform_default_supported_idp_config_log.Printf("[DEBUG] Finished creating DefaultSupportedIdpConfig %q: %#v", d.Id(), res) - - return resourceIdentityPlatformDefaultSupportedIdpConfigRead(d, meta) -} - -func resourceIdentityPlatformDefaultSupportedIdpConfigRead(d *resource_identity_platform_default_supported_idp_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_default_supported_idp_config_fmt.Errorf("Error fetching project for DefaultSupportedIdpConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_identity_platform_default_supported_idp_config_fmt.Sprintf("IdentityPlatformDefaultSupportedIdpConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_identity_platform_default_supported_idp_config_fmt.Errorf("Error reading DefaultSupportedIdpConfig: %s", err) - } - - if err := d.Set("name", flattenIdentityPlatformDefaultSupportedIdpConfigName(res["name"], d, config)); err != nil { - return resource_identity_platform_default_supported_idp_config_fmt.Errorf("Error reading DefaultSupportedIdpConfig: %s", err) - } - if err := d.Set("client_id", flattenIdentityPlatformDefaultSupportedIdpConfigClientId(res["clientId"], d, config)); err != nil { - return resource_identity_platform_default_supported_idp_config_fmt.Errorf("Error reading DefaultSupportedIdpConfig: %s", err) - } - if err := 
d.Set("client_secret", flattenIdentityPlatformDefaultSupportedIdpConfigClientSecret(res["clientSecret"], d, config)); err != nil { - return resource_identity_platform_default_supported_idp_config_fmt.Errorf("Error reading DefaultSupportedIdpConfig: %s", err) - } - if err := d.Set("enabled", flattenIdentityPlatformDefaultSupportedIdpConfigEnabled(res["enabled"], d, config)); err != nil { - return resource_identity_platform_default_supported_idp_config_fmt.Errorf("Error reading DefaultSupportedIdpConfig: %s", err) - } - - return nil -} - -func resourceIdentityPlatformDefaultSupportedIdpConfigUpdate(d *resource_identity_platform_default_supported_idp_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_default_supported_idp_config_fmt.Errorf("Error fetching project for DefaultSupportedIdpConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - clientIdProp, err := expandIdentityPlatformDefaultSupportedIdpConfigClientId(d.Get("client_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_id"); !isEmptyValue(resource_identity_platform_default_supported_idp_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_default_supported_idp_config_reflect.DeepEqual(v, clientIdProp)) { - obj["clientId"] = clientIdProp - } - clientSecretProp, err := expandIdentityPlatformDefaultSupportedIdpConfigClientSecret(d.Get("client_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_secret"); !isEmptyValue(resource_identity_platform_default_supported_idp_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_default_supported_idp_config_reflect.DeepEqual(v, clientSecretProp)) { - obj["clientSecret"] = 
clientSecretProp - } - enabledProp, err := expandIdentityPlatformDefaultSupportedIdpConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(resource_identity_platform_default_supported_idp_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_default_supported_idp_config_reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return err - } - - resource_identity_platform_default_supported_idp_config_log.Printf("[DEBUG] Updating DefaultSupportedIdpConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("client_id") { - updateMask = append(updateMask, "clientId") - } - - if d.HasChange("client_secret") { - updateMask = append(updateMask, "clientSecret") - } - - if d.HasChange("enabled") { - updateMask = append(updateMask, "enabled") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_identity_platform_default_supported_idp_config_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_default_supported_idp_config_schema.TimeoutUpdate)) - - if err != nil { - return resource_identity_platform_default_supported_idp_config_fmt.Errorf("Error updating DefaultSupportedIdpConfig %q: %s", d.Id(), err) - } else { - resource_identity_platform_default_supported_idp_config_log.Printf("[DEBUG] Finished updating DefaultSupportedIdpConfig %q: %#v", d.Id(), res) - } - - return resourceIdentityPlatformDefaultSupportedIdpConfigRead(d, meta) -} - -func resourceIdentityPlatformDefaultSupportedIdpConfigDelete(d 
*resource_identity_platform_default_supported_idp_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_default_supported_idp_config_fmt.Errorf("Error fetching project for DefaultSupportedIdpConfig: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_identity_platform_default_supported_idp_config_log.Printf("[DEBUG] Deleting DefaultSupportedIdpConfig %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_default_supported_idp_config_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "DefaultSupportedIdpConfig") - } - - resource_identity_platform_default_supported_idp_config_log.Printf("[DEBUG] Finished deleting DefaultSupportedIdpConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceIdentityPlatformDefaultSupportedIdpConfigImport(d *resource_identity_platform_default_supported_idp_config_schema.ResourceData, meta interface{}) ([]*resource_identity_platform_default_supported_idp_config_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/defaultSupportedIdpConfigs/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return nil, 
resource_identity_platform_default_supported_idp_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_identity_platform_default_supported_idp_config_schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformDefaultSupportedIdpConfigName(v interface{}, d *resource_identity_platform_default_supported_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformDefaultSupportedIdpConfigClientId(v interface{}, d *resource_identity_platform_default_supported_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformDefaultSupportedIdpConfigClientSecret(v interface{}, d *resource_identity_platform_default_supported_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformDefaultSupportedIdpConfigEnabled(v interface{}, d *resource_identity_platform_default_supported_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIdentityPlatformDefaultSupportedIdpConfigClientId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformDefaultSupportedIdpConfigClientSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformDefaultSupportedIdpConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceIdentityPlatformInboundSamlConfig() *resource_identity_platform_inbound_saml_config_schema.Resource { - return &resource_identity_platform_inbound_saml_config_schema.Resource{ - Create: resourceIdentityPlatformInboundSamlConfigCreate, - Read: resourceIdentityPlatformInboundSamlConfigRead, - Update: resourceIdentityPlatformInboundSamlConfigUpdate, - Delete: resourceIdentityPlatformInboundSamlConfigDelete, - - Importer: 
&resource_identity_platform_inbound_saml_config_schema.ResourceImporter{ - State: resourceIdentityPlatformInboundSamlConfigImport, - }, - - Timeouts: &resource_identity_platform_inbound_saml_config_schema.ResourceTimeout{ - Create: resource_identity_platform_inbound_saml_config_schema.DefaultTimeout(4 * resource_identity_platform_inbound_saml_config_time.Minute), - Update: resource_identity_platform_inbound_saml_config_schema.DefaultTimeout(4 * resource_identity_platform_inbound_saml_config_time.Minute), - Delete: resource_identity_platform_inbound_saml_config_schema.DefaultTimeout(4 * resource_identity_platform_inbound_saml_config_time.Minute), - }, - - Schema: map[string]*resource_identity_platform_inbound_saml_config_schema.Schema{ - "display_name": { - Type: resource_identity_platform_inbound_saml_config_schema.TypeString, - Required: true, - Description: `Human friendly display name.`, - }, - "idp_config": { - Type: resource_identity_platform_inbound_saml_config_schema.TypeList, - Required: true, - Description: `SAML IdP configuration when the project acts as the relying party`, - MaxItems: 1, - Elem: &resource_identity_platform_inbound_saml_config_schema.Resource{ - Schema: map[string]*resource_identity_platform_inbound_saml_config_schema.Schema{ - "idp_certificates": { - Type: resource_identity_platform_inbound_saml_config_schema.TypeList, - Required: true, - Description: `The IdP's certificate data to verify the signature in the SAMLResponse issued by the IDP.`, - Elem: &resource_identity_platform_inbound_saml_config_schema.Resource{ - Schema: map[string]*resource_identity_platform_inbound_saml_config_schema.Schema{ - "x509_certificate": { - Type: resource_identity_platform_inbound_saml_config_schema.TypeString, - Optional: true, - Description: `The IdP's x509 certificate.`, - }, - }, - }, - }, - "idp_entity_id": { - Type: resource_identity_platform_inbound_saml_config_schema.TypeString, - Required: true, - Description: `Unique identifier for all SAML 
entities`, - }, - "sso_url": { - Type: resource_identity_platform_inbound_saml_config_schema.TypeString, - Required: true, - Description: `URL to send Authentication request to.`, - }, - "sign_request": { - Type: resource_identity_platform_inbound_saml_config_schema.TypeBool, - Optional: true, - Description: `Indicates if outbounding SAMLRequest should be signed.`, - }, - }, - }, - }, - "name": { - Type: resource_identity_platform_inbound_saml_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the InboundSamlConfig resource. Must start with 'saml.' and can only have alphanumeric characters, -hyphens, underscores or periods. The part after 'saml.' must also start with a lowercase letter, end with an -alphanumeric character, and have at least 2 characters.`, - }, - "sp_config": { - Type: resource_identity_platform_inbound_saml_config_schema.TypeList, - Required: true, - Description: `SAML SP (Service Provider) configuration when the project acts as the relying party to receive -and accept an authentication assertion issued by a SAML identity provider.`, - MaxItems: 1, - Elem: &resource_identity_platform_inbound_saml_config_schema.Resource{ - Schema: map[string]*resource_identity_platform_inbound_saml_config_schema.Schema{ - "callback_uri": { - Type: resource_identity_platform_inbound_saml_config_schema.TypeString, - Optional: true, - Description: `Callback URI where responses from IDP are handled. 
Must start with 'https://'.`, - }, - "sp_entity_id": { - Type: resource_identity_platform_inbound_saml_config_schema.TypeString, - Optional: true, - Description: `Unique identifier for all SAML entities.`, - }, - "sp_certificates": { - Type: resource_identity_platform_inbound_saml_config_schema.TypeList, - Computed: true, - Description: `The IDP's certificate data to verify the signature in the SAMLResponse issued by the IDP.`, - Elem: &resource_identity_platform_inbound_saml_config_schema.Resource{ - Schema: map[string]*resource_identity_platform_inbound_saml_config_schema.Schema{ - "x509_certificate": { - Type: resource_identity_platform_inbound_saml_config_schema.TypeString, - Computed: true, - Description: `The x509 certificate`, - }, - }, - }, - }, - }, - }, - }, - "enabled": { - Type: resource_identity_platform_inbound_saml_config_schema.TypeBool, - Optional: true, - Description: `If this config allows users to sign in with the provider.`, - }, - "project": { - Type: resource_identity_platform_inbound_saml_config_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformInboundSamlConfigCreate(d *resource_identity_platform_inbound_saml_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandIdentityPlatformInboundSamlConfigName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_identity_platform_inbound_saml_config_reflect.ValueOf(nameProp)) && (ok || !resource_identity_platform_inbound_saml_config_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - displayNameProp, err := expandIdentityPlatformInboundSamlConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, 
ok := d.GetOkExists("display_name"); !isEmptyValue(resource_identity_platform_inbound_saml_config_reflect.ValueOf(displayNameProp)) && (ok || !resource_identity_platform_inbound_saml_config_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandIdentityPlatformInboundSamlConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(resource_identity_platform_inbound_saml_config_reflect.ValueOf(enabledProp)) && (ok || !resource_identity_platform_inbound_saml_config_reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - idpConfigProp, err := expandIdentityPlatformInboundSamlConfigIdpConfig(d.Get("idp_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("idp_config"); !isEmptyValue(resource_identity_platform_inbound_saml_config_reflect.ValueOf(idpConfigProp)) && (ok || !resource_identity_platform_inbound_saml_config_reflect.DeepEqual(v, idpConfigProp)) { - obj["idpConfig"] = idpConfigProp - } - spConfigProp, err := expandIdentityPlatformInboundSamlConfigSpConfig(d.Get("sp_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("sp_config"); !isEmptyValue(resource_identity_platform_inbound_saml_config_reflect.ValueOf(spConfigProp)) && (ok || !resource_identity_platform_inbound_saml_config_reflect.DeepEqual(v, spConfigProp)) { - obj["spConfig"] = spConfigProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/inboundSamlConfigs?inboundSamlConfigId={{name}}") - if err != nil { - return err - } - - resource_identity_platform_inbound_saml_config_log.Printf("[DEBUG] Creating new InboundSamlConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_inbound_saml_config_fmt.Errorf("Error fetching project for InboundSamlConfig: %s", err) - } - 
billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_inbound_saml_config_schema.TimeoutCreate)) - if err != nil { - return resource_identity_platform_inbound_saml_config_fmt.Errorf("Error creating InboundSamlConfig: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/inboundSamlConfigs/{{name}}") - if err != nil { - return resource_identity_platform_inbound_saml_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_identity_platform_inbound_saml_config_log.Printf("[DEBUG] Finished creating InboundSamlConfig %q: %#v", d.Id(), res) - - return resourceIdentityPlatformInboundSamlConfigRead(d, meta) -} - -func resourceIdentityPlatformInboundSamlConfigRead(d *resource_identity_platform_inbound_saml_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/inboundSamlConfigs/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_inbound_saml_config_fmt.Errorf("Error fetching project for InboundSamlConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_identity_platform_inbound_saml_config_fmt.Sprintf("IdentityPlatformInboundSamlConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_identity_platform_inbound_saml_config_fmt.Errorf("Error reading 
InboundSamlConfig: %s", err) - } - - if err := d.Set("name", flattenIdentityPlatformInboundSamlConfigName(res["name"], d, config)); err != nil { - return resource_identity_platform_inbound_saml_config_fmt.Errorf("Error reading InboundSamlConfig: %s", err) - } - if err := d.Set("display_name", flattenIdentityPlatformInboundSamlConfigDisplayName(res["displayName"], d, config)); err != nil { - return resource_identity_platform_inbound_saml_config_fmt.Errorf("Error reading InboundSamlConfig: %s", err) - } - if err := d.Set("enabled", flattenIdentityPlatformInboundSamlConfigEnabled(res["enabled"], d, config)); err != nil { - return resource_identity_platform_inbound_saml_config_fmt.Errorf("Error reading InboundSamlConfig: %s", err) - } - if err := d.Set("idp_config", flattenIdentityPlatformInboundSamlConfigIdpConfig(res["idpConfig"], d, config)); err != nil { - return resource_identity_platform_inbound_saml_config_fmt.Errorf("Error reading InboundSamlConfig: %s", err) - } - if err := d.Set("sp_config", flattenIdentityPlatformInboundSamlConfigSpConfig(res["spConfig"], d, config)); err != nil { - return resource_identity_platform_inbound_saml_config_fmt.Errorf("Error reading InboundSamlConfig: %s", err) - } - - return nil -} - -func resourceIdentityPlatformInboundSamlConfigUpdate(d *resource_identity_platform_inbound_saml_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_inbound_saml_config_fmt.Errorf("Error fetching project for InboundSamlConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandIdentityPlatformInboundSamlConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); 
!isEmptyValue(resource_identity_platform_inbound_saml_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_inbound_saml_config_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandIdentityPlatformInboundSamlConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(resource_identity_platform_inbound_saml_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_inbound_saml_config_reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - idpConfigProp, err := expandIdentityPlatformInboundSamlConfigIdpConfig(d.Get("idp_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("idp_config"); !isEmptyValue(resource_identity_platform_inbound_saml_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_inbound_saml_config_reflect.DeepEqual(v, idpConfigProp)) { - obj["idpConfig"] = idpConfigProp - } - spConfigProp, err := expandIdentityPlatformInboundSamlConfigSpConfig(d.Get("sp_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("sp_config"); !isEmptyValue(resource_identity_platform_inbound_saml_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_inbound_saml_config_reflect.DeepEqual(v, spConfigProp)) { - obj["spConfig"] = spConfigProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/inboundSamlConfigs/{{name}}") - if err != nil { - return err - } - - resource_identity_platform_inbound_saml_config_log.Printf("[DEBUG] Updating InboundSamlConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("enabled") { - updateMask = append(updateMask, "enabled") - } - - if d.HasChange("idp_config") { - updateMask = append(updateMask, "idpConfig") - } - - if d.HasChange("sp_config") 
{ - updateMask = append(updateMask, "spConfig") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_identity_platform_inbound_saml_config_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_inbound_saml_config_schema.TimeoutUpdate)) - - if err != nil { - return resource_identity_platform_inbound_saml_config_fmt.Errorf("Error updating InboundSamlConfig %q: %s", d.Id(), err) - } else { - resource_identity_platform_inbound_saml_config_log.Printf("[DEBUG] Finished updating InboundSamlConfig %q: %#v", d.Id(), res) - } - - return resourceIdentityPlatformInboundSamlConfigRead(d, meta) -} - -func resourceIdentityPlatformInboundSamlConfigDelete(d *resource_identity_platform_inbound_saml_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_inbound_saml_config_fmt.Errorf("Error fetching project for InboundSamlConfig: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/inboundSamlConfigs/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_identity_platform_inbound_saml_config_log.Printf("[DEBUG] Deleting InboundSamlConfig %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_inbound_saml_config_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, 
"InboundSamlConfig") - } - - resource_identity_platform_inbound_saml_config_log.Printf("[DEBUG] Finished deleting InboundSamlConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceIdentityPlatformInboundSamlConfigImport(d *resource_identity_platform_inbound_saml_config_schema.ResourceData, meta interface{}) ([]*resource_identity_platform_inbound_saml_config_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/inboundSamlConfigs/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/inboundSamlConfigs/{{name}}") - if err != nil { - return nil, resource_identity_platform_inbound_saml_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_identity_platform_inbound_saml_config_schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformInboundSamlConfigName(v interface{}, d *resource_identity_platform_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenIdentityPlatformInboundSamlConfigDisplayName(v interface{}, d *resource_identity_platform_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformInboundSamlConfigEnabled(v interface{}, d *resource_identity_platform_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformInboundSamlConfigIdpConfig(v interface{}, d *resource_identity_platform_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["idp_entity_id"] = - 
flattenIdentityPlatformInboundSamlConfigIdpConfigIdpEntityId(original["idpEntityId"], d, config) - transformed["sso_url"] = - flattenIdentityPlatformInboundSamlConfigIdpConfigSsoUrl(original["ssoUrl"], d, config) - transformed["sign_request"] = - flattenIdentityPlatformInboundSamlConfigIdpConfigSignRequest(original["signRequest"], d, config) - transformed["idp_certificates"] = - flattenIdentityPlatformInboundSamlConfigIdpConfigIdpCertificates(original["idpCertificates"], d, config) - return []interface{}{transformed} -} - -func flattenIdentityPlatformInboundSamlConfigIdpConfigIdpEntityId(v interface{}, d *resource_identity_platform_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformInboundSamlConfigIdpConfigSsoUrl(v interface{}, d *resource_identity_platform_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformInboundSamlConfigIdpConfigSignRequest(v interface{}, d *resource_identity_platform_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformInboundSamlConfigIdpConfigIdpCertificates(v interface{}, d *resource_identity_platform_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "x509_certificate": flattenIdentityPlatformInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(original["x509Certificate"], d, config), - }) - } - return transformed -} - -func flattenIdentityPlatformInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(v interface{}, d *resource_identity_platform_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenIdentityPlatformInboundSamlConfigSpConfig(v interface{}, d *resource_identity_platform_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["sp_entity_id"] = - flattenIdentityPlatformInboundSamlConfigSpConfigSpEntityId(original["spEntityId"], d, config) - transformed["callback_uri"] = - flattenIdentityPlatformInboundSamlConfigSpConfigCallbackUri(original["callbackUri"], d, config) - transformed["sp_certificates"] = - flattenIdentityPlatformInboundSamlConfigSpConfigSpCertificates(original["spCertificates"], d, config) - return []interface{}{transformed} -} - -func flattenIdentityPlatformInboundSamlConfigSpConfigSpEntityId(v interface{}, d *resource_identity_platform_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformInboundSamlConfigSpConfigCallbackUri(v interface{}, d *resource_identity_platform_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformInboundSamlConfigSpConfigSpCertificates(v interface{}, d *resource_identity_platform_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "x509_certificate": flattenIdentityPlatformInboundSamlConfigSpConfigSpCertificatesX509Certificate(original["x509Certificate"], d, config), - }) - } - return transformed -} - -func flattenIdentityPlatformInboundSamlConfigSpConfigSpCertificatesX509Certificate(v interface{}, d *resource_identity_platform_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - 
return v -} - -func expandIdentityPlatformInboundSamlConfigName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigIdpConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIdpEntityId, err := expandIdentityPlatformInboundSamlConfigIdpConfigIdpEntityId(original["idp_entity_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_inbound_saml_config_reflect.ValueOf(transformedIdpEntityId); val.IsValid() && !isEmptyValue(val) { - transformed["idpEntityId"] = transformedIdpEntityId - } - - transformedSsoUrl, err := expandIdentityPlatformInboundSamlConfigIdpConfigSsoUrl(original["sso_url"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_inbound_saml_config_reflect.ValueOf(transformedSsoUrl); val.IsValid() && !isEmptyValue(val) { - transformed["ssoUrl"] = transformedSsoUrl - } - - transformedSignRequest, err := expandIdentityPlatformInboundSamlConfigIdpConfigSignRequest(original["sign_request"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_inbound_saml_config_reflect.ValueOf(transformedSignRequest); val.IsValid() && !isEmptyValue(val) { - transformed["signRequest"] = transformedSignRequest - } - - transformedIdpCertificates, err := 
expandIdentityPlatformInboundSamlConfigIdpConfigIdpCertificates(original["idp_certificates"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_inbound_saml_config_reflect.ValueOf(transformedIdpCertificates); val.IsValid() && !isEmptyValue(val) { - transformed["idpCertificates"] = transformedIdpCertificates - } - - return transformed, nil -} - -func expandIdentityPlatformInboundSamlConfigIdpConfigIdpEntityId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigIdpConfigSsoUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigIdpConfigSignRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigIdpConfigIdpCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedX509Certificate, err := expandIdentityPlatformInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(original["x509_certificate"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_inbound_saml_config_reflect.ValueOf(transformedX509Certificate); val.IsValid() && !isEmptyValue(val) { - transformed["x509Certificate"] = transformedX509Certificate - } - - req = append(req, transformed) - } - return req, nil -} - -func expandIdentityPlatformInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigSpConfig(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSpEntityId, err := expandIdentityPlatformInboundSamlConfigSpConfigSpEntityId(original["sp_entity_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_inbound_saml_config_reflect.ValueOf(transformedSpEntityId); val.IsValid() && !isEmptyValue(val) { - transformed["spEntityId"] = transformedSpEntityId - } - - transformedCallbackUri, err := expandIdentityPlatformInboundSamlConfigSpConfigCallbackUri(original["callback_uri"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_inbound_saml_config_reflect.ValueOf(transformedCallbackUri); val.IsValid() && !isEmptyValue(val) { - transformed["callbackUri"] = transformedCallbackUri - } - - transformedSpCertificates, err := expandIdentityPlatformInboundSamlConfigSpConfigSpCertificates(original["sp_certificates"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_inbound_saml_config_reflect.ValueOf(transformedSpCertificates); val.IsValid() && !isEmptyValue(val) { - transformed["spCertificates"] = transformedSpCertificates - } - - return transformed, nil -} - -func expandIdentityPlatformInboundSamlConfigSpConfigSpEntityId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigSpConfigCallbackUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformInboundSamlConfigSpConfigSpCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedX509Certificate, err := expandIdentityPlatformInboundSamlConfigSpConfigSpCertificatesX509Certificate(original["x509_certificate"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_inbound_saml_config_reflect.ValueOf(transformedX509Certificate); val.IsValid() && !isEmptyValue(val) { - transformed["x509Certificate"] = transformedX509Certificate - } - - req = append(req, transformed) - } - return req, nil -} - -func expandIdentityPlatformInboundSamlConfigSpConfigSpCertificatesX509Certificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceIdentityPlatformOauthIdpConfig() *resource_identity_platform_oauth_idp_config_schema.Resource { - return &resource_identity_platform_oauth_idp_config_schema.Resource{ - Create: resourceIdentityPlatformOauthIdpConfigCreate, - Read: resourceIdentityPlatformOauthIdpConfigRead, - Update: resourceIdentityPlatformOauthIdpConfigUpdate, - Delete: resourceIdentityPlatformOauthIdpConfigDelete, - - Importer: &resource_identity_platform_oauth_idp_config_schema.ResourceImporter{ - State: resourceIdentityPlatformOauthIdpConfigImport, - }, - - Timeouts: &resource_identity_platform_oauth_idp_config_schema.ResourceTimeout{ - Create: resource_identity_platform_oauth_idp_config_schema.DefaultTimeout(4 * resource_identity_platform_oauth_idp_config_time.Minute), - Update: resource_identity_platform_oauth_idp_config_schema.DefaultTimeout(4 * resource_identity_platform_oauth_idp_config_time.Minute), - Delete: resource_identity_platform_oauth_idp_config_schema.DefaultTimeout(4 * resource_identity_platform_oauth_idp_config_time.Minute), - }, - - Schema: map[string]*resource_identity_platform_oauth_idp_config_schema.Schema{ - "client_id": { - Type: resource_identity_platform_oauth_idp_config_schema.TypeString, - Required: true, - Description: `The client id of an 
OAuth client.`, - }, - "issuer": { - Type: resource_identity_platform_oauth_idp_config_schema.TypeString, - Required: true, - Description: `For OIDC Idps, the issuer identifier.`, - }, - "name": { - Type: resource_identity_platform_oauth_idp_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the OauthIdpConfig. Must start with 'oidc.'.`, - }, - "client_secret": { - Type: resource_identity_platform_oauth_idp_config_schema.TypeString, - Optional: true, - Description: `The client secret of the OAuth client, to enable OIDC code flow.`, - }, - "display_name": { - Type: resource_identity_platform_oauth_idp_config_schema.TypeString, - Optional: true, - Description: `Human friendly display name.`, - }, - "enabled": { - Type: resource_identity_platform_oauth_idp_config_schema.TypeBool, - Optional: true, - Description: `If this config allows users to sign in with the provider.`, - }, - "project": { - Type: resource_identity_platform_oauth_idp_config_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformOauthIdpConfigCreate(d *resource_identity_platform_oauth_idp_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandIdentityPlatformOauthIdpConfigName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_identity_platform_oauth_idp_config_reflect.ValueOf(nameProp)) && (ok || !resource_identity_platform_oauth_idp_config_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - displayNameProp, err := expandIdentityPlatformOauthIdpConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); 
!isEmptyValue(resource_identity_platform_oauth_idp_config_reflect.ValueOf(displayNameProp)) && (ok || !resource_identity_platform_oauth_idp_config_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandIdentityPlatformOauthIdpConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(resource_identity_platform_oauth_idp_config_reflect.ValueOf(enabledProp)) && (ok || !resource_identity_platform_oauth_idp_config_reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - issuerProp, err := expandIdentityPlatformOauthIdpConfigIssuer(d.Get("issuer"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("issuer"); !isEmptyValue(resource_identity_platform_oauth_idp_config_reflect.ValueOf(issuerProp)) && (ok || !resource_identity_platform_oauth_idp_config_reflect.DeepEqual(v, issuerProp)) { - obj["issuer"] = issuerProp - } - clientIdProp, err := expandIdentityPlatformOauthIdpConfigClientId(d.Get("client_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_id"); !isEmptyValue(resource_identity_platform_oauth_idp_config_reflect.ValueOf(clientIdProp)) && (ok || !resource_identity_platform_oauth_idp_config_reflect.DeepEqual(v, clientIdProp)) { - obj["clientId"] = clientIdProp - } - clientSecretProp, err := expandIdentityPlatformOauthIdpConfigClientSecret(d.Get("client_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_secret"); !isEmptyValue(resource_identity_platform_oauth_idp_config_reflect.ValueOf(clientSecretProp)) && (ok || !resource_identity_platform_oauth_idp_config_reflect.DeepEqual(v, clientSecretProp)) { - obj["clientSecret"] = clientSecretProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/oauthIdpConfigs?oauthIdpConfigId={{name}}") - if err != nil { - return err - } - - 
resource_identity_platform_oauth_idp_config_log.Printf("[DEBUG] Creating new OauthIdpConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_oauth_idp_config_fmt.Errorf("Error fetching project for OauthIdpConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_oauth_idp_config_schema.TimeoutCreate)) - if err != nil { - return resource_identity_platform_oauth_idp_config_fmt.Errorf("Error creating OauthIdpConfig: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/oauthIdpConfigs/{{name}}") - if err != nil { - return resource_identity_platform_oauth_idp_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_identity_platform_oauth_idp_config_log.Printf("[DEBUG] Finished creating OauthIdpConfig %q: %#v", d.Id(), res) - - return resourceIdentityPlatformOauthIdpConfigRead(d, meta) -} - -func resourceIdentityPlatformOauthIdpConfigRead(d *resource_identity_platform_oauth_idp_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/oauthIdpConfigs/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_oauth_idp_config_fmt.Errorf("Error fetching project for OauthIdpConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return 
handleNotFoundError(err, d, resource_identity_platform_oauth_idp_config_fmt.Sprintf("IdentityPlatformOauthIdpConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_identity_platform_oauth_idp_config_fmt.Errorf("Error reading OauthIdpConfig: %s", err) - } - - if err := d.Set("name", flattenIdentityPlatformOauthIdpConfigName(res["name"], d, config)); err != nil { - return resource_identity_platform_oauth_idp_config_fmt.Errorf("Error reading OauthIdpConfig: %s", err) - } - if err := d.Set("display_name", flattenIdentityPlatformOauthIdpConfigDisplayName(res["displayName"], d, config)); err != nil { - return resource_identity_platform_oauth_idp_config_fmt.Errorf("Error reading OauthIdpConfig: %s", err) - } - if err := d.Set("enabled", flattenIdentityPlatformOauthIdpConfigEnabled(res["enabled"], d, config)); err != nil { - return resource_identity_platform_oauth_idp_config_fmt.Errorf("Error reading OauthIdpConfig: %s", err) - } - if err := d.Set("issuer", flattenIdentityPlatformOauthIdpConfigIssuer(res["issuer"], d, config)); err != nil { - return resource_identity_platform_oauth_idp_config_fmt.Errorf("Error reading OauthIdpConfig: %s", err) - } - if err := d.Set("client_id", flattenIdentityPlatformOauthIdpConfigClientId(res["clientId"], d, config)); err != nil { - return resource_identity_platform_oauth_idp_config_fmt.Errorf("Error reading OauthIdpConfig: %s", err) - } - if err := d.Set("client_secret", flattenIdentityPlatformOauthIdpConfigClientSecret(res["clientSecret"], d, config)); err != nil { - return resource_identity_platform_oauth_idp_config_fmt.Errorf("Error reading OauthIdpConfig: %s", err) - } - - return nil -} - -func resourceIdentityPlatformOauthIdpConfigUpdate(d *resource_identity_platform_oauth_idp_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, 
err := getProject(d, config) - if err != nil { - return resource_identity_platform_oauth_idp_config_fmt.Errorf("Error fetching project for OauthIdpConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandIdentityPlatformOauthIdpConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_identity_platform_oauth_idp_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_oauth_idp_config_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandIdentityPlatformOauthIdpConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(resource_identity_platform_oauth_idp_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_oauth_idp_config_reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - issuerProp, err := expandIdentityPlatformOauthIdpConfigIssuer(d.Get("issuer"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("issuer"); !isEmptyValue(resource_identity_platform_oauth_idp_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_oauth_idp_config_reflect.DeepEqual(v, issuerProp)) { - obj["issuer"] = issuerProp - } - clientIdProp, err := expandIdentityPlatformOauthIdpConfigClientId(d.Get("client_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_id"); !isEmptyValue(resource_identity_platform_oauth_idp_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_oauth_idp_config_reflect.DeepEqual(v, clientIdProp)) { - obj["clientId"] = clientIdProp - } - clientSecretProp, err := expandIdentityPlatformOauthIdpConfigClientSecret(d.Get("client_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_secret"); 
!isEmptyValue(resource_identity_platform_oauth_idp_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_oauth_idp_config_reflect.DeepEqual(v, clientSecretProp)) { - obj["clientSecret"] = clientSecretProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/oauthIdpConfigs/{{name}}") - if err != nil { - return err - } - - resource_identity_platform_oauth_idp_config_log.Printf("[DEBUG] Updating OauthIdpConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("enabled") { - updateMask = append(updateMask, "enabled") - } - - if d.HasChange("issuer") { - updateMask = append(updateMask, "issuer") - } - - if d.HasChange("client_id") { - updateMask = append(updateMask, "clientId") - } - - if d.HasChange("client_secret") { - updateMask = append(updateMask, "clientSecret") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_identity_platform_oauth_idp_config_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_oauth_idp_config_schema.TimeoutUpdate)) - - if err != nil { - return resource_identity_platform_oauth_idp_config_fmt.Errorf("Error updating OauthIdpConfig %q: %s", d.Id(), err) - } else { - resource_identity_platform_oauth_idp_config_log.Printf("[DEBUG] Finished updating OauthIdpConfig %q: %#v", d.Id(), res) - } - - return resourceIdentityPlatformOauthIdpConfigRead(d, meta) -} - -func resourceIdentityPlatformOauthIdpConfigDelete(d *resource_identity_platform_oauth_idp_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - 
billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_oauth_idp_config_fmt.Errorf("Error fetching project for OauthIdpConfig: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/oauthIdpConfigs/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_identity_platform_oauth_idp_config_log.Printf("[DEBUG] Deleting OauthIdpConfig %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_oauth_idp_config_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "OauthIdpConfig") - } - - resource_identity_platform_oauth_idp_config_log.Printf("[DEBUG] Finished deleting OauthIdpConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceIdentityPlatformOauthIdpConfigImport(d *resource_identity_platform_oauth_idp_config_schema.ResourceData, meta interface{}) ([]*resource_identity_platform_oauth_idp_config_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/oauthIdpConfigs/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/oauthIdpConfigs/{{name}}") - if err != nil { - return nil, resource_identity_platform_oauth_idp_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_identity_platform_oauth_idp_config_schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformOauthIdpConfigName(v interface{}, d *resource_identity_platform_oauth_idp_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func 
flattenIdentityPlatformOauthIdpConfigDisplayName(v interface{}, d *resource_identity_platform_oauth_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformOauthIdpConfigEnabled(v interface{}, d *resource_identity_platform_oauth_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformOauthIdpConfigIssuer(v interface{}, d *resource_identity_platform_oauth_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformOauthIdpConfigClientId(v interface{}, d *resource_identity_platform_oauth_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformOauthIdpConfigClientSecret(v interface{}, d *resource_identity_platform_oauth_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIdentityPlatformOauthIdpConfigName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformOauthIdpConfigDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformOauthIdpConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformOauthIdpConfigIssuer(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformOauthIdpConfigClientId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformOauthIdpConfigClientSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceIdentityPlatformTenant() *resource_identity_platform_tenant_schema.Resource { - return &resource_identity_platform_tenant_schema.Resource{ - Create: 
resourceIdentityPlatformTenantCreate, - Read: resourceIdentityPlatformTenantRead, - Update: resourceIdentityPlatformTenantUpdate, - Delete: resourceIdentityPlatformTenantDelete, - - Importer: &resource_identity_platform_tenant_schema.ResourceImporter{ - State: resourceIdentityPlatformTenantImport, - }, - - Timeouts: &resource_identity_platform_tenant_schema.ResourceTimeout{ - Create: resource_identity_platform_tenant_schema.DefaultTimeout(4 * resource_identity_platform_tenant_time.Minute), - Update: resource_identity_platform_tenant_schema.DefaultTimeout(4 * resource_identity_platform_tenant_time.Minute), - Delete: resource_identity_platform_tenant_schema.DefaultTimeout(4 * resource_identity_platform_tenant_time.Minute), - }, - - Schema: map[string]*resource_identity_platform_tenant_schema.Schema{ - "display_name": { - Type: resource_identity_platform_tenant_schema.TypeString, - Required: true, - Description: `Human friendly display name of the tenant.`, - }, - "allow_password_signup": { - Type: resource_identity_platform_tenant_schema.TypeBool, - Optional: true, - Description: `Whether to allow email/password user authentication.`, - }, - "disable_auth": { - Type: resource_identity_platform_tenant_schema.TypeBool, - Optional: true, - Description: `Whether authentication is disabled for the tenant. If true, the users under -the disabled tenant are not allowed to sign-in. 
Admins of the disabled tenant -are not able to manage its users.`, - }, - "enable_email_link_signin": { - Type: resource_identity_platform_tenant_schema.TypeBool, - Optional: true, - Description: `Whether to enable email link user authentication.`, - }, - "name": { - Type: resource_identity_platform_tenant_schema.TypeString, - Computed: true, - Description: `The name of the tenant that is generated by the server`, - }, - "project": { - Type: resource_identity_platform_tenant_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformTenantCreate(d *resource_identity_platform_tenant_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandIdentityPlatformTenantDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_identity_platform_tenant_reflect.ValueOf(displayNameProp)) && (ok || !resource_identity_platform_tenant_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - allowPasswordSignupProp, err := expandIdentityPlatformTenantAllowPasswordSignup(d.Get("allow_password_signup"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("allow_password_signup"); !isEmptyValue(resource_identity_platform_tenant_reflect.ValueOf(allowPasswordSignupProp)) && (ok || !resource_identity_platform_tenant_reflect.DeepEqual(v, allowPasswordSignupProp)) { - obj["allowPasswordSignup"] = allowPasswordSignupProp - } - enableEmailLinkSigninProp, err := expandIdentityPlatformTenantEnableEmailLinkSignin(d.Get("enable_email_link_signin"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_email_link_signin"); 
!isEmptyValue(resource_identity_platform_tenant_reflect.ValueOf(enableEmailLinkSigninProp)) && (ok || !resource_identity_platform_tenant_reflect.DeepEqual(v, enableEmailLinkSigninProp)) { - obj["enableEmailLinkSignin"] = enableEmailLinkSigninProp - } - disableAuthProp, err := expandIdentityPlatformTenantDisableAuth(d.Get("disable_auth"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disable_auth"); !isEmptyValue(resource_identity_platform_tenant_reflect.ValueOf(disableAuthProp)) && (ok || !resource_identity_platform_tenant_reflect.DeepEqual(v, disableAuthProp)) { - obj["disableAuth"] = disableAuthProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants") - if err != nil { - return err - } - - resource_identity_platform_tenant_log.Printf("[DEBUG] Creating new Tenant: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_tenant_fmt.Errorf("Error fetching project for Tenant: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_tenant_schema.TimeoutCreate)) - if err != nil { - return resource_identity_platform_tenant_fmt.Errorf("Error creating Tenant: %s", err) - } - if err := d.Set("name", flattenIdentityPlatformTenantName(res["name"], d, config)); err != nil { - return resource_identity_platform_tenant_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/tenants/{{name}}") - if err != nil { - return resource_identity_platform_tenant_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - return resource_identity_platform_tenant_fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - if err := d.Set("name", GetResourceNameFromSelfLink(name.(string))); err != nil { - return resource_identity_platform_tenant_fmt.Errorf("Error setting name: %s", err) - } - - id, err = replaceVars(d, config, "projects/{{project}}/tenants/{{name}}") - if err != nil { - return resource_identity_platform_tenant_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_identity_platform_tenant_log.Printf("[DEBUG] Finished creating Tenant %q: %#v", d.Id(), res) - - return resourceIdentityPlatformTenantRead(d, meta) -} - -func resourceIdentityPlatformTenantRead(d *resource_identity_platform_tenant_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_tenant_fmt.Errorf("Error fetching project for Tenant: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_identity_platform_tenant_fmt.Sprintf("IdentityPlatformTenant %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_identity_platform_tenant_fmt.Errorf("Error reading Tenant: %s", err) - } - - if err := d.Set("name", flattenIdentityPlatformTenantName(res["name"], d, config)); err != nil { - return resource_identity_platform_tenant_fmt.Errorf("Error reading Tenant: %s", err) - } - if err := d.Set("display_name", flattenIdentityPlatformTenantDisplayName(res["displayName"], d, config)); err != nil { - return 
resource_identity_platform_tenant_fmt.Errorf("Error reading Tenant: %s", err) - } - if err := d.Set("allow_password_signup", flattenIdentityPlatformTenantAllowPasswordSignup(res["allowPasswordSignup"], d, config)); err != nil { - return resource_identity_platform_tenant_fmt.Errorf("Error reading Tenant: %s", err) - } - if err := d.Set("enable_email_link_signin", flattenIdentityPlatformTenantEnableEmailLinkSignin(res["enableEmailLinkSignin"], d, config)); err != nil { - return resource_identity_platform_tenant_fmt.Errorf("Error reading Tenant: %s", err) - } - if err := d.Set("disable_auth", flattenIdentityPlatformTenantDisableAuth(res["disableAuth"], d, config)); err != nil { - return resource_identity_platform_tenant_fmt.Errorf("Error reading Tenant: %s", err) - } - - return nil -} - -func resourceIdentityPlatformTenantUpdate(d *resource_identity_platform_tenant_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_tenant_fmt.Errorf("Error fetching project for Tenant: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandIdentityPlatformTenantDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_identity_platform_tenant_reflect.ValueOf(v)) && (ok || !resource_identity_platform_tenant_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - allowPasswordSignupProp, err := expandIdentityPlatformTenantAllowPasswordSignup(d.Get("allow_password_signup"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("allow_password_signup"); !isEmptyValue(resource_identity_platform_tenant_reflect.ValueOf(v)) && (ok || 
!resource_identity_platform_tenant_reflect.DeepEqual(v, allowPasswordSignupProp)) { - obj["allowPasswordSignup"] = allowPasswordSignupProp - } - enableEmailLinkSigninProp, err := expandIdentityPlatformTenantEnableEmailLinkSignin(d.Get("enable_email_link_signin"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_email_link_signin"); !isEmptyValue(resource_identity_platform_tenant_reflect.ValueOf(v)) && (ok || !resource_identity_platform_tenant_reflect.DeepEqual(v, enableEmailLinkSigninProp)) { - obj["enableEmailLinkSignin"] = enableEmailLinkSigninProp - } - disableAuthProp, err := expandIdentityPlatformTenantDisableAuth(d.Get("disable_auth"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disable_auth"); !isEmptyValue(resource_identity_platform_tenant_reflect.ValueOf(v)) && (ok || !resource_identity_platform_tenant_reflect.DeepEqual(v, disableAuthProp)) { - obj["disableAuth"] = disableAuthProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{name}}") - if err != nil { - return err - } - - resource_identity_platform_tenant_log.Printf("[DEBUG] Updating Tenant %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("allow_password_signup") { - updateMask = append(updateMask, "allowPasswordSignup") - } - - if d.HasChange("enable_email_link_signin") { - updateMask = append(updateMask, "enableEmailLinkSignin") - } - - if d.HasChange("disable_auth") { - updateMask = append(updateMask, "disableAuth") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_identity_platform_tenant_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, 
d.Timeout(resource_identity_platform_tenant_schema.TimeoutUpdate)) - - if err != nil { - return resource_identity_platform_tenant_fmt.Errorf("Error updating Tenant %q: %s", d.Id(), err) - } else { - resource_identity_platform_tenant_log.Printf("[DEBUG] Finished updating Tenant %q: %#v", d.Id(), res) - } - - return resourceIdentityPlatformTenantRead(d, meta) -} - -func resourceIdentityPlatformTenantDelete(d *resource_identity_platform_tenant_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_tenant_fmt.Errorf("Error fetching project for Tenant: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_identity_platform_tenant_log.Printf("[DEBUG] Deleting Tenant %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_tenant_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Tenant") - } - - resource_identity_platform_tenant_log.Printf("[DEBUG] Finished deleting Tenant %q: %#v", d.Id(), res) - return nil -} - -func resourceIdentityPlatformTenantImport(d *resource_identity_platform_tenant_schema.ResourceData, meta interface{}) ([]*resource_identity_platform_tenant_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/tenants/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, 
"projects/{{project}}/tenants/{{name}}") - if err != nil { - return nil, resource_identity_platform_tenant_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_identity_platform_tenant_schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformTenantName(v interface{}, d *resource_identity_platform_tenant_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenIdentityPlatformTenantDisplayName(v interface{}, d *resource_identity_platform_tenant_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantAllowPasswordSignup(v interface{}, d *resource_identity_platform_tenant_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantEnableEmailLinkSignin(v interface{}, d *resource_identity_platform_tenant_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantDisableAuth(v interface{}, d *resource_identity_platform_tenant_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIdentityPlatformTenantDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantAllowPasswordSignup(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantEnableEmailLinkSignin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantDisableAuth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceIdentityPlatformTenantDefaultSupportedIdpConfig() *resource_identity_platform_tenant_default_supported_idp_config_schema.Resource { - return &resource_identity_platform_tenant_default_supported_idp_config_schema.Resource{ - Create: 
resourceIdentityPlatformTenantDefaultSupportedIdpConfigCreate, - Read: resourceIdentityPlatformTenantDefaultSupportedIdpConfigRead, - Update: resourceIdentityPlatformTenantDefaultSupportedIdpConfigUpdate, - Delete: resourceIdentityPlatformTenantDefaultSupportedIdpConfigDelete, - - Importer: &resource_identity_platform_tenant_default_supported_idp_config_schema.ResourceImporter{ - State: resourceIdentityPlatformTenantDefaultSupportedIdpConfigImport, - }, - - Timeouts: &resource_identity_platform_tenant_default_supported_idp_config_schema.ResourceTimeout{ - Create: resource_identity_platform_tenant_default_supported_idp_config_schema.DefaultTimeout(4 * resource_identity_platform_tenant_default_supported_idp_config_time.Minute), - Update: resource_identity_platform_tenant_default_supported_idp_config_schema.DefaultTimeout(4 * resource_identity_platform_tenant_default_supported_idp_config_time.Minute), - Delete: resource_identity_platform_tenant_default_supported_idp_config_schema.DefaultTimeout(4 * resource_identity_platform_tenant_default_supported_idp_config_time.Minute), - }, - - Schema: map[string]*resource_identity_platform_tenant_default_supported_idp_config_schema.Schema{ - "client_id": { - Type: resource_identity_platform_tenant_default_supported_idp_config_schema.TypeString, - Required: true, - Description: `OAuth client ID`, - }, - "client_secret": { - Type: resource_identity_platform_tenant_default_supported_idp_config_schema.TypeString, - Required: true, - Description: `OAuth client secret`, - }, - "idp_id": { - Type: resource_identity_platform_tenant_default_supported_idp_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the IDP. 
Possible values include: - -* 'apple.com' - -* 'facebook.com' - -* 'gc.apple.com' - -* 'github.com' - -* 'google.com' - -* 'linkedin.com' - -* 'microsoft.com' - -* 'playgames.google.com' - -* 'twitter.com' - -* 'yahoo.com'`, - }, - "tenant": { - Type: resource_identity_platform_tenant_default_supported_idp_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the tenant where this DefaultSupportedIdpConfig resource exists`, - }, - "enabled": { - Type: resource_identity_platform_tenant_default_supported_idp_config_schema.TypeBool, - Optional: true, - Description: `If this IDP allows the user to sign in`, - }, - "name": { - Type: resource_identity_platform_tenant_default_supported_idp_config_schema.TypeString, - Computed: true, - Description: `The name of the default supported IDP config resource`, - }, - "project": { - Type: resource_identity_platform_tenant_default_supported_idp_config_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformTenantDefaultSupportedIdpConfigCreate(d *resource_identity_platform_tenant_default_supported_idp_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - clientIdProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigClientId(d.Get("client_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_id"); !isEmptyValue(resource_identity_platform_tenant_default_supported_idp_config_reflect.ValueOf(clientIdProp)) && (ok || !resource_identity_platform_tenant_default_supported_idp_config_reflect.DeepEqual(v, clientIdProp)) { - obj["clientId"] = clientIdProp - } - clientSecretProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigClientSecret(d.Get("client_secret"), d, config) - if err != 
nil { - return err - } else if v, ok := d.GetOkExists("client_secret"); !isEmptyValue(resource_identity_platform_tenant_default_supported_idp_config_reflect.ValueOf(clientSecretProp)) && (ok || !resource_identity_platform_tenant_default_supported_idp_config_reflect.DeepEqual(v, clientSecretProp)) { - obj["clientSecret"] = clientSecretProp - } - enabledProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(resource_identity_platform_tenant_default_supported_idp_config_reflect.ValueOf(enabledProp)) && (ok || !resource_identity_platform_tenant_default_supported_idp_config_reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs?idpId={{idp_id}}") - if err != nil { - return err - } - - resource_identity_platform_tenant_default_supported_idp_config_log.Printf("[DEBUG] Creating new TenantDefaultSupportedIdpConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_tenant_default_supported_idp_config_fmt.Errorf("Error fetching project for TenantDefaultSupportedIdpConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_tenant_default_supported_idp_config_schema.TimeoutCreate)) - if err != nil { - return resource_identity_platform_tenant_default_supported_idp_config_fmt.Errorf("Error creating TenantDefaultSupportedIdpConfig: %s", err) - } - if err := d.Set("name", flattenIdentityPlatformTenantDefaultSupportedIdpConfigName(res["name"], d, config)); err != nil { - return 
resource_identity_platform_tenant_default_supported_idp_config_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return resource_identity_platform_tenant_default_supported_idp_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_identity_platform_tenant_default_supported_idp_config_log.Printf("[DEBUG] Finished creating TenantDefaultSupportedIdpConfig %q: %#v", d.Id(), res) - - return resourceIdentityPlatformTenantDefaultSupportedIdpConfigRead(d, meta) -} - -func resourceIdentityPlatformTenantDefaultSupportedIdpConfigRead(d *resource_identity_platform_tenant_default_supported_idp_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_tenant_default_supported_idp_config_fmt.Errorf("Error fetching project for TenantDefaultSupportedIdpConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_identity_platform_tenant_default_supported_idp_config_fmt.Sprintf("IdentityPlatformTenantDefaultSupportedIdpConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_identity_platform_tenant_default_supported_idp_config_fmt.Errorf("Error reading TenantDefaultSupportedIdpConfig: %s", err) - } - - if err := 
d.Set("name", flattenIdentityPlatformTenantDefaultSupportedIdpConfigName(res["name"], d, config)); err != nil { - return resource_identity_platform_tenant_default_supported_idp_config_fmt.Errorf("Error reading TenantDefaultSupportedIdpConfig: %s", err) - } - if err := d.Set("client_id", flattenIdentityPlatformTenantDefaultSupportedIdpConfigClientId(res["clientId"], d, config)); err != nil { - return resource_identity_platform_tenant_default_supported_idp_config_fmt.Errorf("Error reading TenantDefaultSupportedIdpConfig: %s", err) - } - if err := d.Set("client_secret", flattenIdentityPlatformTenantDefaultSupportedIdpConfigClientSecret(res["clientSecret"], d, config)); err != nil { - return resource_identity_platform_tenant_default_supported_idp_config_fmt.Errorf("Error reading TenantDefaultSupportedIdpConfig: %s", err) - } - if err := d.Set("enabled", flattenIdentityPlatformTenantDefaultSupportedIdpConfigEnabled(res["enabled"], d, config)); err != nil { - return resource_identity_platform_tenant_default_supported_idp_config_fmt.Errorf("Error reading TenantDefaultSupportedIdpConfig: %s", err) - } - - return nil -} - -func resourceIdentityPlatformTenantDefaultSupportedIdpConfigUpdate(d *resource_identity_platform_tenant_default_supported_idp_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_tenant_default_supported_idp_config_fmt.Errorf("Error fetching project for TenantDefaultSupportedIdpConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - clientIdProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigClientId(d.Get("client_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_id"); 
!isEmptyValue(resource_identity_platform_tenant_default_supported_idp_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_tenant_default_supported_idp_config_reflect.DeepEqual(v, clientIdProp)) { - obj["clientId"] = clientIdProp - } - clientSecretProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigClientSecret(d.Get("client_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_secret"); !isEmptyValue(resource_identity_platform_tenant_default_supported_idp_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_tenant_default_supported_idp_config_reflect.DeepEqual(v, clientSecretProp)) { - obj["clientSecret"] = clientSecretProp - } - enabledProp, err := expandIdentityPlatformTenantDefaultSupportedIdpConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(resource_identity_platform_tenant_default_supported_idp_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_tenant_default_supported_idp_config_reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return err - } - - resource_identity_platform_tenant_default_supported_idp_config_log.Printf("[DEBUG] Updating TenantDefaultSupportedIdpConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("client_id") { - updateMask = append(updateMask, "clientId") - } - - if d.HasChange("client_secret") { - updateMask = append(updateMask, "clientSecret") - } - - if d.HasChange("enabled") { - updateMask = append(updateMask, "enabled") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_identity_platform_tenant_default_supported_idp_config_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_tenant_default_supported_idp_config_schema.TimeoutUpdate)) - - if err != nil { - return resource_identity_platform_tenant_default_supported_idp_config_fmt.Errorf("Error updating TenantDefaultSupportedIdpConfig %q: %s", d.Id(), err) - } else { - resource_identity_platform_tenant_default_supported_idp_config_log.Printf("[DEBUG] Finished updating TenantDefaultSupportedIdpConfig %q: %#v", d.Id(), res) - } - - return resourceIdentityPlatformTenantDefaultSupportedIdpConfigRead(d, meta) -} - -func resourceIdentityPlatformTenantDefaultSupportedIdpConfigDelete(d *resource_identity_platform_tenant_default_supported_idp_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_tenant_default_supported_idp_config_fmt.Errorf("Error fetching project for TenantDefaultSupportedIdpConfig: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_identity_platform_tenant_default_supported_idp_config_log.Printf("[DEBUG] Deleting TenantDefaultSupportedIdpConfig %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_tenant_default_supported_idp_config_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TenantDefaultSupportedIdpConfig") - } - - 
resource_identity_platform_tenant_default_supported_idp_config_log.Printf("[DEBUG] Finished deleting TenantDefaultSupportedIdpConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceIdentityPlatformTenantDefaultSupportedIdpConfigImport(d *resource_identity_platform_tenant_default_supported_idp_config_schema.ResourceData, meta interface{}) ([]*resource_identity_platform_tenant_default_supported_idp_config_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/tenants/(?P[^/]+)/defaultSupportedIdpConfigs/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/tenants/{{tenant}}/defaultSupportedIdpConfigs/{{idp_id}}") - if err != nil { - return nil, resource_identity_platform_tenant_default_supported_idp_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_identity_platform_tenant_default_supported_idp_config_schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformTenantDefaultSupportedIdpConfigName(v interface{}, d *resource_identity_platform_tenant_default_supported_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantDefaultSupportedIdpConfigClientId(v interface{}, d *resource_identity_platform_tenant_default_supported_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantDefaultSupportedIdpConfigClientSecret(v interface{}, d *resource_identity_platform_tenant_default_supported_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantDefaultSupportedIdpConfigEnabled(v interface{}, d *resource_identity_platform_tenant_default_supported_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
expandIdentityPlatformTenantDefaultSupportedIdpConfigClientId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantDefaultSupportedIdpConfigClientSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantDefaultSupportedIdpConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceIdentityPlatformTenantInboundSamlConfig() *resource_identity_platform_tenant_inbound_saml_config_schema.Resource { - return &resource_identity_platform_tenant_inbound_saml_config_schema.Resource{ - Create: resourceIdentityPlatformTenantInboundSamlConfigCreate, - Read: resourceIdentityPlatformTenantInboundSamlConfigRead, - Update: resourceIdentityPlatformTenantInboundSamlConfigUpdate, - Delete: resourceIdentityPlatformTenantInboundSamlConfigDelete, - - Importer: &resource_identity_platform_tenant_inbound_saml_config_schema.ResourceImporter{ - State: resourceIdentityPlatformTenantInboundSamlConfigImport, - }, - - Timeouts: &resource_identity_platform_tenant_inbound_saml_config_schema.ResourceTimeout{ - Create: resource_identity_platform_tenant_inbound_saml_config_schema.DefaultTimeout(4 * resource_identity_platform_tenant_inbound_saml_config_time.Minute), - Update: resource_identity_platform_tenant_inbound_saml_config_schema.DefaultTimeout(4 * resource_identity_platform_tenant_inbound_saml_config_time.Minute), - Delete: resource_identity_platform_tenant_inbound_saml_config_schema.DefaultTimeout(4 * resource_identity_platform_tenant_inbound_saml_config_time.Minute), - }, - - Schema: map[string]*resource_identity_platform_tenant_inbound_saml_config_schema.Schema{ - "display_name": { - Type: resource_identity_platform_tenant_inbound_saml_config_schema.TypeString, - Required: true, - Description: `Human friendly display name.`, - }, - "idp_config": { - Type: 
resource_identity_platform_tenant_inbound_saml_config_schema.TypeList, - Required: true, - Description: `SAML IdP configuration when the project acts as the relying party`, - MaxItems: 1, - Elem: &resource_identity_platform_tenant_inbound_saml_config_schema.Resource{ - Schema: map[string]*resource_identity_platform_tenant_inbound_saml_config_schema.Schema{ - "idp_certificates": { - Type: resource_identity_platform_tenant_inbound_saml_config_schema.TypeList, - Required: true, - Description: `The IDP's certificate data to verify the signature in the SAMLResponse issued by the IDP.`, - Elem: &resource_identity_platform_tenant_inbound_saml_config_schema.Resource{ - Schema: map[string]*resource_identity_platform_tenant_inbound_saml_config_schema.Schema{ - "x509_certificate": { - Type: resource_identity_platform_tenant_inbound_saml_config_schema.TypeString, - Optional: true, - Description: `The x509 certificate`, - }, - }, - }, - }, - "idp_entity_id": { - Type: resource_identity_platform_tenant_inbound_saml_config_schema.TypeString, - Required: true, - Description: `Unique identifier for all SAML entities`, - }, - "sso_url": { - Type: resource_identity_platform_tenant_inbound_saml_config_schema.TypeString, - Required: true, - Description: `URL to send Authentication request to.`, - }, - "sign_request": { - Type: resource_identity_platform_tenant_inbound_saml_config_schema.TypeBool, - Optional: true, - Description: `Indicates if outbounding SAMLRequest should be signed.`, - }, - }, - }, - }, - "name": { - Type: resource_identity_platform_tenant_inbound_saml_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the InboundSamlConfig resource. Must start with 'saml.' and can only have alphanumeric characters, -hyphens, underscores or periods. The part after 'saml.' 
must also start with a lowercase letter, end with an -alphanumeric character, and have at least 2 characters.`, - }, - "sp_config": { - Type: resource_identity_platform_tenant_inbound_saml_config_schema.TypeList, - Required: true, - Description: `SAML SP (Service Provider) configuration when the project acts as the relying party to receive -and accept an authentication assertion issued by a SAML identity provider.`, - MaxItems: 1, - Elem: &resource_identity_platform_tenant_inbound_saml_config_schema.Resource{ - Schema: map[string]*resource_identity_platform_tenant_inbound_saml_config_schema.Schema{ - "callback_uri": { - Type: resource_identity_platform_tenant_inbound_saml_config_schema.TypeString, - Required: true, - Description: `Callback URI where responses from IDP are handled. Must start with 'https://'.`, - }, - "sp_entity_id": { - Type: resource_identity_platform_tenant_inbound_saml_config_schema.TypeString, - Required: true, - Description: `Unique identifier for all SAML entities.`, - }, - "sp_certificates": { - Type: resource_identity_platform_tenant_inbound_saml_config_schema.TypeList, - Computed: true, - Description: `The IDP's certificate data to verify the signature in the SAMLResponse issued by the IDP.`, - Elem: &resource_identity_platform_tenant_inbound_saml_config_schema.Resource{ - Schema: map[string]*resource_identity_platform_tenant_inbound_saml_config_schema.Schema{ - "x509_certificate": { - Type: resource_identity_platform_tenant_inbound_saml_config_schema.TypeString, - Computed: true, - Description: `The x509 certificate`, - }, - }, - }, - }, - }, - }, - }, - "tenant": { - Type: resource_identity_platform_tenant_inbound_saml_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the tenant where this inbound SAML config resource exists`, - }, - "enabled": { - Type: resource_identity_platform_tenant_inbound_saml_config_schema.TypeBool, - Optional: true, - Description: `If this config allows users to sign in 
with the provider.`, - }, - "project": { - Type: resource_identity_platform_tenant_inbound_saml_config_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformTenantInboundSamlConfigCreate(d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandIdentityPlatformTenantInboundSamlConfigName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(nameProp)) && (ok || !resource_identity_platform_tenant_inbound_saml_config_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - displayNameProp, err := expandIdentityPlatformTenantInboundSamlConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(displayNameProp)) && (ok || !resource_identity_platform_tenant_inbound_saml_config_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandIdentityPlatformTenantInboundSamlConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(enabledProp)) && (ok || !resource_identity_platform_tenant_inbound_saml_config_reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - idpConfigProp, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfig(d.Get("idp_config"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("idp_config"); !isEmptyValue(resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(idpConfigProp)) && (ok || !resource_identity_platform_tenant_inbound_saml_config_reflect.DeepEqual(v, idpConfigProp)) { - obj["idpConfig"] = idpConfigProp - } - spConfigProp, err := expandIdentityPlatformTenantInboundSamlConfigSpConfig(d.Get("sp_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("sp_config"); !isEmptyValue(resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(spConfigProp)) && (ok || !resource_identity_platform_tenant_inbound_saml_config_reflect.DeepEqual(v, spConfigProp)) { - obj["spConfig"] = spConfigProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs?inboundSamlConfigId={{name}}") - if err != nil { - return err - } - - resource_identity_platform_tenant_inbound_saml_config_log.Printf("[DEBUG] Creating new TenantInboundSamlConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_tenant_inbound_saml_config_fmt.Errorf("Error fetching project for TenantInboundSamlConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_tenant_inbound_saml_config_schema.TimeoutCreate)) - if err != nil { - return resource_identity_platform_tenant_inbound_saml_config_fmt.Errorf("Error creating TenantInboundSamlConfig: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs/{{name}}") - if err != nil { - return resource_identity_platform_tenant_inbound_saml_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - 
resource_identity_platform_tenant_inbound_saml_config_log.Printf("[DEBUG] Finished creating TenantInboundSamlConfig %q: %#v", d.Id(), res) - - return resourceIdentityPlatformTenantInboundSamlConfigRead(d, meta) -} - -func resourceIdentityPlatformTenantInboundSamlConfigRead(d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_tenant_inbound_saml_config_fmt.Errorf("Error fetching project for TenantInboundSamlConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_identity_platform_tenant_inbound_saml_config_fmt.Sprintf("IdentityPlatformTenantInboundSamlConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_identity_platform_tenant_inbound_saml_config_fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) - } - - if err := d.Set("name", flattenIdentityPlatformTenantInboundSamlConfigName(res["name"], d, config)); err != nil { - return resource_identity_platform_tenant_inbound_saml_config_fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) - } - if err := d.Set("display_name", flattenIdentityPlatformTenantInboundSamlConfigDisplayName(res["displayName"], d, config)); err != nil { - return resource_identity_platform_tenant_inbound_saml_config_fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) - } - if err := 
d.Set("enabled", flattenIdentityPlatformTenantInboundSamlConfigEnabled(res["enabled"], d, config)); err != nil { - return resource_identity_platform_tenant_inbound_saml_config_fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) - } - if err := d.Set("idp_config", flattenIdentityPlatformTenantInboundSamlConfigIdpConfig(res["idpConfig"], d, config)); err != nil { - return resource_identity_platform_tenant_inbound_saml_config_fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) - } - if err := d.Set("sp_config", flattenIdentityPlatformTenantInboundSamlConfigSpConfig(res["spConfig"], d, config)); err != nil { - return resource_identity_platform_tenant_inbound_saml_config_fmt.Errorf("Error reading TenantInboundSamlConfig: %s", err) - } - - return nil -} - -func resourceIdentityPlatformTenantInboundSamlConfigUpdate(d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_tenant_inbound_saml_config_fmt.Errorf("Error fetching project for TenantInboundSamlConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandIdentityPlatformTenantInboundSamlConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_tenant_inbound_saml_config_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandIdentityPlatformTenantInboundSamlConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); 
!isEmptyValue(resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_tenant_inbound_saml_config_reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - idpConfigProp, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfig(d.Get("idp_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("idp_config"); !isEmptyValue(resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_tenant_inbound_saml_config_reflect.DeepEqual(v, idpConfigProp)) { - obj["idpConfig"] = idpConfigProp - } - spConfigProp, err := expandIdentityPlatformTenantInboundSamlConfigSpConfig(d.Get("sp_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("sp_config"); !isEmptyValue(resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_tenant_inbound_saml_config_reflect.DeepEqual(v, spConfigProp)) { - obj["spConfig"] = spConfigProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs/{{name}}") - if err != nil { - return err - } - - resource_identity_platform_tenant_inbound_saml_config_log.Printf("[DEBUG] Updating TenantInboundSamlConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("enabled") { - updateMask = append(updateMask, "enabled") - } - - if d.HasChange("idp_config") { - updateMask = append(updateMask, "idpConfig") - } - - if d.HasChange("sp_config") { - updateMask = append(updateMask, "spConfig") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_identity_platform_tenant_inbound_saml_config_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - 
billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_tenant_inbound_saml_config_schema.TimeoutUpdate)) - - if err != nil { - return resource_identity_platform_tenant_inbound_saml_config_fmt.Errorf("Error updating TenantInboundSamlConfig %q: %s", d.Id(), err) - } else { - resource_identity_platform_tenant_inbound_saml_config_log.Printf("[DEBUG] Finished updating TenantInboundSamlConfig %q: %#v", d.Id(), res) - } - - return resourceIdentityPlatformTenantInboundSamlConfigRead(d, meta) -} - -func resourceIdentityPlatformTenantInboundSamlConfigDelete(d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_tenant_inbound_saml_config_fmt.Errorf("Error fetching project for TenantInboundSamlConfig: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_identity_platform_tenant_inbound_saml_config_log.Printf("[DEBUG] Deleting TenantInboundSamlConfig %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_tenant_inbound_saml_config_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TenantInboundSamlConfig") - } - - resource_identity_platform_tenant_inbound_saml_config_log.Printf("[DEBUG] Finished deleting TenantInboundSamlConfig %q: %#v", d.Id(), res) - return nil -} - -func 
resourceIdentityPlatformTenantInboundSamlConfigImport(d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, meta interface{}) ([]*resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/tenants/(?P[^/]+)/inboundSamlConfigs/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/tenants/{{tenant}}/inboundSamlConfigs/{{name}}") - if err != nil { - return nil, resource_identity_platform_tenant_inbound_saml_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformTenantInboundSamlConfigName(v interface{}, d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenIdentityPlatformTenantInboundSamlConfigDisplayName(v interface{}, d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantInboundSamlConfigEnabled(v interface{}, d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantInboundSamlConfigIdpConfig(v interface{}, d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["idp_entity_id"] = - flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpEntityId(original["idpEntityId"], d, config) - 
transformed["sso_url"] = - flattenIdentityPlatformTenantInboundSamlConfigIdpConfigSsoUrl(original["ssoUrl"], d, config) - transformed["sign_request"] = - flattenIdentityPlatformTenantInboundSamlConfigIdpConfigSignRequest(original["signRequest"], d, config) - transformed["idp_certificates"] = - flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificates(original["idpCertificates"], d, config) - return []interface{}{transformed} -} - -func flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpEntityId(v interface{}, d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantInboundSamlConfigIdpConfigSsoUrl(v interface{}, d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantInboundSamlConfigIdpConfigSignRequest(v interface{}, d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificates(v interface{}, d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "x509_certificate": flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(original["x509Certificate"], d, config), - }) - } - return transformed -} - -func flattenIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(v interface{}, d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenIdentityPlatformTenantInboundSamlConfigSpConfig(v interface{}, d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["sp_entity_id"] = - flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpEntityId(original["spEntityId"], d, config) - transformed["callback_uri"] = - flattenIdentityPlatformTenantInboundSamlConfigSpConfigCallbackUri(original["callbackUri"], d, config) - transformed["sp_certificates"] = - flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificates(original["spCertificates"], d, config) - return []interface{}{transformed} -} - -func flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpEntityId(v interface{}, d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantInboundSamlConfigSpConfigCallbackUri(v interface{}, d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificates(v interface{}, d *resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "x509_certificate": flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificatesX509Certificate(original["x509Certificate"], d, config), - }) - } - return transformed -} - -func flattenIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificatesX509Certificate(v interface{}, d 
*resource_identity_platform_tenant_inbound_saml_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIdentityPlatformTenantInboundSamlConfigName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigIdpConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIdpEntityId, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpEntityId(original["idp_entity_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(transformedIdpEntityId); val.IsValid() && !isEmptyValue(val) { - transformed["idpEntityId"] = transformedIdpEntityId - } - - transformedSsoUrl, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfigSsoUrl(original["sso_url"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(transformedSsoUrl); val.IsValid() && !isEmptyValue(val) { - transformed["ssoUrl"] = transformedSsoUrl - } - - transformedSignRequest, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfigSignRequest(original["sign_request"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(transformedSignRequest); val.IsValid() && !isEmptyValue(val) { - 
transformed["signRequest"] = transformedSignRequest - } - - transformedIdpCertificates, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificates(original["idp_certificates"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(transformedIdpCertificates); val.IsValid() && !isEmptyValue(val) { - transformed["idpCertificates"] = transformedIdpCertificates - } - - return transformed, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpEntityId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigIdpConfigSsoUrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigIdpConfigSignRequest(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificates(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedX509Certificate, err := expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(original["x509_certificate"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(transformedX509Certificate); val.IsValid() && !isEmptyValue(val) { - transformed["x509Certificate"] = transformedX509Certificate - } - - req = append(req, transformed) - } - return req, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigIdpConfigIdpCertificatesX509Certificate(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigSpConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSpEntityId, err := expandIdentityPlatformTenantInboundSamlConfigSpConfigSpEntityId(original["sp_entity_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(transformedSpEntityId); val.IsValid() && !isEmptyValue(val) { - transformed["spEntityId"] = transformedSpEntityId - } - - transformedCallbackUri, err := expandIdentityPlatformTenantInboundSamlConfigSpConfigCallbackUri(original["callback_uri"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(transformedCallbackUri); val.IsValid() && !isEmptyValue(val) { - transformed["callbackUri"] = transformedCallbackUri - } - - transformedSpCertificates, err := expandIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificates(original["sp_certificates"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(transformedSpCertificates); val.IsValid() && !isEmptyValue(val) { - transformed["spCertificates"] = transformedSpCertificates - } - - return transformed, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigSpConfigSpEntityId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigSpConfigCallbackUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificates(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedX509Certificate, err := expandIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificatesX509Certificate(original["x509_certificate"], d, config) - if err != nil { - return nil, err - } else if val := resource_identity_platform_tenant_inbound_saml_config_reflect.ValueOf(transformedX509Certificate); val.IsValid() && !isEmptyValue(val) { - transformed["x509Certificate"] = transformedX509Certificate - } - - req = append(req, transformed) - } - return req, nil -} - -func expandIdentityPlatformTenantInboundSamlConfigSpConfigSpCertificatesX509Certificate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceIdentityPlatformTenantOauthIdpConfig() *resource_identity_platform_tenant_oauth_idp_config_schema.Resource { - return &resource_identity_platform_tenant_oauth_idp_config_schema.Resource{ - Create: resourceIdentityPlatformTenantOauthIdpConfigCreate, - Read: resourceIdentityPlatformTenantOauthIdpConfigRead, - Update: resourceIdentityPlatformTenantOauthIdpConfigUpdate, - Delete: resourceIdentityPlatformTenantOauthIdpConfigDelete, - - Importer: &resource_identity_platform_tenant_oauth_idp_config_schema.ResourceImporter{ - State: resourceIdentityPlatformTenantOauthIdpConfigImport, - }, - - Timeouts: &resource_identity_platform_tenant_oauth_idp_config_schema.ResourceTimeout{ - Create: resource_identity_platform_tenant_oauth_idp_config_schema.DefaultTimeout(4 * resource_identity_platform_tenant_oauth_idp_config_time.Minute), - Update: resource_identity_platform_tenant_oauth_idp_config_schema.DefaultTimeout(4 * resource_identity_platform_tenant_oauth_idp_config_time.Minute), - Delete: 
resource_identity_platform_tenant_oauth_idp_config_schema.DefaultTimeout(4 * resource_identity_platform_tenant_oauth_idp_config_time.Minute), - }, - - Schema: map[string]*resource_identity_platform_tenant_oauth_idp_config_schema.Schema{ - "client_id": { - Type: resource_identity_platform_tenant_oauth_idp_config_schema.TypeString, - Required: true, - Description: `The client id of an OAuth client.`, - }, - "display_name": { - Type: resource_identity_platform_tenant_oauth_idp_config_schema.TypeString, - Required: true, - Description: `Human friendly display name.`, - }, - "issuer": { - Type: resource_identity_platform_tenant_oauth_idp_config_schema.TypeString, - Required: true, - Description: `For OIDC Idps, the issuer identifier.`, - }, - "name": { - Type: resource_identity_platform_tenant_oauth_idp_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the OauthIdpConfig. Must start with 'oidc.'.`, - }, - "tenant": { - Type: resource_identity_platform_tenant_oauth_idp_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the tenant where this OIDC IDP configuration resource exists`, - }, - "client_secret": { - Type: resource_identity_platform_tenant_oauth_idp_config_schema.TypeString, - Optional: true, - Description: `The client secret of the OAuth client, to enable OIDC code flow.`, - }, - "enabled": { - Type: resource_identity_platform_tenant_oauth_idp_config_schema.TypeBool, - Optional: true, - Description: `If this config allows users to sign in with the provider.`, - }, - "project": { - Type: resource_identity_platform_tenant_oauth_idp_config_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceIdentityPlatformTenantOauthIdpConfigCreate(d *resource_identity_platform_tenant_oauth_idp_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandIdentityPlatformTenantOauthIdpConfigName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_identity_platform_tenant_oauth_idp_config_reflect.ValueOf(nameProp)) && (ok || !resource_identity_platform_tenant_oauth_idp_config_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - displayNameProp, err := expandIdentityPlatformTenantOauthIdpConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_identity_platform_tenant_oauth_idp_config_reflect.ValueOf(displayNameProp)) && (ok || !resource_identity_platform_tenant_oauth_idp_config_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandIdentityPlatformTenantOauthIdpConfigEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(resource_identity_platform_tenant_oauth_idp_config_reflect.ValueOf(enabledProp)) && (ok || !resource_identity_platform_tenant_oauth_idp_config_reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - issuerProp, err := expandIdentityPlatformTenantOauthIdpConfigIssuer(d.Get("issuer"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("issuer"); !isEmptyValue(resource_identity_platform_tenant_oauth_idp_config_reflect.ValueOf(issuerProp)) && (ok || !resource_identity_platform_tenant_oauth_idp_config_reflect.DeepEqual(v, issuerProp)) { - obj["issuer"] = issuerProp - } - clientIdProp, err := expandIdentityPlatformTenantOauthIdpConfigClientId(d.Get("client_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_id"); 
!isEmptyValue(resource_identity_platform_tenant_oauth_idp_config_reflect.ValueOf(clientIdProp)) && (ok || !resource_identity_platform_tenant_oauth_idp_config_reflect.DeepEqual(v, clientIdProp)) { - obj["clientId"] = clientIdProp - } - clientSecretProp, err := expandIdentityPlatformTenantOauthIdpConfigClientSecret(d.Get("client_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_secret"); !isEmptyValue(resource_identity_platform_tenant_oauth_idp_config_reflect.ValueOf(clientSecretProp)) && (ok || !resource_identity_platform_tenant_oauth_idp_config_reflect.DeepEqual(v, clientSecretProp)) { - obj["clientSecret"] = clientSecretProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs?oauthIdpConfigId={{name}}") - if err != nil { - return err - } - - resource_identity_platform_tenant_oauth_idp_config_log.Printf("[DEBUG] Creating new TenantOauthIdpConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_tenant_oauth_idp_config_fmt.Errorf("Error fetching project for TenantOauthIdpConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_tenant_oauth_idp_config_schema.TimeoutCreate)) - if err != nil { - return resource_identity_platform_tenant_oauth_idp_config_fmt.Errorf("Error creating TenantOauthIdpConfig: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}") - if err != nil { - return resource_identity_platform_tenant_oauth_idp_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_identity_platform_tenant_oauth_idp_config_log.Printf("[DEBUG] Finished creating 
TenantOauthIdpConfig %q: %#v", d.Id(), res) - - return resourceIdentityPlatformTenantOauthIdpConfigRead(d, meta) -} - -func resourceIdentityPlatformTenantOauthIdpConfigRead(d *resource_identity_platform_tenant_oauth_idp_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_tenant_oauth_idp_config_fmt.Errorf("Error fetching project for TenantOauthIdpConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_identity_platform_tenant_oauth_idp_config_fmt.Sprintf("IdentityPlatformTenantOauthIdpConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_identity_platform_tenant_oauth_idp_config_fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) - } - - if err := d.Set("name", flattenIdentityPlatformTenantOauthIdpConfigName(res["name"], d, config)); err != nil { - return resource_identity_platform_tenant_oauth_idp_config_fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) - } - if err := d.Set("display_name", flattenIdentityPlatformTenantOauthIdpConfigDisplayName(res["displayName"], d, config)); err != nil { - return resource_identity_platform_tenant_oauth_idp_config_fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) - } - if err := d.Set("enabled", flattenIdentityPlatformTenantOauthIdpConfigEnabled(res["enabled"], d, config)); err != nil { - return 
resource_identity_platform_tenant_oauth_idp_config_fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) - } - if err := d.Set("issuer", flattenIdentityPlatformTenantOauthIdpConfigIssuer(res["issuer"], d, config)); err != nil { - return resource_identity_platform_tenant_oauth_idp_config_fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) - } - if err := d.Set("client_id", flattenIdentityPlatformTenantOauthIdpConfigClientId(res["clientId"], d, config)); err != nil { - return resource_identity_platform_tenant_oauth_idp_config_fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) - } - if err := d.Set("client_secret", flattenIdentityPlatformTenantOauthIdpConfigClientSecret(res["clientSecret"], d, config)); err != nil { - return resource_identity_platform_tenant_oauth_idp_config_fmt.Errorf("Error reading TenantOauthIdpConfig: %s", err) - } - - return nil -} - -func resourceIdentityPlatformTenantOauthIdpConfigUpdate(d *resource_identity_platform_tenant_oauth_idp_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_tenant_oauth_idp_config_fmt.Errorf("Error fetching project for TenantOauthIdpConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandIdentityPlatformTenantOauthIdpConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_identity_platform_tenant_oauth_idp_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_tenant_oauth_idp_config_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandIdentityPlatformTenantOauthIdpConfigEnabled(d.Get("enabled"), d, config) - if err != nil 
{ - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(resource_identity_platform_tenant_oauth_idp_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_tenant_oauth_idp_config_reflect.DeepEqual(v, enabledProp)) { - obj["enabled"] = enabledProp - } - issuerProp, err := expandIdentityPlatformTenantOauthIdpConfigIssuer(d.Get("issuer"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("issuer"); !isEmptyValue(resource_identity_platform_tenant_oauth_idp_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_tenant_oauth_idp_config_reflect.DeepEqual(v, issuerProp)) { - obj["issuer"] = issuerProp - } - clientIdProp, err := expandIdentityPlatformTenantOauthIdpConfigClientId(d.Get("client_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_id"); !isEmptyValue(resource_identity_platform_tenant_oauth_idp_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_tenant_oauth_idp_config_reflect.DeepEqual(v, clientIdProp)) { - obj["clientId"] = clientIdProp - } - clientSecretProp, err := expandIdentityPlatformTenantOauthIdpConfigClientSecret(d.Get("client_secret"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("client_secret"); !isEmptyValue(resource_identity_platform_tenant_oauth_idp_config_reflect.ValueOf(v)) && (ok || !resource_identity_platform_tenant_oauth_idp_config_reflect.DeepEqual(v, clientSecretProp)) { - obj["clientSecret"] = clientSecretProp - } - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}") - if err != nil { - return err - } - - resource_identity_platform_tenant_oauth_idp_config_log.Printf("[DEBUG] Updating TenantOauthIdpConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("enabled") { - updateMask = append(updateMask, 
"enabled") - } - - if d.HasChange("issuer") { - updateMask = append(updateMask, "issuer") - } - - if d.HasChange("client_id") { - updateMask = append(updateMask, "clientId") - } - - if d.HasChange("client_secret") { - updateMask = append(updateMask, "clientSecret") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_identity_platform_tenant_oauth_idp_config_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_tenant_oauth_idp_config_schema.TimeoutUpdate)) - - if err != nil { - return resource_identity_platform_tenant_oauth_idp_config_fmt.Errorf("Error updating TenantOauthIdpConfig %q: %s", d.Id(), err) - } else { - resource_identity_platform_tenant_oauth_idp_config_log.Printf("[DEBUG] Finished updating TenantOauthIdpConfig %q: %#v", d.Id(), res) - } - - return resourceIdentityPlatformTenantOauthIdpConfigRead(d, meta) -} - -func resourceIdentityPlatformTenantOauthIdpConfigDelete(d *resource_identity_platform_tenant_oauth_idp_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_identity_platform_tenant_oauth_idp_config_fmt.Errorf("Error fetching project for TenantOauthIdpConfig: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{IdentityPlatformBasePath}}projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_identity_platform_tenant_oauth_idp_config_log.Printf("[DEBUG] Deleting TenantOauthIdpConfig %q", d.Id()) - - if bp, err := getBillingProject(d, config); err 
== nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_identity_platform_tenant_oauth_idp_config_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TenantOauthIdpConfig") - } - - resource_identity_platform_tenant_oauth_idp_config_log.Printf("[DEBUG] Finished deleting TenantOauthIdpConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceIdentityPlatformTenantOauthIdpConfigImport(d *resource_identity_platform_tenant_oauth_idp_config_schema.ResourceData, meta interface{}) ([]*resource_identity_platform_tenant_oauth_idp_config_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/tenants/(?P[^/]+)/oauthIdpConfigs/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/tenants/{{tenant}}/oauthIdpConfigs/{{name}}") - if err != nil { - return nil, resource_identity_platform_tenant_oauth_idp_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_identity_platform_tenant_oauth_idp_config_schema.ResourceData{d}, nil -} - -func flattenIdentityPlatformTenantOauthIdpConfigName(v interface{}, d *resource_identity_platform_tenant_oauth_idp_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenIdentityPlatformTenantOauthIdpConfigDisplayName(v interface{}, d *resource_identity_platform_tenant_oauth_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantOauthIdpConfigEnabled(v interface{}, d *resource_identity_platform_tenant_oauth_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantOauthIdpConfigIssuer(v interface{}, d 
*resource_identity_platform_tenant_oauth_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantOauthIdpConfigClientId(v interface{}, d *resource_identity_platform_tenant_oauth_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenIdentityPlatformTenantOauthIdpConfigClientSecret(v interface{}, d *resource_identity_platform_tenant_oauth_idp_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandIdentityPlatformTenantOauthIdpConfigName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantOauthIdpConfigDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantOauthIdpConfigEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantOauthIdpConfigIssuer(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantOauthIdpConfigClientId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandIdentityPlatformTenantOauthIdpConfigClientSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceKMSCryptoKey() *resource_kms_crypto_key_schema.Resource { - return &resource_kms_crypto_key_schema.Resource{ - Create: resourceKMSCryptoKeyCreate, - Read: resourceKMSCryptoKeyRead, - Update: resourceKMSCryptoKeyUpdate, - Delete: resourceKMSCryptoKeyDelete, - - Importer: &resource_kms_crypto_key_schema.ResourceImporter{ - State: resourceKMSCryptoKeyImport, - }, - - Timeouts: &resource_kms_crypto_key_schema.ResourceTimeout{ - Create: resource_kms_crypto_key_schema.DefaultTimeout(4 * resource_kms_crypto_key_time.Minute), - 
Update: resource_kms_crypto_key_schema.DefaultTimeout(4 * resource_kms_crypto_key_time.Minute), - Delete: resource_kms_crypto_key_schema.DefaultTimeout(4 * resource_kms_crypto_key_time.Minute), - }, - - SchemaVersion: 1, - StateUpgraders: []resource_kms_crypto_key_schema.StateUpgrader{ - { - Type: resourceKMSCryptoKeyResourceV0().CoreConfigSchema().ImpliedType(), - Upgrade: resourceKMSCryptoKeyUpgradeV0, - Version: 0, - }, - }, - - Schema: map[string]*resource_kms_crypto_key_schema.Schema{ - "key_ring": { - Type: resource_kms_crypto_key_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: kmsCryptoKeyRingsEquivalent, - Description: `The KeyRing that this key belongs to. -Format: ''projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}''.`, - }, - "name": { - Type: resource_kms_crypto_key_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name for the CryptoKey.`, - }, - "destroy_scheduled_duration": { - Type: resource_kms_crypto_key_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The period of time that versions of this key spend in the DESTROY_SCHEDULED state before transitioning to DESTROYED. 
-If not specified at creation time, the default duration is 24 hours.`, - }, - "import_only": { - Type: resource_kms_crypto_key_schema.TypeBool, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Whether this key may contain imported versions only.`, - }, - "labels": { - Type: resource_kms_crypto_key_schema.TypeMap, - Optional: true, - Description: `Labels with user-defined metadata to apply to this resource.`, - Elem: &resource_kms_crypto_key_schema.Schema{Type: resource_kms_crypto_key_schema.TypeString}, - }, - "purpose": { - Type: resource_kms_crypto_key_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_kms_crypto_key_validation.StringInSlice([]string{"ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT", ""}, false), - Description: `The immutable purpose of this CryptoKey. See the -[purpose reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys#CryptoKeyPurpose) -for possible inputs. Default value: "ENCRYPT_DECRYPT" Possible values: ["ENCRYPT_DECRYPT", "ASYMMETRIC_SIGN", "ASYMMETRIC_DECRYPT"]`, - Default: "ENCRYPT_DECRYPT", - }, - "rotation_period": { - Type: resource_kms_crypto_key_schema.TypeString, - Optional: true, - ValidateFunc: orEmpty(validateKmsCryptoKeyRotationPeriod), - Description: `Every time this period passes, generate a new CryptoKeyVersion and set it as the primary. -The first rotation will take place after the specified period. The rotation period has -the format of a decimal number with up to 9 fractional digits, followed by the -letter 's' (seconds). It must be greater than a day (ie, 86400).`, - }, - "skip_initial_version_creation": { - Type: resource_kms_crypto_key_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `If set to true, the request will create a CryptoKey without any CryptoKeyVersions. 
-You must use the 'google_kms_key_ring_import_job' resource to import the CryptoKeyVersion.`, - }, - "version_template": { - Type: resource_kms_crypto_key_schema.TypeList, - Computed: true, - Optional: true, - Description: `A template describing settings for new crypto key versions.`, - MaxItems: 1, - Elem: &resource_kms_crypto_key_schema.Resource{ - Schema: map[string]*resource_kms_crypto_key_schema.Schema{ - "algorithm": { - Type: resource_kms_crypto_key_schema.TypeString, - Required: true, - Description: `The algorithm to use when creating a version based on this template. -See the [algorithm reference](https://cloud.google.com/kms/docs/reference/rest/v1/CryptoKeyVersionAlgorithm) for possible inputs.`, - }, - "protection_level": { - Type: resource_kms_crypto_key_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The protection level to use when creating a version based on this template. Possible values include "SOFTWARE", "HSM", "EXTERNAL". Defaults to "SOFTWARE".`, - Default: "SOFTWARE", - }, - }, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceKMSCryptoKeyCreate(d *resource_kms_crypto_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandKMSCryptoKeyLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_kms_crypto_key_reflect.ValueOf(labelsProp)) && (ok || !resource_kms_crypto_key_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - purposeProp, err := expandKMSCryptoKeyPurpose(d.Get("purpose"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("purpose"); !isEmptyValue(resource_kms_crypto_key_reflect.ValueOf(purposeProp)) && (ok || !resource_kms_crypto_key_reflect.DeepEqual(v, purposeProp)) { - obj["purpose"] = 
purposeProp - } - rotationPeriodProp, err := expandKMSCryptoKeyRotationPeriod(d.Get("rotation_period"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("rotation_period"); !isEmptyValue(resource_kms_crypto_key_reflect.ValueOf(rotationPeriodProp)) && (ok || !resource_kms_crypto_key_reflect.DeepEqual(v, rotationPeriodProp)) { - obj["rotationPeriod"] = rotationPeriodProp - } - versionTemplateProp, err := expandKMSCryptoKeyVersionTemplate(d.Get("version_template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version_template"); !isEmptyValue(resource_kms_crypto_key_reflect.ValueOf(versionTemplateProp)) && (ok || !resource_kms_crypto_key_reflect.DeepEqual(v, versionTemplateProp)) { - obj["versionTemplate"] = versionTemplateProp - } - destroyScheduledDurationProp, err := expandKMSCryptoKeyDestroyScheduledDuration(d.Get("destroy_scheduled_duration"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("destroy_scheduled_duration"); !isEmptyValue(resource_kms_crypto_key_reflect.ValueOf(destroyScheduledDurationProp)) && (ok || !resource_kms_crypto_key_reflect.DeepEqual(v, destroyScheduledDurationProp)) { - obj["destroyScheduledDuration"] = destroyScheduledDurationProp - } - importOnlyProp, err := expandKMSCryptoKeyImportOnly(d.Get("import_only"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("import_only"); !isEmptyValue(resource_kms_crypto_key_reflect.ValueOf(importOnlyProp)) && (ok || !resource_kms_crypto_key_reflect.DeepEqual(v, importOnlyProp)) { - obj["importOnly"] = importOnlyProp - } - - obj, err = resourceKMSCryptoKeyEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{key_ring}}/cryptoKeys?cryptoKeyId={{name}}&skipInitialVersionCreation={{skip_initial_version_creation}}") - if err != nil { - return err - } - - resource_kms_crypto_key_log.Printf("[DEBUG] Creating new CryptoKey: %#v", obj) 
- billingProject := "" - - if parts := resource_kms_crypto_key_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_kms_crypto_key_schema.TimeoutCreate)) - if err != nil { - return resource_kms_crypto_key_fmt.Errorf("Error creating CryptoKey: %s", err) - } - - id, err := replaceVars(d, config, "{{key_ring}}/cryptoKeys/{{name}}") - if err != nil { - return resource_kms_crypto_key_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_kms_crypto_key_log.Printf("[DEBUG] Finished creating CryptoKey %q: %#v", d.Id(), res) - - return resourceKMSCryptoKeyRead(d, meta) -} - -func resourceKMSCryptoKeyRead(d *resource_kms_crypto_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{key_ring}}/cryptoKeys/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if parts := resource_kms_crypto_key_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_kms_crypto_key_fmt.Sprintf("KMSCryptoKey %q", d.Id())) - } - - res, err = resourceKMSCryptoKeyDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_kms_crypto_key_log.Printf("[DEBUG] Removing KMSCryptoKey because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("labels", flattenKMSCryptoKeyLabels(res["labels"], d, 
config)); err != nil { - return resource_kms_crypto_key_fmt.Errorf("Error reading CryptoKey: %s", err) - } - if err := d.Set("purpose", flattenKMSCryptoKeyPurpose(res["purpose"], d, config)); err != nil { - return resource_kms_crypto_key_fmt.Errorf("Error reading CryptoKey: %s", err) - } - if err := d.Set("rotation_period", flattenKMSCryptoKeyRotationPeriod(res["rotationPeriod"], d, config)); err != nil { - return resource_kms_crypto_key_fmt.Errorf("Error reading CryptoKey: %s", err) - } - if err := d.Set("version_template", flattenKMSCryptoKeyVersionTemplate(res["versionTemplate"], d, config)); err != nil { - return resource_kms_crypto_key_fmt.Errorf("Error reading CryptoKey: %s", err) - } - if err := d.Set("destroy_scheduled_duration", flattenKMSCryptoKeyDestroyScheduledDuration(res["destroyScheduledDuration"], d, config)); err != nil { - return resource_kms_crypto_key_fmt.Errorf("Error reading CryptoKey: %s", err) - } - if err := d.Set("import_only", flattenKMSCryptoKeyImportOnly(res["importOnly"], d, config)); err != nil { - return resource_kms_crypto_key_fmt.Errorf("Error reading CryptoKey: %s", err) - } - - return nil -} - -func resourceKMSCryptoKeyUpdate(d *resource_kms_crypto_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - labelsProp, err := expandKMSCryptoKeyLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_kms_crypto_key_reflect.ValueOf(v)) && (ok || !resource_kms_crypto_key_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - rotationPeriodProp, err := expandKMSCryptoKeyRotationPeriod(d.Get("rotation_period"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("rotation_period"); 
!isEmptyValue(resource_kms_crypto_key_reflect.ValueOf(v)) && (ok || !resource_kms_crypto_key_reflect.DeepEqual(v, rotationPeriodProp)) { - obj["rotationPeriod"] = rotationPeriodProp - } - versionTemplateProp, err := expandKMSCryptoKeyVersionTemplate(d.Get("version_template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("version_template"); !isEmptyValue(resource_kms_crypto_key_reflect.ValueOf(v)) && (ok || !resource_kms_crypto_key_reflect.DeepEqual(v, versionTemplateProp)) { - obj["versionTemplate"] = versionTemplateProp - } - - obj, err = resourceKMSCryptoKeyUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{key_ring}}/cryptoKeys/{{name}}") - if err != nil { - return err - } - - resource_kms_crypto_key_log.Printf("[DEBUG] Updating CryptoKey %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("rotation_period") { - updateMask = append(updateMask, "rotationPeriod", - "nextRotationTime") - } - - if d.HasChange("version_template") { - updateMask = append(updateMask, "versionTemplate.algorithm") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_kms_crypto_key_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - if parts := resource_kms_crypto_key_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_kms_crypto_key_schema.TimeoutUpdate)) - - if err != nil { - return resource_kms_crypto_key_fmt.Errorf("Error updating CryptoKey %q: %s", d.Id(), err) - } else { - resource_kms_crypto_key_log.Printf("[DEBUG] Finished updating CryptoKey %q: %#v", d.Id(), res) - } - - return 
resourceKMSCryptoKeyRead(d, meta) -} - -func resourceKMSCryptoKeyDelete(d *resource_kms_crypto_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - cryptoKeyId, err := parseKmsCryptoKeyId(d.Id(), config) - if err != nil { - return err - } - - resource_kms_crypto_key_log.Printf(` -[WARNING] KMS CryptoKey resources cannot be deleted from GCP. The CryptoKey %s will be removed from Terraform state, -and all its CryptoKeyVersions will be destroyed, but it will still be present in the project.`, cryptoKeyId.cryptoKeyId()) - - if err := clearCryptoKeyVersions(cryptoKeyId, userAgent, config); err != nil { - return err - } - - if d.Get("rotation_period") != "" { - if err := disableCryptoKeyRotation(cryptoKeyId, userAgent, config); err != nil { - return resource_kms_crypto_key_fmt.Errorf( - "While cryptoKeyVersions were cleared, Terraform was unable to disable automatic rotation of key due to an error: %s."+ - "Please retry or manually disable automatic rotation to prevent creation of a new version of this key.", err) - } - } - - d.SetId("") - return nil -} - -func resourceKMSCryptoKeyImport(d *resource_kms_crypto_key_schema.ResourceData, meta interface{}) ([]*resource_kms_crypto_key_schema.ResourceData, error) { - - config := meta.(*Config) - - cryptoKeyId, err := parseKmsCryptoKeyId(d.Id(), config) - if err != nil { - return nil, err - } - - if err := d.Set("key_ring", cryptoKeyId.KeyRingId.keyRingId()); err != nil { - return nil, resource_kms_crypto_key_fmt.Errorf("Error setting key_ring: %s", err) - } - if err := d.Set("name", cryptoKeyId.Name); err != nil { - return nil, resource_kms_crypto_key_fmt.Errorf("Error setting name: %s", err) - } - - if err := d.Set("skip_initial_version_creation", false); err != nil { - return nil, resource_kms_crypto_key_fmt.Errorf("Error setting skip_initial_version_creation: %s", err) - } - - return 
[]*resource_kms_crypto_key_schema.ResourceData{d}, nil -} - -func flattenKMSCryptoKeyLabels(v interface{}, d *resource_kms_crypto_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSCryptoKeyPurpose(v interface{}, d *resource_kms_crypto_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSCryptoKeyRotationPeriod(v interface{}, d *resource_kms_crypto_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSCryptoKeyVersionTemplate(v interface{}, d *resource_kms_crypto_key_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["algorithm"] = - flattenKMSCryptoKeyVersionTemplateAlgorithm(original["algorithm"], d, config) - transformed["protection_level"] = - flattenKMSCryptoKeyVersionTemplateProtectionLevel(original["protectionLevel"], d, config) - return []interface{}{transformed} -} - -func flattenKMSCryptoKeyVersionTemplateAlgorithm(v interface{}, d *resource_kms_crypto_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSCryptoKeyVersionTemplateProtectionLevel(v interface{}, d *resource_kms_crypto_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSCryptoKeyDestroyScheduledDuration(v interface{}, d *resource_kms_crypto_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSCryptoKeyImportOnly(v interface{}, d *resource_kms_crypto_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandKMSCryptoKeyLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func 
expandKMSCryptoKeyPurpose(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandKMSCryptoKeyRotationPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandKMSCryptoKeyVersionTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAlgorithm, err := expandKMSCryptoKeyVersionTemplateAlgorithm(original["algorithm"], d, config) - if err != nil { - return nil, err - } else if val := resource_kms_crypto_key_reflect.ValueOf(transformedAlgorithm); val.IsValid() && !isEmptyValue(val) { - transformed["algorithm"] = transformedAlgorithm - } - - transformedProtectionLevel, err := expandKMSCryptoKeyVersionTemplateProtectionLevel(original["protection_level"], d, config) - if err != nil { - return nil, err - } else if val := resource_kms_crypto_key_reflect.ValueOf(transformedProtectionLevel); val.IsValid() && !isEmptyValue(val) { - transformed["protectionLevel"] = transformedProtectionLevel - } - - return transformed, nil -} - -func expandKMSCryptoKeyVersionTemplateAlgorithm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandKMSCryptoKeyVersionTemplateProtectionLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandKMSCryptoKeyDestroyScheduledDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandKMSCryptoKeyImportOnly(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceKMSCryptoKeyEncoder(d *resource_kms_crypto_key_schema.ResourceData, meta interface{}, obj map[string]interface{}) 
(map[string]interface{}, error) { - - if d.Get("rotation_period") != "" { - rotationPeriod := d.Get("rotation_period").(string) - nextRotation, err := kmsCryptoKeyNextRotation(resource_kms_crypto_key_time.Now(), rotationPeriod) - - if err != nil { - return nil, resource_kms_crypto_key_fmt.Errorf("Error setting CryptoKey rotation period: %s", err.Error()) - } - - obj["nextRotationTime"] = nextRotation - } - - if !(d.Get("skip_initial_version_creation").(bool)) { - if err := d.Set("skip_initial_version_creation", false); err != nil { - return nil, resource_kms_crypto_key_fmt.Errorf("Error setting skip_initial_version_creation: %s", err) - } - } - - return obj, nil -} - -func resourceKMSCryptoKeyUpdateEncoder(d *resource_kms_crypto_key_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - if d.HasChange("rotation_period") && d.Get("rotation_period") != "" { - rotationPeriod := d.Get("rotation_period").(string) - nextRotation, err := kmsCryptoKeyNextRotation(resource_kms_crypto_key_time.Now(), rotationPeriod) - - if err != nil { - return nil, resource_kms_crypto_key_fmt.Errorf("Error setting CryptoKey rotation period: %s", err.Error()) - } - - obj["nextRotationTime"] = nextRotation - } - - return obj, nil -} - -func resourceKMSCryptoKeyDecoder(d *resource_kms_crypto_key_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - - res["name"] = d.Get("name").(string) - return res, nil -} - -func resourceKMSCryptoKeyResourceV0() *resource_kms_crypto_key_schema.Resource { - return &resource_kms_crypto_key_schema.Resource{ - Schema: map[string]*resource_kms_crypto_key_schema.Schema{ - "name": { - Type: resource_kms_crypto_key_schema.TypeString, - Required: true, - }, - "key_ring": { - Type: resource_kms_crypto_key_schema.TypeString, - Required: true, - }, - "rotation_period": { - Type: resource_kms_crypto_key_schema.TypeString, - Optional: true, - }, - "version_template": { - 
Type: resource_kms_crypto_key_schema.TypeList, - Optional: true, - }, - "self_link": { - Type: resource_kms_crypto_key_schema.TypeString, - }, - }, - } -} - -func resourceKMSCryptoKeyUpgradeV0(_ resource_kms_crypto_key_context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { - resource_kms_crypto_key_log.Printf("[DEBUG] Attributes before migration: %#v", rawState) - - config := meta.(*Config) - keyRingId := rawState["key_ring"].(string) - parsed, err := parseKmsKeyRingId(keyRingId, config) - if err != nil { - return nil, err - } - rawState["key_ring"] = parsed.keyRingId() - - resource_kms_crypto_key_log.Printf("[DEBUG] Attributes after migration: %#v", rawState) - return rawState, nil -} - -func resourceKMSKeyRing() *resource_kms_key_ring_schema.Resource { - return &resource_kms_key_ring_schema.Resource{ - Create: resourceKMSKeyRingCreate, - Read: resourceKMSKeyRingRead, - Delete: resourceKMSKeyRingDelete, - - Importer: &resource_kms_key_ring_schema.ResourceImporter{ - State: resourceKMSKeyRingImport, - }, - - Timeouts: &resource_kms_key_ring_schema.ResourceTimeout{ - Create: resource_kms_key_ring_schema.DefaultTimeout(4 * resource_kms_key_ring_time.Minute), - Delete: resource_kms_key_ring_schema.DefaultTimeout(4 * resource_kms_key_ring_time.Minute), - }, - - Schema: map[string]*resource_kms_key_ring_schema.Schema{ - "location": { - Type: resource_kms_key_ring_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The location for the KeyRing. 
-A full list of valid locations can be found by running 'gcloud kms locations list'.`, - }, - "name": { - Type: resource_kms_key_ring_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name for the KeyRing.`, - }, - "project": { - Type: resource_kms_key_ring_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceKMSKeyRingCreate(d *resource_kms_key_ring_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandKMSKeyRingName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_kms_key_ring_reflect.ValueOf(nameProp)) && (ok || !resource_kms_key_ring_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - locationProp, err := expandKMSKeyRingLocation(d.Get("location"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("location"); !isEmptyValue(resource_kms_key_ring_reflect.ValueOf(locationProp)) && (ok || !resource_kms_key_ring_reflect.DeepEqual(v, locationProp)) { - obj["location"] = locationProp - } - - obj, err = resourceKMSKeyRingEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}projects/{{project}}/locations/{{location}}/keyRings?keyRingId={{name}}") - if err != nil { - return err - } - - resource_kms_key_ring_log.Printf("[DEBUG] Creating new KeyRing: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_kms_key_ring_fmt.Errorf("Error fetching project for KeyRing: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", 
billingProject, url, userAgent, obj, d.Timeout(resource_kms_key_ring_schema.TimeoutCreate)) - if err != nil { - return resource_kms_key_ring_fmt.Errorf("Error creating KeyRing: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/keyRings/{{name}}") - if err != nil { - return resource_kms_key_ring_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_kms_key_ring_log.Printf("[DEBUG] Finished creating KeyRing %q: %#v", d.Id(), res) - - return resourceKMSKeyRingRead(d, meta) -} - -func resourceKMSKeyRingRead(d *resource_kms_key_ring_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}projects/{{project}}/locations/{{location}}/keyRings/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_kms_key_ring_fmt.Errorf("Error fetching project for KeyRing: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_kms_key_ring_fmt.Sprintf("KMSKeyRing %q", d.Id())) - } - - res, err = resourceKMSKeyRingDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_kms_key_ring_log.Printf("[DEBUG] Removing KMSKeyRing because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_kms_key_ring_fmt.Errorf("Error reading KeyRing: %s", err) - } - - if err := d.Set("name", flattenKMSKeyRingName(res["name"], d, config)); err != nil { - return resource_kms_key_ring_fmt.Errorf("Error reading KeyRing: %s", err) - } - - return nil -} - -func 
resourceKMSKeyRingDelete(d *resource_kms_key_ring_schema.ResourceData, meta interface{}) error { - resource_kms_key_ring_log.Printf("[WARNING] KMS KeyRing resources"+ - " cannot be deleted from Google Cloud. The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceKMSKeyRingImport(d *resource_kms_key_ring_schema.ResourceData, meta interface{}) ([]*resource_kms_key_ring_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/keyRings/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/keyRings/{{name}}") - if err != nil { - return nil, resource_kms_key_ring_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_kms_key_ring_schema.ResourceData{d}, nil -} - -func flattenKMSKeyRingName(v interface{}, d *resource_kms_key_ring_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandKMSKeyRingName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandKMSKeyRingLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceKMSKeyRingEncoder(d *resource_kms_key_ring_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - return nil, nil -} - -func resourceKMSKeyRingDecoder(d *resource_kms_key_ring_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - - res["name"] = d.Get("name").(string) - return res, nil -} - -func resourceKMSKeyRingImportJob() *resource_kms_key_ring_import_job_schema.Resource { - return &resource_kms_key_ring_import_job_schema.Resource{ - Create: 
resourceKMSKeyRingImportJobCreate, - Read: resourceKMSKeyRingImportJobRead, - Delete: resourceKMSKeyRingImportJobDelete, - - Importer: &resource_kms_key_ring_import_job_schema.ResourceImporter{ - State: resourceKMSKeyRingImportJobImport, - }, - - Timeouts: &resource_kms_key_ring_import_job_schema.ResourceTimeout{ - Create: resource_kms_key_ring_import_job_schema.DefaultTimeout(4 * resource_kms_key_ring_import_job_time.Minute), - Delete: resource_kms_key_ring_import_job_schema.DefaultTimeout(4 * resource_kms_key_ring_import_job_time.Minute), - }, - - Schema: map[string]*resource_kms_key_ring_import_job_schema.Schema{ - "import_job_id": { - Type: resource_kms_key_ring_import_job_schema.TypeString, - Required: true, - ForceNew: true, - Description: `It must be unique within a KeyRing and match the regular expression [a-zA-Z0-9_-]{1,63}`, - }, - "import_method": { - Type: resource_kms_key_ring_import_job_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_kms_key_ring_import_job_validation.StringInSlice([]string{"RSA_OAEP_3072_SHA1_AES_256", "RSA_OAEP_4096_SHA1_AES_256"}, false), - Description: `The wrapping method to be used for incoming key material. Possible values: ["RSA_OAEP_3072_SHA1_AES_256", "RSA_OAEP_4096_SHA1_AES_256"]`, - }, - "key_ring": { - Type: resource_kms_key_ring_import_job_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: kmsCryptoKeyRingsEquivalent, - Description: `The KeyRing that this import job belongs to. -Format: ''projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}''.`, - }, - "protection_level": { - Type: resource_kms_key_ring_import_job_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_kms_key_ring_import_job_validation.StringInSlice([]string{"SOFTWARE", "HSM", "EXTERNAL"}, false), - Description: `The protection level of the ImportJob. This must match the protectionLevel of the -versionTemplate on the CryptoKey you attempt to import into. 
Possible values: ["SOFTWARE", "HSM", "EXTERNAL"]`, - }, - "attestation": { - Type: resource_kms_key_ring_import_job_schema.TypeList, - Computed: true, - Description: `Statement that was generated and signed by the key creator (for example, an HSM) at key creation time. -Use this statement to verify attributes of the key as stored on the HSM, independently of Google. -Only present if the chosen ImportMethod is one with a protection level of HSM.`, - Elem: &resource_kms_key_ring_import_job_schema.Resource{ - Schema: map[string]*resource_kms_key_ring_import_job_schema.Schema{ - "content": { - Type: resource_kms_key_ring_import_job_schema.TypeString, - Computed: true, - Description: `The attestation data provided by the HSM when the key operation was performed. -A base64-encoded string.`, - }, - "format": { - Type: resource_kms_key_ring_import_job_schema.TypeString, - Computed: true, - Description: `The format of the attestation data.`, - }, - }, - }, - }, - "expire_time": { - Type: resource_kms_key_ring_import_job_schema.TypeString, - Computed: true, - Description: `The time at which this resource is scheduled for expiration and can no longer be used. -This is in RFC3339 text format.`, - }, - "name": { - Type: resource_kms_key_ring_import_job_schema.TypeString, - Computed: true, - Description: `The resource name for this ImportJob in the format projects/*/locations/*/keyRings/*/importJobs/*.`, - }, - "public_key": { - Type: resource_kms_key_ring_import_job_schema.TypeList, - Computed: true, - Description: `The public key with which to wrap key material prior to import. Only returned if state is 'ACTIVE'.`, - Elem: &resource_kms_key_ring_import_job_schema.Resource{ - Schema: map[string]*resource_kms_key_ring_import_job_schema.Schema{ - "pem": { - Type: resource_kms_key_ring_import_job_schema.TypeString, - Computed: true, - Description: `The public key, encoded in PEM format. 
For more information, see the RFC 7468 sections -for General Considerations and Textual Encoding of Subject Public Key Info.`, - }, - }, - }, - }, - "state": { - Type: resource_kms_key_ring_import_job_schema.TypeString, - Computed: true, - Description: `The current state of the ImportJob, indicating if it can be used.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceKMSKeyRingImportJobCreate(d *resource_kms_key_ring_import_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - importMethodProp, err := expandKMSKeyRingImportJobImportMethod(d.Get("import_method"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("import_method"); !isEmptyValue(resource_kms_key_ring_import_job_reflect.ValueOf(importMethodProp)) && (ok || !resource_kms_key_ring_import_job_reflect.DeepEqual(v, importMethodProp)) { - obj["importMethod"] = importMethodProp - } - protectionLevelProp, err := expandKMSKeyRingImportJobProtectionLevel(d.Get("protection_level"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("protection_level"); !isEmptyValue(resource_kms_key_ring_import_job_reflect.ValueOf(protectionLevelProp)) && (ok || !resource_kms_key_ring_import_job_reflect.DeepEqual(v, protectionLevelProp)) { - obj["protectionLevel"] = protectionLevelProp - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{key_ring}}/importJobs?importJobId={{import_job_id}}") - if err != nil { - return err - } - - resource_kms_key_ring_import_job_log.Printf("[DEBUG] Creating new KeyRingImportJob: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_kms_key_ring_import_job_schema.TimeoutCreate)) - if err != 
nil { - return resource_kms_key_ring_import_job_fmt.Errorf("Error creating KeyRingImportJob: %s", err) - } - if err := d.Set("name", flattenKMSKeyRingImportJobName(res["name"], d, config)); err != nil { - return resource_kms_key_ring_import_job_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_kms_key_ring_import_job_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_kms_key_ring_import_job_log.Printf("[DEBUG] Finished creating KeyRingImportJob %q: %#v", d.Id(), res) - - return resourceKMSKeyRingImportJobRead(d, meta) -} - -func resourceKMSKeyRingImportJobRead(d *resource_kms_key_ring_import_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_kms_key_ring_import_job_fmt.Sprintf("KMSKeyRingImportJob %q", d.Id())) - } - - if err := d.Set("name", flattenKMSKeyRingImportJobName(res["name"], d, config)); err != nil { - return resource_kms_key_ring_import_job_fmt.Errorf("Error reading KeyRingImportJob: %s", err) - } - if err := d.Set("import_method", flattenKMSKeyRingImportJobImportMethod(res["importMethod"], d, config)); err != nil { - return resource_kms_key_ring_import_job_fmt.Errorf("Error reading KeyRingImportJob: %s", err) - } - if err := d.Set("protection_level", flattenKMSKeyRingImportJobProtectionLevel(res["protectionLevel"], d, config)); err != nil { - return resource_kms_key_ring_import_job_fmt.Errorf("Error reading KeyRingImportJob: %s", err) - } - if err 
:= d.Set("expire_time", flattenKMSKeyRingImportJobExpireTime(res["expireTime"], d, config)); err != nil { - return resource_kms_key_ring_import_job_fmt.Errorf("Error reading KeyRingImportJob: %s", err) - } - if err := d.Set("state", flattenKMSKeyRingImportJobState(res["state"], d, config)); err != nil { - return resource_kms_key_ring_import_job_fmt.Errorf("Error reading KeyRingImportJob: %s", err) - } - if err := d.Set("public_key", flattenKMSKeyRingImportJobPublicKey(res["publicKey"], d, config)); err != nil { - return resource_kms_key_ring_import_job_fmt.Errorf("Error reading KeyRingImportJob: %s", err) - } - if err := d.Set("attestation", flattenKMSKeyRingImportJobAttestation(res["attestation"], d, config)); err != nil { - return resource_kms_key_ring_import_job_fmt.Errorf("Error reading KeyRingImportJob: %s", err) - } - - return nil -} - -func resourceKMSKeyRingImportJobDelete(d *resource_kms_key_ring_import_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_kms_key_ring_import_job_log.Printf("[DEBUG] Deleting KeyRingImportJob %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_kms_key_ring_import_job_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "KeyRingImportJob") - } - - resource_kms_key_ring_import_job_log.Printf("[DEBUG] Finished deleting KeyRingImportJob %q: %#v", d.Id(), res) - return nil -} - -func resourceKMSKeyRingImportJobImport(d *resource_kms_key_ring_import_job_schema.ResourceData, meta interface{}) ([]*resource_kms_key_ring_import_job_schema.ResourceData, error) { 
- - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - stringParts := resource_kms_key_ring_import_job_strings.Split(d.Get("name").(string), "/") - if len(stringParts) != 8 { - return nil, resource_kms_key_ring_import_job_fmt.Errorf( - "Saw %s when the name is expected to have shape %s", - d.Get("name"), - "projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/importJobs/{{importJobId}}", - ) - } - - if err := d.Set("key_ring", stringParts[3]); err != nil { - return nil, resource_kms_key_ring_import_job_fmt.Errorf("Error setting key_ring: %s", err) - } - if err := d.Set("import_job_id", stringParts[5]); err != nil { - return nil, resource_kms_key_ring_import_job_fmt.Errorf("Error setting import_job_id: %s", err) - } - return []*resource_kms_key_ring_import_job_schema.ResourceData{d}, nil -} - -func flattenKMSKeyRingImportJobName(v interface{}, d *resource_kms_key_ring_import_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSKeyRingImportJobImportMethod(v interface{}, d *resource_kms_key_ring_import_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSKeyRingImportJobProtectionLevel(v interface{}, d *resource_kms_key_ring_import_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSKeyRingImportJobExpireTime(v interface{}, d *resource_kms_key_ring_import_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSKeyRingImportJobState(v interface{}, d *resource_kms_key_ring_import_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSKeyRingImportJobPublicKey(v interface{}, d *resource_kms_key_ring_import_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["pem"] = - flattenKMSKeyRingImportJobPublicKeyPem(original["pem"], d, config) - return []interface{}{transformed} -} - -func flattenKMSKeyRingImportJobPublicKeyPem(v interface{}, d *resource_kms_key_ring_import_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSKeyRingImportJobAttestation(v interface{}, d *resource_kms_key_ring_import_job_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["format"] = - flattenKMSKeyRingImportJobAttestationFormat(original["format"], d, config) - transformed["content"] = - flattenKMSKeyRingImportJobAttestationContent(original["content"], d, config) - return []interface{}{transformed} -} - -func flattenKMSKeyRingImportJobAttestationFormat(v interface{}, d *resource_kms_key_ring_import_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenKMSKeyRingImportJobAttestationContent(v interface{}, d *resource_kms_key_ring_import_job_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandKMSKeyRingImportJobImportMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandKMSKeyRingImportJobProtectionLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceKMSSecretCiphertext() *resource_kms_secret_ciphertext_schema.Resource { - return &resource_kms_secret_ciphertext_schema.Resource{ - Create: resourceKMSSecretCiphertextCreate, - Read: resourceKMSSecretCiphertextRead, - Delete: resourceKMSSecretCiphertextDelete, - - Timeouts: &resource_kms_secret_ciphertext_schema.ResourceTimeout{ - Create: resource_kms_secret_ciphertext_schema.DefaultTimeout(4 * resource_kms_secret_ciphertext_time.Minute), - Delete: 
resource_kms_secret_ciphertext_schema.DefaultTimeout(4 * resource_kms_secret_ciphertext_time.Minute), - }, - - Schema: map[string]*resource_kms_secret_ciphertext_schema.Schema{ - "crypto_key": { - Type: resource_kms_secret_ciphertext_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The full name of the CryptoKey that will be used to encrypt the provided plaintext. -Format: ''projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}''`, - }, - "plaintext": { - Type: resource_kms_secret_ciphertext_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The plaintext to be encrypted.`, - Sensitive: true, - }, - "additional_authenticated_data": { - Type: resource_kms_secret_ciphertext_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The additional authenticated data used for integrity checks during encryption and decryption.`, - Sensitive: true, - }, - "ciphertext": { - Type: resource_kms_secret_ciphertext_schema.TypeString, - Computed: true, - Description: `Contains the result of encrypting the provided plaintext, encoded in base64.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceKMSSecretCiphertextCreate(d *resource_kms_secret_ciphertext_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - plaintextProp, err := expandKMSSecretCiphertextPlaintext(d.Get("plaintext"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("plaintext"); !isEmptyValue(resource_kms_secret_ciphertext_reflect.ValueOf(plaintextProp)) && (ok || !resource_kms_secret_ciphertext_reflect.DeepEqual(v, plaintextProp)) { - obj["plaintext"] = plaintextProp - } - additionalAuthenticatedDataProp, err := expandKMSSecretCiphertextAdditionalAuthenticatedData(d.Get("additional_authenticated_data"), d, config) - if err != nil { 
- return err - } else if v, ok := d.GetOkExists("additional_authenticated_data"); !isEmptyValue(resource_kms_secret_ciphertext_reflect.ValueOf(additionalAuthenticatedDataProp)) && (ok || !resource_kms_secret_ciphertext_reflect.DeepEqual(v, additionalAuthenticatedDataProp)) { - obj["additionalAuthenticatedData"] = additionalAuthenticatedDataProp - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}:encrypt") - if err != nil { - return err - } - - resource_kms_secret_ciphertext_log.Printf("[DEBUG] Creating new SecretCiphertext: %#v", obj) - billingProject := "" - - if parts := resource_kms_secret_ciphertext_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_kms_secret_ciphertext_schema.TimeoutCreate)) - if err != nil { - return resource_kms_secret_ciphertext_fmt.Errorf("Error creating SecretCiphertext: %s", err) - } - - id, err := replaceVars(d, config, "{{crypto_key}}/{{ciphertext}}") - if err != nil { - return resource_kms_secret_ciphertext_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - ciphertext, ok := res["ciphertext"] - if !ok { - return resource_kms_secret_ciphertext_fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - if err := d.Set("ciphertext", ciphertext.(string)); err != nil { - return resource_kms_secret_ciphertext_fmt.Errorf("Error setting ciphertext: %s", err) - } - - id, err = replaceVars(d, config, "{{crypto_key}}/{{ciphertext}}") - if err != nil { - return resource_kms_secret_ciphertext_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_kms_secret_ciphertext_log.Printf("[DEBUG] Finished creating SecretCiphertext %q: %#v", d.Id(), res) - - return resourceKMSSecretCiphertextRead(d, meta) -} - -func resourceKMSSecretCiphertextRead(d *resource_kms_secret_ciphertext_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{KMSBasePath}}{{crypto_key}}") - if err != nil { - return err - } - - billingProject := "" - - if parts := resource_kms_secret_ciphertext_regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { - billingProject = parts[1] - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_kms_secret_ciphertext_fmt.Sprintf("KMSSecretCiphertext %q", d.Id())) - } - - res, err = resourceKMSSecretCiphertextDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_kms_secret_ciphertext_log.Printf("[DEBUG] Removing KMSSecretCiphertext because it no longer exists.") - d.SetId("") - return nil - } - - return nil -} - -func resourceKMSSecretCiphertextDelete(d *resource_kms_secret_ciphertext_schema.ResourceData, meta interface{}) error { - resource_kms_secret_ciphertext_log.Printf("[WARNING] KMS SecretCiphertext resources"+ - " cannot be deleted from Google Cloud. 
The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func expandKMSSecretCiphertextPlaintext(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - return nil, nil - } - - return resource_kms_secret_ciphertext_base64.StdEncoding.EncodeToString([]byte(v.(string))), nil -} - -func expandKMSSecretCiphertextAdditionalAuthenticatedData(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - return nil, nil - } - - return resource_kms_secret_ciphertext_base64.StdEncoding.EncodeToString([]byte(v.(string))), nil -} - -func resourceKMSSecretCiphertextDecoder(d *resource_kms_secret_ciphertext_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - return res, nil -} - -var loggingBillingAccountBucketConfigSchema = map[string]*resource_logging_billing_account_bucket_config_schema.Schema{ - "billing_account": { - Type: resource_logging_billing_account_bucket_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The parent resource that contains the logging bucket.`, - }, -} - -func billingAccountBucketConfigID(d *resource_logging_billing_account_bucket_config_schema.ResourceData, config *Config) (string, error) { - billingAccount := d.Get("billing_account").(string) - location := d.Get("location").(string) - bucketID := d.Get("bucket_id").(string) - - if !resource_logging_billing_account_bucket_config_strings.HasPrefix(billingAccount, "billingAccounts") { - billingAccount = "billingAccounts/" + billingAccount - } - - id := resource_logging_billing_account_bucket_config_fmt.Sprintf("%s/locations/%s/buckets/%s", billingAccount, location, bucketID) - return id, nil -} - -func ResourceLoggingBillingAccountBucketConfig() *resource_logging_billing_account_bucket_config_schema.Resource { - return 
ResourceLoggingBucketConfig("billing_account", loggingBillingAccountBucketConfigSchema, billingAccountBucketConfigID) -} - -func resourceLoggingBillingAccountSink() *resource_logging_billing_account_sink_schema.Resource { - schm := &resource_logging_billing_account_sink_schema.Resource{ - Create: resourceLoggingBillingAccountSinkCreate, - Read: resourceLoggingBillingAccountSinkRead, - Delete: resourceLoggingBillingAccountSinkDelete, - Update: resourceLoggingBillingAccountSinkUpdate, - Schema: resourceLoggingSinkSchema(), - Importer: &resource_logging_billing_account_sink_schema.ResourceImporter{ - State: resourceLoggingSinkImportState("billing_account"), - }, - UseJSONNumber: true, - } - schm.Schema["billing_account"] = &resource_logging_billing_account_sink_schema.Schema{ - Type: resource_logging_billing_account_sink_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The billing account exported to the sink.`, - } - return schm -} - -func resourceLoggingBillingAccountSinkCreate(d *resource_logging_billing_account_sink_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - id, sink := expandResourceLoggingSink(d, "billingAccounts", d.Get("billing_account").(string)) - - _, err = config.NewLoggingClient(userAgent).BillingAccounts.Sinks.Create(id.parent(), sink).UniqueWriterIdentity(true).Do() - if err != nil { - return err - } - - d.SetId(id.canonicalId()) - return resourceLoggingBillingAccountSinkRead(d, meta) -} - -func resourceLoggingBillingAccountSinkRead(d *resource_logging_billing_account_sink_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - sink, err := config.NewLoggingClient(userAgent).BillingAccounts.Sinks.Get(d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, 
resource_logging_billing_account_sink_fmt.Sprintf("Billing Logging Sink %s", d.Get("name").(string))) - } - - if err := flattenResourceLoggingSink(d, sink); err != nil { - return err - } - - return nil -} - -func resourceLoggingBillingAccountSinkUpdate(d *resource_logging_billing_account_sink_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - sink, updateMask := expandResourceLoggingSinkForUpdate(d) - - _, err = config.NewLoggingClient(userAgent).BillingAccounts.Sinks.Patch(d.Id(), sink). - UpdateMask(updateMask).UniqueWriterIdentity(true).Do() - if err != nil { - return err - } - - return resourceLoggingBillingAccountSinkRead(d, meta) -} - -func resourceLoggingBillingAccountSinkDelete(d *resource_logging_billing_account_sink_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - _, err = config.NewLoggingClient(userAgent).Projects.Sinks.Delete(d.Id()).Do() - if err != nil { - return err - } - - return nil -} - -var loggingBucketConfigSchema = map[string]*resource_logging_bucket_config_schema.Schema{ - "name": { - Type: resource_logging_bucket_config_schema.TypeString, - Computed: true, - Description: `The resource name of the bucket`, - }, - "location": { - Type: resource_logging_bucket_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The location of the bucket.`, - }, - "bucket_id": { - Type: resource_logging_bucket_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the logging bucket. 
Logging automatically creates two log buckets: _Required and _Default.`, - }, - "description": { - Type: resource_logging_bucket_config_schema.TypeString, - Optional: true, - Computed: true, - Description: `An optional description for this bucket.`, - }, - "retention_days": { - Type: resource_logging_bucket_config_schema.TypeInt, - Optional: true, - Default: 30, - Description: `Logs will be retained by default for this amount of time, after which they will automatically be deleted. The minimum retention period is 1 day. If this value is set to zero at bucket creation time, the default time of 30 days will be used.`, - }, - "lifecycle_state": { - Type: resource_logging_bucket_config_schema.TypeString, - Computed: true, - Description: `The bucket's lifecycle such as active or deleted.`, - }, -} - -type loggingBucketConfigIDFunc func(d *resource_logging_bucket_config_schema.ResourceData, config *Config) (string, error) - -func ResourceLoggingBucketConfig(parentType string, parentSpecificSchema map[string]*resource_logging_bucket_config_schema.Schema, iDFunc loggingBucketConfigIDFunc) *resource_logging_bucket_config_schema.Resource { - return &resource_logging_bucket_config_schema.Resource{ - Create: resourceLoggingBucketConfigAcquireOrCreate(parentType, iDFunc), - Read: resourceLoggingBucketConfigRead, - Update: resourceLoggingBucketConfigUpdate, - Delete: resourceLoggingBucketConfigDelete, - Importer: &resource_logging_bucket_config_schema.ResourceImporter{ - State: resourceLoggingBucketConfigImportState(parentType), - }, - Schema: mergeSchemas(loggingBucketConfigSchema, parentSpecificSchema), - UseJSONNumber: true, - } -} - -var loggingBucketConfigIDRegex = resource_logging_bucket_config_regexp.MustCompile("(.+)/(.+)/locations/(.+)/buckets/(.+)") - -func resourceLoggingBucketConfigImportState(parent string) resource_logging_bucket_config_schema.StateFunc { - return func(d *resource_logging_bucket_config_schema.ResourceData, meta interface{}) 
([]*resource_logging_bucket_config_schema.ResourceData, error) { - parts := loggingBucketConfigIDRegex.FindStringSubmatch(d.Id()) - if parts == nil { - return nil, resource_logging_bucket_config_fmt.Errorf("unable to parse logging sink id %#v", d.Id()) - } - - if len(parts) != 5 { - return nil, resource_logging_bucket_config_fmt.Errorf("Invalid id format. Format should be '{{parent}}/{{parent_id}}/locations/{{location}}/buckets/{{bucket_id}} with parent in %s", loggingSinkResourceTypes) - } - - validLoggingType := false - for _, v := range loggingSinkResourceTypes { - if v == parts[1] { - validLoggingType = true - break - } - } - if !validLoggingType { - return nil, resource_logging_bucket_config_fmt.Errorf("Logging parent type %s is not valid. Valid resource types: %#v", parts[1], - loggingSinkResourceTypes) - } - - if err := d.Set(parent, parts[1]+"/"+parts[2]); err != nil { - return nil, resource_logging_bucket_config_fmt.Errorf("Error setting parent: %s", err) - } - - if err := d.Set("location", parts[3]); err != nil { - return nil, resource_logging_bucket_config_fmt.Errorf("Error setting location: %s", err) - } - - if err := d.Set("bucket_id", parts[4]); err != nil { - return nil, resource_logging_bucket_config_fmt.Errorf("Error setting bucket_id: %s", err) - } - - return []*resource_logging_bucket_config_schema.ResourceData{d}, nil - } -} - -func resourceLoggingBucketConfigAcquireOrCreate(parentType string, iDFunc loggingBucketConfigIDFunc) func(*resource_logging_bucket_config_schema.ResourceData, interface{}) error { - return func(d *resource_logging_bucket_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - id, err := iDFunc(d, config) - if err != nil { - return err - } - - d.SetId(id) - - if parentType == "project" { - - resource_logging_bucket_config_log.Printf("[DEBUG] Fetching logging bucket config: %#v", d.Id()) - url, 
err := replaceVars(d, config, resource_logging_bucket_config_fmt.Sprintf("{{LoggingBasePath}}%s", d.Id())) - if err != nil { - return err - } - - res, _ := sendRequest(config, "GET", "", url, userAgent, nil) - if res == nil { - resource_logging_bucket_config_log.Printf("[DEGUG] Loggin Bucket not exist %s", d.Id()) - return resourceLoggingBucketConfigCreate(d, meta) - } - } - - return resourceLoggingBucketConfigUpdate(d, meta) - } -} - -func resourceLoggingBucketConfigCreate(d *resource_logging_bucket_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["name"] = d.Get("name") - obj["description"] = d.Get("description") - obj["retentionDays"] = d.Get("retention_days") - obj["locked"] = d.Get("locked") - - url, err := replaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/locations/{{location}}/buckets?bucketId={{bucket_id}}") - if err != nil { - return err - } - - resource_logging_bucket_config_log.Printf("[DEBUG] Creating new Bucket: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return err - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_logging_bucket_config_schema.TimeoutCreate)) - if err != nil { - return resource_logging_bucket_config_fmt.Errorf("Error creating Bucket: %s", err) - } - - resource_logging_bucket_config_log.Printf("[DEBUG] Finished creating Bucket %q: %#v", d.Id(), res) - - return resourceLoggingBucketConfigRead(d, meta) -} - -func resourceLoggingBucketConfigRead(d *resource_logging_bucket_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err 
!= nil { - return err - } - - resource_logging_bucket_config_log.Printf("[DEBUG] Fetching logging bucket config: %#v", d.Id()) - - url, err := replaceVars(d, config, resource_logging_bucket_config_fmt.Sprintf("{{LoggingBasePath}}%s", d.Id())) - if err != nil { - return err - } - - res, err := sendRequest(config, "GET", "", url, userAgent, nil) - if err != nil { - resource_logging_bucket_config_log.Printf("[WARN] Unable to acquire logging bucket config at %s", d.Id()) - - d.SetId("") - return err - } - - if err := d.Set("name", res["name"]); err != nil { - return resource_logging_bucket_config_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("description", res["description"]); err != nil { - return resource_logging_bucket_config_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("lifecycle_state", res["lifecycleState"]); err != nil { - return resource_logging_bucket_config_fmt.Errorf("Error setting lifecycle_state: %s", err) - } - if err := d.Set("retention_days", res["retentionDays"]); err != nil { - return resource_logging_bucket_config_fmt.Errorf("Error setting retention_days: %s", err) - } - - return nil -} - -func resourceLoggingBucketConfigUpdate(d *resource_logging_bucket_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - - url, err := replaceVars(d, config, resource_logging_bucket_config_fmt.Sprintf("{{LoggingBasePath}}%s", d.Id())) - if err != nil { - return err - } - - obj["retentionDays"] = d.Get("retention_days") - obj["description"] = d.Get("description") - - updateMask := []string{} - if d.HasChange("retention_days") { - updateMask = append(updateMask, "retentionDays") - } - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - url, err = addQueryParams(url, map[string]string{"updateMask": 
resource_logging_bucket_config_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - _, err = sendRequestWithTimeout(config, "PATCH", "", url, userAgent, obj, d.Timeout(resource_logging_bucket_config_schema.TimeoutUpdate)) - if err != nil { - return resource_logging_bucket_config_fmt.Errorf("Error updating Logging Bucket Config %q: %s", d.Id(), err) - } - - return resourceLoggingBucketConfigRead(d, meta) - -} - -func resourceLoggingBucketConfigDelete(d *resource_logging_bucket_config_schema.ResourceData, meta interface{}) error { - - resource_logging_bucket_config_log.Printf("[WARN] Logging bucket configs cannot be deleted. Removing logging bucket config from state: %#v", d.Id()) - d.SetId("") - - return nil -} - -var LoggingExclusionBaseSchema = map[string]*resource_logging_exclusion_schema.Schema{ - "filter": { - Type: resource_logging_exclusion_schema.TypeString, - Required: true, - Description: `The filter to apply when excluding logs. Only log entries that match the filter are excluded.`, - }, - "name": { - Type: resource_logging_exclusion_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the logging exclusion.`, - }, - "description": { - Type: resource_logging_exclusion_schema.TypeString, - Optional: true, - Description: `A human-readable description.`, - }, - "disabled": { - Type: resource_logging_exclusion_schema.TypeBool, - Optional: true, - Description: `Whether this exclusion rule should be disabled or not. 
This defaults to false.`, - }, -} - -func ResourceLoggingExclusion(parentSpecificSchema map[string]*resource_logging_exclusion_schema.Schema, newUpdaterFunc newResourceLoggingExclusionUpdaterFunc, resourceIdParser resourceIdParserFunc) *resource_logging_exclusion_schema.Resource { - return &resource_logging_exclusion_schema.Resource{ - Create: resourceLoggingExclusionCreate(newUpdaterFunc), - Read: resourceLoggingExclusionRead(newUpdaterFunc), - Update: resourceLoggingExclusionUpdate(newUpdaterFunc), - Delete: resourceLoggingExclusionDelete(newUpdaterFunc), - - Importer: &resource_logging_exclusion_schema.ResourceImporter{ - State: resourceLoggingExclusionImportState(resourceIdParser), - }, - - Schema: mergeSchemas(LoggingExclusionBaseSchema, parentSpecificSchema), - UseJSONNumber: true, - } -} - -func resourceLoggingExclusionCreate(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) resource_logging_exclusion_schema.CreateFunc { - return func(d *resource_logging_exclusion_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - id, exclusion := expandResourceLoggingExclusion(d, updater.GetResourceType(), updater.GetResourceId()) - - mutexKV.Lock(id.parent()) - defer mutexKV.Unlock(id.parent()) - - err = updater.CreateLoggingExclusion(id.parent(), exclusion) - if err != nil { - return err - } - - d.SetId(id.canonicalId()) - - return resourceLoggingExclusionRead(newUpdaterFunc)(d, meta) - } -} - -func resourceLoggingExclusionRead(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) resource_logging_exclusion_schema.ReadFunc { - return func(d *resource_logging_exclusion_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - exclusion, err := updater.ReadLoggingExclusion(d.Id()) - - if err != nil { - return handleNotFoundError(err, d, 
resource_logging_exclusion_fmt.Sprintf("Logging Exclusion %s", d.Get("name").(string))) - } - - if err := flattenResourceLoggingExclusion(d, exclusion); err != nil { - return err - } - - if updater.GetResourceType() == "projects" { - if err := d.Set("project", updater.GetResourceId()); err != nil { - return resource_logging_exclusion_fmt.Errorf("Error setting project: %s", err) - } - } - - return nil - } -} - -func resourceLoggingExclusionUpdate(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) resource_logging_exclusion_schema.UpdateFunc { - return func(d *resource_logging_exclusion_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - id, _ := expandResourceLoggingExclusion(d, updater.GetResourceType(), updater.GetResourceId()) - exclusion, updateMask := expandResourceLoggingExclusionForUpdate(d) - - mutexKV.Lock(id.parent()) - defer mutexKV.Unlock(id.parent()) - - err = updater.UpdateLoggingExclusion(d.Id(), exclusion, updateMask) - if err != nil { - return err - } - - return resourceLoggingExclusionRead(newUpdaterFunc)(d, meta) - } -} - -func resourceLoggingExclusionDelete(newUpdaterFunc newResourceLoggingExclusionUpdaterFunc) resource_logging_exclusion_schema.DeleteFunc { - return func(d *resource_logging_exclusion_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - updater, err := newUpdaterFunc(d, config) - if err != nil { - return err - } - - id, _ := expandResourceLoggingExclusion(d, updater.GetResourceType(), updater.GetResourceId()) - - mutexKV.Lock(id.parent()) - defer mutexKV.Unlock(id.parent()) - - err = updater.DeleteLoggingExclusion(d.Id()) - if err != nil { - return err - } - - d.SetId("") - return nil - } -} - -func resourceLoggingExclusionImportState(resourceIdParser resourceIdParserFunc) resource_logging_exclusion_schema.StateFunc { - return func(d *resource_logging_exclusion_schema.ResourceData, meta interface{}) 
([]*resource_logging_exclusion_schema.ResourceData, error) { - config := meta.(*Config) - err := resourceIdParser(d, config) - if err != nil { - return nil, err - } - return []*resource_logging_exclusion_schema.ResourceData{d}, nil - } -} - -func expandResourceLoggingExclusion(d *resource_logging_exclusion_schema.ResourceData, resourceType, resourceId string) (LoggingExclusionId, *resource_logging_exclusion_logging.LogExclusion) { - id := LoggingExclusionId{ - resourceType: resourceType, - resourceId: resourceId, - name: d.Get("name").(string), - } - - exclusion := resource_logging_exclusion_logging.LogExclusion{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - Filter: d.Get("filter").(string), - Disabled: d.Get("disabled").(bool), - } - return id, &exclusion -} - -func flattenResourceLoggingExclusion(d *resource_logging_exclusion_schema.ResourceData, exclusion *resource_logging_exclusion_logging.LogExclusion) error { - if err := d.Set("name", exclusion.Name); err != nil { - return resource_logging_exclusion_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("description", exclusion.Description); err != nil { - return resource_logging_exclusion_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("filter", exclusion.Filter); err != nil { - return resource_logging_exclusion_fmt.Errorf("Error setting filter: %s", err) - } - if err := d.Set("disabled", exclusion.Disabled); err != nil { - return resource_logging_exclusion_fmt.Errorf("Error setting disabled: %s", err) - } - - return nil -} - -func expandResourceLoggingExclusionForUpdate(d *resource_logging_exclusion_schema.ResourceData) (*resource_logging_exclusion_logging.LogExclusion, string) { - - exclusion := resource_logging_exclusion_logging.LogExclusion{} - - var updateMaskArr []string - - if d.HasChange("description") { - exclusion.Description = d.Get("description").(string) - exclusion.ForceSendFields = append(exclusion.ForceSendFields, 
"Description") - updateMaskArr = append(updateMaskArr, "description") - } - - if d.HasChange("filter") { - exclusion.Filter = d.Get("filter").(string) - exclusion.ForceSendFields = append(exclusion.ForceSendFields, "Filter") - updateMaskArr = append(updateMaskArr, "filter") - } - - if d.HasChange("disabled") { - exclusion.Disabled = d.Get("disabled").(bool) - exclusion.ForceSendFields = append(exclusion.ForceSendFields, "Disabled") - updateMaskArr = append(updateMaskArr, "disabled") - } - - updateMask := resource_logging_exclusion_strings.Join(updateMaskArr, ",") - return &exclusion, updateMask -} - -type ResourceLoggingExclusionUpdater interface { - CreateLoggingExclusion(parent string, exclusion *resource_logging_exclusion_logging.LogExclusion) error - ReadLoggingExclusion(id string) (*resource_logging_exclusion_logging.LogExclusion, error) - UpdateLoggingExclusion(id string, exclusion *resource_logging_exclusion_logging.LogExclusion, updateMask string) error - DeleteLoggingExclusion(id string) error - - GetResourceType() string - - GetResourceId() string - - DescribeResource() string -} - -type newResourceLoggingExclusionUpdaterFunc func(d *resource_logging_exclusion_schema.ResourceData, config *Config) (ResourceLoggingExclusionUpdater, error) - -var loggingExclusionResourceTypes = []string{ - "billingAccounts", - "folders", - "organizations", - "projects", -} - -type LoggingExclusionId struct { - resourceType string - resourceId string - name string -} - -var loggingExclusionIdRegex = resource_logging_exclusion_regexp.MustCompile("(.+)/(.+)/exclusions/(.+)") - -func (l LoggingExclusionId) canonicalId() string { - return resource_logging_exclusion_fmt.Sprintf("%s/%s/exclusions/%s", l.resourceType, l.resourceId, l.name) -} - -func (l LoggingExclusionId) parent() string { - return resource_logging_exclusion_fmt.Sprintf("%s/%s", l.resourceType, l.resourceId) -} - -func parseLoggingExclusionId(id string) (*LoggingExclusionId, error) { - parts := 
loggingExclusionIdRegex.FindStringSubmatch(id) - if parts == nil { - return nil, resource_logging_exclusion_fmt.Errorf("unable to parse logging exclusion id %#v", id) - } - - validLoggingExclusionResourceType := false - for _, v := range loggingExclusionResourceTypes { - if v == parts[1] { - validLoggingExclusionResourceType = true - break - } - } - - if !validLoggingExclusionResourceType { - return nil, resource_logging_exclusion_fmt.Errorf("Logging resource type %s is not valid. Valid resource types: %#v", parts[1], - loggingExclusionResourceTypes) - } - return &LoggingExclusionId{ - resourceType: parts[1], - resourceId: parts[2], - name: parts[3], - }, nil -} - -var loggingFolderBucketConfigSchema = map[string]*resource_logging_folder_bucket_config_schema.Schema{ - "folder": { - Type: resource_logging_folder_bucket_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The parent resource that contains the logging bucket.`, - }, -} - -func folderBucketConfigID(d *resource_logging_folder_bucket_config_schema.ResourceData, config *Config) (string, error) { - folder := d.Get("folder").(string) - location := d.Get("location").(string) - bucketID := d.Get("bucket_id").(string) - - if !resource_logging_folder_bucket_config_strings.HasPrefix(folder, "folder") { - folder = "folders/" + folder - } - - id := resource_logging_folder_bucket_config_fmt.Sprintf("%s/locations/%s/buckets/%s", folder, location, bucketID) - return id, nil -} - -func ResourceLoggingFolderBucketConfig() *resource_logging_folder_bucket_config_schema.Resource { - return ResourceLoggingBucketConfig("folder", loggingFolderBucketConfigSchema, folderBucketConfigID) -} - -func resourceLoggingFolderSink() *resource_logging_folder_sink_schema.Resource { - schm := &resource_logging_folder_sink_schema.Resource{ - Create: resourceLoggingFolderSinkCreate, - Read: resourceLoggingFolderSinkRead, - Delete: resourceLoggingFolderSinkDelete, - Update: resourceLoggingFolderSinkUpdate, - Schema: 
resourceLoggingSinkSchema(), - Importer: &resource_logging_folder_sink_schema.ResourceImporter{ - State: resourceLoggingSinkImportState("folder"), - }, - UseJSONNumber: true, - } - schm.Schema["folder"] = &resource_logging_folder_sink_schema.Schema{ - Type: resource_logging_folder_sink_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The folder to be exported to the sink. Note that either [FOLDER_ID] or "folders/[FOLDER_ID]" is accepted.`, - StateFunc: func(v interface{}) string { - return resource_logging_folder_sink_strings.Replace(v.(string), "folders/", "", 1) - }, - } - schm.Schema["include_children"] = &resource_logging_folder_sink_schema.Schema{ - Type: resource_logging_folder_sink_schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - Description: `Whether or not to include children folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided folder are included.`, - } - - return schm -} - -func resourceLoggingFolderSinkCreate(d *resource_logging_folder_sink_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - folder := parseFolderId(d.Get("folder")) - id, sink := expandResourceLoggingSink(d, "folders", folder) - sink.IncludeChildren = d.Get("include_children").(bool) - - _, err = config.NewLoggingClient(userAgent).Folders.Sinks.Create(id.parent(), sink).UniqueWriterIdentity(true).Do() - if err != nil { - return err - } - - d.SetId(id.canonicalId()) - return resourceLoggingFolderSinkRead(d, meta) -} - -func resourceLoggingFolderSinkRead(d *resource_logging_folder_sink_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - sink, err := config.NewLoggingClient(userAgent).Folders.Sinks.Get(d.Id()).Do() 
- if err != nil { - return handleNotFoundError(err, d, resource_logging_folder_sink_fmt.Sprintf("Folder Logging Sink %s", d.Get("name").(string))) - } - - if err := flattenResourceLoggingSink(d, sink); err != nil { - return err - } - - if err := d.Set("include_children", sink.IncludeChildren); err != nil { - return resource_logging_folder_sink_fmt.Errorf("Error setting include_children: %s", err) - } - - return nil -} - -func resourceLoggingFolderSinkUpdate(d *resource_logging_folder_sink_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - sink, updateMask := expandResourceLoggingSinkForUpdate(d) - - sink.IncludeChildren = d.Get("include_children").(bool) - sink.ForceSendFields = append(sink.ForceSendFields, "IncludeChildren") - - _, err = config.NewLoggingClient(userAgent).Folders.Sinks.Patch(d.Id(), sink). - UpdateMask(updateMask).UniqueWriterIdentity(true).Do() - if err != nil { - return err - } - - return resourceLoggingFolderSinkRead(d, meta) -} - -func resourceLoggingFolderSinkDelete(d *resource_logging_folder_sink_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - _, err = config.NewLoggingClient(userAgent).Projects.Sinks.Delete(d.Id()).Do() - if err != nil { - return err - } - - return nil -} - -func resourceLoggingMetric() *resource_logging_metric_schema.Resource { - return &resource_logging_metric_schema.Resource{ - Create: resourceLoggingMetricCreate, - Read: resourceLoggingMetricRead, - Update: resourceLoggingMetricUpdate, - Delete: resourceLoggingMetricDelete, - - Importer: &resource_logging_metric_schema.ResourceImporter{ - State: resourceLoggingMetricImport, - }, - - Timeouts: &resource_logging_metric_schema.ResourceTimeout{ - Create: resource_logging_metric_schema.DefaultTimeout(4 * 
resource_logging_metric_time.Minute), - Update: resource_logging_metric_schema.DefaultTimeout(4 * resource_logging_metric_time.Minute), - Delete: resource_logging_metric_schema.DefaultTimeout(4 * resource_logging_metric_time.Minute), - }, - - Schema: map[string]*resource_logging_metric_schema.Schema{ - "filter": { - Type: resource_logging_metric_schema.TypeString, - Required: true, - Description: `An advanced logs filter (https://cloud.google.com/logging/docs/view/advanced-filters) which -is used to match log entries.`, - }, - "metric_descriptor": { - Type: resource_logging_metric_schema.TypeList, - Required: true, - Description: `The metric descriptor associated with the logs-based metric.`, - MaxItems: 1, - Elem: &resource_logging_metric_schema.Resource{ - Schema: map[string]*resource_logging_metric_schema.Schema{ - "metric_kind": { - Type: resource_logging_metric_schema.TypeString, - Required: true, - ValidateFunc: resource_logging_metric_validation.StringInSlice([]string{"DELTA", "GAUGE", "CUMULATIVE"}, false), - Description: `Whether the metric records instantaneous values, changes to a value, etc. -Some combinations of metricKind and valueType might not be supported. -For counter metrics, set this to DELTA. Possible values: ["DELTA", "GAUGE", "CUMULATIVE"]`, - }, - "value_type": { - Type: resource_logging_metric_schema.TypeString, - Required: true, - ValidateFunc: resource_logging_metric_validation.StringInSlice([]string{"BOOL", "INT64", "DOUBLE", "STRING", "DISTRIBUTION", "MONEY"}, false), - Description: `Whether the measurement is an integer, a floating-point number, etc. -Some combinations of metricKind and valueType might not be supported. -For counter metrics, set this to INT64. Possible values: ["BOOL", "INT64", "DOUBLE", "STRING", "DISTRIBUTION", "MONEY"]`, - }, - "display_name": { - Type: resource_logging_metric_schema.TypeString, - Optional: true, - Description: `A concise name for the metric, which can be displayed in user interfaces. 
Use sentence case -without an ending period, for example "Request count". This field is optional but it is -recommended to be set for any metrics associated with user-visible concepts, such as Quota.`, - }, - "labels": { - Type: resource_logging_metric_schema.TypeSet, - Optional: true, - Description: `The set of labels that can be used to describe a specific instance of this metric type. For -example, the appengine.googleapis.com/http/server/response_latencies metric type has a label -for the HTTP response code, response_code, so you can look at latencies for successful responses -or just for responses that failed.`, - Elem: loggingMetricMetricDescriptorLabelsSchema(), - }, - "unit": { - Type: resource_logging_metric_schema.TypeString, - Optional: true, - Description: `The unit in which the metric value is reported. It is only applicable if the valueType is -'INT64', 'DOUBLE', or 'DISTRIBUTION'. The supported units are a subset of -[The Unified Code for Units of Measure](http://unitsofmeasure.org/ucum.html) standard`, - Default: "1", - }, - }, - }, - }, - "name": { - Type: resource_logging_metric_schema.TypeString, - Required: true, - Description: `The client-assigned metric identifier. Examples - "error_count", "nginx/requests". -Metric identifiers are limited to 100 characters and can include only the following -characters A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. 
The forward-slash -character (/) denotes a hierarchy of name pieces, and it cannot be the first character -of the name.`, - }, - "bucket_options": { - Type: resource_logging_metric_schema.TypeList, - Optional: true, - Description: `The bucketOptions are required when the logs-based metric is using a DISTRIBUTION value type and it -describes the bucket boundaries used to create a histogram of the extracted values.`, - MaxItems: 1, - Elem: &resource_logging_metric_schema.Resource{ - Schema: map[string]*resource_logging_metric_schema.Schema{ - "explicit_buckets": { - Type: resource_logging_metric_schema.TypeList, - Optional: true, - Description: `Specifies a set of buckets with arbitrary widths.`, - MaxItems: 1, - Elem: &resource_logging_metric_schema.Resource{ - Schema: map[string]*resource_logging_metric_schema.Schema{ - "bounds": { - Type: resource_logging_metric_schema.TypeList, - Required: true, - Description: `The values must be monotonically increasing.`, - Elem: &resource_logging_metric_schema.Schema{ - Type: resource_logging_metric_schema.TypeFloat, - }, - }, - }, - }, - AtLeastOneOf: []string{"bucket_options.0.linear_buckets", "bucket_options.0.exponential_buckets", "bucket_options.0.explicit_buckets"}, - }, - "exponential_buckets": { - Type: resource_logging_metric_schema.TypeList, - Optional: true, - Description: `Specifies an exponential sequence of buckets that have a width that is proportional to the value of -the lower bound. 
Each bucket represents a constant relative uncertainty on a specific value in the bucket.`, - MaxItems: 1, - Elem: &resource_logging_metric_schema.Resource{ - Schema: map[string]*resource_logging_metric_schema.Schema{ - "growth_factor": { - Type: resource_logging_metric_schema.TypeFloat, - Optional: true, - Description: `Must be greater than 1.`, - AtLeastOneOf: []string{"bucket_options.0.exponential_buckets.0.num_finite_buckets", "bucket_options.0.exponential_buckets.0.growth_factor", "bucket_options.0.exponential_buckets.0.scale"}, - }, - "num_finite_buckets": { - Type: resource_logging_metric_schema.TypeInt, - Optional: true, - Description: `Must be greater than 0.`, - AtLeastOneOf: []string{"bucket_options.0.exponential_buckets.0.num_finite_buckets", "bucket_options.0.exponential_buckets.0.growth_factor", "bucket_options.0.exponential_buckets.0.scale"}, - }, - "scale": { - Type: resource_logging_metric_schema.TypeFloat, - Optional: true, - Description: `Must be greater than 0.`, - AtLeastOneOf: []string{"bucket_options.0.exponential_buckets.0.num_finite_buckets", "bucket_options.0.exponential_buckets.0.growth_factor", "bucket_options.0.exponential_buckets.0.scale"}, - }, - }, - }, - AtLeastOneOf: []string{"bucket_options.0.linear_buckets", "bucket_options.0.exponential_buckets", "bucket_options.0.explicit_buckets"}, - }, - "linear_buckets": { - Type: resource_logging_metric_schema.TypeList, - Optional: true, - Description: `Specifies a linear sequence of buckets that all have the same width (except overflow and underflow). 
-Each bucket represents a constant absolute uncertainty on the specific value in the bucket.`, - MaxItems: 1, - Elem: &resource_logging_metric_schema.Resource{ - Schema: map[string]*resource_logging_metric_schema.Schema{ - "num_finite_buckets": { - Type: resource_logging_metric_schema.TypeInt, - Optional: true, - Description: `Must be greater than 0.`, - AtLeastOneOf: []string{"bucket_options.0.linear_buckets.0.num_finite_buckets", "bucket_options.0.linear_buckets.0.width", "bucket_options.0.linear_buckets.0.offset"}, - }, - "offset": { - Type: resource_logging_metric_schema.TypeFloat, - Optional: true, - Description: `Lower bound of the first bucket.`, - AtLeastOneOf: []string{"bucket_options.0.linear_buckets.0.num_finite_buckets", "bucket_options.0.linear_buckets.0.width", "bucket_options.0.linear_buckets.0.offset"}, - }, - "width": { - Type: resource_logging_metric_schema.TypeFloat, - Optional: true, - Description: `Must be greater than 0.`, - AtLeastOneOf: []string{"bucket_options.0.linear_buckets.0.num_finite_buckets", "bucket_options.0.linear_buckets.0.width", "bucket_options.0.linear_buckets.0.offset"}, - }, - }, - }, - AtLeastOneOf: []string{"bucket_options.0.linear_buckets", "bucket_options.0.exponential_buckets", "bucket_options.0.explicit_buckets"}, - }, - }, - }, - }, - "description": { - Type: resource_logging_metric_schema.TypeString, - Optional: true, - Description: `A description of this metric, which is used in documentation. The maximum length of the -description is 8000 characters.`, - }, - "label_extractors": { - Type: resource_logging_metric_schema.TypeMap, - Optional: true, - Description: `A map from a label key string to an extractor expression which is used to extract data from a log -entry field and assign as the label value. Each label key specified in the LabelDescriptor must -have an associated extractor expression in this map. 
The syntax of the extractor expression is -the same as for the valueExtractor field.`, - Elem: &resource_logging_metric_schema.Schema{Type: resource_logging_metric_schema.TypeString}, - }, - "value_extractor": { - Type: resource_logging_metric_schema.TypeString, - Optional: true, - Description: `A valueExtractor is required when using a distribution logs-based metric to extract the values to -record from a log entry. Two functions are supported for value extraction - EXTRACT(field) or -REGEXP_EXTRACT(field, regex). The argument are 1. field - The name of the log entry field from which -the value is to be extracted. 2. regex - A regular expression using the Google RE2 syntax -(https://github.com/google/re2/wiki/Syntax) with a single capture group to extract data from the specified -log entry field. The value of the field is converted to a string before applying the regex. It is an -error to specify a regex that does not include exactly one capture group.`, - }, - "project": { - Type: resource_logging_metric_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func loggingMetricMetricDescriptorLabelsSchema() *resource_logging_metric_schema.Resource { - return &resource_logging_metric_schema.Resource{ - Schema: map[string]*resource_logging_metric_schema.Schema{ - "key": { - Type: resource_logging_metric_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The label key.`, - }, - "description": { - Type: resource_logging_metric_schema.TypeString, - Optional: true, - Description: `A human-readable description for the label.`, - }, - "value_type": { - Type: resource_logging_metric_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_logging_metric_validation.StringInSlice([]string{"BOOL", "INT64", "STRING", ""}, false), - Description: `The type of data that can be assigned to the label. 
Default value: "STRING" Possible values: ["BOOL", "INT64", "STRING"]`, - Default: "STRING", - }, - }, - } -} - -func resourceLoggingMetricCreate(d *resource_logging_metric_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandLoggingMetricName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_logging_metric_reflect.ValueOf(nameProp)) && (ok || !resource_logging_metric_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandLoggingMetricDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_logging_metric_reflect.ValueOf(descriptionProp)) && (ok || !resource_logging_metric_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - filterProp, err := expandLoggingMetricFilter(d.Get("filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(resource_logging_metric_reflect.ValueOf(filterProp)) && (ok || !resource_logging_metric_reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - metricDescriptorProp, err := expandLoggingMetricMetricDescriptor(d.Get("metric_descriptor"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metric_descriptor"); !isEmptyValue(resource_logging_metric_reflect.ValueOf(metricDescriptorProp)) && (ok || !resource_logging_metric_reflect.DeepEqual(v, metricDescriptorProp)) { - obj["metricDescriptor"] = metricDescriptorProp - } - labelExtractorsProp, err := expandLoggingMetricLabelExtractors(d.Get("label_extractors"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_extractors"); 
!isEmptyValue(resource_logging_metric_reflect.ValueOf(labelExtractorsProp)) && (ok || !resource_logging_metric_reflect.DeepEqual(v, labelExtractorsProp)) { - obj["labelExtractors"] = labelExtractorsProp - } - valueExtractorProp, err := expandLoggingMetricValueExtractor(d.Get("value_extractor"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("value_extractor"); !isEmptyValue(resource_logging_metric_reflect.ValueOf(valueExtractorProp)) && (ok || !resource_logging_metric_reflect.DeepEqual(v, valueExtractorProp)) { - obj["valueExtractor"] = valueExtractorProp - } - bucketOptionsProp, err := expandLoggingMetricBucketOptions(d.Get("bucket_options"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket_options"); !isEmptyValue(resource_logging_metric_reflect.ValueOf(bucketOptionsProp)) && (ok || !resource_logging_metric_reflect.DeepEqual(v, bucketOptionsProp)) { - obj["bucketOptions"] = bucketOptionsProp - } - - lockName, err := replaceVars(d, config, "customMetric/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/metrics") - if err != nil { - return err - } - - resource_logging_metric_log.Printf("[DEBUG] Creating new Metric: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_logging_metric_fmt.Errorf("Error fetching project for Metric: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_logging_metric_schema.TimeoutCreate)) - if err != nil { - return resource_logging_metric_fmt.Errorf("Error creating Metric: %s", err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return 
resource_logging_metric_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return resource_logging_metric_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return resource_logging_metric_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return resource_logging_metric_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - resource_logging_metric_log.Printf("[DEBUG] Finished creating Metric %q: %#v", d.Id(), res) - - return resourceLoggingMetricRead(d, meta) -} - -func resourceLoggingMetricRead(d *resource_logging_metric_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/metrics/{{%name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_logging_metric_fmt.Errorf("Error fetching project for Metric: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_logging_metric_fmt.Sprintf("LoggingMetric %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_logging_metric_fmt.Errorf("Error reading Metric: %s", err) - } - - if err := d.Set("name", flattenLoggingMetricName(res["name"], d, config)); err != nil { - return resource_logging_metric_fmt.Errorf("Error reading Metric: %s", err) - } - if err := 
d.Set("description", flattenLoggingMetricDescription(res["description"], d, config)); err != nil { - return resource_logging_metric_fmt.Errorf("Error reading Metric: %s", err) - } - if err := d.Set("filter", flattenLoggingMetricFilter(res["filter"], d, config)); err != nil { - return resource_logging_metric_fmt.Errorf("Error reading Metric: %s", err) - } - if err := d.Set("metric_descriptor", flattenLoggingMetricMetricDescriptor(res["metricDescriptor"], d, config)); err != nil { - return resource_logging_metric_fmt.Errorf("Error reading Metric: %s", err) - } - if err := d.Set("label_extractors", flattenLoggingMetricLabelExtractors(res["labelExtractors"], d, config)); err != nil { - return resource_logging_metric_fmt.Errorf("Error reading Metric: %s", err) - } - if err := d.Set("value_extractor", flattenLoggingMetricValueExtractor(res["valueExtractor"], d, config)); err != nil { - return resource_logging_metric_fmt.Errorf("Error reading Metric: %s", err) - } - if err := d.Set("bucket_options", flattenLoggingMetricBucketOptions(res["bucketOptions"], d, config)); err != nil { - return resource_logging_metric_fmt.Errorf("Error reading Metric: %s", err) - } - - return nil -} - -func resourceLoggingMetricUpdate(d *resource_logging_metric_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_logging_metric_fmt.Errorf("Error fetching project for Metric: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := expandLoggingMetricName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_logging_metric_reflect.ValueOf(v)) && (ok || !resource_logging_metric_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := 
expandLoggingMetricDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_logging_metric_reflect.ValueOf(v)) && (ok || !resource_logging_metric_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - filterProp, err := expandLoggingMetricFilter(d.Get("filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(resource_logging_metric_reflect.ValueOf(v)) && (ok || !resource_logging_metric_reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - metricDescriptorProp, err := expandLoggingMetricMetricDescriptor(d.Get("metric_descriptor"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metric_descriptor"); !isEmptyValue(resource_logging_metric_reflect.ValueOf(v)) && (ok || !resource_logging_metric_reflect.DeepEqual(v, metricDescriptorProp)) { - obj["metricDescriptor"] = metricDescriptorProp - } - labelExtractorsProp, err := expandLoggingMetricLabelExtractors(d.Get("label_extractors"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("label_extractors"); !isEmptyValue(resource_logging_metric_reflect.ValueOf(v)) && (ok || !resource_logging_metric_reflect.DeepEqual(v, labelExtractorsProp)) { - obj["labelExtractors"] = labelExtractorsProp - } - valueExtractorProp, err := expandLoggingMetricValueExtractor(d.Get("value_extractor"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("value_extractor"); !isEmptyValue(resource_logging_metric_reflect.ValueOf(v)) && (ok || !resource_logging_metric_reflect.DeepEqual(v, valueExtractorProp)) { - obj["valueExtractor"] = valueExtractorProp - } - bucketOptionsProp, err := expandLoggingMetricBucketOptions(d.Get("bucket_options"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket_options"); 
!isEmptyValue(resource_logging_metric_reflect.ValueOf(v)) && (ok || !resource_logging_metric_reflect.DeepEqual(v, bucketOptionsProp)) { - obj["bucketOptions"] = bucketOptionsProp - } - - lockName, err := replaceVars(d, config, "customMetric/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/metrics/{{%name}}") - if err != nil { - return err - } - - resource_logging_metric_log.Printf("[DEBUG] Updating Metric %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_logging_metric_schema.TimeoutUpdate)) - - if err != nil { - return resource_logging_metric_fmt.Errorf("Error updating Metric %q: %s", d.Id(), err) - } else { - resource_logging_metric_log.Printf("[DEBUG] Finished updating Metric %q: %#v", d.Id(), res) - } - - return resourceLoggingMetricRead(d, meta) -} - -func resourceLoggingMetricDelete(d *resource_logging_metric_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_logging_metric_fmt.Errorf("Error fetching project for Metric: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "customMetric/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{LoggingBasePath}}projects/{{project}}/metrics/{{%name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_logging_metric_log.Printf("[DEBUG] Deleting Metric %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - 
billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_logging_metric_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Metric") - } - - resource_logging_metric_log.Printf("[DEBUG] Finished deleting Metric %q: %#v", d.Id(), res) - return nil -} - -func resourceLoggingMetricImport(d *resource_logging_metric_schema.ResourceData, meta interface{}) ([]*resource_logging_metric_schema.ResourceData, error) { - - config := meta.(*Config) - - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*resource_logging_metric_schema.ResourceData{d}, nil -} - -func flattenLoggingMetricName(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricDescription(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricFilter(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricMetricDescriptor(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["unit"] = - flattenLoggingMetricMetricDescriptorUnit(original["unit"], d, config) - transformed["value_type"] = - flattenLoggingMetricMetricDescriptorValueType(original["valueType"], d, config) - transformed["metric_kind"] = - flattenLoggingMetricMetricDescriptorMetricKind(original["metricKind"], d, config) - transformed["labels"] = - flattenLoggingMetricMetricDescriptorLabels(original["labels"], d, config) - transformed["display_name"] = - flattenLoggingMetricMetricDescriptorDisplayName(original["displayName"], d, 
config) - return []interface{}{transformed} -} - -func flattenLoggingMetricMetricDescriptorUnit(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricMetricDescriptorValueType(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricMetricDescriptorMetricKind(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricMetricDescriptorLabels(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_logging_metric_schema.NewSet(resource_logging_metric_schema.HashResource(loggingMetricMetricDescriptorLabelsSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "key": flattenLoggingMetricMetricDescriptorLabelsKey(original["key"], d, config), - "description": flattenLoggingMetricMetricDescriptorLabelsDescription(original["description"], d, config), - "value_type": flattenLoggingMetricMetricDescriptorLabelsValueType(original["valueType"], d, config), - }) - } - return transformed -} - -func flattenLoggingMetricMetricDescriptorLabelsKey(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricMetricDescriptorLabelsDescription(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricMetricDescriptorLabelsValueType(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(resource_logging_metric_reflect.ValueOf(v)) { - return "STRING" - } - - return v -} - -func 
flattenLoggingMetricMetricDescriptorDisplayName(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricLabelExtractors(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricValueExtractor(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricBucketOptions(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["linear_buckets"] = - flattenLoggingMetricBucketOptionsLinearBuckets(original["linearBuckets"], d, config) - transformed["exponential_buckets"] = - flattenLoggingMetricBucketOptionsExponentialBuckets(original["exponentialBuckets"], d, config) - transformed["explicit_buckets"] = - flattenLoggingMetricBucketOptionsExplicitBuckets(original["explicitBuckets"], d, config) - return []interface{}{transformed} -} - -func flattenLoggingMetricBucketOptionsLinearBuckets(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["num_finite_buckets"] = - flattenLoggingMetricBucketOptionsLinearBucketsNumFiniteBuckets(original["numFiniteBuckets"], d, config) - transformed["width"] = - flattenLoggingMetricBucketOptionsLinearBucketsWidth(original["width"], d, config) - transformed["offset"] = - flattenLoggingMetricBucketOptionsLinearBucketsOffset(original["offset"], d, config) - return []interface{}{transformed} -} - -func flattenLoggingMetricBucketOptionsLinearBucketsNumFiniteBuckets(v interface{}, d 
*resource_logging_metric_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_logging_metric_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenLoggingMetricBucketOptionsLinearBucketsWidth(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricBucketOptionsLinearBucketsOffset(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricBucketOptionsExponentialBuckets(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["num_finite_buckets"] = - flattenLoggingMetricBucketOptionsExponentialBucketsNumFiniteBuckets(original["numFiniteBuckets"], d, config) - transformed["growth_factor"] = - flattenLoggingMetricBucketOptionsExponentialBucketsGrowthFactor(original["growthFactor"], d, config) - transformed["scale"] = - flattenLoggingMetricBucketOptionsExponentialBucketsScale(original["scale"], d, config) - return []interface{}{transformed} -} - -func flattenLoggingMetricBucketOptionsExponentialBucketsNumFiniteBuckets(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_logging_metric_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenLoggingMetricBucketOptionsExponentialBucketsGrowthFactor(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenLoggingMetricBucketOptionsExponentialBucketsScale(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenLoggingMetricBucketOptionsExplicitBuckets(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["bounds"] = - flattenLoggingMetricBucketOptionsExplicitBucketsBounds(original["bounds"], d, config) - return []interface{}{transformed} -} - -func flattenLoggingMetricBucketOptionsExplicitBucketsBounds(v interface{}, d *resource_logging_metric_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandLoggingMetricName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricMetricDescriptor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedUnit, err := expandLoggingMetricMetricDescriptorUnit(original["unit"], d, config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedUnit); val.IsValid() && !isEmptyValue(val) { - transformed["unit"] = transformedUnit - } - - transformedValueType, err := expandLoggingMetricMetricDescriptorValueType(original["value_type"], d, config) - if err != nil { - return nil, err - } else if val 
:= resource_logging_metric_reflect.ValueOf(transformedValueType); val.IsValid() && !isEmptyValue(val) { - transformed["valueType"] = transformedValueType - } - - transformedMetricKind, err := expandLoggingMetricMetricDescriptorMetricKind(original["metric_kind"], d, config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedMetricKind); val.IsValid() && !isEmptyValue(val) { - transformed["metricKind"] = transformedMetricKind - } - - transformedLabels, err := expandLoggingMetricMetricDescriptorLabels(original["labels"], d, config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - transformedDisplayName, err := expandLoggingMetricMetricDescriptorDisplayName(original["display_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedDisplayName); val.IsValid() && !isEmptyValue(val) { - transformed["displayName"] = transformedDisplayName - } - - return transformed, nil -} - -func expandLoggingMetricMetricDescriptorUnit(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricMetricDescriptorValueType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricMetricDescriptorMetricKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricMetricDescriptorLabels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_logging_metric_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedKey, err := expandLoggingMetricMetricDescriptorLabelsKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - transformedDescription, err := expandLoggingMetricMetricDescriptorLabelsDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedValueType, err := expandLoggingMetricMetricDescriptorLabelsValueType(original["value_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedValueType); val.IsValid() && !isEmptyValue(val) { - transformed["valueType"] = transformedValueType - } - - req = append(req, transformed) - } - return req, nil -} - -func expandLoggingMetricMetricDescriptorLabelsKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricMetricDescriptorLabelsDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricMetricDescriptorLabelsValueType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricMetricDescriptorDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricLabelExtractors(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func 
expandLoggingMetricValueExtractor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricBucketOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLinearBuckets, err := expandLoggingMetricBucketOptionsLinearBuckets(original["linear_buckets"], d, config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedLinearBuckets); val.IsValid() && !isEmptyValue(val) { - transformed["linearBuckets"] = transformedLinearBuckets - } - - transformedExponentialBuckets, err := expandLoggingMetricBucketOptionsExponentialBuckets(original["exponential_buckets"], d, config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedExponentialBuckets); val.IsValid() && !isEmptyValue(val) { - transformed["exponentialBuckets"] = transformedExponentialBuckets - } - - transformedExplicitBuckets, err := expandLoggingMetricBucketOptionsExplicitBuckets(original["explicit_buckets"], d, config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedExplicitBuckets); val.IsValid() && !isEmptyValue(val) { - transformed["explicitBuckets"] = transformedExplicitBuckets - } - - return transformed, nil -} - -func expandLoggingMetricBucketOptionsLinearBuckets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNumFiniteBuckets, err := expandLoggingMetricBucketOptionsLinearBucketsNumFiniteBuckets(original["num_finite_buckets"], d, 
config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedNumFiniteBuckets); val.IsValid() && !isEmptyValue(val) { - transformed["numFiniteBuckets"] = transformedNumFiniteBuckets - } - - transformedWidth, err := expandLoggingMetricBucketOptionsLinearBucketsWidth(original["width"], d, config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedWidth); val.IsValid() && !isEmptyValue(val) { - transformed["width"] = transformedWidth - } - - transformedOffset, err := expandLoggingMetricBucketOptionsLinearBucketsOffset(original["offset"], d, config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedOffset); val.IsValid() && !isEmptyValue(val) { - transformed["offset"] = transformedOffset - } - - return transformed, nil -} - -func expandLoggingMetricBucketOptionsLinearBucketsNumFiniteBuckets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricBucketOptionsLinearBucketsWidth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricBucketOptionsLinearBucketsOffset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricBucketOptionsExponentialBuckets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNumFiniteBuckets, err := expandLoggingMetricBucketOptionsExponentialBucketsNumFiniteBuckets(original["num_finite_buckets"], d, config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedNumFiniteBuckets); val.IsValid() && 
!isEmptyValue(val) { - transformed["numFiniteBuckets"] = transformedNumFiniteBuckets - } - - transformedGrowthFactor, err := expandLoggingMetricBucketOptionsExponentialBucketsGrowthFactor(original["growth_factor"], d, config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedGrowthFactor); val.IsValid() && !isEmptyValue(val) { - transformed["growthFactor"] = transformedGrowthFactor - } - - transformedScale, err := expandLoggingMetricBucketOptionsExponentialBucketsScale(original["scale"], d, config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedScale); val.IsValid() && !isEmptyValue(val) { - transformed["scale"] = transformedScale - } - - return transformed, nil -} - -func expandLoggingMetricBucketOptionsExponentialBucketsNumFiniteBuckets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricBucketOptionsExponentialBucketsGrowthFactor(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricBucketOptionsExponentialBucketsScale(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandLoggingMetricBucketOptionsExplicitBuckets(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBounds, err := expandLoggingMetricBucketOptionsExplicitBucketsBounds(original["bounds"], d, config) - if err != nil { - return nil, err - } else if val := resource_logging_metric_reflect.ValueOf(transformedBounds); val.IsValid() && !isEmptyValue(val) { - transformed["bounds"] = transformedBounds - } - - return transformed, nil -} - -func 
expandLoggingMetricBucketOptionsExplicitBucketsBounds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -var loggingOrganizationBucketConfigSchema = map[string]*resource_logging_organization_bucket_config_schema.Schema{ - "organization": { - Type: resource_logging_organization_bucket_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The parent resource that contains the logging bucket.`, - }, -} - -func organizationBucketConfigID(d *resource_logging_organization_bucket_config_schema.ResourceData, config *Config) (string, error) { - organization := d.Get("organization").(string) - location := d.Get("location").(string) - bucketID := d.Get("bucket_id").(string) - - if !resource_logging_organization_bucket_config_strings.HasPrefix(organization, "organization") { - organization = "organizations/" + organization - } - - id := resource_logging_organization_bucket_config_fmt.Sprintf("%s/locations/%s/buckets/%s", organization, location, bucketID) - return id, nil -} - -func ResourceLoggingOrganizationBucketConfig() *resource_logging_organization_bucket_config_schema.Resource { - return ResourceLoggingBucketConfig("organization", loggingOrganizationBucketConfigSchema, organizationBucketConfigID) -} - -func resourceLoggingOrganizationSink() *resource_logging_organization_sink_schema.Resource { - schm := &resource_logging_organization_sink_schema.Resource{ - Create: resourceLoggingOrganizationSinkCreate, - Read: resourceLoggingOrganizationSinkRead, - Delete: resourceLoggingOrganizationSinkDelete, - Update: resourceLoggingOrganizationSinkUpdate, - Schema: resourceLoggingSinkSchema(), - Importer: &resource_logging_organization_sink_schema.ResourceImporter{ - State: resourceLoggingSinkImportState("org_id"), - }, - UseJSONNumber: true, - } - schm.Schema["org_id"] = &resource_logging_organization_sink_schema.Schema{ - Type: resource_logging_organization_sink_schema.TypeString, - Required: true, - 
Description: `The numeric ID of the organization to be exported to the sink.`, - StateFunc: func(v interface{}) string { - return resource_logging_organization_sink_strings.Replace(v.(string), "organizations/", "", 1) - }, - } - schm.Schema["include_children"] = &resource_logging_organization_sink_schema.Schema{ - Type: resource_logging_organization_sink_schema.TypeBool, - Optional: true, - ForceNew: true, - Default: false, - Description: `Whether or not to include children organizations in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization are included.`, - } - - return schm -} - -func resourceLoggingOrganizationSinkCreate(d *resource_logging_organization_sink_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - org := d.Get("org_id").(string) - id, sink := expandResourceLoggingSink(d, "organizations", org) - sink.IncludeChildren = d.Get("include_children").(bool) - - _, err = config.NewLoggingClient(userAgent).Organizations.Sinks.Create(id.parent(), sink).UniqueWriterIdentity(true).Do() - if err != nil { - return err - } - - d.SetId(id.canonicalId()) - return resourceLoggingOrganizationSinkRead(d, meta) -} - -func resourceLoggingOrganizationSinkRead(d *resource_logging_organization_sink_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - sink, err := config.NewLoggingClient(userAgent).Organizations.Sinks.Get(d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, resource_logging_organization_sink_fmt.Sprintf("Organization Logging Sink %s", d.Get("name").(string))) - } - - if err := flattenResourceLoggingSink(d, sink); err != nil { - return err - } - - if err := d.Set("include_children", sink.IncludeChildren); err != 
nil { - return resource_logging_organization_sink_fmt.Errorf("Error setting include_children: %s", err) - } - - return nil -} - -func resourceLoggingOrganizationSinkUpdate(d *resource_logging_organization_sink_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - sink, updateMask := expandResourceLoggingSinkForUpdate(d) - - sink.IncludeChildren = d.Get("include_children").(bool) - sink.ForceSendFields = append(sink.ForceSendFields, "IncludeChildren") - - _, err = config.NewLoggingClient(userAgent).Organizations.Sinks.Patch(d.Id(), sink). - UpdateMask(updateMask).UniqueWriterIdentity(true).Do() - if err != nil { - return err - } - - return resourceLoggingOrganizationSinkRead(d, meta) -} - -func resourceLoggingOrganizationSinkDelete(d *resource_logging_organization_sink_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - _, err = config.NewLoggingClient(userAgent).Projects.Sinks.Delete(d.Id()).Do() - if err != nil { - return err - } - - return nil -} - -var loggingProjectBucketConfigSchema = map[string]*resource_logging_project_bucket_config_schema.Schema{ - "project": { - Type: resource_logging_project_bucket_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The parent project that contains the logging bucket.`, - }, -} - -func projectBucketConfigID(d *resource_logging_project_bucket_config_schema.ResourceData, config *Config) (string, error) { - project := d.Get("project").(string) - location := d.Get("location").(string) - bucketID := d.Get("bucket_id").(string) - - if !resource_logging_project_bucket_config_strings.HasPrefix(project, "project") { - project = "projects/" + project - } - - id := resource_logging_project_bucket_config_fmt.Sprintf("%s/locations/%s/buckets/%s", project, 
location, bucketID) - return id, nil -} - -func ResourceLoggingProjectBucketConfig() *resource_logging_project_bucket_config_schema.Resource { - return ResourceLoggingBucketConfig("project", loggingProjectBucketConfigSchema, projectBucketConfigID) -} - -const nonUniqueWriterAccount = "serviceAccount:cloud-logs@system.gserviceaccount.com" - -func resourceLoggingProjectSink() *resource_logging_project_sink_schema.Resource { - schm := &resource_logging_project_sink_schema.Resource{ - Create: resourceLoggingProjectSinkCreate, - Read: resourceLoggingProjectSinkRead, - Delete: resourceLoggingProjectSinkDelete, - Update: resourceLoggingProjectSinkUpdate, - Schema: resourceLoggingSinkSchema(), - CustomizeDiff: resourceLoggingProjectSinkCustomizeDiff, - Importer: &resource_logging_project_sink_schema.ResourceImporter{ - State: resourceLoggingSinkImportState("project"), - }, - UseJSONNumber: true, - } - schm.Schema["project"] = &resource_logging_project_sink_schema.Schema{ - Type: resource_logging_project_sink_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project to create the sink in. If omitted, the project associated with the provider is used.`, - } - schm.Schema["unique_writer_identity"] = &resource_logging_project_sink_schema.Schema{ - Type: resource_logging_project_sink_schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - Description: `Whether or not to create a unique identity associated with this sink. If false (the default), then the writer_identity used is serviceAccount:cloud-logs@system.gserviceaccount.com. If true, then a unique service account is created and used for this sink. 
If you wish to publish logs across projects, you must set unique_writer_identity to true.`, - } - return schm -} - -func resourceLoggingProjectSinkCreate(d *resource_logging_project_sink_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - id, sink := expandResourceLoggingSink(d, "projects", project) - uniqueWriterIdentity := d.Get("unique_writer_identity").(bool) - - _, err = config.NewLoggingClient(userAgent).Projects.Sinks.Create(id.parent(), sink).UniqueWriterIdentity(uniqueWriterIdentity).Do() - if err != nil { - return err - } - - d.SetId(id.canonicalId()) - - return resourceLoggingProjectSinkRead(d, meta) -} - -func resourceLoggingProjectSinkCustomizeDiff(ctx resource_logging_project_sink_context.Context, d *resource_logging_project_sink_schema.ResourceDiff, meta interface{}) error { - - return resourceLoggingProjectSinkCustomizeDiffFunc(d) -} - -func resourceLoggingProjectSinkCustomizeDiffFunc(diff TerraformResourceDiff) error { - if !diff.HasChange("bigquery_options.#") { - return nil - } - - bigqueryOptions := diff.Get("bigquery_options.#").(int) - if bigqueryOptions > 0 { - uwi := diff.Get("unique_writer_identity") - if !uwi.(bool) { - return resource_logging_project_sink_errors.New("unique_writer_identity must be true when bigquery_options is supplied") - } - } - return nil -} - -func resourceLoggingProjectSinkRead(d *resource_logging_project_sink_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - sink, err := config.NewLoggingClient(userAgent).Projects.Sinks.Get(d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, 
resource_logging_project_sink_fmt.Sprintf("Project Logging Sink %s", d.Get("name").(string))) - } - - if err := d.Set("project", project); err != nil { - return resource_logging_project_sink_fmt.Errorf("Error setting project: %s", err) - } - - if err := flattenResourceLoggingSink(d, sink); err != nil { - return err - } - - if sink.WriterIdentity != nonUniqueWriterAccount { - if err := d.Set("unique_writer_identity", true); err != nil { - return resource_logging_project_sink_fmt.Errorf("Error setting unique_writer_identity: %s", err) - } - } else { - if err := d.Set("unique_writer_identity", false); err != nil { - return resource_logging_project_sink_fmt.Errorf("Error setting unique_writer_identity: %s", err) - } - } - return nil -} - -func resourceLoggingProjectSinkUpdate(d *resource_logging_project_sink_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - sink, updateMask := expandResourceLoggingSinkForUpdate(d) - uniqueWriterIdentity := d.Get("unique_writer_identity").(bool) - - _, err = config.NewLoggingClient(userAgent).Projects.Sinks.Patch(d.Id(), sink). 
- UpdateMask(updateMask).UniqueWriterIdentity(uniqueWriterIdentity).Do() - if err != nil { - return err - } - - return resourceLoggingProjectSinkRead(d, meta) -} - -func resourceLoggingProjectSinkDelete(d *resource_logging_project_sink_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - _, err = config.NewLoggingClient(userAgent).Projects.Sinks.Delete(d.Id()).Do() - if err != nil { - return err - } - - d.SetId("") - return nil -} - -func resourceLoggingSinkSchema() map[string]*resource_logging_sink_schema.Schema { - return map[string]*resource_logging_sink_schema.Schema{ - "name": { - Type: resource_logging_sink_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the logging sink.`, - }, - - "destination": { - Type: resource_logging_sink_schema.TypeString, - Required: true, - Description: `The destination of the sink (or, in other words, where logs are written to). Can be a Cloud Storage bucket, a PubSub topic, or a BigQuery dataset. Examples: "storage.googleapis.com/[GCS_BUCKET]" "bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]" "pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]" The writer associated with the sink must have access to write to the above resource.`, - }, - - "filter": { - Type: resource_logging_sink_schema.TypeString, - Optional: true, - DiffSuppressFunc: optionalSurroundingSpacesSuppress, - Description: `The filter to apply when exporting logs. Only log entries that match the filter are exported.`, - }, - - "description": { - Type: resource_logging_sink_schema.TypeString, - Optional: true, - Description: `A description of this sink. 
The maximum length of the description is 8000 characters.`, - }, - - "disabled": { - Type: resource_logging_sink_schema.TypeBool, - Optional: true, - Description: `If set to True, then this sink is disabled and it does not export any log entries.`, - }, - - "exclusions": { - Type: resource_logging_sink_schema.TypeList, - Optional: true, - Description: `Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both filter and one of exclusion_filters it will not be exported.`, - Elem: &resource_logging_sink_schema.Resource{ - Schema: map[string]*resource_logging_sink_schema.Schema{ - "name": { - Type: resource_logging_sink_schema.TypeString, - Required: true, - Description: `A client-assigned identifier, such as "load-balancer-exclusion". Identifiers are limited to 100 characters and can include only letters, digits, underscores, hyphens, and periods. First character has to be alphanumeric.`, - }, - "description": { - Type: resource_logging_sink_schema.TypeString, - Optional: true, - Description: `A description of this exclusion.`, - }, - "filter": { - Type: resource_logging_sink_schema.TypeString, - Required: true, - Description: `An advanced logs filter that matches the log entries to be excluded. By using the sample function, you can exclude less than 100% of the matching log entries`, - }, - "disabled": { - Type: resource_logging_sink_schema.TypeBool, - Optional: true, - Default: false, - Description: `If set to True, then this exclusion is disabled and it does not exclude any log entries`, - }, - }, - }, - }, - - "writer_identity": { - Type: resource_logging_sink_schema.TypeString, - Computed: true, - Description: `The identity associated with this sink. 
This identity must be granted write access to the configured destination.`, - }, - - "bigquery_options": { - Type: resource_logging_sink_schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Description: `Options that affect sinks exporting data to BigQuery.`, - Elem: &resource_logging_sink_schema.Resource{ - Schema: map[string]*resource_logging_sink_schema.Schema{ - "use_partitioned_tables": { - Type: resource_logging_sink_schema.TypeBool, - Required: true, - Description: `Whether to use BigQuery's partition tables. By default, Logging creates dated tables based on the log entries' timestamps, e.g. syslog_20170523. With partitioned tables the date suffix is no longer present and special query syntax has to be used instead. In both cases, tables are sharded based on UTC timezone.`, - }, - }, - }, - }, - } -} - -func expandResourceLoggingSink(d *resource_logging_sink_schema.ResourceData, resourceType, resourceId string) (LoggingSinkId, *resource_logging_sink_logging.LogSink) { - id := LoggingSinkId{ - resourceType: resourceType, - resourceId: resourceId, - name: d.Get("name").(string), - } - - sink := resource_logging_sink_logging.LogSink{ - Name: d.Get("name").(string), - Destination: d.Get("destination").(string), - Filter: d.Get("filter").(string), - Description: d.Get("description").(string), - Disabled: d.Get("disabled").(bool), - Exclusions: expandLoggingSinkExclusions(d.Get("exclusions")), - BigqueryOptions: expandLoggingSinkBigqueryOptions(d.Get("bigquery_options")), - } - return id, &sink -} - -func flattenResourceLoggingSink(d *resource_logging_sink_schema.ResourceData, sink *resource_logging_sink_logging.LogSink) error { - if err := d.Set("name", sink.Name); err != nil { - return resource_logging_sink_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("destination", sink.Destination); err != nil { - return resource_logging_sink_fmt.Errorf("Error setting destination: %s", err) - } - if err := d.Set("filter", sink.Filter); err 
!= nil { - return resource_logging_sink_fmt.Errorf("Error setting filter: %s", err) - } - if err := d.Set("description", sink.Description); err != nil { - return resource_logging_sink_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("disabled", sink.Disabled); err != nil { - return resource_logging_sink_fmt.Errorf("Error setting disabled: %s", err) - } - if err := d.Set("writer_identity", sink.WriterIdentity); err != nil { - return resource_logging_sink_fmt.Errorf("Error setting writer_identity: %s", err) - } - if err := d.Set("exclusions", flattenLoggingSinkExclusion(sink.Exclusions)); err != nil { - return resource_logging_sink_fmt.Errorf("Error setting exclusions: %s", err) - } - if err := d.Set("bigquery_options", flattenLoggingSinkBigqueryOptions(sink.BigqueryOptions)); err != nil { - return resource_logging_sink_fmt.Errorf("Error setting bigquery_options: %s", err) - } - - return nil -} - -func expandResourceLoggingSinkForUpdate(d *resource_logging_sink_schema.ResourceData) (sink *resource_logging_sink_logging.LogSink, updateMask string) { - - sink = &resource_logging_sink_logging.LogSink{ - Destination: d.Get("destination").(string), - Filter: d.Get("filter").(string), - Disabled: d.Get("disabled").(bool), - Description: d.Get("description").(string), - ForceSendFields: []string{"Destination", "Filter", "Disabled"}, - } - - updateFields := []string{} - if d.HasChange("destination") { - updateFields = append(updateFields, "destination") - } - if d.HasChange("filter") { - updateFields = append(updateFields, "filter") - } - if d.HasChange("description") { - updateFields = append(updateFields, "description") - } - if d.HasChange("disabled") { - updateFields = append(updateFields, "disabled") - } - if d.HasChange("exclusions") { - sink.Exclusions = expandLoggingSinkExclusions(d.Get("exclusions")) - updateFields = append(updateFields, "exclusions") - } - if d.HasChange("bigquery_options") { - sink.BigqueryOptions = 
expandLoggingSinkBigqueryOptions(d.Get("bigquery_options")) - updateFields = append(updateFields, "bigqueryOptions") - } - updateMask = resource_logging_sink_strings.Join(updateFields, ",") - return -} - -func expandLoggingSinkBigqueryOptions(v interface{}) *resource_logging_sink_logging.BigQueryOptions { - if v == nil { - return nil - } - optionsSlice := v.([]interface{}) - if len(optionsSlice) == 0 || optionsSlice[0] == nil { - return nil - } - options := optionsSlice[0].(map[string]interface{}) - bo := &resource_logging_sink_logging.BigQueryOptions{} - if usePartitionedTables, ok := options["use_partitioned_tables"]; ok { - bo.UsePartitionedTables = usePartitionedTables.(bool) - } - return bo -} - -func flattenLoggingSinkBigqueryOptions(o *resource_logging_sink_logging.BigQueryOptions) []map[string]interface{} { - if o == nil { - return nil - } - oMap := map[string]interface{}{ - "use_partitioned_tables": o.UsePartitionedTables, - } - return []map[string]interface{}{oMap} -} - -func expandLoggingSinkExclusions(v interface{}) []*resource_logging_sink_logging.LogExclusion { - if v == nil { - return nil - } - exclusions := v.([]interface{}) - if len(exclusions) == 0 { - return nil - } - results := make([]*resource_logging_sink_logging.LogExclusion, 0, len(exclusions)) - for _, e := range exclusions { - exclusion := e.(map[string]interface{}) - results = append(results, &resource_logging_sink_logging.LogExclusion{ - Name: exclusion["name"].(string), - Description: exclusion["description"].(string), - Filter: exclusion["filter"].(string), - Disabled: exclusion["disabled"].(bool), - }) - } - return results -} - -func flattenLoggingSinkExclusion(exclusions []*resource_logging_sink_logging.LogExclusion) []map[string]interface{} { - if exclusions == nil { - return nil - } - flattenedExclusions := make([]map[string]interface{}, 0, len(exclusions)) - for _, e := range exclusions { - flattenedExclusion := map[string]interface{}{ - "name": e.Name, - "description": 
e.Description, - "filter": e.Filter, - "disabled": e.Disabled, - } - flattenedExclusions = append(flattenedExclusions, flattenedExclusion) - - } - - return flattenedExclusions -} - -func resourceLoggingSinkImportState(sinkType string) resource_logging_sink_schema.StateFunc { - return func(d *resource_logging_sink_schema.ResourceData, meta interface{}) ([]*resource_logging_sink_schema.ResourceData, error) { - loggingSinkId, err := parseLoggingSinkId(d.Id()) - if err != nil { - return nil, err - } - - if err := d.Set(sinkType, loggingSinkId.resourceId); err != nil { - return nil, resource_logging_sink_fmt.Errorf("Error setting sinkType: %s", err) - } - - return []*resource_logging_sink_schema.ResourceData{d}, nil - } -} - -type ResourceManagerOperationWaiter struct { - Config *Config - UserAgent string - CommonOperationWaiter -} - -func (w *ResourceManagerOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, resource_manager_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := resource_manager_operation_fmt.Sprintf("https://cloudresourcemanager.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", "", url, w.UserAgent, nil) -} - -func createResourceManagerWaiter(config *Config, op map[string]interface{}, activity, userAgent string) (*ResourceManagerOperationWaiter, error) { - w := &ResourceManagerOperationWaiter{ - Config: config, - UserAgent: userAgent, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func resourceManagerOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, activity, userAgent string, timeout resource_manager_operation_time.Duration) error { - w, err := createResourceManagerWaiter(config, op, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - 
} - return resource_manager_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func resourceManagerOperationWaitTime(config *Config, op map[string]interface{}, activity, userAgent string, timeout resource_manager_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createResourceManagerWaiter(config, op, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -func resourceMemcacheInstance() *resource_memcache_instance_schema.Resource { - return &resource_memcache_instance_schema.Resource{ - Create: resourceMemcacheInstanceCreate, - Read: resourceMemcacheInstanceRead, - Update: resourceMemcacheInstanceUpdate, - Delete: resourceMemcacheInstanceDelete, - - Importer: &resource_memcache_instance_schema.ResourceImporter{ - State: resourceMemcacheInstanceImport, - }, - - Timeouts: &resource_memcache_instance_schema.ResourceTimeout{ - Create: resource_memcache_instance_schema.DefaultTimeout(20 * resource_memcache_instance_time.Minute), - Update: resource_memcache_instance_schema.DefaultTimeout(20 * resource_memcache_instance_time.Minute), - Delete: resource_memcache_instance_schema.DefaultTimeout(20 * resource_memcache_instance_time.Minute), - }, - - Schema: map[string]*resource_memcache_instance_schema.Schema{ - "name": { - Type: resource_memcache_instance_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The resource name of the instance.`, - }, - "node_config": { - Type: resource_memcache_instance_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Configuration for memcache nodes.`, - MaxItems: 1, - Elem: &resource_memcache_instance_schema.Resource{ - Schema: map[string]*resource_memcache_instance_schema.Schema{ - "cpu_count": { - Type: resource_memcache_instance_schema.TypeInt, - Required: true, - Description: `Number of CPUs per node.`, - }, - "memory_size_mb": { - 
Type: resource_memcache_instance_schema.TypeInt, - Required: true, - Description: `Memory size in Mebibytes for each memcache node.`, - }, - }, - }, - }, - "node_count": { - Type: resource_memcache_instance_schema.TypeInt, - Required: true, - Description: `Number of nodes in the memcache instance.`, - }, - "authorized_network": { - Type: resource_memcache_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The full name of the GCE network to connect the instance to. If not provided, -'default' will be used.`, - }, - "display_name": { - Type: resource_memcache_instance_schema.TypeString, - Computed: true, - Optional: true, - Description: `A user-visible name for the instance.`, - }, - "labels": { - Type: resource_memcache_instance_schema.TypeMap, - Optional: true, - Description: `Resource labels to represent user-provided metadata.`, - Elem: &resource_memcache_instance_schema.Schema{Type: resource_memcache_instance_schema.TypeString}, - }, - "memcache_parameters": { - Type: resource_memcache_instance_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `User-specified parameters for this memcache instance.`, - MaxItems: 1, - Elem: &resource_memcache_instance_schema.Resource{ - Schema: map[string]*resource_memcache_instance_schema.Schema{ - "params": { - Type: resource_memcache_instance_schema.TypeMap, - Optional: true, - Description: `User-defined set of parameters to use in the memcache process.`, - Elem: &resource_memcache_instance_schema.Schema{Type: resource_memcache_instance_schema.TypeString}, - }, - "id": { - Type: resource_memcache_instance_schema.TypeString, - Computed: true, - Description: `This is a unique ID associated with this set of parameters.`, - }, - }, - }, - }, - "memcache_version": { - Type: resource_memcache_instance_schema.TypeString, - Optional: true, - ValidateFunc: resource_memcache_instance_validation.StringInSlice([]string{"MEMCACHE_1_5", ""}, false), - Description: `The major 
version of Memcached software. If not provided, latest supported version will be used. -Currently the latest supported major version is MEMCACHE_1_5. The minor version will be automatically -determined by our system based on the latest supported minor version. Default value: "MEMCACHE_1_5" Possible values: ["MEMCACHE_1_5"]`, - Default: "MEMCACHE_1_5", - }, - "region": { - Type: resource_memcache_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The region of the Memcache instance. If it is not provided, the provider region is used.`, - }, - "zones": { - Type: resource_memcache_instance_schema.TypeSet, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Zones where memcache nodes should be provisioned. If not -provided, all zones will be used.`, - Elem: &resource_memcache_instance_schema.Schema{ - Type: resource_memcache_instance_schema.TypeString, - }, - Set: resource_memcache_instance_schema.HashString, - }, - "create_time": { - Type: resource_memcache_instance_schema.TypeString, - Computed: true, - Description: `Creation timestamp in RFC3339 text format.`, - }, - "discovery_endpoint": { - Type: resource_memcache_instance_schema.TypeString, - Computed: true, - Description: `Endpoint for Discovery API`, - }, - "memcache_full_version": { - Type: resource_memcache_instance_schema.TypeString, - Computed: true, - Description: `The full version of memcached server running on this instance.`, - }, - "memcache_nodes": { - Type: resource_memcache_instance_schema.TypeList, - Computed: true, - Description: `Additional information about the instance state, if available.`, - Elem: &resource_memcache_instance_schema.Resource{ - Schema: map[string]*resource_memcache_instance_schema.Schema{ - "host": { - Type: resource_memcache_instance_schema.TypeString, - Computed: true, - Description: `Hostname or IP address of the Memcached node used by the clients to connect to the Memcached server on this node.`, - }, - 
"node_id": { - Type: resource_memcache_instance_schema.TypeString, - Computed: true, - Description: `Identifier of the Memcached node. The node id does not include project or location like the Memcached instance name.`, - }, - "port": { - Type: resource_memcache_instance_schema.TypeInt, - Computed: true, - Description: `The port number of the Memcached server on this node.`, - }, - "state": { - Type: resource_memcache_instance_schema.TypeString, - Computed: true, - Description: `Current state of the Memcached node.`, - }, - "zone": { - Type: resource_memcache_instance_schema.TypeString, - Computed: true, - Description: `Location (GCP Zone) for the Memcached node.`, - }, - }, - }, - }, - "project": { - Type: resource_memcache_instance_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMemcacheInstanceCreate(d *resource_memcache_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandMemcacheInstanceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_memcache_instance_reflect.ValueOf(displayNameProp)) && (ok || !resource_memcache_instance_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - labelsProp, err := expandMemcacheInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_memcache_instance_reflect.ValueOf(labelsProp)) && (ok || !resource_memcache_instance_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - zonesProp, err := expandMemcacheInstanceZones(d.Get("zones"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("zones"); 
!isEmptyValue(resource_memcache_instance_reflect.ValueOf(zonesProp)) && (ok || !resource_memcache_instance_reflect.DeepEqual(v, zonesProp)) { - obj["zones"] = zonesProp - } - authorizedNetworkProp, err := expandMemcacheInstanceAuthorizedNetwork(d.Get("authorized_network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("authorized_network"); !isEmptyValue(resource_memcache_instance_reflect.ValueOf(authorizedNetworkProp)) && (ok || !resource_memcache_instance_reflect.DeepEqual(v, authorizedNetworkProp)) { - obj["authorizedNetwork"] = authorizedNetworkProp - } - nodeCountProp, err := expandMemcacheInstanceNodeCount(d.Get("node_count"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("node_count"); !isEmptyValue(resource_memcache_instance_reflect.ValueOf(nodeCountProp)) && (ok || !resource_memcache_instance_reflect.DeepEqual(v, nodeCountProp)) { - obj["nodeCount"] = nodeCountProp - } - memcacheVersionProp, err := expandMemcacheInstanceMemcacheVersion(d.Get("memcache_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("memcache_version"); !isEmptyValue(resource_memcache_instance_reflect.ValueOf(memcacheVersionProp)) && (ok || !resource_memcache_instance_reflect.DeepEqual(v, memcacheVersionProp)) { - obj["memcacheVersion"] = memcacheVersionProp - } - nodeConfigProp, err := expandMemcacheInstanceNodeConfig(d.Get("node_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("node_config"); !isEmptyValue(resource_memcache_instance_reflect.ValueOf(nodeConfigProp)) && (ok || !resource_memcache_instance_reflect.DeepEqual(v, nodeConfigProp)) { - obj["nodeConfig"] = nodeConfigProp - } - parametersProp, err := expandMemcacheInstanceMemcacheParameters(d.Get("memcache_parameters"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("memcache_parameters"); !isEmptyValue(resource_memcache_instance_reflect.ValueOf(parametersProp)) && 
(ok || !resource_memcache_instance_reflect.DeepEqual(v, parametersProp)) { - obj["parameters"] = parametersProp - } - - url, err := replaceVars(d, config, "{{MemcacheBasePath}}projects/{{project}}/locations/{{region}}/instances?instanceId={{name}}") - if err != nil { - return err - } - - resource_memcache_instance_log.Printf("[DEBUG] Creating new Instance: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_memcache_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_memcache_instance_schema.TimeoutCreate)) - if err != nil { - return resource_memcache_instance_fmt.Errorf("Error creating Instance: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return resource_memcache_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = memcacheOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Instance", userAgent, - d.Timeout(resource_memcache_instance_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_memcache_instance_fmt.Errorf("Error waiting to create Instance: %s", err) - } - - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return resource_memcache_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_memcache_instance_log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) - - return resourceMemcacheInstanceRead(d, meta) -} - -func resourceMemcacheInstanceRead(d *resource_memcache_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - 
userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MemcacheBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_memcache_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_memcache_instance_fmt.Sprintf("MemcacheInstance %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_memcache_instance_fmt.Errorf("Error reading Instance: %s", err) - } - - if err := d.Set("display_name", flattenMemcacheInstanceDisplayName(res["displayName"], d, config)); err != nil { - return resource_memcache_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("memcache_nodes", flattenMemcacheInstanceMemcacheNodes(res["memcacheNodes"], d, config)); err != nil { - return resource_memcache_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("create_time", flattenMemcacheInstanceCreateTime(res["createTime"], d, config)); err != nil { - return resource_memcache_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("discovery_endpoint", flattenMemcacheInstanceDiscoveryEndpoint(res["discoveryEndpoint"], d, config)); err != nil { - return resource_memcache_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("labels", flattenMemcacheInstanceLabels(res["labels"], d, config)); err != nil { - return resource_memcache_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("memcache_full_version", 
flattenMemcacheInstanceMemcacheFullVersion(res["memcacheFullVersion"], d, config)); err != nil { - return resource_memcache_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("zones", flattenMemcacheInstanceZones(res["zones"], d, config)); err != nil { - return resource_memcache_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("authorized_network", flattenMemcacheInstanceAuthorizedNetwork(res["authorizedNetwork"], d, config)); err != nil { - return resource_memcache_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("node_count", flattenMemcacheInstanceNodeCount(res["nodeCount"], d, config)); err != nil { - return resource_memcache_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("memcache_version", flattenMemcacheInstanceMemcacheVersion(res["memcacheVersion"], d, config)); err != nil { - return resource_memcache_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("node_config", flattenMemcacheInstanceNodeConfig(res["nodeConfig"], d, config)); err != nil { - return resource_memcache_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("memcache_parameters", flattenMemcacheInstanceMemcacheParameters(res["parameters"], d, config)); err != nil { - return resource_memcache_instance_fmt.Errorf("Error reading Instance: %s", err) - } - - return nil -} - -func resourceMemcacheInstanceUpdate(d *resource_memcache_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_memcache_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandMemcacheInstanceDisplayName(d.Get("display_name"), 
d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_memcache_instance_reflect.ValueOf(v)) && (ok || !resource_memcache_instance_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - labelsProp, err := expandMemcacheInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_memcache_instance_reflect.ValueOf(v)) && (ok || !resource_memcache_instance_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - nodeCountProp, err := expandMemcacheInstanceNodeCount(d.Get("node_count"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("node_count"); !isEmptyValue(resource_memcache_instance_reflect.ValueOf(v)) && (ok || !resource_memcache_instance_reflect.DeepEqual(v, nodeCountProp)) { - obj["nodeCount"] = nodeCountProp - } - memcacheVersionProp, err := expandMemcacheInstanceMemcacheVersion(d.Get("memcache_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("memcache_version"); !isEmptyValue(resource_memcache_instance_reflect.ValueOf(v)) && (ok || !resource_memcache_instance_reflect.DeepEqual(v, memcacheVersionProp)) { - obj["memcacheVersion"] = memcacheVersionProp - } - - url, err := replaceVars(d, config, "{{MemcacheBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return err - } - - resource_memcache_instance_log.Printf("[DEBUG] Updating Instance %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("node_count") { - updateMask = append(updateMask, "nodeCount") - } - - if d.HasChange("memcache_version") { - updateMask = append(updateMask, "memcacheVersion") - } - - url, err = addQueryParams(url, 
map[string]string{"updateMask": resource_memcache_instance_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_memcache_instance_schema.TimeoutUpdate)) - - if err != nil { - return resource_memcache_instance_fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) - } else { - resource_memcache_instance_log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) - } - - err = memcacheOperationWaitTime( - config, res, project, "Updating Instance", userAgent, - d.Timeout(resource_memcache_instance_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceMemcacheInstanceRead(d, meta) -} - -func resourceMemcacheInstanceDelete(d *resource_memcache_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_memcache_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{MemcacheBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_memcache_instance_log.Printf("[DEBUG] Deleting Instance %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_memcache_instance_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Instance") - } - - err = memcacheOperationWaitTime( - config, res, project, "Deleting Instance", userAgent, - 
d.Timeout(resource_memcache_instance_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_memcache_instance_log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) - return nil -} - -func resourceMemcacheInstanceImport(d *resource_memcache_instance_schema.ResourceData, meta interface{}) ([]*resource_memcache_instance_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return nil, resource_memcache_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_memcache_instance_schema.ResourceData{d}, nil -} - -func flattenMemcacheInstanceDisplayName(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMemcacheInstanceMemcacheNodes(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "node_id": flattenMemcacheInstanceMemcacheNodesNodeId(original["nodeId"], d, config), - "zone": flattenMemcacheInstanceMemcacheNodesZone(original["zone"], d, config), - "port": flattenMemcacheInstanceMemcacheNodesPort(original["port"], d, config), - "host": flattenMemcacheInstanceMemcacheNodesHost(original["host"], d, config), - "state": flattenMemcacheInstanceMemcacheNodesState(original["state"], d, config), - }) - } - return transformed -} - -func 
flattenMemcacheInstanceMemcacheNodesNodeId(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMemcacheInstanceMemcacheNodesZone(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMemcacheInstanceMemcacheNodesPort(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_memcache_instance_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenMemcacheInstanceMemcacheNodesHost(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMemcacheInstanceMemcacheNodesState(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMemcacheInstanceCreateTime(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMemcacheInstanceDiscoveryEndpoint(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMemcacheInstanceLabels(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMemcacheInstanceMemcacheFullVersion(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMemcacheInstanceZones(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_memcache_instance_schema.NewSet(resource_memcache_instance_schema.HashString, v.([]interface{})) -} - -func flattenMemcacheInstanceAuthorizedNetwork(v 
interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMemcacheInstanceNodeCount(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_memcache_instance_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenMemcacheInstanceMemcacheVersion(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMemcacheInstanceNodeConfig(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cpu_count"] = - flattenMemcacheInstanceNodeConfigCpuCount(original["cpuCount"], d, config) - transformed["memory_size_mb"] = - flattenMemcacheInstanceNodeConfigMemorySizeMb(original["memorySizeMb"], d, config) - return []interface{}{transformed} -} - -func flattenMemcacheInstanceNodeConfigCpuCount(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_memcache_instance_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenMemcacheInstanceNodeConfigMemorySizeMb(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_memcache_instance_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := 
int(floatVal) - return intVal - } - - return v -} - -func flattenMemcacheInstanceMemcacheParameters(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["id"] = - flattenMemcacheInstanceMemcacheParametersId(original["id"], d, config) - transformed["params"] = - flattenMemcacheInstanceMemcacheParametersParams(original["params"], d, config) - return []interface{}{transformed} -} - -func flattenMemcacheInstanceMemcacheParametersId(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMemcacheInstanceMemcacheParametersParams(v interface{}, d *resource_memcache_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandMemcacheInstanceDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMemcacheInstanceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandMemcacheInstanceZones(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_memcache_instance_schema.Set).List() - return v, nil -} - -func expandMemcacheInstanceAuthorizedNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMemcacheInstanceNodeCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMemcacheInstanceMemcacheVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandMemcacheInstanceNodeConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCpuCount, err := expandMemcacheInstanceNodeConfigCpuCount(original["cpu_count"], d, config) - if err != nil { - return nil, err - } else if val := resource_memcache_instance_reflect.ValueOf(transformedCpuCount); val.IsValid() && !isEmptyValue(val) { - transformed["cpuCount"] = transformedCpuCount - } - - transformedMemorySizeMb, err := expandMemcacheInstanceNodeConfigMemorySizeMb(original["memory_size_mb"], d, config) - if err != nil { - return nil, err - } else if val := resource_memcache_instance_reflect.ValueOf(transformedMemorySizeMb); val.IsValid() && !isEmptyValue(val) { - transformed["memorySizeMb"] = transformedMemorySizeMb - } - - return transformed, nil -} - -func expandMemcacheInstanceNodeConfigCpuCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMemcacheInstanceNodeConfigMemorySizeMb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMemcacheInstanceMemcacheParameters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedId, err := expandMemcacheInstanceMemcacheParametersId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := resource_memcache_instance_reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedParams, err := expandMemcacheInstanceMemcacheParametersParams(original["params"], d, config) - if 
err != nil { - return nil, err - } else if val := resource_memcache_instance_reflect.ValueOf(transformedParams); val.IsValid() && !isEmptyValue(val) { - transformed["params"] = transformedParams - } - - return transformed, nil -} - -func expandMemcacheInstanceMemcacheParametersId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMemcacheInstanceMemcacheParametersParams(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceMLEngineModel() *resource_ml_engine_model_schema.Resource { - return &resource_ml_engine_model_schema.Resource{ - Create: resourceMLEngineModelCreate, - Read: resourceMLEngineModelRead, - Delete: resourceMLEngineModelDelete, - - Importer: &resource_ml_engine_model_schema.ResourceImporter{ - State: resourceMLEngineModelImport, - }, - - Timeouts: &resource_ml_engine_model_schema.ResourceTimeout{ - Create: resource_ml_engine_model_schema.DefaultTimeout(4 * resource_ml_engine_model_time.Minute), - Delete: resource_ml_engine_model_schema.DefaultTimeout(4 * resource_ml_engine_model_time.Minute), - }, - - Schema: map[string]*resource_ml_engine_model_schema.Schema{ - "name": { - Type: resource_ml_engine_model_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name specified for the model.`, - }, - "default_version": { - Type: resource_ml_engine_model_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The default version of the model. 
This version will be used to handle -prediction requests that do not specify a version.`, - MaxItems: 1, - Elem: &resource_ml_engine_model_schema.Resource{ - Schema: map[string]*resource_ml_engine_model_schema.Schema{ - "name": { - Type: resource_ml_engine_model_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name specified for the version when it was created.`, - }, - }, - }, - }, - "description": { - Type: resource_ml_engine_model_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The description specified for the model when it was created.`, - }, - "labels": { - Type: resource_ml_engine_model_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `One or more labels that you can add, to organize your models.`, - Elem: &resource_ml_engine_model_schema.Schema{Type: resource_ml_engine_model_schema.TypeString}, - }, - "online_prediction_console_logging": { - Type: resource_ml_engine_model_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `If true, online prediction nodes send stderr and stdout streams to Stackdriver Logging`, - }, - "online_prediction_logging": { - Type: resource_ml_engine_model_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `If true, online prediction access logs are sent to StackDriver Logging.`, - }, - "regions": { - Type: resource_ml_engine_model_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The list of regions where the model is going to be deployed. 
-Currently only one region per model is supported`, - MaxItems: 1, - Elem: &resource_ml_engine_model_schema.Schema{ - Type: resource_ml_engine_model_schema.TypeString, - }, - }, - "project": { - Type: resource_ml_engine_model_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMLEngineModelCreate(d *resource_ml_engine_model_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandMLEngineModelName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_ml_engine_model_reflect.ValueOf(nameProp)) && (ok || !resource_ml_engine_model_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandMLEngineModelDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_ml_engine_model_reflect.ValueOf(descriptionProp)) && (ok || !resource_ml_engine_model_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - defaultVersionProp, err := expandMLEngineModelDefaultVersion(d.Get("default_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("default_version"); !isEmptyValue(resource_ml_engine_model_reflect.ValueOf(defaultVersionProp)) && (ok || !resource_ml_engine_model_reflect.DeepEqual(v, defaultVersionProp)) { - obj["defaultVersion"] = defaultVersionProp - } - regionsProp, err := expandMLEngineModelRegions(d.Get("regions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("regions"); !isEmptyValue(resource_ml_engine_model_reflect.ValueOf(regionsProp)) && (ok || !resource_ml_engine_model_reflect.DeepEqual(v, regionsProp)) { - obj["regions"] = 
regionsProp - } - onlinePredictionLoggingProp, err := expandMLEngineModelOnlinePredictionLogging(d.Get("online_prediction_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("online_prediction_logging"); !isEmptyValue(resource_ml_engine_model_reflect.ValueOf(onlinePredictionLoggingProp)) && (ok || !resource_ml_engine_model_reflect.DeepEqual(v, onlinePredictionLoggingProp)) { - obj["onlinePredictionLogging"] = onlinePredictionLoggingProp - } - onlinePredictionConsoleLoggingProp, err := expandMLEngineModelOnlinePredictionConsoleLogging(d.Get("online_prediction_console_logging"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("online_prediction_console_logging"); !isEmptyValue(resource_ml_engine_model_reflect.ValueOf(onlinePredictionConsoleLoggingProp)) && (ok || !resource_ml_engine_model_reflect.DeepEqual(v, onlinePredictionConsoleLoggingProp)) { - obj["onlinePredictionConsoleLogging"] = onlinePredictionConsoleLoggingProp - } - labelsProp, err := expandMLEngineModelLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_ml_engine_model_reflect.ValueOf(labelsProp)) && (ok || !resource_ml_engine_model_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{MLEngineBasePath}}projects/{{project}}/models") - if err != nil { - return err - } - - resource_ml_engine_model_log.Printf("[DEBUG] Creating new Model: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_ml_engine_model_fmt.Errorf("Error fetching project for Model: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_ml_engine_model_schema.TimeoutCreate)) - if err != nil { - 
return resource_ml_engine_model_fmt.Errorf("Error creating Model: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/models/{{name}}") - if err != nil { - return resource_ml_engine_model_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_ml_engine_model_log.Printf("[DEBUG] Finished creating Model %q: %#v", d.Id(), res) - - return resourceMLEngineModelRead(d, meta) -} - -func resourceMLEngineModelRead(d *resource_ml_engine_model_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MLEngineBasePath}}projects/{{project}}/models/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_ml_engine_model_fmt.Errorf("Error fetching project for Model: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_ml_engine_model_fmt.Sprintf("MLEngineModel %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_ml_engine_model_fmt.Errorf("Error reading Model: %s", err) - } - - if err := d.Set("name", flattenMLEngineModelName(res["name"], d, config)); err != nil { - return resource_ml_engine_model_fmt.Errorf("Error reading Model: %s", err) - } - if err := d.Set("description", flattenMLEngineModelDescription(res["description"], d, config)); err != nil { - return resource_ml_engine_model_fmt.Errorf("Error reading Model: %s", err) - } - if err := d.Set("default_version", flattenMLEngineModelDefaultVersion(res["defaultVersion"], d, config)); err != nil { - return resource_ml_engine_model_fmt.Errorf("Error reading Model: %s", err) - } - if err := 
d.Set("regions", flattenMLEngineModelRegions(res["regions"], d, config)); err != nil { - return resource_ml_engine_model_fmt.Errorf("Error reading Model: %s", err) - } - if err := d.Set("online_prediction_logging", flattenMLEngineModelOnlinePredictionLogging(res["onlinePredictionLogging"], d, config)); err != nil { - return resource_ml_engine_model_fmt.Errorf("Error reading Model: %s", err) - } - if err := d.Set("online_prediction_console_logging", flattenMLEngineModelOnlinePredictionConsoleLogging(res["onlinePredictionConsoleLogging"], d, config)); err != nil { - return resource_ml_engine_model_fmt.Errorf("Error reading Model: %s", err) - } - if err := d.Set("labels", flattenMLEngineModelLabels(res["labels"], d, config)); err != nil { - return resource_ml_engine_model_fmt.Errorf("Error reading Model: %s", err) - } - - return nil -} - -func resourceMLEngineModelDelete(d *resource_ml_engine_model_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_ml_engine_model_fmt.Errorf("Error fetching project for Model: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{MLEngineBasePath}}projects/{{project}}/models/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_ml_engine_model_log.Printf("[DEBUG] Deleting Model %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_ml_engine_model_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Model") - } - - err = mLEngineOperationWaitTime( - config, res, project, "Deleting Model", userAgent, - d.Timeout(resource_ml_engine_model_schema.TimeoutDelete)) - - if 
err != nil { - return err - } - - resource_ml_engine_model_log.Printf("[DEBUG] Finished deleting Model %q: %#v", d.Id(), res) - return nil -} - -func resourceMLEngineModelImport(d *resource_ml_engine_model_schema.ResourceData, meta interface{}) ([]*resource_ml_engine_model_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/models/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/models/{{name}}") - if err != nil { - return nil, resource_ml_engine_model_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_ml_engine_model_schema.ResourceData{d}, nil -} - -func flattenMLEngineModelName(v interface{}, d *resource_ml_engine_model_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenMLEngineModelDescription(v interface{}, d *resource_ml_engine_model_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMLEngineModelDefaultVersion(v interface{}, d *resource_ml_engine_model_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["name"] = - flattenMLEngineModelDefaultVersionName(original["name"], d, config) - return []interface{}{transformed} -} - -func flattenMLEngineModelDefaultVersionName(v interface{}, d *resource_ml_engine_model_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMLEngineModelRegions(v interface{}, d *resource_ml_engine_model_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMLEngineModelOnlinePredictionLogging(v interface{}, d *resource_ml_engine_model_schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenMLEngineModelOnlinePredictionConsoleLogging(v interface{}, d *resource_ml_engine_model_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMLEngineModelLabels(v interface{}, d *resource_ml_engine_model_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandMLEngineModelName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMLEngineModelDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMLEngineModelDefaultVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandMLEngineModelDefaultVersionName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_ml_engine_model_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - return transformed, nil -} - -func expandMLEngineModelDefaultVersionName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMLEngineModelRegions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMLEngineModelOnlinePredictionLogging(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMLEngineModelOnlinePredictionConsoleLogging(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMLEngineModelLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := 
make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceMonitoringAlertPolicy() *resource_monitoring_alert_policy_schema.Resource { - return &resource_monitoring_alert_policy_schema.Resource{ - Create: resourceMonitoringAlertPolicyCreate, - Read: resourceMonitoringAlertPolicyRead, - Update: resourceMonitoringAlertPolicyUpdate, - Delete: resourceMonitoringAlertPolicyDelete, - - Importer: &resource_monitoring_alert_policy_schema.ResourceImporter{ - State: resourceMonitoringAlertPolicyImport, - }, - - Timeouts: &resource_monitoring_alert_policy_schema.ResourceTimeout{ - Create: resource_monitoring_alert_policy_schema.DefaultTimeout(4 * resource_monitoring_alert_policy_time.Minute), - Update: resource_monitoring_alert_policy_schema.DefaultTimeout(4 * resource_monitoring_alert_policy_time.Minute), - Delete: resource_monitoring_alert_policy_schema.DefaultTimeout(4 * resource_monitoring_alert_policy_time.Minute), - }, - - Schema: map[string]*resource_monitoring_alert_policy_schema.Schema{ - "combiner": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Required: true, - ValidateFunc: resource_monitoring_alert_policy_validation.StringInSlice([]string{"AND", "OR", "AND_WITH_MATCHING_RESOURCE"}, false), - Description: `How to combine the results of multiple conditions to -determine if an incident should be opened. Possible values: ["AND", "OR", "AND_WITH_MATCHING_RESOURCE"]`, - }, - "conditions": { - Type: resource_monitoring_alert_policy_schema.TypeList, - Required: true, - Description: `A list of conditions for the policy. The conditions are combined by -AND or OR according to the combiner field. If the combined conditions -evaluate to true, then an incident is created. 
A policy can have from -one to six conditions.`, - Elem: &resource_monitoring_alert_policy_schema.Resource{ - Schema: map[string]*resource_monitoring_alert_policy_schema.Schema{ - "display_name": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Required: true, - Description: `A short name or phrase used to identify the -condition in dashboards, notifications, and -incidents. To avoid confusion, don't use the same -display name for multiple conditions in the same -policy.`, - }, - "condition_absent": { - Type: resource_monitoring_alert_policy_schema.TypeList, - Optional: true, - Description: `A condition that checks that a time series -continues to receive new data points.`, - MaxItems: 1, - Elem: &resource_monitoring_alert_policy_schema.Resource{ - Schema: map[string]*resource_monitoring_alert_policy_schema.Schema{ - "duration": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Required: true, - Description: `The amount of time that a time series must -fail to report new data to be considered -failing. Currently, only values that are a -multiple of a minute--e.g. 60s, 120s, or 300s ---are supported.`, - }, - "aggregations": { - Type: resource_monitoring_alert_policy_schema.TypeList, - Optional: true, - Description: `Specifies the alignment of data points in -individual time series as well as how to -combine the retrieved time series together -(such as when aggregating multiple streams -on each resource to a single stream for each -resource or when aggregating streams across -all members of a group of resources). -Multiple aggregations are applied in the -order specified.`, - Elem: &resource_monitoring_alert_policy_schema.Resource{ - Schema: map[string]*resource_monitoring_alert_policy_schema.Schema{ - "alignment_period": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Optional: true, - Description: `The alignment period for per-time -series alignment. If present, -alignmentPeriod must be at least -60 seconds. 
After per-time series -alignment, each time series will -contain data points only on the -period boundaries. If -perSeriesAligner is not specified -or equals ALIGN_NONE, then this -field is ignored. If -perSeriesAligner is specified and -does not equal ALIGN_NONE, then -this field must be defined; -otherwise an error is returned.`, - }, - "cross_series_reducer": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Optional: true, - ValidateFunc: resource_monitoring_alert_policy_validation.StringInSlice([]string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", ""}, false), - Description: `The approach to be used to combine -time series. Not all reducer -functions may be applied to all -time series, depending on the -metric type and the value type of -the original time series. -Reduction may change the metric -type of value type of the time -series.Time series data must be -aligned in order to perform cross- -time series reduction. If -crossSeriesReducer is specified, -then perSeriesAligner must be -specified and not equal ALIGN_NONE -and alignmentPeriod must be -specified; otherwise, an error is -returned. Possible values: ["REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05"]`, - }, - "group_by_fields": { - Type: resource_monitoring_alert_policy_schema.TypeList, - Optional: true, - Description: `The set of fields to preserve when -crossSeriesReducer is specified. -The groupByFields determine how -the time series are partitioned -into subsets prior to applying the -aggregation function. 
Each subset -contains time series that have the -same value for each of the -grouping fields. Each individual -time series is a member of exactly -one subset. The crossSeriesReducer -is applied to each subset of time -series. It is not possible to -reduce across different resource -types, so this field implicitly -contains resource.type. Fields not -specified in groupByFields are -aggregated away. If groupByFields -is not specified and all the time -series have the same resource -type, then the time series are -aggregated into a single output -time series. If crossSeriesReducer -is not defined, this field is -ignored.`, - Elem: &resource_monitoring_alert_policy_schema.Schema{ - Type: resource_monitoring_alert_policy_schema.TypeString, - }, - }, - "per_series_aligner": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Optional: true, - ValidateFunc: resource_monitoring_alert_policy_validation.StringInSlice([]string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE", ""}, false), - Description: `The approach to be used to align -individual time series. Not all -alignment functions may be applied -to all time series, depending on -the metric type and value type of -the original time series. -Alignment may change the metric -type or the value type of the time -series.Time series data must be -aligned in order to perform cross- -time series reduction. If -crossSeriesReducer is specified, -then perSeriesAligner must be -specified and not equal ALIGN_NONE -and alignmentPeriod must be -specified; otherwise, an error is -returned. 
Possible values: ["ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE"]`, - }, - }, - }, - }, - "filter": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Optional: true, - Description: `A filter that identifies which time series -should be compared with the threshold.The -filter is similar to the one that is -specified in the -MetricService.ListTimeSeries request (that -call is useful to verify the time series -that will be retrieved / processed) and must -specify the metric type and optionally may -contain restrictions on resource type, -resource labels, and metric labels. This -field may not exceed 2048 Unicode characters -in length.`, - }, - "trigger": { - Type: resource_monitoring_alert_policy_schema.TypeList, - Optional: true, - Description: `The number/percent of time series for which -the comparison must hold in order for the -condition to trigger. 
If unspecified, then -the condition will trigger if the comparison -is true for any of the time series that have -been identified by filter and aggregations.`, - MaxItems: 1, - Elem: &resource_monitoring_alert_policy_schema.Resource{ - Schema: map[string]*resource_monitoring_alert_policy_schema.Schema{ - "count": { - Type: resource_monitoring_alert_policy_schema.TypeInt, - Optional: true, - Description: `The absolute number of time series -that must fail the predicate for the -condition to be triggered.`, - }, - "percent": { - Type: resource_monitoring_alert_policy_schema.TypeFloat, - Optional: true, - Description: `The percentage of time series that -must fail the predicate for the -condition to be triggered.`, - }, - }, - }, - }, - }, - }, - }, - "condition_monitoring_query_language": { - Type: resource_monitoring_alert_policy_schema.TypeList, - Optional: true, - Description: `A Monitoring Query Language query that outputs a boolean stream`, - MaxItems: 1, - Elem: &resource_monitoring_alert_policy_schema.Resource{ - Schema: map[string]*resource_monitoring_alert_policy_schema.Schema{ - "duration": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Required: true, - Description: `The amount of time that a time series must -violate the threshold to be considered -failing. Currently, only values that are a -multiple of a minute--e.g., 0, 60, 120, or -300 seconds--are supported. If an invalid -value is given, an error will be returned. 
-When choosing a duration, it is useful to -keep in mind the frequency of the underlying -time series data (which may also be affected -by any alignments specified in the -aggregations field); a good duration is long -enough so that a single outlier does not -generate spurious alerts, but short enough -that unhealthy states are detected and -alerted on quickly.`, - }, - "query": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Required: true, - Description: `Monitoring Query Language query that outputs a boolean stream.`, - }, - "trigger": { - Type: resource_monitoring_alert_policy_schema.TypeList, - Optional: true, - Description: `The number/percent of time series for which -the comparison must hold in order for the -condition to trigger. If unspecified, then -the condition will trigger if the comparison -is true for any of the time series that have -been identified by filter and aggregations, -or by the ratio, if denominator_filter and -denominator_aggregations are specified.`, - MaxItems: 1, - Elem: &resource_monitoring_alert_policy_schema.Resource{ - Schema: map[string]*resource_monitoring_alert_policy_schema.Schema{ - "count": { - Type: resource_monitoring_alert_policy_schema.TypeInt, - Optional: true, - Description: `The absolute number of time series -that must fail the predicate for the -condition to be triggered.`, - }, - "percent": { - Type: resource_monitoring_alert_policy_schema.TypeFloat, - Optional: true, - Description: `The percentage of time series that -must fail the predicate for the -condition to be triggered.`, - }, - }, - }, - }, - }, - }, - }, - "condition_threshold": { - Type: resource_monitoring_alert_policy_schema.TypeList, - Optional: true, - Description: `A condition that compares a time series against a -threshold.`, - MaxItems: 1, - Elem: &resource_monitoring_alert_policy_schema.Resource{ - Schema: map[string]*resource_monitoring_alert_policy_schema.Schema{ - "comparison": { - Type: 
resource_monitoring_alert_policy_schema.TypeString, - Required: true, - ValidateFunc: resource_monitoring_alert_policy_validation.StringInSlice([]string{"COMPARISON_GT", "COMPARISON_GE", "COMPARISON_LT", "COMPARISON_LE", "COMPARISON_EQ", "COMPARISON_NE"}, false), - Description: `The comparison to apply between the time -series (indicated by filter and aggregation) -and the threshold (indicated by -threshold_value). The comparison is applied -on each time series, with the time series on -the left-hand side and the threshold on the -right-hand side. Only COMPARISON_LT and -COMPARISON_GT are supported currently. Possible values: ["COMPARISON_GT", "COMPARISON_GE", "COMPARISON_LT", "COMPARISON_LE", "COMPARISON_EQ", "COMPARISON_NE"]`, - }, - "duration": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Required: true, - Description: `The amount of time that a time series must -violate the threshold to be considered -failing. Currently, only values that are a -multiple of a minute--e.g., 0, 60, 120, or -300 seconds--are supported. If an invalid -value is given, an error will be returned. -When choosing a duration, it is useful to -keep in mind the frequency of the underlying -time series data (which may also be affected -by any alignments specified in the -aggregations field); a good duration is long -enough so that a single outlier does not -generate spurious alerts, but short enough -that unhealthy states are detected and -alerted on quickly.`, - }, - "aggregations": { - Type: resource_monitoring_alert_policy_schema.TypeList, - Optional: true, - Description: `Specifies the alignment of data points in -individual time series as well as how to -combine the retrieved time series together -(such as when aggregating multiple streams -on each resource to a single stream for each -resource or when aggregating streams across -all members of a group of resources). 
-Multiple aggregations are applied in the -order specified.This field is similar to the -one in the MetricService.ListTimeSeries -request. It is advisable to use the -ListTimeSeries method when debugging this -field.`, - Elem: &resource_monitoring_alert_policy_schema.Resource{ - Schema: map[string]*resource_monitoring_alert_policy_schema.Schema{ - "alignment_period": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Optional: true, - Description: `The alignment period for per-time -series alignment. If present, -alignmentPeriod must be at least -60 seconds. After per-time series -alignment, each time series will -contain data points only on the -period boundaries. If -perSeriesAligner is not specified -or equals ALIGN_NONE, then this -field is ignored. If -perSeriesAligner is specified and -does not equal ALIGN_NONE, then -this field must be defined; -otherwise an error is returned.`, - }, - "cross_series_reducer": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Optional: true, - ValidateFunc: resource_monitoring_alert_policy_validation.StringInSlice([]string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", ""}, false), - Description: `The approach to be used to combine -time series. Not all reducer -functions may be applied to all -time series, depending on the -metric type and the value type of -the original time series. -Reduction may change the metric -type of value type of the time -series.Time series data must be -aligned in order to perform cross- -time series reduction. If -crossSeriesReducer is specified, -then perSeriesAligner must be -specified and not equal ALIGN_NONE -and alignmentPeriod must be -specified; otherwise, an error is -returned. 
Possible values: ["REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05"]`, - }, - "group_by_fields": { - Type: resource_monitoring_alert_policy_schema.TypeList, - Optional: true, - Description: `The set of fields to preserve when -crossSeriesReducer is specified. -The groupByFields determine how -the time series are partitioned -into subsets prior to applying the -aggregation function. Each subset -contains time series that have the -same value for each of the -grouping fields. Each individual -time series is a member of exactly -one subset. The crossSeriesReducer -is applied to each subset of time -series. It is not possible to -reduce across different resource -types, so this field implicitly -contains resource.type. Fields not -specified in groupByFields are -aggregated away. If groupByFields -is not specified and all the time -series have the same resource -type, then the time series are -aggregated into a single output -time series. If crossSeriesReducer -is not defined, this field is -ignored.`, - Elem: &resource_monitoring_alert_policy_schema.Schema{ - Type: resource_monitoring_alert_policy_schema.TypeString, - }, - }, - "per_series_aligner": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Optional: true, - ValidateFunc: resource_monitoring_alert_policy_validation.StringInSlice([]string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE", ""}, false), - Description: `The approach to be used to align -individual time series. 
Not all -alignment functions may be applied -to all time series, depending on -the metric type and value type of -the original time series. -Alignment may change the metric -type or the value type of the time -series.Time series data must be -aligned in order to perform cross- -time series reduction. If -crossSeriesReducer is specified, -then perSeriesAligner must be -specified and not equal ALIGN_NONE -and alignmentPeriod must be -specified; otherwise, an error is -returned. Possible values: ["ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE"]`, - }, - }, - }, - }, - "denominator_aggregations": { - Type: resource_monitoring_alert_policy_schema.TypeList, - Optional: true, - Description: `Specifies the alignment of data points in -individual time series selected by -denominatorFilter as well as how to combine -the retrieved time series together (such as -when aggregating multiple streams on each -resource to a single stream for each -resource or when aggregating streams across -all members of a group of resources).When -computing ratios, the aggregations and -denominator_aggregations fields must use the -same alignment period and produce time -series that have the same periodicity and -labels.This field is similar to the one in -the MetricService.ListTimeSeries request. It -is advisable to use the ListTimeSeries -method when debugging this field.`, - Elem: &resource_monitoring_alert_policy_schema.Resource{ - Schema: map[string]*resource_monitoring_alert_policy_schema.Schema{ - "alignment_period": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Optional: true, - Description: `The alignment period for per-time -series alignment. 
If present, -alignmentPeriod must be at least -60 seconds. After per-time series -alignment, each time series will -contain data points only on the -period boundaries. If -perSeriesAligner is not specified -or equals ALIGN_NONE, then this -field is ignored. If -perSeriesAligner is specified and -does not equal ALIGN_NONE, then -this field must be defined; -otherwise an error is returned.`, - }, - "cross_series_reducer": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Optional: true, - ValidateFunc: resource_monitoring_alert_policy_validation.StringInSlice([]string{"REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05", ""}, false), - Description: `The approach to be used to combine -time series. Not all reducer -functions may be applied to all -time series, depending on the -metric type and the value type of -the original time series. -Reduction may change the metric -type of value type of the time -series.Time series data must be -aligned in order to perform cross- -time series reduction. If -crossSeriesReducer is specified, -then perSeriesAligner must be -specified and not equal ALIGN_NONE -and alignmentPeriod must be -specified; otherwise, an error is -returned. Possible values: ["REDUCE_NONE", "REDUCE_MEAN", "REDUCE_MIN", "REDUCE_MAX", "REDUCE_SUM", "REDUCE_STDDEV", "REDUCE_COUNT", "REDUCE_COUNT_TRUE", "REDUCE_COUNT_FALSE", "REDUCE_FRACTION_TRUE", "REDUCE_PERCENTILE_99", "REDUCE_PERCENTILE_95", "REDUCE_PERCENTILE_50", "REDUCE_PERCENTILE_05"]`, - }, - "group_by_fields": { - Type: resource_monitoring_alert_policy_schema.TypeList, - Optional: true, - Description: `The set of fields to preserve when -crossSeriesReducer is specified. 
-The groupByFields determine how -the time series are partitioned -into subsets prior to applying the -aggregation function. Each subset -contains time series that have the -same value for each of the -grouping fields. Each individual -time series is a member of exactly -one subset. The crossSeriesReducer -is applied to each subset of time -series. It is not possible to -reduce across different resource -types, so this field implicitly -contains resource.type. Fields not -specified in groupByFields are -aggregated away. If groupByFields -is not specified and all the time -series have the same resource -type, then the time series are -aggregated into a single output -time series. If crossSeriesReducer -is not defined, this field is -ignored.`, - Elem: &resource_monitoring_alert_policy_schema.Schema{ - Type: resource_monitoring_alert_policy_schema.TypeString, - }, - }, - "per_series_aligner": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Optional: true, - ValidateFunc: resource_monitoring_alert_policy_validation.StringInSlice([]string{"ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE", ""}, false), - Description: `The approach to be used to align -individual time series. Not all -alignment functions may be applied -to all time series, depending on -the metric type and value type of -the original time series. -Alignment may change the metric -type or the value type of the time -series.Time series data must be -aligned in order to perform cross- -time series reduction. If -crossSeriesReducer is specified, -then perSeriesAligner must be -specified and not equal ALIGN_NONE -and alignmentPeriod must be -specified; otherwise, an error is -returned. 
Possible values: ["ALIGN_NONE", "ALIGN_DELTA", "ALIGN_RATE", "ALIGN_INTERPOLATE", "ALIGN_NEXT_OLDER", "ALIGN_MIN", "ALIGN_MAX", "ALIGN_MEAN", "ALIGN_COUNT", "ALIGN_SUM", "ALIGN_STDDEV", "ALIGN_COUNT_TRUE", "ALIGN_COUNT_FALSE", "ALIGN_FRACTION_TRUE", "ALIGN_PERCENTILE_99", "ALIGN_PERCENTILE_95", "ALIGN_PERCENTILE_50", "ALIGN_PERCENTILE_05", "ALIGN_PERCENT_CHANGE"]`, - }, - }, - }, - }, - "denominator_filter": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Optional: true, - Description: `A filter that identifies a time series that -should be used as the denominator of a ratio -that will be compared with the threshold. If -a denominator_filter is specified, the time -series specified by the filter field will be -used as the numerator.The filter is similar -to the one that is specified in the -MetricService.ListTimeSeries request (that -call is useful to verify the time series -that will be retrieved / processed) and must -specify the metric type and optionally may -contain restrictions on resource type, -resource labels, and metric labels. This -field may not exceed 2048 Unicode characters -in length.`, - }, - "filter": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Optional: true, - Description: `A filter that identifies which time series -should be compared with the threshold.The -filter is similar to the one that is -specified in the -MetricService.ListTimeSeries request (that -call is useful to verify the time series -that will be retrieved / processed) and must -specify the metric type and optionally may -contain restrictions on resource type, -resource labels, and metric labels. 
This -field may not exceed 2048 Unicode characters -in length.`, - }, - "threshold_value": { - Type: resource_monitoring_alert_policy_schema.TypeFloat, - Optional: true, - Description: `A value against which to compare the time -series.`, - }, - "trigger": { - Type: resource_monitoring_alert_policy_schema.TypeList, - Optional: true, - Description: `The number/percent of time series for which -the comparison must hold in order for the -condition to trigger. If unspecified, then -the condition will trigger if the comparison -is true for any of the time series that have -been identified by filter and aggregations, -or by the ratio, if denominator_filter and -denominator_aggregations are specified.`, - MaxItems: 1, - Elem: &resource_monitoring_alert_policy_schema.Resource{ - Schema: map[string]*resource_monitoring_alert_policy_schema.Schema{ - "count": { - Type: resource_monitoring_alert_policy_schema.TypeInt, - Optional: true, - Description: `The absolute number of time series -that must fail the predicate for the -condition to be triggered.`, - }, - "percent": { - Type: resource_monitoring_alert_policy_schema.TypeFloat, - Optional: true, - Description: `The percentage of time series that -must fail the predicate for the -condition to be triggered.`, - }, - }, - }, - }, - }, - }, - }, - "name": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Computed: true, - Description: `The unique resource name for this condition. -Its syntax is: -projects/[PROJECT_ID]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID] -[CONDITION_ID] is assigned by Stackdriver Monitoring when -the condition is created as part of a new or updated alerting -policy.`, - }, - }, - }, - }, - "display_name": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Required: true, - Description: `A short name or phrase used to identify the policy in -dashboards, notifications, and incidents. 
To avoid confusion, don't use -the same display name for multiple policies in the same project. The -name is limited to 512 Unicode characters.`, - }, - "documentation": { - Type: resource_monitoring_alert_policy_schema.TypeList, - Optional: true, - Description: `Documentation that is included with notifications and incidents related -to this policy. Best practice is for the documentation to include information -to help responders understand, mitigate, escalate, and correct the underlying -problems detected by the alerting policy. Notification channels that have -limited capacity might not show this documentation.`, - MaxItems: 1, - Elem: &resource_monitoring_alert_policy_schema.Resource{ - Schema: map[string]*resource_monitoring_alert_policy_schema.Schema{ - "content": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Optional: true, - Description: `The text of the documentation, interpreted according to mimeType. -The content may not exceed 8,192 Unicode characters and may not -exceed more than 10,240 bytes when encoded in UTF-8 format, -whichever is smaller.`, - AtLeastOneOf: []string{"documentation.0.content", "documentation.0.mime_type"}, - }, - "mime_type": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Optional: true, - Description: `The format of the content field. Presently, only the value -"text/markdown" is supported.`, - Default: "text/markdown", - AtLeastOneOf: []string{"documentation.0.content", "documentation.0.mime_type"}, - }, - }, - }, - }, - "enabled": { - Type: resource_monitoring_alert_policy_schema.TypeBool, - Optional: true, - Description: `Whether or not the policy is enabled. The default is true.`, - Default: true, - }, - "notification_channels": { - Type: resource_monitoring_alert_policy_schema.TypeList, - Optional: true, - Description: `Identifies the notification channels to which notifications should be -sent when incidents are opened or closed or when new violations occur -on an already opened incident. 
Each element of this array corresponds -to the name field in each of the NotificationChannel objects that are -returned from the notificationChannels.list method. The syntax of the -entries in this field is -'projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]'`, - Elem: &resource_monitoring_alert_policy_schema.Schema{ - Type: resource_monitoring_alert_policy_schema.TypeString, - }, - }, - "user_labels": { - Type: resource_monitoring_alert_policy_schema.TypeMap, - Optional: true, - Description: `This field is intended to be used for organizing and identifying the AlertPolicy -objects.The field can contain up to 64 entries. Each key and value is limited -to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values -can contain only lowercase letters, numerals, underscores, and dashes. Keys -must begin with a letter.`, - Elem: &resource_monitoring_alert_policy_schema.Schema{Type: resource_monitoring_alert_policy_schema.TypeString}, - }, - "creation_record": { - Type: resource_monitoring_alert_policy_schema.TypeList, - Computed: true, - Description: `A read-only record of the creation of the alerting policy. -If provided in a call to create or update, this field will -be ignored.`, - Elem: &resource_monitoring_alert_policy_schema.Resource{ - Schema: map[string]*resource_monitoring_alert_policy_schema.Schema{ - "mutate_time": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Computed: true, - Description: `When the change occurred.`, - }, - "mutated_by": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Computed: true, - Description: `The email address of the user making the change.`, - }, - }, - }, - }, - "name": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Computed: true, - Description: `The unique resource name for this policy. 
-Its syntax is: projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]`, - }, - "project": { - Type: resource_monitoring_alert_policy_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMonitoringAlertPolicyCreate(d *resource_monitoring_alert_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandMonitoringAlertPolicyDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_monitoring_alert_policy_reflect.ValueOf(displayNameProp)) && (ok || !resource_monitoring_alert_policy_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - combinerProp, err := expandMonitoringAlertPolicyCombiner(d.Get("combiner"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("combiner"); !isEmptyValue(resource_monitoring_alert_policy_reflect.ValueOf(combinerProp)) && (ok || !resource_monitoring_alert_policy_reflect.DeepEqual(v, combinerProp)) { - obj["combiner"] = combinerProp - } - enabledProp, err := expandMonitoringAlertPolicyEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); ok || !resource_monitoring_alert_policy_reflect.DeepEqual(v, enabledProp) { - obj["enabled"] = enabledProp - } - conditionsProp, err := expandMonitoringAlertPolicyConditions(d.Get("conditions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("conditions"); !isEmptyValue(resource_monitoring_alert_policy_reflect.ValueOf(conditionsProp)) && (ok || !resource_monitoring_alert_policy_reflect.DeepEqual(v, conditionsProp)) { - obj["conditions"] = conditionsProp - } - notificationChannelsProp, err := 
expandMonitoringAlertPolicyNotificationChannels(d.Get("notification_channels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_channels"); !isEmptyValue(resource_monitoring_alert_policy_reflect.ValueOf(notificationChannelsProp)) && (ok || !resource_monitoring_alert_policy_reflect.DeepEqual(v, notificationChannelsProp)) { - obj["notificationChannels"] = notificationChannelsProp - } - userLabelsProp, err := expandMonitoringAlertPolicyUserLabels(d.Get("user_labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_labels"); !isEmptyValue(resource_monitoring_alert_policy_reflect.ValueOf(userLabelsProp)) && (ok || !resource_monitoring_alert_policy_reflect.DeepEqual(v, userLabelsProp)) { - obj["userLabels"] = userLabelsProp - } - documentationProp, err := expandMonitoringAlertPolicyDocumentation(d.Get("documentation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("documentation"); !isEmptyValue(resource_monitoring_alert_policy_reflect.ValueOf(documentationProp)) && (ok || !resource_monitoring_alert_policy_reflect.DeepEqual(v, documentationProp)) { - obj["documentation"] = documentationProp - } - - lockName, err := replaceVars(d, config, "alertPolicy/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/alertPolicies") - if err != nil { - return err - } - - resource_monitoring_alert_policy_log.Printf("[DEBUG] Creating new AlertPolicy: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error fetching project for AlertPolicy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, 
userAgent, obj, d.Timeout(resource_monitoring_alert_policy_schema.TimeoutCreate), isMonitoringConcurrentEditError) - if err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error creating AlertPolicy: %s", err) - } - if err := d.Set("name", flattenMonitoringAlertPolicyName(res["name"], d, config)); err != nil { - return resource_monitoring_alert_policy_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return resource_monitoring_alert_policy_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return resource_monitoring_alert_policy_fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - resource_monitoring_alert_policy_log.Printf("[DEBUG] Finished creating AlertPolicy %q: %#v", d.Id(), res) - - return resourceMonitoringAlertPolicyRead(d, meta) -} - -func resourceMonitoringAlertPolicyRead(d *resource_monitoring_alert_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error fetching project for AlertPolicy: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, resource_monitoring_alert_policy_fmt.Sprintf("MonitoringAlertPolicy %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error reading AlertPolicy: %s", err) - } - - if err := d.Set("name", flattenMonitoringAlertPolicyName(res["name"], d, config)); err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error reading AlertPolicy: %s", err) - } - if err := d.Set("display_name", flattenMonitoringAlertPolicyDisplayName(res["displayName"], d, config)); err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error reading AlertPolicy: %s", err) - } - if err := d.Set("combiner", flattenMonitoringAlertPolicyCombiner(res["combiner"], d, config)); err != nil { - return 
resource_monitoring_alert_policy_fmt.Errorf("Error reading AlertPolicy: %s", err) - } - if err := d.Set("creation_record", flattenMonitoringAlertPolicyCreationRecord(res["creationRecord"], d, config)); err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error reading AlertPolicy: %s", err) - } - if err := d.Set("enabled", flattenMonitoringAlertPolicyEnabled(res["enabled"], d, config)); err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error reading AlertPolicy: %s", err) - } - if err := d.Set("conditions", flattenMonitoringAlertPolicyConditions(res["conditions"], d, config)); err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error reading AlertPolicy: %s", err) - } - if err := d.Set("notification_channels", flattenMonitoringAlertPolicyNotificationChannels(res["notificationChannels"], d, config)); err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error reading AlertPolicy: %s", err) - } - if err := d.Set("user_labels", flattenMonitoringAlertPolicyUserLabels(res["userLabels"], d, config)); err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error reading AlertPolicy: %s", err) - } - if err := d.Set("documentation", flattenMonitoringAlertPolicyDocumentation(res["documentation"], d, config)); err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error reading AlertPolicy: %s", err) - } - - return nil -} - -func resourceMonitoringAlertPolicyUpdate(d *resource_monitoring_alert_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error fetching project for AlertPolicy: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := 
expandMonitoringAlertPolicyDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_monitoring_alert_policy_reflect.ValueOf(v)) && (ok || !resource_monitoring_alert_policy_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - combinerProp, err := expandMonitoringAlertPolicyCombiner(d.Get("combiner"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("combiner"); !isEmptyValue(resource_monitoring_alert_policy_reflect.ValueOf(v)) && (ok || !resource_monitoring_alert_policy_reflect.DeepEqual(v, combinerProp)) { - obj["combiner"] = combinerProp - } - enabledProp, err := expandMonitoringAlertPolicyEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); ok || !resource_monitoring_alert_policy_reflect.DeepEqual(v, enabledProp) { - obj["enabled"] = enabledProp - } - conditionsProp, err := expandMonitoringAlertPolicyConditions(d.Get("conditions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("conditions"); !isEmptyValue(resource_monitoring_alert_policy_reflect.ValueOf(v)) && (ok || !resource_monitoring_alert_policy_reflect.DeepEqual(v, conditionsProp)) { - obj["conditions"] = conditionsProp - } - notificationChannelsProp, err := expandMonitoringAlertPolicyNotificationChannels(d.Get("notification_channels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_channels"); !isEmptyValue(resource_monitoring_alert_policy_reflect.ValueOf(v)) && (ok || !resource_monitoring_alert_policy_reflect.DeepEqual(v, notificationChannelsProp)) { - obj["notificationChannels"] = notificationChannelsProp - } - userLabelsProp, err := expandMonitoringAlertPolicyUserLabels(d.Get("user_labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_labels"); 
!isEmptyValue(resource_monitoring_alert_policy_reflect.ValueOf(v)) && (ok || !resource_monitoring_alert_policy_reflect.DeepEqual(v, userLabelsProp)) { - obj["userLabels"] = userLabelsProp - } - documentationProp, err := expandMonitoringAlertPolicyDocumentation(d.Get("documentation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("documentation"); !isEmptyValue(resource_monitoring_alert_policy_reflect.ValueOf(v)) && (ok || !resource_monitoring_alert_policy_reflect.DeepEqual(v, documentationProp)) { - obj["documentation"] = documentationProp - } - - lockName, err := replaceVars(d, config, "alertPolicy/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - resource_monitoring_alert_policy_log.Printf("[DEBUG] Updating AlertPolicy %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("combiner") { - updateMask = append(updateMask, "combiner") - } - - if d.HasChange("enabled") { - updateMask = append(updateMask, "enabled") - } - - if d.HasChange("conditions") { - updateMask = append(updateMask, "conditions") - } - - if d.HasChange("notification_channels") { - updateMask = append(updateMask, "notificationChannels") - } - - if d.HasChange("user_labels") { - updateMask = append(updateMask, "userLabels") - } - - if d.HasChange("documentation") { - updateMask = append(updateMask, "documentation") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_monitoring_alert_policy_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, 
d.Timeout(resource_monitoring_alert_policy_schema.TimeoutUpdate), isMonitoringConcurrentEditError) - - if err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error updating AlertPolicy %q: %s", d.Id(), err) - } else { - resource_monitoring_alert_policy_log.Printf("[DEBUG] Finished updating AlertPolicy %q: %#v", d.Id(), res) - } - - return resourceMonitoringAlertPolicyRead(d, meta) -} - -func resourceMonitoringAlertPolicyDelete(d *resource_monitoring_alert_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_alert_policy_fmt.Errorf("Error fetching project for AlertPolicy: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "alertPolicy/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_monitoring_alert_policy_log.Printf("[DEBUG] Deleting AlertPolicy %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_alert_policy_schema.TimeoutDelete), isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, "AlertPolicy") - } - - resource_monitoring_alert_policy_log.Printf("[DEBUG] Finished deleting AlertPolicy %q: %#v", d.Id(), res) - return nil -} - -func resourceMonitoringAlertPolicyImport(d *resource_monitoring_alert_policy_schema.ResourceData, meta interface{}) ([]*resource_monitoring_alert_policy_schema.ResourceData, error) { - - config := meta.(*Config) - - if err := 
parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*resource_monitoring_alert_policy_schema.ResourceData{d}, nil -} - -func flattenMonitoringAlertPolicyName(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyDisplayName(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyCombiner(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyCreationRecord(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["mutate_time"] = - flattenMonitoringAlertPolicyCreationRecordMutateTime(original["mutateTime"], d, config) - transformed["mutated_by"] = - flattenMonitoringAlertPolicyCreationRecordMutatedBy(original["mutatedBy"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringAlertPolicyCreationRecordMutateTime(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyCreationRecordMutatedBy(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyEnabled(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditions(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := 
make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "condition_absent": flattenMonitoringAlertPolicyConditionsConditionAbsent(original["conditionAbsent"], d, config), - "name": flattenMonitoringAlertPolicyConditionsName(original["name"], d, config), - "condition_monitoring_query_language": flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguage(original["conditionMonitoringQueryLanguage"], d, config), - "condition_threshold": flattenMonitoringAlertPolicyConditionsConditionThreshold(original["conditionThreshold"], d, config), - "display_name": flattenMonitoringAlertPolicyConditionsDisplayName(original["displayName"], d, config), - }) - } - return transformed -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsent(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["aggregations"] = - flattenMonitoringAlertPolicyConditionsConditionAbsentAggregations(original["aggregations"], d, config) - transformed["trigger"] = - flattenMonitoringAlertPolicyConditionsConditionAbsentTrigger(original["trigger"], d, config) - transformed["duration"] = - flattenMonitoringAlertPolicyConditionsConditionAbsentDuration(original["duration"], d, config) - transformed["filter"] = - flattenMonitoringAlertPolicyConditionsConditionAbsentFilter(original["filter"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregations(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw 
:= range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "per_series_aligner": flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsPerSeriesAligner(original["perSeriesAligner"], d, config), - "group_by_fields": flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsGroupByFields(original["groupByFields"], d, config), - "alignment_period": flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsAlignmentPeriod(original["alignmentPeriod"], d, config), - "cross_series_reducer": flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsCrossSeriesReducer(original["crossSeriesReducer"], d, config), - }) - } - return transformed -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsPerSeriesAligner(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsGroupByFields(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsAlignmentPeriod(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentAggregationsCrossSeriesReducer(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentTrigger(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["percent"] = - 
flattenMonitoringAlertPolicyConditionsConditionAbsentTriggerPercent(original["percent"], d, config) - transformed["count"] = - flattenMonitoringAlertPolicyConditionsConditionAbsentTriggerCount(original["count"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentTriggerPercent(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentTriggerCount(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_monitoring_alert_policy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentDuration(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionAbsentFilter(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsName(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguage(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["query"] = - flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageQuery(original["query"], d, config) - transformed["duration"] = - 
flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageDuration(original["duration"], d, config) - transformed["trigger"] = - flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigger(original["trigger"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageQuery(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageDuration(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigger(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["percent"] = - flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerPercent(original["percent"], d, config) - transformed["count"] = - flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerCount(original["count"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerPercent(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerCount(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_monitoring_alert_policy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) 
- return intVal - } - - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThreshold(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["threshold_value"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(original["thresholdValue"], d, config) - transformed["denominator_filter"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorFilter(original["denominatorFilter"], d, config) - transformed["denominator_aggregations"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregations(original["denominatorAggregations"], d, config) - transformed["duration"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdDuration(original["duration"], d, config) - transformed["comparison"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdComparison(original["comparison"], d, config) - transformed["trigger"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdTrigger(original["trigger"], d, config) - transformed["aggregations"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdAggregations(original["aggregations"], d, config) - transformed["filter"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdFilter(original["filter"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorFilter(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregations(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "per_series_aligner": flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsPerSeriesAligner(original["perSeriesAligner"], d, config), - "group_by_fields": flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsGroupByFields(original["groupByFields"], d, config), - "alignment_period": flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsAlignmentPeriod(original["alignmentPeriod"], d, config), - "cross_series_reducer": flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsCrossSeriesReducer(original["crossSeriesReducer"], d, config), - }) - } - return transformed -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsPerSeriesAligner(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsGroupByFields(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsAlignmentPeriod(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsCrossSeriesReducer(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdDuration(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdComparison(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdTrigger(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["percent"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdTriggerPercent(original["percent"], d, config) - transformed["count"] = - flattenMonitoringAlertPolicyConditionsConditionThresholdTriggerCount(original["count"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdTriggerPercent(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdTriggerCount(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_monitoring_alert_policy_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregations(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := 
raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "per_series_aligner": flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsPerSeriesAligner(original["perSeriesAligner"], d, config), - "group_by_fields": flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsGroupByFields(original["groupByFields"], d, config), - "alignment_period": flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsAlignmentPeriod(original["alignmentPeriod"], d, config), - "cross_series_reducer": flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsCrossSeriesReducer(original["crossSeriesReducer"], d, config), - }) - } - return transformed -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsPerSeriesAligner(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsGroupByFields(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsAlignmentPeriod(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdAggregationsCrossSeriesReducer(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsConditionThresholdFilter(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyConditionsDisplayName(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenMonitoringAlertPolicyNotificationChannels(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyUserLabels(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyDocumentation(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["content"] = - flattenMonitoringAlertPolicyDocumentationContent(original["content"], d, config) - transformed["mime_type"] = - flattenMonitoringAlertPolicyDocumentationMimeType(original["mimeType"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringAlertPolicyDocumentationContent(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringAlertPolicyDocumentationMimeType(v interface{}, d *resource_monitoring_alert_policy_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandMonitoringAlertPolicyDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyCombiner(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedConditionAbsent, err := expandMonitoringAlertPolicyConditionsConditionAbsent(original["condition_absent"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedConditionAbsent); val.IsValid() && !isEmptyValue(val) { - transformed["conditionAbsent"] = transformedConditionAbsent - } - - transformedName, err := expandMonitoringAlertPolicyConditionsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedConditionMonitoringQueryLanguage, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguage(original["condition_monitoring_query_language"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedConditionMonitoringQueryLanguage); val.IsValid() && !isEmptyValue(val) { - transformed["conditionMonitoringQueryLanguage"] = transformedConditionMonitoringQueryLanguage - } - - transformedConditionThreshold, err := expandMonitoringAlertPolicyConditionsConditionThreshold(original["condition_threshold"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedConditionThreshold); val.IsValid() && !isEmptyValue(val) { - transformed["conditionThreshold"] = transformedConditionThreshold - } - - transformedDisplayName, err := expandMonitoringAlertPolicyConditionsDisplayName(original["display_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedDisplayName); val.IsValid() && !isEmptyValue(val) { - transformed["displayName"] = transformedDisplayName - } - - req = append(req, transformed) - } - return req, nil -} - -func 
expandMonitoringAlertPolicyConditionsConditionAbsent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAggregations, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregations(original["aggregations"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedAggregations); val.IsValid() && !isEmptyValue(val) { - transformed["aggregations"] = transformedAggregations - } - - transformedTrigger, err := expandMonitoringAlertPolicyConditionsConditionAbsentTrigger(original["trigger"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedTrigger); val.IsValid() && !isEmptyValue(val) { - transformed["trigger"] = transformedTrigger - } - - transformedDuration, err := expandMonitoringAlertPolicyConditionsConditionAbsentDuration(original["duration"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedDuration); val.IsValid() && !isEmptyValue(val) { - transformed["duration"] = transformedDuration - } - - transformedFilter, err := expandMonitoringAlertPolicyConditionsConditionAbsentFilter(original["filter"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedFilter); val.IsValid() && !isEmptyValue(val) { - transformed["filter"] = transformedFilter - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentAggregations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - 
original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPerSeriesAligner, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsPerSeriesAligner(original["per_series_aligner"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedPerSeriesAligner); val.IsValid() && !isEmptyValue(val) { - transformed["perSeriesAligner"] = transformedPerSeriesAligner - } - - transformedGroupByFields, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsGroupByFields(original["group_by_fields"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedGroupByFields); val.IsValid() && !isEmptyValue(val) { - transformed["groupByFields"] = transformedGroupByFields - } - - transformedAlignmentPeriod, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsAlignmentPeriod(original["alignment_period"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedAlignmentPeriod); val.IsValid() && !isEmptyValue(val) { - transformed["alignmentPeriod"] = transformedAlignmentPeriod - } - - transformedCrossSeriesReducer, err := expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsCrossSeriesReducer(original["cross_series_reducer"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedCrossSeriesReducer); val.IsValid() && !isEmptyValue(val) { - transformed["crossSeriesReducer"] = transformedCrossSeriesReducer - } - - req = append(req, transformed) - } - return req, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsPerSeriesAligner(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsGroupByFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsAlignmentPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentAggregationsCrossSeriesReducer(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentTrigger(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPercent, err := expandMonitoringAlertPolicyConditionsConditionAbsentTriggerPercent(original["percent"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedPercent); val.IsValid() && !isEmptyValue(val) { - transformed["percent"] = transformedPercent - } - - transformedCount, err := expandMonitoringAlertPolicyConditionsConditionAbsentTriggerCount(original["count"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedCount); val.IsValid() && !isEmptyValue(val) { - transformed["count"] = transformedCount - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentTriggerPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentTriggerCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandMonitoringAlertPolicyConditionsConditionAbsentDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionAbsentFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedQuery, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageQuery(original["query"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedQuery); val.IsValid() && !isEmptyValue(val) { - transformed["query"] = transformedQuery - } - - transformedDuration, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageDuration(original["duration"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedDuration); val.IsValid() && !isEmptyValue(val) { - transformed["duration"] = transformedDuration - } - - transformedTrigger, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigger(original["trigger"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedTrigger); val.IsValid() && !isEmptyValue(val) { - transformed["trigger"] = transformedTrigger - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageQuery(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTrigger(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPercent, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerPercent(original["percent"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedPercent); val.IsValid() && !isEmptyValue(val) { - transformed["percent"] = transformedPercent - } - - transformedCount, err := expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerCount(original["count"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedCount); val.IsValid() && !isEmptyValue(val) { - transformed["count"] = transformedCount - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionMonitoringQueryLanguageTriggerCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedThresholdValue, err := expandMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(original["threshold_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedThresholdValue); val.IsValid() && !isEmptyValue(val) { - transformed["thresholdValue"] = transformedThresholdValue - } - - transformedDenominatorFilter, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorFilter(original["denominator_filter"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedDenominatorFilter); val.IsValid() && !isEmptyValue(val) { - transformed["denominatorFilter"] = transformedDenominatorFilter - } - - transformedDenominatorAggregations, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregations(original["denominator_aggregations"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedDenominatorAggregations); val.IsValid() && !isEmptyValue(val) { - transformed["denominatorAggregations"] = transformedDenominatorAggregations - } - - transformedDuration, err := expandMonitoringAlertPolicyConditionsConditionThresholdDuration(original["duration"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedDuration); val.IsValid() && !isEmptyValue(val) { - transformed["duration"] = transformedDuration - } - - transformedComparison, err := expandMonitoringAlertPolicyConditionsConditionThresholdComparison(original["comparison"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedComparison); val.IsValid() && !isEmptyValue(val) { - transformed["comparison"] = transformedComparison - } - - 
transformedTrigger, err := expandMonitoringAlertPolicyConditionsConditionThresholdTrigger(original["trigger"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedTrigger); val.IsValid() && !isEmptyValue(val) { - transformed["trigger"] = transformedTrigger - } - - transformedAggregations, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregations(original["aggregations"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedAggregations); val.IsValid() && !isEmptyValue(val) { - transformed["aggregations"] = transformedAggregations - } - - transformedFilter, err := expandMonitoringAlertPolicyConditionsConditionThresholdFilter(original["filter"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedFilter); val.IsValid() && !isEmptyValue(val) { - transformed["filter"] = transformedFilter - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdThresholdValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPerSeriesAligner, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsPerSeriesAligner(original["per_series_aligner"], d, config) - if err != nil { - 
return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedPerSeriesAligner); val.IsValid() && !isEmptyValue(val) { - transformed["perSeriesAligner"] = transformedPerSeriesAligner - } - - transformedGroupByFields, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsGroupByFields(original["group_by_fields"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedGroupByFields); val.IsValid() && !isEmptyValue(val) { - transformed["groupByFields"] = transformedGroupByFields - } - - transformedAlignmentPeriod, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsAlignmentPeriod(original["alignment_period"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedAlignmentPeriod); val.IsValid() && !isEmptyValue(val) { - transformed["alignmentPeriod"] = transformedAlignmentPeriod - } - - transformedCrossSeriesReducer, err := expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsCrossSeriesReducer(original["cross_series_reducer"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedCrossSeriesReducer); val.IsValid() && !isEmptyValue(val) { - transformed["crossSeriesReducer"] = transformedCrossSeriesReducer - } - - req = append(req, transformed) - } - return req, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsPerSeriesAligner(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsGroupByFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsAlignmentPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdDenominatorAggregationsCrossSeriesReducer(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdComparison(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdTrigger(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPercent, err := expandMonitoringAlertPolicyConditionsConditionThresholdTriggerPercent(original["percent"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedPercent); val.IsValid() && !isEmptyValue(val) { - transformed["percent"] = transformedPercent - } - - transformedCount, err := expandMonitoringAlertPolicyConditionsConditionThresholdTriggerCount(original["count"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedCount); val.IsValid() && !isEmptyValue(val) { - transformed["count"] = transformedCount - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdTriggerPercent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandMonitoringAlertPolicyConditionsConditionThresholdTriggerCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdAggregations(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPerSeriesAligner, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsPerSeriesAligner(original["per_series_aligner"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedPerSeriesAligner); val.IsValid() && !isEmptyValue(val) { - transformed["perSeriesAligner"] = transformedPerSeriesAligner - } - - transformedGroupByFields, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsGroupByFields(original["group_by_fields"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedGroupByFields); val.IsValid() && !isEmptyValue(val) { - transformed["groupByFields"] = transformedGroupByFields - } - - transformedAlignmentPeriod, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsAlignmentPeriod(original["alignment_period"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedAlignmentPeriod); val.IsValid() && !isEmptyValue(val) { - transformed["alignmentPeriod"] = transformedAlignmentPeriod - } - - transformedCrossSeriesReducer, err := expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsCrossSeriesReducer(original["cross_series_reducer"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_monitoring_alert_policy_reflect.ValueOf(transformedCrossSeriesReducer); val.IsValid() && !isEmptyValue(val) { - transformed["crossSeriesReducer"] = transformedCrossSeriesReducer - } - - req = append(req, transformed) - } - return req, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsPerSeriesAligner(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsGroupByFields(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsAlignmentPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdAggregationsCrossSeriesReducer(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsConditionThresholdFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyConditionsDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyNotificationChannels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyUserLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandMonitoringAlertPolicyDocumentation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, 
nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedContent, err := expandMonitoringAlertPolicyDocumentationContent(original["content"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedContent); val.IsValid() && !isEmptyValue(val) { - transformed["content"] = transformedContent - } - - transformedMimeType, err := expandMonitoringAlertPolicyDocumentationMimeType(original["mime_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_alert_policy_reflect.ValueOf(transformedMimeType); val.IsValid() && !isEmptyValue(val) { - transformed["mimeType"] = transformedMimeType - } - - return transformed, nil -} - -func expandMonitoringAlertPolicyDocumentationContent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringAlertPolicyDocumentationMimeType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceMonitoringService() *resource_monitoring_custom_service_schema.Resource { - return &resource_monitoring_custom_service_schema.Resource{ - Create: resourceMonitoringServiceCreate, - Read: resourceMonitoringServiceRead, - Update: resourceMonitoringServiceUpdate, - Delete: resourceMonitoringServiceDelete, - - Importer: &resource_monitoring_custom_service_schema.ResourceImporter{ - State: resourceMonitoringServiceImport, - }, - - Timeouts: &resource_monitoring_custom_service_schema.ResourceTimeout{ - Create: resource_monitoring_custom_service_schema.DefaultTimeout(4 * resource_monitoring_custom_service_time.Minute), - Update: resource_monitoring_custom_service_schema.DefaultTimeout(4 * resource_monitoring_custom_service_time.Minute), - Delete: resource_monitoring_custom_service_schema.DefaultTimeout(4 * resource_monitoring_custom_service_time.Minute), - }, - - Schema: 
map[string]*resource_monitoring_custom_service_schema.Schema{ - "display_name": { - Type: resource_monitoring_custom_service_schema.TypeString, - Optional: true, - Description: `Name used for UI elements listing this Service.`, - }, - "service_id": { - Type: resource_monitoring_custom_service_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[a-z0-9\-]+$`), - Description: `An optional service ID to use. If not given, the server will generate a -service ID.`, - }, - "telemetry": { - Type: resource_monitoring_custom_service_schema.TypeList, - Optional: true, - Description: `Configuration for how to query telemetry on a Service.`, - MaxItems: 1, - Elem: &resource_monitoring_custom_service_schema.Resource{ - Schema: map[string]*resource_monitoring_custom_service_schema.Schema{ - "resource_name": { - Type: resource_monitoring_custom_service_schema.TypeString, - Optional: true, - Description: `The full name of the resource that defines this service. -Formatted as described in -https://cloud.google.com/apis/design/resource_names.`, - }, - }, - }, - }, - "name": { - Type: resource_monitoring_custom_service_schema.TypeString, - Computed: true, - Description: `The full resource name for this service. 
The syntax is: -projects/[PROJECT_ID]/services/[SERVICE_ID].`, - }, - "project": { - Type: resource_monitoring_custom_service_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMonitoringServiceCreate(d *resource_monitoring_custom_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandMonitoringServiceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_monitoring_custom_service_reflect.ValueOf(displayNameProp)) && (ok || !resource_monitoring_custom_service_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - telemetryProp, err := expandMonitoringServiceTelemetry(d.Get("telemetry"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("telemetry"); !isEmptyValue(resource_monitoring_custom_service_reflect.ValueOf(telemetryProp)) && (ok || !resource_monitoring_custom_service_reflect.DeepEqual(v, telemetryProp)) { - obj["telemetry"] = telemetryProp - } - nameProp, err := expandMonitoringServiceServiceId(d.Get("service_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_id"); !isEmptyValue(resource_monitoring_custom_service_reflect.ValueOf(nameProp)) && (ok || !resource_monitoring_custom_service_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - obj, err = resourceMonitoringServiceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/services?serviceId={{service_id}}") - if err != nil { - return err - } - - resource_monitoring_custom_service_log.Printf("[DEBUG] Creating new Service: %#v", obj) - 
billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_custom_service_fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_custom_service_schema.TimeoutCreate), isMonitoringConcurrentEditError) - if err != nil { - return resource_monitoring_custom_service_fmt.Errorf("Error creating Service: %s", err) - } - if err := d.Set("name", flattenMonitoringServiceName(res["name"], d, config)); err != nil { - return resource_monitoring_custom_service_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_monitoring_custom_service_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_monitoring_custom_service_log.Printf("[DEBUG] Finished creating Service %q: %#v", d.Id(), res) - - return resourceMonitoringServiceRead(d, meta) -} - -func resourceMonitoringServiceRead(d *resource_monitoring_custom_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_custom_service_fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, 
resource_monitoring_custom_service_fmt.Sprintf("MonitoringService %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_monitoring_custom_service_fmt.Errorf("Error reading Service: %s", err) - } - - if err := d.Set("name", flattenMonitoringServiceName(res["name"], d, config)); err != nil { - return resource_monitoring_custom_service_fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("display_name", flattenMonitoringServiceDisplayName(res["displayName"], d, config)); err != nil { - return resource_monitoring_custom_service_fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("telemetry", flattenMonitoringServiceTelemetry(res["telemetry"], d, config)); err != nil { - return resource_monitoring_custom_service_fmt.Errorf("Error reading Service: %s", err) - } - if err := d.Set("service_id", flattenMonitoringServiceServiceId(res["name"], d, config)); err != nil { - return resource_monitoring_custom_service_fmt.Errorf("Error reading Service: %s", err) - } - - return nil -} - -func resourceMonitoringServiceUpdate(d *resource_monitoring_custom_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_custom_service_fmt.Errorf("Error fetching project for Service: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandMonitoringServiceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_monitoring_custom_service_reflect.ValueOf(v)) && (ok || !resource_monitoring_custom_service_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - telemetryProp, err := 
expandMonitoringServiceTelemetry(d.Get("telemetry"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("telemetry"); !isEmptyValue(resource_monitoring_custom_service_reflect.ValueOf(v)) && (ok || !resource_monitoring_custom_service_reflect.DeepEqual(v, telemetryProp)) { - obj["telemetry"] = telemetryProp - } - - obj, err = resourceMonitoringServiceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - resource_monitoring_custom_service_log.Printf("[DEBUG] Updating Service %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("telemetry") { - updateMask = append(updateMask, "telemetry") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_monitoring_custom_service_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_custom_service_schema.TimeoutUpdate), isMonitoringConcurrentEditError) - - if err != nil { - return resource_monitoring_custom_service_fmt.Errorf("Error updating Service %q: %s", d.Id(), err) - } else { - resource_monitoring_custom_service_log.Printf("[DEBUG] Finished updating Service %q: %#v", d.Id(), res) - } - - return resourceMonitoringServiceRead(d, meta) -} - -func resourceMonitoringServiceDelete(d *resource_monitoring_custom_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_custom_service_fmt.Errorf("Error fetching 
project for Service: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_monitoring_custom_service_log.Printf("[DEBUG] Deleting Service %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_custom_service_schema.TimeoutDelete), isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, "Service") - } - - resource_monitoring_custom_service_log.Printf("[DEBUG] Finished deleting Service %q: %#v", d.Id(), res) - return nil -} - -func resourceMonitoringServiceImport(d *resource_monitoring_custom_service_schema.ResourceData, meta interface{}) ([]*resource_monitoring_custom_service_schema.ResourceData, error) { - - config := meta.(*Config) - - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*resource_monitoring_custom_service_schema.ResourceData{d}, nil -} - -func flattenMonitoringServiceName(v interface{}, d *resource_monitoring_custom_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringServiceDisplayName(v interface{}, d *resource_monitoring_custom_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringServiceTelemetry(v interface{}, d *resource_monitoring_custom_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resource_name"] = - flattenMonitoringServiceTelemetryResourceName(original["resourceName"], d, config) - return []interface{}{transformed} -} - -func 
flattenMonitoringServiceTelemetryResourceName(v interface{}, d *resource_monitoring_custom_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringServiceServiceId(v interface{}, d *resource_monitoring_custom_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandMonitoringServiceDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringServiceTelemetry(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResourceName, err := expandMonitoringServiceTelemetryResourceName(original["resource_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_custom_service_reflect.ValueOf(transformedResourceName); val.IsValid() && !isEmptyValue(val) { - transformed["resourceName"] = transformedResourceName - } - - return transformed, nil -} - -func expandMonitoringServiceTelemetryResourceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringServiceServiceId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceMonitoringServiceEncoder(d *resource_monitoring_custom_service_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - if _, ok := obj["custom"]; !ok { - obj["custom"] = map[string]interface{}{} - } - - delete(obj, "name") - - return obj, nil -} - -func monitoringDashboardDiffSuppress(k, old, new string, d *resource_monitoring_dashboard_schema.ResourceData) bool { - computedFields := []string{"etag", "name"} - - oldMap, err := 
resource_monitoring_dashboard_structure.ExpandJsonFromString(old) - if err != nil { - return false - } - - newMap, err := resource_monitoring_dashboard_structure.ExpandJsonFromString(new) - if err != nil { - return false - } - - for _, f := range computedFields { - delete(oldMap, f) - delete(newMap, f) - } - - return resource_monitoring_dashboard_reflect.DeepEqual(oldMap, newMap) -} - -func resourceMonitoringDashboard() *resource_monitoring_dashboard_schema.Resource { - return &resource_monitoring_dashboard_schema.Resource{ - Create: resourceMonitoringDashboardCreate, - Read: resourceMonitoringDashboardRead, - Update: resourceMonitoringDashboardUpdate, - Delete: resourceMonitoringDashboardDelete, - - Importer: &resource_monitoring_dashboard_schema.ResourceImporter{ - State: resourceMonitoringDashboardImport, - }, - - Timeouts: &resource_monitoring_dashboard_schema.ResourceTimeout{ - Create: resource_monitoring_dashboard_schema.DefaultTimeout(4 * resource_monitoring_dashboard_time.Minute), - Update: resource_monitoring_dashboard_schema.DefaultTimeout(4 * resource_monitoring_dashboard_time.Minute), - Delete: resource_monitoring_dashboard_schema.DefaultTimeout(4 * resource_monitoring_dashboard_time.Minute), - }, - - Schema: map[string]*resource_monitoring_dashboard_schema.Schema{ - "dashboard_json": { - Type: resource_monitoring_dashboard_schema.TypeString, - Required: true, - ValidateFunc: resource_monitoring_dashboard_validation.StringIsJSON, - DiffSuppressFunc: monitoringDashboardDiffSuppress, - StateFunc: func(v interface{}) string { - json, _ := resource_monitoring_dashboard_structure.NormalizeJsonString(v) - return json - }, - Description: `The JSON representation of a dashboard, following the format at https://cloud.google.com/monitoring/api/ref_v3/rest/v1/projects.dashboards.`, - }, - "project": { - Type: resource_monitoring_dashboard_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the 
resource belongs. If it is not provided, the provider project is used.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMonitoringDashboardCreate(d *resource_monitoring_dashboard_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj, err := resource_monitoring_dashboard_structure.ExpandJsonFromString(d.Get("dashboard_json").(string)) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v1/projects/{{project}}/dashboards") - if err != nil { - return err - } - res, err := sendRequestWithTimeout(config, "POST", project, url, userAgent, obj, d.Timeout(resource_monitoring_dashboard_schema.TimeoutCreate), isMonitoringConcurrentEditError) - if err != nil { - return resource_monitoring_dashboard_fmt.Errorf("Error creating Dashboard: %s", err) - } - - name, ok := res["name"] - if !ok { - return resource_monitoring_dashboard_fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - d.SetId(name.(string)) - - return resourceMonitoringDashboardRead(d, config) -} - -func resourceMonitoringDashboardRead(d *resource_monitoring_dashboard_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url := config.MonitoringBasePath + "v1/" + d.Id() - - project, err := getProject(d, config) - if err != nil { - return err - } - - res, err := sendRequest(config, "GET", project, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, resource_monitoring_dashboard_fmt.Sprintf("MonitoringDashboard %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_monitoring_dashboard_fmt.Errorf("Error setting Dashboard: %s", err) - } - - str, err := resource_monitoring_dashboard_structure.FlattenJsonToString(res) - if err != nil { - return resource_monitoring_dashboard_fmt.Errorf("Error reading Dashboard: %s", err) - } - if err = d.Set("dashboard_json", str); err != nil { - return resource_monitoring_dashboard_fmt.Errorf("Error reading Dashboard: %s", err) - } - - return nil -} - -func resourceMonitoringDashboardUpdate(d *resource_monitoring_dashboard_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - o, n := d.GetChange("dashboard_json") - oObj, err := resource_monitoring_dashboard_structure.ExpandJsonFromString(o.(string)) - if err != nil { - return err - } - nObj, err := resource_monitoring_dashboard_structure.ExpandJsonFromString(n.(string)) - if err != nil { - return err - } - - nObj["etag"] = oObj["etag"] - - project, err := getProject(d, config) - if err != nil { - return err - } - - url := config.MonitoringBasePath + "v1/" + d.Id() - _, err = sendRequestWithTimeout(config, "PATCH", project, url, 
userAgent, nObj, d.Timeout(resource_monitoring_dashboard_schema.TimeoutUpdate), isMonitoringConcurrentEditError) - if err != nil { - return resource_monitoring_dashboard_fmt.Errorf("Error updating Dashboard %q: %s", d.Id(), err) - } - - return resourceMonitoringDashboardRead(d, config) -} - -func resourceMonitoringDashboardDelete(d *resource_monitoring_dashboard_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url := config.MonitoringBasePath + "v1/" + d.Id() - - project, err := getProject(d, config) - if err != nil { - return err - } - - _, err = sendRequestWithTimeout(config, "DELETE", project, url, userAgent, nil, d.Timeout(resource_monitoring_dashboard_schema.TimeoutDelete), isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, resource_monitoring_dashboard_fmt.Sprintf("MonitoringDashboard %q", d.Id())) - } - - return nil -} - -func resourceMonitoringDashboardImport(d *resource_monitoring_dashboard_schema.ResourceData, meta interface{}) ([]*resource_monitoring_dashboard_schema.ResourceData, error) { - config := meta.(*Config) - - parts, err := getImportIdQualifiers([]string{"projects/(?P[^/]+)/dashboards/(?P[^/]+)", "(?P[^/]+)"}, d, config, d.Id()) - if err != nil { - return nil, err - } - - if err := d.Set("project", parts["project"]); err != nil { - return nil, resource_monitoring_dashboard_fmt.Errorf("Error setting project: %s", err) - } - d.SetId(resource_monitoring_dashboard_fmt.Sprintf("projects/%s/dashboards/%s", parts["project"], parts["id"])) - - return []*resource_monitoring_dashboard_schema.ResourceData{d}, nil -} - -func resourceMonitoringGroup() *resource_monitoring_group_schema.Resource { - return &resource_monitoring_group_schema.Resource{ - Create: resourceMonitoringGroupCreate, - Read: resourceMonitoringGroupRead, - Update: resourceMonitoringGroupUpdate, - Delete: 
resourceMonitoringGroupDelete, - - Importer: &resource_monitoring_group_schema.ResourceImporter{ - State: resourceMonitoringGroupImport, - }, - - Timeouts: &resource_monitoring_group_schema.ResourceTimeout{ - Create: resource_monitoring_group_schema.DefaultTimeout(4 * resource_monitoring_group_time.Minute), - Update: resource_monitoring_group_schema.DefaultTimeout(4 * resource_monitoring_group_time.Minute), - Delete: resource_monitoring_group_schema.DefaultTimeout(4 * resource_monitoring_group_time.Minute), - }, - - Schema: map[string]*resource_monitoring_group_schema.Schema{ - "display_name": { - Type: resource_monitoring_group_schema.TypeString, - Required: true, - Description: `A user-assigned name for this group, used only for display -purposes.`, - }, - "filter": { - Type: resource_monitoring_group_schema.TypeString, - Required: true, - Description: `The filter used to determine which monitored resources -belong to this group.`, - }, - "is_cluster": { - Type: resource_monitoring_group_schema.TypeBool, - Optional: true, - Description: `If true, the members of this group are considered to be a -cluster. The system can perform additional analysis on -groups that are clusters.`, - }, - "parent_name": { - Type: resource_monitoring_group_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkRelativePaths, - Description: `The name of the group's parent, if it has one. The format is -"projects/{project_id_or_number}/groups/{group_id}". For -groups with no parent, parentName is the empty string, "".`, - }, - "name": { - Type: resource_monitoring_group_schema.TypeString, - Computed: true, - Description: `A unique identifier for this group. 
The format is -"projects/{project_id_or_number}/groups/{group_id}".`, - }, - "project": { - Type: resource_monitoring_group_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMonitoringGroupCreate(d *resource_monitoring_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - parentNameProp, err := expandMonitoringGroupParentName(d.Get("parent_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent_name"); !isEmptyValue(resource_monitoring_group_reflect.ValueOf(parentNameProp)) && (ok || !resource_monitoring_group_reflect.DeepEqual(v, parentNameProp)) { - obj["parentName"] = parentNameProp - } - isClusterProp, err := expandMonitoringGroupIsCluster(d.Get("is_cluster"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("is_cluster"); !isEmptyValue(resource_monitoring_group_reflect.ValueOf(isClusterProp)) && (ok || !resource_monitoring_group_reflect.DeepEqual(v, isClusterProp)) { - obj["isCluster"] = isClusterProp - } - displayNameProp, err := expandMonitoringGroupDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_monitoring_group_reflect.ValueOf(displayNameProp)) && (ok || !resource_monitoring_group_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - filterProp, err := expandMonitoringGroupFilter(d.Get("filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(resource_monitoring_group_reflect.ValueOf(filterProp)) && (ok || !resource_monitoring_group_reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - - lockName, err := replaceVars(d, config, 
"stackdriver/groups/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/groups") - if err != nil { - return err - } - - resource_monitoring_group_log.Printf("[DEBUG] Creating new Group: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_group_fmt.Errorf("Error fetching project for Group: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_group_schema.TimeoutCreate), isMonitoringConcurrentEditError) - if err != nil { - return resource_monitoring_group_fmt.Errorf("Error creating Group: %s", err) - } - if err := d.Set("name", flattenMonitoringGroupName(res["name"], d, config)); err != nil { - return resource_monitoring_group_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_monitoring_group_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return resource_monitoring_group_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return resource_monitoring_group_fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return resource_monitoring_group_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - resource_monitoring_group_log.Printf("[DEBUG] Finished creating Group %q: %#v", d.Id(), res) - - return resourceMonitoringGroupRead(d, meta) -} - -func resourceMonitoringGroupRead(d *resource_monitoring_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_group_fmt.Errorf("Error fetching project for Group: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, resource_monitoring_group_fmt.Sprintf("MonitoringGroup %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_monitoring_group_fmt.Errorf("Error reading Group: %s", err) - } - - if err := d.Set("parent_name", flattenMonitoringGroupParentName(res["parentName"], d, config)); err != nil { - return resource_monitoring_group_fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("name", flattenMonitoringGroupName(res["name"], d, config)); err != nil { - return resource_monitoring_group_fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("is_cluster", flattenMonitoringGroupIsCluster(res["isCluster"], d, config)); err != nil { - return resource_monitoring_group_fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("display_name", 
flattenMonitoringGroupDisplayName(res["displayName"], d, config)); err != nil { - return resource_monitoring_group_fmt.Errorf("Error reading Group: %s", err) - } - if err := d.Set("filter", flattenMonitoringGroupFilter(res["filter"], d, config)); err != nil { - return resource_monitoring_group_fmt.Errorf("Error reading Group: %s", err) - } - - return nil -} - -func resourceMonitoringGroupUpdate(d *resource_monitoring_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_group_fmt.Errorf("Error fetching project for Group: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - parentNameProp, err := expandMonitoringGroupParentName(d.Get("parent_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent_name"); !isEmptyValue(resource_monitoring_group_reflect.ValueOf(v)) && (ok || !resource_monitoring_group_reflect.DeepEqual(v, parentNameProp)) { - obj["parentName"] = parentNameProp - } - isClusterProp, err := expandMonitoringGroupIsCluster(d.Get("is_cluster"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("is_cluster"); !isEmptyValue(resource_monitoring_group_reflect.ValueOf(v)) && (ok || !resource_monitoring_group_reflect.DeepEqual(v, isClusterProp)) { - obj["isCluster"] = isClusterProp - } - displayNameProp, err := expandMonitoringGroupDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_monitoring_group_reflect.ValueOf(v)) && (ok || !resource_monitoring_group_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - filterProp, err := expandMonitoringGroupFilter(d.Get("filter"), d, config) - if err != nil { - 
return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(resource_monitoring_group_reflect.ValueOf(v)) && (ok || !resource_monitoring_group_reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - - lockName, err := replaceVars(d, config, "stackdriver/groups/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - resource_monitoring_group_log.Printf("[DEBUG] Updating Group %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_group_schema.TimeoutUpdate), isMonitoringConcurrentEditError) - - if err != nil { - return resource_monitoring_group_fmt.Errorf("Error updating Group %q: %s", d.Id(), err) - } else { - resource_monitoring_group_log.Printf("[DEBUG] Finished updating Group %q: %#v", d.Id(), res) - } - - return resourceMonitoringGroupRead(d, meta) -} - -func resourceMonitoringGroupDelete(d *resource_monitoring_group_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_group_fmt.Errorf("Error fetching project for Group: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "stackdriver/groups/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_monitoring_group_log.Printf("[DEBUG] Deleting Group %q", d.Id()) - - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_group_schema.TimeoutDelete), isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, "Group") - } - - resource_monitoring_group_log.Printf("[DEBUG] Finished deleting Group %q: %#v", d.Id(), res) - return nil -} - -func resourceMonitoringGroupImport(d *resource_monitoring_group_schema.ResourceData, meta interface{}) ([]*resource_monitoring_group_schema.ResourceData, error) { - - config := meta.(*Config) - - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*resource_monitoring_group_schema.ResourceData{d}, nil -} - -func flattenMonitoringGroupParentName(v interface{}, d *resource_monitoring_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringGroupName(v interface{}, d *resource_monitoring_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringGroupIsCluster(v interface{}, d *resource_monitoring_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringGroupDisplayName(v interface{}, d *resource_monitoring_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringGroupFilter(v interface{}, d *resource_monitoring_group_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandMonitoringGroupParentName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringGroupIsCluster(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringGroupDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandMonitoringGroupFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceMonitoringMetricDescriptor() *resource_monitoring_metric_descriptor_schema.Resource { - return &resource_monitoring_metric_descriptor_schema.Resource{ - Create: resourceMonitoringMetricDescriptorCreate, - Read: resourceMonitoringMetricDescriptorRead, - Update: resourceMonitoringMetricDescriptorUpdate, - Delete: resourceMonitoringMetricDescriptorDelete, - - Importer: &resource_monitoring_metric_descriptor_schema.ResourceImporter{ - State: resourceMonitoringMetricDescriptorImport, - }, - - Timeouts: &resource_monitoring_metric_descriptor_schema.ResourceTimeout{ - Create: resource_monitoring_metric_descriptor_schema.DefaultTimeout(6 * resource_monitoring_metric_descriptor_time.Minute), - Update: resource_monitoring_metric_descriptor_schema.DefaultTimeout(6 * resource_monitoring_metric_descriptor_time.Minute), - Delete: resource_monitoring_metric_descriptor_schema.DefaultTimeout(6 * resource_monitoring_metric_descriptor_time.Minute), - }, - - Schema: map[string]*resource_monitoring_metric_descriptor_schema.Schema{ - "description": { - Type: resource_monitoring_metric_descriptor_schema.TypeString, - Required: true, - ForceNew: true, - Description: `A detailed description of the metric, which can be used in documentation.`, - }, - "display_name": { - Type: resource_monitoring_metric_descriptor_schema.TypeString, - Required: true, - ForceNew: true, - Description: `A concise name for the metric, which can be displayed in user interfaces. 
Use sentence case without an ending period, for example "Request count".`, - }, - "metric_kind": { - Type: resource_monitoring_metric_descriptor_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_monitoring_metric_descriptor_validation.StringInSlice([]string{"METRIC_KIND_UNSPECIFIED", "GAUGE", "DELTA", "CUMULATIVE"}, false), - Description: `Whether the metric records instantaneous values, changes to a value, etc. Some combinations of metricKind and valueType might not be supported. Possible values: ["METRIC_KIND_UNSPECIFIED", "GAUGE", "DELTA", "CUMULATIVE"]`, - }, - "type": { - Type: resource_monitoring_metric_descriptor_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The metric type, including its DNS name prefix. The type is not URL-encoded. All service defined metrics must be prefixed with the service name, in the format of {service name}/{relative metric name}, such as cloudsql.googleapis.com/database/cpu/utilization. The relative metric name must have only upper and lower-case letters, digits, '/' and underscores '_' are allowed. Additionally, the maximum number of characters allowed for the relative_metric_name is 100. All user-defined metric types have the DNS name custom.googleapis.com, external.googleapis.com, or logging.googleapis.com/user/.`, - }, - "value_type": { - Type: resource_monitoring_metric_descriptor_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_monitoring_metric_descriptor_validation.StringInSlice([]string{"BOOL", "INT64", "DOUBLE", "STRING", "DISTRIBUTION"}, false), - Description: `Whether the measurement is an integer, a floating-point number, etc. Some combinations of metricKind and valueType might not be supported. 
Possible values: ["BOOL", "INT64", "DOUBLE", "STRING", "DISTRIBUTION"]`, - }, - "labels": { - Type: resource_monitoring_metric_descriptor_schema.TypeSet, - Optional: true, - Description: `The set of labels that can be used to describe a specific instance of this metric type. In order to delete a label, the entire resource must be deleted, then created with the desired labels.`, - Elem: monitoringMetricDescriptorLabelsSchema(), - }, - "launch_stage": { - Type: resource_monitoring_metric_descriptor_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_monitoring_metric_descriptor_validation.StringInSlice([]string{"LAUNCH_STAGE_UNSPECIFIED", "UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED", ""}, false), - Description: `The launch stage of the metric definition. Possible values: ["LAUNCH_STAGE_UNSPECIFIED", "UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED"]`, - }, - "metadata": { - Type: resource_monitoring_metric_descriptor_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Metadata which can be used to guide usage of the metric.`, - MaxItems: 1, - Elem: &resource_monitoring_metric_descriptor_schema.Resource{ - Schema: map[string]*resource_monitoring_metric_descriptor_schema.Schema{ - "ingest_delay": { - Type: resource_monitoring_metric_descriptor_schema.TypeString, - Optional: true, - Description: `The delay of data points caused by ingestion. Data points older than this age are guaranteed to be ingested and available to be read, excluding data loss due to errors. 
In '[duration format](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf?&_ga=2.264881487.1507873253.1593446723-935052455.1591817775#google.protobuf.Duration)'.`, - AtLeastOneOf: []string{"metadata.0.sample_period", "metadata.0.ingest_delay"}, - }, - "sample_period": { - Type: resource_monitoring_metric_descriptor_schema.TypeString, - Optional: true, - Description: `The sampling period of metric data points. For metrics which are written periodically, consecutive data points are stored at this time interval, excluding data loss due to errors. Metrics with a higher granularity have a smaller sampling period. In '[duration format](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf?&_ga=2.264881487.1507873253.1593446723-935052455.1591817775#google.protobuf.Duration)'.`, - AtLeastOneOf: []string{"metadata.0.sample_period", "metadata.0.ingest_delay"}, - }, - }, - }, - }, - "unit": { - Type: resource_monitoring_metric_descriptor_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The units in which the metric value is reported. It is only applicable if the -valueType is INT64, DOUBLE, or DISTRIBUTION. The unit defines the representation of -the stored metric values. - -Different systems may scale the values to be more easily displayed (so a value of -0.02KBy might be displayed as 20By, and a value of 3523KBy might be displayed as -3.5MBy). However, if the unit is KBy, then the value of the metric is always in -thousands of bytes, no matter how it may be displayed. - -If you want a custom metric to record the exact number of CPU-seconds used by a job, -you can create an INT64 CUMULATIVE metric whose unit is s{CPU} (or equivalently -1s{CPU} or just s). If the job uses 12,005 CPU-seconds, then the value is written as -12005. 
- -Alternatively, if you want a custom metric to record data in a more granular way, you -can create a DOUBLE CUMULATIVE metric whose unit is ks{CPU}, and then write the value -12.005 (which is 12005/1000), or use Kis{CPU} and write 11.723 (which is 12005/1024). -The supported units are a subset of The Unified Code for Units of Measure standard. -More info can be found in the API documentation -(https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors).`, - }, - "monitored_resource_types": { - Type: resource_monitoring_metric_descriptor_schema.TypeList, - Computed: true, - Description: `If present, then a time series, which is identified partially by a metric type and a MonitoredResourceDescriptor, that is associated with this metric type can only be associated with one of the monitored resource types listed here. This field allows time series to be associated with the intersection of this metric type and the monitored resource types in this list.`, - Elem: &resource_monitoring_metric_descriptor_schema.Schema{ - Type: resource_monitoring_metric_descriptor_schema.TypeString, - }, - }, - "name": { - Type: resource_monitoring_metric_descriptor_schema.TypeString, - Computed: true, - Description: `The resource name of the metric descriptor.`, - }, - "project": { - Type: resource_monitoring_metric_descriptor_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func monitoringMetricDescriptorLabelsSchema() *resource_monitoring_metric_descriptor_schema.Resource { - return &resource_monitoring_metric_descriptor_schema.Resource{ - Schema: map[string]*resource_monitoring_metric_descriptor_schema.Schema{ - "key": { - Type: resource_monitoring_metric_descriptor_schema.TypeString, - Required: true, - Description: `The key for this label. The key must not exceed 100 characters. 
The first character of the key must be an upper- or lower-case letter, the remaining characters must be letters, digits or underscores, and the key must match the regular expression [a-zA-Z][a-zA-Z0-9_]*`, - }, - "description": { - Type: resource_monitoring_metric_descriptor_schema.TypeString, - Optional: true, - Description: `A human-readable description for the label.`, - }, - "value_type": { - Type: resource_monitoring_metric_descriptor_schema.TypeString, - Optional: true, - ValidateFunc: resource_monitoring_metric_descriptor_validation.StringInSlice([]string{"STRING", "BOOL", "INT64", ""}, false), - Description: `The type of data that can be assigned to the label. Default value: "STRING" Possible values: ["STRING", "BOOL", "INT64"]`, - Default: "STRING", - }, - }, - } -} - -func resourceMonitoringMetricDescriptorCreate(d *resource_monitoring_metric_descriptor_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - typeProp, err := expandMonitoringMetricDescriptorType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(typeProp)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - labelsProp, err := expandMonitoringMetricDescriptorLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(labelsProp)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - metricKindProp, err := expandMonitoringMetricDescriptorMetricKind(d.Get("metric_kind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metric_kind"); 
!isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(metricKindProp)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, metricKindProp)) { - obj["metricKind"] = metricKindProp - } - valueTypeProp, err := expandMonitoringMetricDescriptorValueType(d.Get("value_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("value_type"); !isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(valueTypeProp)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, valueTypeProp)) { - obj["valueType"] = valueTypeProp - } - unitProp, err := expandMonitoringMetricDescriptorUnit(d.Get("unit"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("unit"); !isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(unitProp)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, unitProp)) { - obj["unit"] = unitProp - } - descriptionProp, err := expandMonitoringMetricDescriptorDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(descriptionProp)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandMonitoringMetricDescriptorDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(displayNameProp)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - metadataProp, err := expandMonitoringMetricDescriptorMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); 
!isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(metadataProp)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - launchStageProp, err := expandMonitoringMetricDescriptorLaunchStage(d.Get("launch_stage"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("launch_stage"); !isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(launchStageProp)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, launchStageProp)) { - obj["launchStage"] = launchStageProp - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/metricDescriptors") - if err != nil { - return err - } - - resource_monitoring_metric_descriptor_log.Printf("[DEBUG] Creating new MetricDescriptor: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error fetching project for MetricDescriptor: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_metric_descriptor_schema.TimeoutCreate), isMonitoringConcurrentEditError) - if err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error creating MetricDescriptor: %s", err) - } - if err := d.Set("name", flattenMonitoringMetricDescriptorName(res["name"], d, config)); err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = PollingWaitTime(resourceMonitoringMetricDescriptorPollRead(d, meta), PollCheckForExistence, 
"Creating MetricDescriptor", d.Timeout(resource_monitoring_metric_descriptor_schema.TimeoutCreate), 20) - if err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error waiting to create MetricDescriptor: %s", err) - } - - resource_monitoring_metric_descriptor_log.Printf("[DEBUG] Finished creating MetricDescriptor %q: %#v", d.Id(), res) - - return resourceMonitoringMetricDescriptorRead(d, meta) -} - -func resourceMonitoringMetricDescriptorPollRead(d *resource_monitoring_metric_descriptor_schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, resource_monitoring_metric_descriptor_fmt.Errorf("Error fetching project for MetricDescriptor: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return res, err - } - return res, nil - } -} - -func resourceMonitoringMetricDescriptorRead(d *resource_monitoring_metric_descriptor_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error fetching project for MetricDescriptor: %s", err) - } - billingProject = project - - if bp, err := 
getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, resource_monitoring_metric_descriptor_fmt.Sprintf("MonitoringMetricDescriptor %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - - if err := d.Set("name", flattenMonitoringMetricDescriptorName(res["name"], d, config)); err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - if err := d.Set("type", flattenMonitoringMetricDescriptorType(res["type"], d, config)); err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - if err := d.Set("labels", flattenMonitoringMetricDescriptorLabels(res["labels"], d, config)); err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - if err := d.Set("metric_kind", flattenMonitoringMetricDescriptorMetricKind(res["metricKind"], d, config)); err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - if err := d.Set("value_type", flattenMonitoringMetricDescriptorValueType(res["valueType"], d, config)); err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - if err := d.Set("unit", flattenMonitoringMetricDescriptorUnit(res["unit"], d, config)); err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - if err := d.Set("description", flattenMonitoringMetricDescriptorDescription(res["description"], d, config)); err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error reading MetricDescriptor: 
%s", err) - } - if err := d.Set("display_name", flattenMonitoringMetricDescriptorDisplayName(res["displayName"], d, config)); err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - if err := d.Set("monitored_resource_types", flattenMonitoringMetricDescriptorMonitoredResourceTypes(res["monitoredResourceTypes"], d, config)); err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error reading MetricDescriptor: %s", err) - } - - return nil -} - -func resourceMonitoringMetricDescriptorUpdate(d *resource_monitoring_metric_descriptor_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error fetching project for MetricDescriptor: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - typeProp, err := expandMonitoringMetricDescriptorType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(v)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - labelsProp, err := expandMonitoringMetricDescriptorLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(v)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - metricKindProp, err := expandMonitoringMetricDescriptorMetricKind(d.Get("metric_kind"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metric_kind"); 
!isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(v)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, metricKindProp)) { - obj["metricKind"] = metricKindProp - } - valueTypeProp, err := expandMonitoringMetricDescriptorValueType(d.Get("value_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("value_type"); !isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(v)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, valueTypeProp)) { - obj["valueType"] = valueTypeProp - } - unitProp, err := expandMonitoringMetricDescriptorUnit(d.Get("unit"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("unit"); !isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(v)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, unitProp)) { - obj["unit"] = unitProp - } - descriptionProp, err := expandMonitoringMetricDescriptorDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(v)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandMonitoringMetricDescriptorDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(v)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - metadataProp, err := expandMonitoringMetricDescriptorMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(v)) && (ok || 
!resource_monitoring_metric_descriptor_reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - launchStageProp, err := expandMonitoringMetricDescriptorLaunchStage(d.Get("launch_stage"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("launch_stage"); !isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(v)) && (ok || !resource_monitoring_metric_descriptor_reflect.DeepEqual(v, launchStageProp)) { - obj["launchStage"] = launchStageProp - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/metricDescriptors") - if err != nil { - return err - } - - resource_monitoring_metric_descriptor_log.Printf("[DEBUG] Updating MetricDescriptor %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_metric_descriptor_schema.TimeoutUpdate), isMonitoringConcurrentEditError) - - if err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error updating MetricDescriptor %q: %s", d.Id(), err) - } else { - resource_monitoring_metric_descriptor_log.Printf("[DEBUG] Finished updating MetricDescriptor %q: %#v", d.Id(), res) - } - - err = PollingWaitTime(resourceMonitoringMetricDescriptorPollRead(d, meta), PollCheckForExistence, "Updating MetricDescriptor", d.Timeout(resource_monitoring_metric_descriptor_schema.TimeoutUpdate), 20) - if err != nil { - return err - } - - return resourceMonitoringMetricDescriptorRead(d, meta) -} - -func resourceMonitoringMetricDescriptorDelete(d *resource_monitoring_metric_descriptor_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return 
resource_monitoring_metric_descriptor_fmt.Errorf("Error fetching project for MetricDescriptor: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_monitoring_metric_descriptor_log.Printf("[DEBUG] Deleting MetricDescriptor %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_metric_descriptor_schema.TimeoutDelete), isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, "MetricDescriptor") - } - - err = PollingWaitTime(resourceMonitoringMetricDescriptorPollRead(d, meta), PollCheckForAbsence, "Deleting MetricDescriptor", d.Timeout(resource_monitoring_metric_descriptor_schema.TimeoutCreate), 20) - if err != nil { - return resource_monitoring_metric_descriptor_fmt.Errorf("Error waiting to delete MetricDescriptor: %s", err) - } - - resource_monitoring_metric_descriptor_log.Printf("[DEBUG] Finished deleting MetricDescriptor %q: %#v", d.Id(), res) - return nil -} - -func resourceMonitoringMetricDescriptorImport(d *resource_monitoring_metric_descriptor_schema.ResourceData, meta interface{}) ([]*resource_monitoring_metric_descriptor_schema.ResourceData, error) { - - config := meta.(*Config) - - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*resource_monitoring_metric_descriptor_schema.ResourceData{d}, nil -} - -func flattenMonitoringMetricDescriptorName(v interface{}, d *resource_monitoring_metric_descriptor_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringMetricDescriptorType(v interface{}, d *resource_monitoring_metric_descriptor_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenMonitoringMetricDescriptorLabels(v interface{}, d *resource_monitoring_metric_descriptor_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_monitoring_metric_descriptor_schema.NewSet(resource_monitoring_metric_descriptor_schema.HashResource(monitoringMetricDescriptorLabelsSchema()), []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "key": flattenMonitoringMetricDescriptorLabelsKey(original["key"], d, config), - "value_type": flattenMonitoringMetricDescriptorLabelsValueType(original["valueType"], d, config), - "description": flattenMonitoringMetricDescriptorLabelsDescription(original["description"], d, config), - }) - } - return transformed -} - -func flattenMonitoringMetricDescriptorLabelsKey(v interface{}, d *resource_monitoring_metric_descriptor_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringMetricDescriptorLabelsValueType(v interface{}, d *resource_monitoring_metric_descriptor_schema.ResourceData, config *Config) interface{} { - if v == nil || isEmptyValue(resource_monitoring_metric_descriptor_reflect.ValueOf(v)) { - return "STRING" - } - - return v -} - -func flattenMonitoringMetricDescriptorLabelsDescription(v interface{}, d *resource_monitoring_metric_descriptor_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringMetricDescriptorMetricKind(v interface{}, d *resource_monitoring_metric_descriptor_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringMetricDescriptorValueType(v interface{}, d *resource_monitoring_metric_descriptor_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringMetricDescriptorUnit(v interface{}, d *resource_monitoring_metric_descriptor_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenMonitoringMetricDescriptorDescription(v interface{}, d *resource_monitoring_metric_descriptor_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringMetricDescriptorDisplayName(v interface{}, d *resource_monitoring_metric_descriptor_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringMetricDescriptorMonitoredResourceTypes(v interface{}, d *resource_monitoring_metric_descriptor_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandMonitoringMetricDescriptorType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorLabels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_monitoring_metric_descriptor_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandMonitoringMetricDescriptorLabelsKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_metric_descriptor_reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - transformedValueType, err := expandMonitoringMetricDescriptorLabelsValueType(original["value_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_metric_descriptor_reflect.ValueOf(transformedValueType); val.IsValid() && !isEmptyValue(val) { - transformed["valueType"] = transformedValueType - } - - transformedDescription, err := expandMonitoringMetricDescriptorLabelsDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_monitoring_metric_descriptor_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - req = append(req, transformed) - } - return req, nil -} - -func expandMonitoringMetricDescriptorLabelsKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorLabelsValueType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorLabelsDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorMetricKind(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorValueType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorUnit(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorMetadata(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSamplePeriod, err := expandMonitoringMetricDescriptorMetadataSamplePeriod(original["sample_period"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_monitoring_metric_descriptor_reflect.ValueOf(transformedSamplePeriod); val.IsValid() && !isEmptyValue(val) { - transformed["samplePeriod"] = transformedSamplePeriod - } - - transformedIngestDelay, err := expandMonitoringMetricDescriptorMetadataIngestDelay(original["ingest_delay"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_metric_descriptor_reflect.ValueOf(transformedIngestDelay); val.IsValid() && !isEmptyValue(val) { - transformed["ingestDelay"] = transformedIngestDelay - } - - return transformed, nil -} - -func expandMonitoringMetricDescriptorMetadataSamplePeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorMetadataIngestDelay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringMetricDescriptorLaunchStage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -var sensitiveLabels = []string{"auth_token", "service_key", "password"} - -func sensitiveLabelCustomizeDiff(_ resource_monitoring_notification_channel_context.Context, diff *resource_monitoring_notification_channel_schema.ResourceDiff, v interface{}) error { - for _, sl := range sensitiveLabels { - mapLabel := diff.Get("labels." + sl).(string) - authLabel := diff.Get("sensitive_labels.0." 
+ sl).(string) - if mapLabel != "" && authLabel != "" { - return resource_monitoring_notification_channel_fmt.Errorf("Sensitive label [%s] cannot be set in both `labels` and the `sensitive_labels` block.", sl) - } - } - return nil -} - -func resourceMonitoringNotificationChannel() *resource_monitoring_notification_channel_schema.Resource { - return &resource_monitoring_notification_channel_schema.Resource{ - Create: resourceMonitoringNotificationChannelCreate, - Read: resourceMonitoringNotificationChannelRead, - Update: resourceMonitoringNotificationChannelUpdate, - Delete: resourceMonitoringNotificationChannelDelete, - - Importer: &resource_monitoring_notification_channel_schema.ResourceImporter{ - State: resourceMonitoringNotificationChannelImport, - }, - - Timeouts: &resource_monitoring_notification_channel_schema.ResourceTimeout{ - Create: resource_monitoring_notification_channel_schema.DefaultTimeout(4 * resource_monitoring_notification_channel_time.Minute), - Update: resource_monitoring_notification_channel_schema.DefaultTimeout(4 * resource_monitoring_notification_channel_time.Minute), - Delete: resource_monitoring_notification_channel_schema.DefaultTimeout(4 * resource_monitoring_notification_channel_time.Minute), - }, - - CustomizeDiff: sensitiveLabelCustomizeDiff, - - Schema: map[string]*resource_monitoring_notification_channel_schema.Schema{ - "type": { - Type: resource_monitoring_notification_channel_schema.TypeString, - Required: true, - Description: `The type of the notification channel. This field matches the value of the NotificationChannelDescriptor.type field. See https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.notificationChannelDescriptors/list to get the list of valid values such as "email", "slack", etc...`, - }, - "description": { - Type: resource_monitoring_notification_channel_schema.TypeString, - Optional: true, - Description: `An optional human-readable description of this notification channel. 
This description may provide additional details, beyond the display name, for the channel. This may not exceed 1024 Unicode characters.`, - }, - "display_name": { - Type: resource_monitoring_notification_channel_schema.TypeString, - Optional: true, - Description: `An optional human-readable name for this notification channel. It is recommended that you specify a non-empty and unique name in order to make it easier to identify the channels in your project, though this is not enforced. The display name is limited to 512 Unicode characters.`, - }, - "enabled": { - Type: resource_monitoring_notification_channel_schema.TypeBool, - Optional: true, - Description: `Whether notifications are forwarded to the described channel. This makes it possible to disable delivery of notifications to a particular channel without removing the channel from all alerting policies that reference the channel. This is a more convenient approach when the change is temporary and you want to receive notifications from the same set of alerting policies on the channel at some point in the future.`, - Default: true, - }, - "labels": { - Type: resource_monitoring_notification_channel_schema.TypeMap, - Optional: true, - Description: `Configuration fields that define the channel and its behavior. The -permissible and required labels are specified in the -NotificationChannelDescriptor corresponding to the type field. - -Labels with sensitive data are obfuscated by the API and therefore Terraform cannot -determine if there are upstream changes to these fields. 
They can also be configured via -the sensitive_labels block, but cannot be configured in both places.`, - Elem: &resource_monitoring_notification_channel_schema.Schema{Type: resource_monitoring_notification_channel_schema.TypeString}, - }, - "sensitive_labels": { - Type: resource_monitoring_notification_channel_schema.TypeList, - Optional: true, - Description: `Different notification type behaviors are configured primarily using the the 'labels' field on this -resource. This block contains the labels which contain secrets or passwords so that they can be marked -sensitive and hidden from plan output. The name of the field, eg: password, will be the key -in the 'labels' map in the api request. - -Credentials may not be specified in both locations and will cause an error. Changing from one location -to a different credential configuration in the config will require an apply to update state.`, - MaxItems: 1, - Elem: &resource_monitoring_notification_channel_schema.Resource{ - Schema: map[string]*resource_monitoring_notification_channel_schema.Schema{ - "auth_token": { - Type: resource_monitoring_notification_channel_schema.TypeString, - Optional: true, - Description: `An authorization token for a notification channel. Channel types that support this field include: slack`, - Sensitive: true, - ExactlyOneOf: []string{"sensitive_labels.0.auth_token", "sensitive_labels.0.password", "sensitive_labels.0.service_key"}, - }, - "password": { - Type: resource_monitoring_notification_channel_schema.TypeString, - Optional: true, - Description: `An password for a notification channel. Channel types that support this field include: webhook_basicauth`, - Sensitive: true, - ExactlyOneOf: []string{"sensitive_labels.0.auth_token", "sensitive_labels.0.password", "sensitive_labels.0.service_key"}, - }, - "service_key": { - Type: resource_monitoring_notification_channel_schema.TypeString, - Optional: true, - Description: `An servicekey token for a notification channel. 
Channel types that support this field include: pagerduty`, - Sensitive: true, - ExactlyOneOf: []string{"sensitive_labels.0.auth_token", "sensitive_labels.0.password", "sensitive_labels.0.service_key"}, - }, - }, - }, - }, - "user_labels": { - Type: resource_monitoring_notification_channel_schema.TypeMap, - Optional: true, - Description: `User-supplied key/value data that does not need to conform to the corresponding NotificationChannelDescriptor's schema, unlike the labels field. This field is intended to be used for organizing and identifying the NotificationChannel objects.The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.`, - Elem: &resource_monitoring_notification_channel_schema.Schema{Type: resource_monitoring_notification_channel_schema.TypeString}, - }, - "name": { - Type: resource_monitoring_notification_channel_schema.TypeString, - Computed: true, - Description: `The full REST resource name for this channel. The syntax is: -projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID] -The [CHANNEL_ID] is automatically assigned by the server on creation.`, - }, - "verification_status": { - Type: resource_monitoring_notification_channel_schema.TypeString, - Computed: true, - Description: `Indicates whether this channel has been verified or not. 
On a ListNotificationChannels or GetNotificationChannel operation, this field is expected to be populated.If the value is UNVERIFIED, then it indicates that the channel is non-functioning (it both requires verification and lacks verification); otherwise, it is assumed that the channel works.If the channel is neither VERIFIED nor UNVERIFIED, it implies that the channel is of a type that does not require verification or that this specific channel has been exempted from verification because it was created prior to verification being required for channels of this type.This field cannot be modified using a standard UpdateNotificationChannel operation. To change the value of this field, you must call VerifyNotificationChannel.`, - }, - "project": { - Type: resource_monitoring_notification_channel_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMonitoringNotificationChannelCreate(d *resource_monitoring_notification_channel_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandMonitoringNotificationChannelLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_monitoring_notification_channel_reflect.ValueOf(labelsProp)) && (ok || !resource_monitoring_notification_channel_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - typeProp, err := expandMonitoringNotificationChannelType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(resource_monitoring_notification_channel_reflect.ValueOf(typeProp)) && (ok || !resource_monitoring_notification_channel_reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - userLabelsProp, err := 
expandMonitoringNotificationChannelUserLabels(d.Get("user_labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_labels"); !isEmptyValue(resource_monitoring_notification_channel_reflect.ValueOf(userLabelsProp)) && (ok || !resource_monitoring_notification_channel_reflect.DeepEqual(v, userLabelsProp)) { - obj["userLabels"] = userLabelsProp - } - descriptionProp, err := expandMonitoringNotificationChannelDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_monitoring_notification_channel_reflect.ValueOf(descriptionProp)) && (ok || !resource_monitoring_notification_channel_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandMonitoringNotificationChannelDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_monitoring_notification_channel_reflect.ValueOf(displayNameProp)) && (ok || !resource_monitoring_notification_channel_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandMonitoringNotificationChannelEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); ok || !resource_monitoring_notification_channel_reflect.DeepEqual(v, enabledProp) { - obj["enabled"] = enabledProp - } - - obj, err = resourceMonitoringNotificationChannelEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "stackdriver/notifications/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/notificationChannels") - if err != nil { - return err - } - - resource_monitoring_notification_channel_log.Printf("[DEBUG] 
Creating new NotificationChannel: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_notification_channel_fmt.Errorf("Error fetching project for NotificationChannel: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_notification_channel_schema.TimeoutCreate), isMonitoringConcurrentEditError) - if err != nil { - return resource_monitoring_notification_channel_fmt.Errorf("Error creating NotificationChannel: %s", err) - } - if err := d.Set("name", flattenMonitoringNotificationChannelName(res["name"], d, config)); err != nil { - return resource_monitoring_notification_channel_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_monitoring_notification_channel_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return resource_monitoring_notification_channel_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return resource_monitoring_notification_channel_fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return resource_monitoring_notification_channel_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - resource_monitoring_notification_channel_log.Printf("[DEBUG] Finished creating NotificationChannel %q: %#v", d.Id(), res) - - return resourceMonitoringNotificationChannelRead(d, meta) -} - -func resourceMonitoringNotificationChannelRead(d *resource_monitoring_notification_channel_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_notification_channel_fmt.Errorf("Error fetching project for NotificationChannel: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, resource_monitoring_notification_channel_fmt.Sprintf("MonitoringNotificationChannel %q", d.Id())) - } - - res, err = resourceMonitoringNotificationChannelDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_monitoring_notification_channel_log.Printf("[DEBUG] Removing MonitoringNotificationChannel because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_monitoring_notification_channel_fmt.Errorf("Error reading NotificationChannel: %s", err) - } - - if err := d.Set("labels", flattenMonitoringNotificationChannelLabels(res["labels"], d, config)); err != nil { - return 
resource_monitoring_notification_channel_fmt.Errorf("Error reading NotificationChannel: %s", err) - } - if err := d.Set("name", flattenMonitoringNotificationChannelName(res["name"], d, config)); err != nil { - return resource_monitoring_notification_channel_fmt.Errorf("Error reading NotificationChannel: %s", err) - } - if err := d.Set("verification_status", flattenMonitoringNotificationChannelVerificationStatus(res["verificationStatus"], d, config)); err != nil { - return resource_monitoring_notification_channel_fmt.Errorf("Error reading NotificationChannel: %s", err) - } - if err := d.Set("type", flattenMonitoringNotificationChannelType(res["type"], d, config)); err != nil { - return resource_monitoring_notification_channel_fmt.Errorf("Error reading NotificationChannel: %s", err) - } - if err := d.Set("user_labels", flattenMonitoringNotificationChannelUserLabels(res["userLabels"], d, config)); err != nil { - return resource_monitoring_notification_channel_fmt.Errorf("Error reading NotificationChannel: %s", err) - } - if err := d.Set("description", flattenMonitoringNotificationChannelDescription(res["description"], d, config)); err != nil { - return resource_monitoring_notification_channel_fmt.Errorf("Error reading NotificationChannel: %s", err) - } - if err := d.Set("display_name", flattenMonitoringNotificationChannelDisplayName(res["displayName"], d, config)); err != nil { - return resource_monitoring_notification_channel_fmt.Errorf("Error reading NotificationChannel: %s", err) - } - if err := d.Set("enabled", flattenMonitoringNotificationChannelEnabled(res["enabled"], d, config)); err != nil { - return resource_monitoring_notification_channel_fmt.Errorf("Error reading NotificationChannel: %s", err) - } - - return nil -} - -func resourceMonitoringNotificationChannelUpdate(d *resource_monitoring_notification_channel_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != 
nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_notification_channel_fmt.Errorf("Error fetching project for NotificationChannel: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - labelsProp, err := expandMonitoringNotificationChannelLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_monitoring_notification_channel_reflect.ValueOf(v)) && (ok || !resource_monitoring_notification_channel_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - typeProp, err := expandMonitoringNotificationChannelType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(resource_monitoring_notification_channel_reflect.ValueOf(v)) && (ok || !resource_monitoring_notification_channel_reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - userLabelsProp, err := expandMonitoringNotificationChannelUserLabels(d.Get("user_labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("user_labels"); !isEmptyValue(resource_monitoring_notification_channel_reflect.ValueOf(v)) && (ok || !resource_monitoring_notification_channel_reflect.DeepEqual(v, userLabelsProp)) { - obj["userLabels"] = userLabelsProp - } - descriptionProp, err := expandMonitoringNotificationChannelDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_monitoring_notification_channel_reflect.ValueOf(v)) && (ok || !resource_monitoring_notification_channel_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandMonitoringNotificationChannelDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); 
!isEmptyValue(resource_monitoring_notification_channel_reflect.ValueOf(v)) && (ok || !resource_monitoring_notification_channel_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - enabledProp, err := expandMonitoringNotificationChannelEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); ok || !resource_monitoring_notification_channel_reflect.DeepEqual(v, enabledProp) { - obj["enabled"] = enabledProp - } - - obj, err = resourceMonitoringNotificationChannelEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "stackdriver/notifications/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - resource_monitoring_notification_channel_log.Printf("[DEBUG] Updating NotificationChannel %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_notification_channel_schema.TimeoutUpdate), isMonitoringConcurrentEditError) - - if err != nil { - return resource_monitoring_notification_channel_fmt.Errorf("Error updating NotificationChannel %q: %s", d.Id(), err) - } else { - resource_monitoring_notification_channel_log.Printf("[DEBUG] Finished updating NotificationChannel %q: %#v", d.Id(), res) - } - - return resourceMonitoringNotificationChannelRead(d, meta) -} - -func resourceMonitoringNotificationChannelDelete(d *resource_monitoring_notification_channel_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil 
{ - return resource_monitoring_notification_channel_fmt.Errorf("Error fetching project for NotificationChannel: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "stackdriver/notifications/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_monitoring_notification_channel_log.Printf("[DEBUG] Deleting NotificationChannel %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_notification_channel_schema.TimeoutDelete), isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, "NotificationChannel") - } - - resource_monitoring_notification_channel_log.Printf("[DEBUG] Finished deleting NotificationChannel %q: %#v", d.Id(), res) - return nil -} - -func resourceMonitoringNotificationChannelImport(d *resource_monitoring_notification_channel_schema.ResourceData, meta interface{}) ([]*resource_monitoring_notification_channel_schema.ResourceData, error) { - - config := meta.(*Config) - - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*resource_monitoring_notification_channel_schema.ResourceData{d}, nil -} - -func flattenMonitoringNotificationChannelLabels(v interface{}, d *resource_monitoring_notification_channel_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringNotificationChannelName(v interface{}, d *resource_monitoring_notification_channel_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringNotificationChannelVerificationStatus(v interface{}, d 
*resource_monitoring_notification_channel_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringNotificationChannelType(v interface{}, d *resource_monitoring_notification_channel_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringNotificationChannelUserLabels(v interface{}, d *resource_monitoring_notification_channel_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringNotificationChannelDescription(v interface{}, d *resource_monitoring_notification_channel_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringNotificationChannelDisplayName(v interface{}, d *resource_monitoring_notification_channel_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringNotificationChannelEnabled(v interface{}, d *resource_monitoring_notification_channel_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandMonitoringNotificationChannelLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandMonitoringNotificationChannelType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringNotificationChannelUserLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandMonitoringNotificationChannelDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringNotificationChannelDisplayName(v interface{}, 
d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringNotificationChannelEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceMonitoringNotificationChannelEncoder(d *resource_monitoring_notification_channel_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - labelmap, ok := obj["labels"] - if !ok { - labelmap = make(map[string]string) - } - - var labels map[string]string - labels = labelmap.(map[string]string) - - for _, sl := range sensitiveLabels { - if auth, _ := d.GetOkExists("sensitive_labels.0." + sl); auth != "" { - labels[sl] = auth.(string) - } - } - - obj["labels"] = labels - - return obj, nil -} - -func resourceMonitoringNotificationChannelDecoder(d *resource_monitoring_notification_channel_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if labelmap, ok := res["labels"]; ok { - labels := labelmap.(map[string]interface{}) - for _, sl := range sensitiveLabels { - if _, apiOk := labels[sl]; apiOk { - if _, exists := d.GetOkExists("sensitive_labels.0." + sl); exists { - delete(labels, sl) - } else { - labels[sl] = d.Get("labels." 
+ sl) - } - } - } - } - - return res, nil -} - -func validateMonitoringSloGoal(v interface{}, k string) (warnings []string, errors []error) { - goal := v.(float64) - if goal <= 0 || goal > 0.999 { - errors = append(errors, resource_monitoring_slo_fmt.Errorf("goal %f must be > 0 and <= 0.999", goal)) - } - return -} - -func validateAvailabilitySli(v interface{}, key string) (ws []string, errs []error) { - if v.(bool) == false { - errs = append(errs, resource_monitoring_slo_fmt.Errorf("%q must be set to true, got: %v", key, v)) - } - return -} - -func resourceMonitoringSlo() *resource_monitoring_slo_schema.Resource { - return &resource_monitoring_slo_schema.Resource{ - Create: resourceMonitoringSloCreate, - Read: resourceMonitoringSloRead, - Update: resourceMonitoringSloUpdate, - Delete: resourceMonitoringSloDelete, - - Importer: &resource_monitoring_slo_schema.ResourceImporter{ - State: resourceMonitoringSloImport, - }, - - Timeouts: &resource_monitoring_slo_schema.ResourceTimeout{ - Create: resource_monitoring_slo_schema.DefaultTimeout(4 * resource_monitoring_slo_time.Minute), - Update: resource_monitoring_slo_schema.DefaultTimeout(4 * resource_monitoring_slo_time.Minute), - Delete: resource_monitoring_slo_schema.DefaultTimeout(4 * resource_monitoring_slo_time.Minute), - }, - - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "goal": { - Type: resource_monitoring_slo_schema.TypeFloat, - Required: true, - ValidateFunc: validateMonitoringSloGoal, - Description: `The fraction of service that must be good in order for this objective -to be met. 
0 < goal <= 0.999`, - }, - "service": { - Type: resource_monitoring_slo_schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the service to which this SLO belongs.`, - }, - "calendar_period": { - Type: resource_monitoring_slo_schema.TypeString, - Optional: true, - ValidateFunc: resource_monitoring_slo_validation.StringInSlice([]string{"DAY", "WEEK", "FORTNIGHT", "MONTH", ""}, false), - Description: `A calendar period, semantically "since the start of the current -". Possible values: ["DAY", "WEEK", "FORTNIGHT", "MONTH"]`, - ExactlyOneOf: []string{"rolling_period_days", "calendar_period"}, - }, - "display_name": { - Type: resource_monitoring_slo_schema.TypeString, - Optional: true, - Description: `Name used for UI elements listing this SLO.`, - }, - "rolling_period_days": { - Type: resource_monitoring_slo_schema.TypeInt, - Optional: true, - ValidateFunc: resource_monitoring_slo_validation.IntBetween(1, 30), - Description: `A rolling time period, semantically "in the past X days". -Must be between 1 to 30 days, inclusive.`, - ExactlyOneOf: []string{"rolling_period_days", "calendar_period"}, - }, - "basic_sli": { - Type: resource_monitoring_slo_schema.TypeList, - Optional: true, - Description: `Basic Service-Level Indicator (SLI) on a well-known service type. -Performance will be computed on the basis of pre-defined metrics. - -SLIs are used to measure and calculate the quality of the Service's -performance with respect to a single aspect of service quality. 
- -Exactly one of the following must be set: -'basic_sli', 'request_based_sli', 'windows_based_sli'`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "availability": { - Type: resource_monitoring_slo_schema.TypeList, - Optional: true, - Description: `Availability based SLI, dervied from count of requests made to this service that return successfully.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "enabled": { - Type: resource_monitoring_slo_schema.TypeBool, - Optional: true, - ValidateFunc: validateAvailabilitySli, - Description: `Whether an availability SLI is enabled or not. Must be set to true. Defaults to 'true'.`, - Default: true, - }, - }, - }, - ExactlyOneOf: []string{"basic_sli.0.latency", "basic_sli.0.availability"}, - }, - "latency": { - Type: resource_monitoring_slo_schema.TypeList, - Optional: true, - Description: `Parameters for a latency threshold SLI.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "threshold": { - Type: resource_monitoring_slo_schema.TypeString, - Required: true, - Description: `A duration string, e.g. 10s. -Good service is defined to be the count of requests made to -this service that return in no more than threshold.`, - }, - }, - }, - ExactlyOneOf: []string{"basic_sli.0.latency", "basic_sli.0.availability"}, - }, - "location": { - Type: resource_monitoring_slo_schema.TypeSet, - Optional: true, - Description: `An optional set of locations to which this SLI is relevant. -Telemetry from other locations will not be used to calculate -performance for this SLI. If omitted, this SLI applies to all -locations in which the Service has activity. 
For service types -that don't support breaking down by location, setting this -field will result in an error.`, - Elem: &resource_monitoring_slo_schema.Schema{ - Type: resource_monitoring_slo_schema.TypeString, - }, - Set: resource_monitoring_slo_schema.HashString, - }, - "method": { - Type: resource_monitoring_slo_schema.TypeSet, - Optional: true, - Description: `An optional set of RPCs to which this SLI is relevant. -Telemetry from other methods will not be used to calculate -performance for this SLI. If omitted, this SLI applies to all -the Service's methods. For service types that don't support -breaking down by method, setting this field will result in an -error.`, - Elem: &resource_monitoring_slo_schema.Schema{ - Type: resource_monitoring_slo_schema.TypeString, - }, - Set: resource_monitoring_slo_schema.HashString, - }, - "version": { - Type: resource_monitoring_slo_schema.TypeSet, - Optional: true, - Description: `The set of API versions to which this SLI is relevant. -Telemetry from other API versions will not be used to -calculate performance for this SLI. If omitted, -this SLI applies to all API versions. For service types -that don't support breaking down by version, setting this -field will result in an error.`, - Elem: &resource_monitoring_slo_schema.Schema{ - Type: resource_monitoring_slo_schema.TypeString, - }, - Set: resource_monitoring_slo_schema.HashString, - }, - }, - }, - ExactlyOneOf: []string{"basic_sli", "request_based_sli", "windows_based_sli"}, - }, - "request_based_sli": { - Type: resource_monitoring_slo_schema.TypeList, - Optional: true, - Description: `A request-based SLI defines a SLI for which atomic units of -service are counted directly. - -A SLI describes a good service. -It is used to measure and calculate the quality of the Service's -performance with respect to a single aspect of service quality. 
-Exactly one of the following must be set: -'basic_sli', 'request_based_sli', 'windows_based_sli'`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "distribution_cut": { - Type: resource_monitoring_slo_schema.TypeList, - Optional: true, - Description: `Used when good_service is defined by a count of values aggregated in a -Distribution that fall into a good range. The total_service is the -total count of all values aggregated in the Distribution. -Defines a distribution TimeSeries filter and thresholds used for -measuring good service and total service. - -Exactly one of 'distribution_cut' or 'good_total_ratio' can be set.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "distribution_filter": { - Type: resource_monitoring_slo_schema.TypeString, - Required: true, - Description: `A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) -aggregating values to quantify the good service provided. - -Must have ValueType = DISTRIBUTION and -MetricKind = DELTA or MetricKind = CUMULATIVE.`, - }, - "range": { - Type: resource_monitoring_slo_schema.TypeList, - Required: true, - Description: `Range of numerical values. The computed good_service -will be the count of values x in the Distribution such -that range.min <= x <= range.max. inclusive of min and -max. Open ranges can be defined by setting -just one of min or max.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "max": { - Type: resource_monitoring_slo_schema.TypeFloat, - Optional: true, - Description: `max value for the range (inclusive). 
If not given, -will be set to "infinity", defining an open range -">= range.min"`, - AtLeastOneOf: []string{"request_based_sli.0.distribution_cut.0.range.0.min", "request_based_sli.0.distribution_cut.0.range.0.max"}, - }, - "min": { - Type: resource_monitoring_slo_schema.TypeFloat, - Optional: true, - Description: `Min value for the range (inclusive). If not given, -will be set to "-infinity", defining an open range -"< range.max"`, - AtLeastOneOf: []string{"request_based_sli.0.distribution_cut.0.range.0.min", "request_based_sli.0.distribution_cut.0.range.0.max"}, - }, - }, - }, - }, - }, - }, - ExactlyOneOf: []string{"request_based_sli.0.good_total_ratio", "request_based_sli.0.distribution_cut"}, - }, - "good_total_ratio": { - Type: resource_monitoring_slo_schema.TypeList, - Optional: true, - Description: `A means to compute a ratio of 'good_service' to 'total_service'. -Defines computing this ratio with two TimeSeries [monitoring filters](https://cloud.google.com/monitoring/api/v3/filters) -Must specify exactly two of good, bad, and total service filters. -The relationship good_service + bad_service = total_service -will be assumed. - -Exactly one of 'distribution_cut' or 'good_total_ratio' can be set.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "bad_service_filter": { - Type: resource_monitoring_slo_schema.TypeString, - Optional: true, - Description: `A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) -quantifying bad service provided, either demanded service that -was not provided or demanded service that was of inadequate -quality. - -Must have ValueType = DOUBLE or ValueType = INT64 and -must have MetricKind = DELTA or MetricKind = CUMULATIVE. 
- -Exactly two of 'good_service_filter','bad_service_filter','total_service_filter' -must be set (good + bad = total is assumed).`, - AtLeastOneOf: []string{"request_based_sli.0.good_total_ratio.0.good_service_filter", "request_based_sli.0.good_total_ratio.0.bad_service_filter", "request_based_sli.0.good_total_ratio.0.total_service_filter"}, - }, - "good_service_filter": { - Type: resource_monitoring_slo_schema.TypeString, - Optional: true, - Description: `A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) -quantifying good service provided. -Must have ValueType = DOUBLE or ValueType = INT64 and -must have MetricKind = DELTA or MetricKind = CUMULATIVE. - -Exactly two of 'good_service_filter','bad_service_filter','total_service_filter' -must be set (good + bad = total is assumed).`, - AtLeastOneOf: []string{"request_based_sli.0.good_total_ratio.0.good_service_filter", "request_based_sli.0.good_total_ratio.0.bad_service_filter", "request_based_sli.0.good_total_ratio.0.total_service_filter"}, - }, - "total_service_filter": { - Type: resource_monitoring_slo_schema.TypeString, - Optional: true, - Description: `A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) -quantifying total demanded service. - -Must have ValueType = DOUBLE or ValueType = INT64 and -must have MetricKind = DELTA or MetricKind = CUMULATIVE. 
- -Exactly two of 'good_service_filter','bad_service_filter','total_service_filter' -must be set (good + bad = total is assumed).`, - AtLeastOneOf: []string{"request_based_sli.0.good_total_ratio.0.good_service_filter", "request_based_sli.0.good_total_ratio.0.bad_service_filter", "request_based_sli.0.good_total_ratio.0.total_service_filter"}, - }, - }, - }, - ExactlyOneOf: []string{"request_based_sli.0.good_total_ratio", "request_based_sli.0.distribution_cut"}, - }, - }, - }, - ExactlyOneOf: []string{"basic_sli", "request_based_sli", "windows_based_sli"}, - }, - "windows_based_sli": { - Type: resource_monitoring_slo_schema.TypeList, - Optional: true, - Description: `A windows-based SLI defines the criteria for time windows. -good_service is defined based off the count of these time windows -for which the provided service was of good quality. - -A SLI describes a good service. It is used to measure and calculate -the quality of the Service's performance with respect to a single -aspect of service quality. - -Exactly one of the following must be set: -'basic_sli', 'request_based_sli', 'windows_based_sli'`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "good_bad_metric_filter": { - Type: resource_monitoring_slo_schema.TypeString, - Optional: true, - Description: `A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) -with ValueType = BOOL. The window is good if any true values -appear in the window. 
One of 'good_bad_metric_filter', -'good_total_ratio_threshold', 'metric_mean_in_range', -'metric_sum_in_range' must be set for 'windows_based_sli'.`, - ExactlyOneOf: []string{"windows_based_sli.0.good_bad_metric_filter", "windows_based_sli.0.good_total_ratio_threshold", "windows_based_sli.0.metric_mean_in_range", "windows_based_sli.0.metric_sum_in_range"}, - }, - "good_total_ratio_threshold": { - Type: resource_monitoring_slo_schema.TypeList, - Optional: true, - Description: `Criterion that describes a window as good if its performance is -high enough. One of 'good_bad_metric_filter', -'good_total_ratio_threshold', 'metric_mean_in_range', -'metric_sum_in_range' must be set for 'windows_based_sli'.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "basic_sli_performance": { - Type: resource_monitoring_slo_schema.TypeList, - Optional: true, - Description: `Basic SLI to evaluate to judge window quality.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "availability": { - Type: resource_monitoring_slo_schema.TypeList, - Optional: true, - Description: `Availability based SLI, dervied from count of requests made to this service that return successfully.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "enabled": { - Type: resource_monitoring_slo_schema.TypeBool, - Optional: true, - ValidateFunc: validateAvailabilitySli, - Description: `Whether an availability SLI is enabled or not. Must be set to 'true. 
Defaults to 'true'.`, - Default: true, - }, - }, - }, - ExactlyOneOf: []string{"windows_based_sli.0.good_total_ratio_threshold.0.basic_sli_performance.0.latency", "windows_based_sli.0.good_total_ratio_threshold.0.basic_sli_performance.0.availability"}, - }, - "latency": { - Type: resource_monitoring_slo_schema.TypeList, - Optional: true, - Description: `Parameters for a latency threshold SLI.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "threshold": { - Type: resource_monitoring_slo_schema.TypeString, - Required: true, - Description: `A duration string, e.g. 10s. -Good service is defined to be the count of requests made to -this service that return in no more than threshold.`, - }, - }, - }, - ExactlyOneOf: []string{"windows_based_sli.0.good_total_ratio_threshold.0.basic_sli_performance.0.latency", "windows_based_sli.0.good_total_ratio_threshold.0.basic_sli_performance.0.availability"}, - }, - "location": { - Type: resource_monitoring_slo_schema.TypeSet, - Optional: true, - Description: `An optional set of locations to which this SLI is relevant. -Telemetry from other locations will not be used to calculate -performance for this SLI. If omitted, this SLI applies to all -locations in which the Service has activity. For service types -that don't support breaking down by location, setting this -field will result in an error.`, - Elem: &resource_monitoring_slo_schema.Schema{ - Type: resource_monitoring_slo_schema.TypeString, - }, - Set: resource_monitoring_slo_schema.HashString, - }, - "method": { - Type: resource_monitoring_slo_schema.TypeSet, - Optional: true, - Description: `An optional set of RPCs to which this SLI is relevant. -Telemetry from other methods will not be used to calculate -performance for this SLI. If omitted, this SLI applies to all -the Service's methods. 
For service types that don't support -breaking down by method, setting this field will result in an -error.`, - Elem: &resource_monitoring_slo_schema.Schema{ - Type: resource_monitoring_slo_schema.TypeString, - }, - Set: resource_monitoring_slo_schema.HashString, - }, - "version": { - Type: resource_monitoring_slo_schema.TypeSet, - Optional: true, - Description: `The set of API versions to which this SLI is relevant. -Telemetry from other API versions will not be used to -calculate performance for this SLI. If omitted, -this SLI applies to all API versions. For service types -that don't support breaking down by version, setting this -field will result in an error.`, - Elem: &resource_monitoring_slo_schema.Schema{ - Type: resource_monitoring_slo_schema.TypeString, - }, - Set: resource_monitoring_slo_schema.HashString, - }, - }, - }, - ExactlyOneOf: []string{"windows_based_sli.0.good_total_ratio_threshold.0.performance", "windows_based_sli.0.good_total_ratio_threshold.0.basic_sli_performance"}, - }, - "performance": { - Type: resource_monitoring_slo_schema.TypeList, - Optional: true, - Description: `Request-based SLI to evaluate to judge window quality.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "distribution_cut": { - Type: resource_monitoring_slo_schema.TypeList, - Optional: true, - Description: `Used when good_service is defined by a count of values aggregated in a -Distribution that fall into a good range. The total_service is the -total count of all values aggregated in the Distribution. 
-Defines a distribution TimeSeries filter and thresholds used for -measuring good service and total service.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "distribution_filter": { - Type: resource_monitoring_slo_schema.TypeString, - Required: true, - Description: `A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) -aggregating values to quantify the good service provided. - -Must have ValueType = DISTRIBUTION and -MetricKind = DELTA or MetricKind = CUMULATIVE.`, - }, - "range": { - Type: resource_monitoring_slo_schema.TypeList, - Required: true, - Description: `Range of numerical values. The computed good_service -will be the count of values x in the Distribution such -that range.min <= x <= range.max. inclusive of min and -max. Open ranges can be defined by setting -just one of min or max.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "max": { - Type: resource_monitoring_slo_schema.TypeFloat, - Optional: true, - Description: `max value for the range (inclusive). If not given, -will be set to "infinity", defining an open range -">= range.min"`, - AtLeastOneOf: []string{"windows_based_sli.0.good_total_ratio_threshold.0.performance.0.distribution_cut.0.range.0.min", "windows_based_sli.0.good_total_ratio_threshold.0.performance.0.distribution_cut.0.range.0.max"}, - }, - "min": { - Type: resource_monitoring_slo_schema.TypeFloat, - Optional: true, - Description: `Min value for the range (inclusive). 
If not given, -will be set to "-infinity", defining an open range -"< range.max"`, - AtLeastOneOf: []string{"windows_based_sli.0.good_total_ratio_threshold.0.performance.0.distribution_cut.0.range.0.min", "windows_based_sli.0.good_total_ratio_threshold.0.performance.0.distribution_cut.0.range.0.max"}, - }, - }, - }, - }, - }, - }, - ExactlyOneOf: []string{"windows_based_sli.0.good_total_ratio_threshold.0.performance.0.good_total_ratio", "windows_based_sli.0.good_total_ratio_threshold.0.performance.0.distribution_cut"}, - }, - "good_total_ratio": { - Type: resource_monitoring_slo_schema.TypeList, - Optional: true, - Description: `A means to compute a ratio of 'good_service' to 'total_service'. -Defines computing this ratio with two TimeSeries [monitoring filters](https://cloud.google.com/monitoring/api/v3/filters) -Must specify exactly two of good, bad, and total service filters. -The relationship good_service + bad_service = total_service -will be assumed.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "bad_service_filter": { - Type: resource_monitoring_slo_schema.TypeString, - Optional: true, - Description: `A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) -quantifying bad service provided, either demanded service that -was not provided or demanded service that was of inadequate -quality. 
Exactly two of -good, bad, or total service filter must be defined (where -good + bad = total is assumed) - -Must have ValueType = DOUBLE or ValueType = INT64 and -must have MetricKind = DELTA or MetricKind = CUMULATIVE.`, - AtLeastOneOf: []string{"windows_based_sli.0.good_total_ratio_threshold.0.performance.0.good_total_ratio.0.good_service_filter", "windows_based_sli.0.good_total_ratio_threshold.0.performance.0.good_total_ratio.0.bad_service_filter", "windows_based_sli.0.good_total_ratio_threshold.0.performance.0.good_total_ratio.0.total_service_filter"}, - }, - "good_service_filter": { - Type: resource_monitoring_slo_schema.TypeString, - Optional: true, - Description: `A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) -quantifying good service provided. Exactly two of -good, bad, or total service filter must be defined (where -good + bad = total is assumed) - -Must have ValueType = DOUBLE or ValueType = INT64 and -must have MetricKind = DELTA or MetricKind = CUMULATIVE.`, - AtLeastOneOf: []string{"windows_based_sli.0.good_total_ratio_threshold.0.performance.0.good_total_ratio.0.good_service_filter", "windows_based_sli.0.good_total_ratio_threshold.0.performance.0.good_total_ratio.0.bad_service_filter", "windows_based_sli.0.good_total_ratio_threshold.0.performance.0.good_total_ratio.0.total_service_filter"}, - }, - "total_service_filter": { - Type: resource_monitoring_slo_schema.TypeString, - Optional: true, - Description: `A TimeSeries [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) -quantifying total demanded service. 
Exactly two of -good, bad, or total service filter must be defined (where -good + bad = total is assumed) - -Must have ValueType = DOUBLE or ValueType = INT64 and -must have MetricKind = DELTA or MetricKind = CUMULATIVE.`, - AtLeastOneOf: []string{"windows_based_sli.0.good_total_ratio_threshold.0.performance.0.good_total_ratio.0.good_service_filter", "windows_based_sli.0.good_total_ratio_threshold.0.performance.0.good_total_ratio.0.bad_service_filter", "windows_based_sli.0.good_total_ratio_threshold.0.performance.0.good_total_ratio.0.total_service_filter"}, - }, - }, - }, - ExactlyOneOf: []string{"windows_based_sli.0.good_total_ratio_threshold.0.performance.0.good_total_ratio", "windows_based_sli.0.good_total_ratio_threshold.0.performance.0.distribution_cut"}, - }, - }, - }, - ExactlyOneOf: []string{"windows_based_sli.0.good_total_ratio_threshold.0.performance", "windows_based_sli.0.good_total_ratio_threshold.0.basic_sli_performance"}, - }, - "threshold": { - Type: resource_monitoring_slo_schema.TypeFloat, - Optional: true, - Description: `If window performance >= threshold, the window is counted -as good.`, - }, - }, - }, - ExactlyOneOf: []string{"windows_based_sli.0.good_bad_metric_filter", "windows_based_sli.0.good_total_ratio_threshold", "windows_based_sli.0.metric_mean_in_range", "windows_based_sli.0.metric_sum_in_range"}, - }, - "metric_mean_in_range": { - Type: resource_monitoring_slo_schema.TypeList, - Optional: true, - Description: `Criterion that describes a window as good if the metric's value -is in a good range, *averaged* across returned streams. -One of 'good_bad_metric_filter', - -'good_total_ratio_threshold', 'metric_mean_in_range', -'metric_sum_in_range' must be set for 'windows_based_sli'. 
-Average value X of 'time_series' should satisfy -'range.min <= X <= range.max' for a good window.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "range": { - Type: resource_monitoring_slo_schema.TypeList, - Required: true, - Description: `Range of numerical values. The computed good_service -will be the count of values x in the Distribution such -that range.min <= x <= range.max. inclusive of min and -max. Open ranges can be defined by setting -just one of min or max. Mean value 'X' of 'time_series' -values should satisfy 'range.min <= X <= range.max' for a -good service.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "max": { - Type: resource_monitoring_slo_schema.TypeFloat, - Optional: true, - Description: `max value for the range (inclusive). If not given, -will be set to "infinity", defining an open range -">= range.min"`, - AtLeastOneOf: []string{"windows_based_sli.0.metric_mean_in_range.0.range.0.min", "windows_based_sli.0.metric_mean_in_range.0.range.0.max"}, - }, - "min": { - Type: resource_monitoring_slo_schema.TypeFloat, - Optional: true, - Description: `Min value for the range (inclusive). If not given, -will be set to "-infinity", defining an open range -"< range.max"`, - AtLeastOneOf: []string{"windows_based_sli.0.metric_mean_in_range.0.range.0.min", "windows_based_sli.0.metric_mean_in_range.0.range.0.max"}, - }, - }, - }, - }, - "time_series": { - Type: resource_monitoring_slo_schema.TypeString, - Required: true, - Description: `A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) -specifying the TimeSeries to use for evaluating window -The provided TimeSeries must have ValueType = INT64 or -ValueType = DOUBLE and MetricKind = GAUGE. 
Mean value 'X' -should satisfy 'range.min <= X <= range.max' -under good service.`, - }, - }, - }, - ExactlyOneOf: []string{"windows_based_sli.0.good_bad_metric_filter", "windows_based_sli.0.good_total_ratio_threshold", "windows_based_sli.0.metric_mean_in_range", "windows_based_sli.0.metric_sum_in_range"}, - }, - "metric_sum_in_range": { - Type: resource_monitoring_slo_schema.TypeList, - Optional: true, - Description: `Criterion that describes a window as good if the metric's value -is in a good range, *summed* across returned streams. -Summed value 'X' of 'time_series' should satisfy -'range.min <= X <= range.max' for a good window. - -One of 'good_bad_metric_filter', -'good_total_ratio_threshold', 'metric_mean_in_range', -'metric_sum_in_range' must be set for 'windows_based_sli'.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "range": { - Type: resource_monitoring_slo_schema.TypeList, - Required: true, - Description: `Range of numerical values. The computed good_service -will be the count of values x in the Distribution such -that range.min <= x <= range.max. inclusive of min and -max. Open ranges can be defined by setting -just one of min or max. Summed value 'X' should satisfy -'range.min <= X <= range.max' for a good window.`, - MaxItems: 1, - Elem: &resource_monitoring_slo_schema.Resource{ - Schema: map[string]*resource_monitoring_slo_schema.Schema{ - "max": { - Type: resource_monitoring_slo_schema.TypeFloat, - Optional: true, - Description: `max value for the range (inclusive). If not given, -will be set to "infinity", defining an open range -">= range.min"`, - AtLeastOneOf: []string{"windows_based_sli.0.metric_sum_in_range.0.range.0.min", "windows_based_sli.0.metric_sum_in_range.0.range.0.max"}, - }, - "min": { - Type: resource_monitoring_slo_schema.TypeFloat, - Optional: true, - Description: `Min value for the range (inclusive). 
If not given, -will be set to "-infinity", defining an open range -"< range.max"`, - AtLeastOneOf: []string{"windows_based_sli.0.metric_sum_in_range.0.range.0.min", "windows_based_sli.0.metric_sum_in_range.0.range.0.max"}, - }, - }, - }, - }, - "time_series": { - Type: resource_monitoring_slo_schema.TypeString, - Required: true, - Description: `A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters) -specifying the TimeSeries to use for evaluating window -quality. The provided TimeSeries must have -ValueType = INT64 or ValueType = DOUBLE and -MetricKind = GAUGE. - -Summed value 'X' should satisfy -'range.min <= X <= range.max' for a good window.`, - }, - }, - }, - ExactlyOneOf: []string{"windows_based_sli.0.good_bad_metric_filter", "windows_based_sli.0.good_total_ratio_threshold", "windows_based_sli.0.metric_mean_in_range", "windows_based_sli.0.metric_sum_in_range"}, - }, - "window_period": { - Type: resource_monitoring_slo_schema.TypeString, - Optional: true, - Description: `Duration over which window quality is evaluated, given as a -duration string "{X}s" representing X seconds. Must be an -integer fraction of a day and at least 60s.`, - }, - }, - }, - ExactlyOneOf: []string{"basic_sli", "request_based_sli", "windows_based_sli"}, - }, - - "slo_id": { - Type: resource_monitoring_slo_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[a-z0-9\-]+$`), - Description: `The id to use for this ServiceLevelObjective. If omitted, an id will be generated instead.`, - }, - "name": { - Type: resource_monitoring_slo_schema.TypeString, - Computed: true, - Description: `The full resource name for this service. 
The syntax is: -projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]`, - }, - "project": { - Type: resource_monitoring_slo_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMonitoringSloCreate(d *resource_monitoring_slo_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandMonitoringSloDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_monitoring_slo_reflect.ValueOf(displayNameProp)) && (ok || !resource_monitoring_slo_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - goalProp, err := expandMonitoringSloGoal(d.Get("goal"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("goal"); !isEmptyValue(resource_monitoring_slo_reflect.ValueOf(goalProp)) && (ok || !resource_monitoring_slo_reflect.DeepEqual(v, goalProp)) { - obj["goal"] = goalProp - } - rollingPeriodProp, err := expandMonitoringSloRollingPeriodDays(d.Get("rolling_period_days"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("rolling_period_days"); !isEmptyValue(resource_monitoring_slo_reflect.ValueOf(rollingPeriodProp)) && (ok || !resource_monitoring_slo_reflect.DeepEqual(v, rollingPeriodProp)) { - obj["rollingPeriod"] = rollingPeriodProp - } - calendarPeriodProp, err := expandMonitoringSloCalendarPeriod(d.Get("calendar_period"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("calendar_period"); !isEmptyValue(resource_monitoring_slo_reflect.ValueOf(calendarPeriodProp)) && (ok || !resource_monitoring_slo_reflect.DeepEqual(v, calendarPeriodProp)) { - obj["calendarPeriod"] = calendarPeriodProp 
- } - serviceLevelIndicatorProp, err := expandMonitoringSloServiceLevelIndicator(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(resource_monitoring_slo_reflect.ValueOf(serviceLevelIndicatorProp)) { - obj["serviceLevelIndicator"] = serviceLevelIndicatorProp - } - nameProp, err := expandMonitoringSloSloId(d.Get("slo_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("slo_id"); !isEmptyValue(resource_monitoring_slo_reflect.ValueOf(nameProp)) && (ok || !resource_monitoring_slo_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - obj, err = resourceMonitoringSloEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "monitoring/project/{{project}}/service/{{service}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/services/{{service}}/serviceLevelObjectives?serviceLevelObjectiveId={{slo_id}}") - if err != nil { - return err - } - - resource_monitoring_slo_log.Printf("[DEBUG] Creating new Slo: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_slo_fmt.Errorf("Error fetching project for Slo: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_slo_schema.TimeoutCreate)) - if err != nil { - return resource_monitoring_slo_fmt.Errorf("Error creating Slo: %s", err) - } - if err := d.Set("name", flattenMonitoringSloName(res["name"], d, config)); err != nil { - return resource_monitoring_slo_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return 
resource_monitoring_slo_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_monitoring_slo_log.Printf("[DEBUG] Finished creating Slo %q: %#v", d.Id(), res) - - return resourceMonitoringSloRead(d, meta) -} - -func resourceMonitoringSloRead(d *resource_monitoring_slo_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_slo_fmt.Errorf("Error fetching project for Slo: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_monitoring_slo_fmt.Sprintf("MonitoringSlo %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_monitoring_slo_fmt.Errorf("Error reading Slo: %s", err) - } - - if err := d.Set("name", flattenMonitoringSloName(res["name"], d, config)); err != nil { - return resource_monitoring_slo_fmt.Errorf("Error reading Slo: %s", err) - } - if err := d.Set("display_name", flattenMonitoringSloDisplayName(res["displayName"], d, config)); err != nil { - return resource_monitoring_slo_fmt.Errorf("Error reading Slo: %s", err) - } - if err := d.Set("goal", flattenMonitoringSloGoal(res["goal"], d, config)); err != nil { - return resource_monitoring_slo_fmt.Errorf("Error reading Slo: %s", err) - } - if err := d.Set("rolling_period_days", flattenMonitoringSloRollingPeriodDays(res["rollingPeriod"], d, config)); err != nil { - return resource_monitoring_slo_fmt.Errorf("Error reading Slo: %s", err) - } - if err := d.Set("calendar_period", 
flattenMonitoringSloCalendarPeriod(res["calendarPeriod"], d, config)); err != nil { - return resource_monitoring_slo_fmt.Errorf("Error reading Slo: %s", err) - } - - if flattenedProp := flattenMonitoringSloServiceLevelIndicator(res["serviceLevelIndicator"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*resource_monitoring_slo_googleapi.Error); ok { - return resource_monitoring_slo_fmt.Errorf("Error reading Slo: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return resource_monitoring_slo_fmt.Errorf("Error setting %s: %s", k, err) - } - } - } - } - if err := d.Set("slo_id", flattenMonitoringSloSloId(res["name"], d, config)); err != nil { - return resource_monitoring_slo_fmt.Errorf("Error reading Slo: %s", err) - } - - return nil -} - -func resourceMonitoringSloUpdate(d *resource_monitoring_slo_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_slo_fmt.Errorf("Error fetching project for Slo: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandMonitoringSloDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_monitoring_slo_reflect.ValueOf(v)) && (ok || !resource_monitoring_slo_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - goalProp, err := expandMonitoringSloGoal(d.Get("goal"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("goal"); !isEmptyValue(resource_monitoring_slo_reflect.ValueOf(v)) && (ok || !resource_monitoring_slo_reflect.DeepEqual(v, goalProp)) { - 
obj["goal"] = goalProp - } - rollingPeriodProp, err := expandMonitoringSloRollingPeriodDays(d.Get("rolling_period_days"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("rolling_period_days"); !isEmptyValue(resource_monitoring_slo_reflect.ValueOf(v)) && (ok || !resource_monitoring_slo_reflect.DeepEqual(v, rollingPeriodProp)) { - obj["rollingPeriod"] = rollingPeriodProp - } - calendarPeriodProp, err := expandMonitoringSloCalendarPeriod(d.Get("calendar_period"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("calendar_period"); !isEmptyValue(resource_monitoring_slo_reflect.ValueOf(v)) && (ok || !resource_monitoring_slo_reflect.DeepEqual(v, calendarPeriodProp)) { - obj["calendarPeriod"] = calendarPeriodProp - } - serviceLevelIndicatorProp, err := expandMonitoringSloServiceLevelIndicator(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(resource_monitoring_slo_reflect.ValueOf(serviceLevelIndicatorProp)) { - obj["serviceLevelIndicator"] = serviceLevelIndicatorProp - } - - obj, err = resourceMonitoringSloEncoder(d, meta, obj) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, "monitoring/project/{{project}}/service/{{service}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - resource_monitoring_slo_log.Printf("[DEBUG] Updating Slo %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("goal") { - updateMask = append(updateMask, "goal") - } - - if d.HasChange("rolling_period_days") { - updateMask = append(updateMask, "rollingPeriod") - } - - if d.HasChange("calendar_period") { - updateMask = append(updateMask, "calendarPeriod") - } - - if d.HasChange("basic_sli") { - updateMask = append(updateMask, 
"serviceLevelIndicator.basicSli") - } - - if d.HasChange("request_based_sli") { - updateMask = append(updateMask, "serviceLevelIndicator.requestBased.goodTotalRatio.badServiceFilter", - "serviceLevelIndicator.requestBased.goodTotalRatio.goodServiceFilter", - "serviceLevelIndicator.requestBased.goodTotalRatio.totalServiceFilter", - "serviceLevelIndicator.requestBased.distributionCut.range", - "serviceLevelIndicator.requestBased.distributionCut.distributionFilter") - } - - if d.HasChange("windows_based_sli") { - updateMask = append(updateMask, "serviceLevelIndicator.windowsBased.windowPeriod", - "serviceLevelIndicator.windowsBased.goodBadMetricFilter", - "serviceLevelIndicator.windowsBased.goodTotalRatioThreshold.threshold", - "serviceLevelIndicator.windowsBased.goodTotalRatioThreshold.performance.goodTotalRatio.badServiceFilter", - "serviceLevelIndicator.windowsBased.goodTotalRatioThreshold.performance.goodTotalRatio.goodServiceFilter", - "serviceLevelIndicator.windowsBased.goodTotalRatioThreshold.performance.goodTotalRatio.totalServiceFilter", - "serviceLevelIndicator.windowsBased.goodTotalRatioThreshold.performance.distributionCut.range", - "serviceLevelIndicator.windowsBased.goodTotalRatioThreshold.performance.distributionCut.distributionFilter", - "serviceLevelIndicator.windowsBased.goodTotalRatioThreshold.basicSliPerformance", - "serviceLevelIndicator.windowsBased.metricMeanInRange.timeSeries", - "serviceLevelIndicator.windowsBased.metricMeanInRange.range", - "serviceLevelIndicator.windowsBased.metricSumInRange.timeSeries", - "serviceLevelIndicator.windowsBased.metricSumInRange.range") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_monitoring_slo_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, 
d.Timeout(resource_monitoring_slo_schema.TimeoutUpdate)) - - if err != nil { - return resource_monitoring_slo_fmt.Errorf("Error updating Slo %q: %s", d.Id(), err) - } else { - resource_monitoring_slo_log.Printf("[DEBUG] Finished updating Slo %q: %#v", d.Id(), res) - } - - return resourceMonitoringSloRead(d, meta) -} - -func resourceMonitoringSloDelete(d *resource_monitoring_slo_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_slo_fmt.Errorf("Error fetching project for Slo: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "monitoring/project/{{project}}/service/{{service}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_monitoring_slo_log.Printf("[DEBUG] Deleting Slo %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_slo_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Slo") - } - - resource_monitoring_slo_log.Printf("[DEBUG] Finished deleting Slo %q: %#v", d.Id(), res) - return nil -} - -func resourceMonitoringSloImport(d *resource_monitoring_slo_schema.ResourceData, meta interface{}) ([]*resource_monitoring_slo_schema.ResourceData, error) { - - config := meta.(*Config) - - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*resource_monitoring_slo_schema.ResourceData{d}, nil -} - -func flattenMonitoringSloName(v 
interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloDisplayName(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloGoal(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandMonitoringSloRollingPeriodDays(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil { - return nil, nil - } - i, ok := v.(int) - if !ok { - return nil, resource_monitoring_slo_fmt.Errorf("unexpected value is not int: %v", v) - } - if i == 0 { - return "", nil - } - - return resource_monitoring_slo_fmt.Sprintf("%ds", i*86400), nil -} - -func flattenMonitoringSloCalendarPeriod(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicator(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["basic_sli"] = - flattenMonitoringSloServiceLevelIndicatorBasicSli(original["basicSli"], d, config) - transformed["request_based_sli"] = - flattenMonitoringSloServiceLevelIndicatorRequestBasedSli(original["requestBased"], d, config) - transformed["windows_based_sli"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSli(original["windowsBased"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorBasicSli(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["method"] = - 
flattenMonitoringSloServiceLevelIndicatorBasicSliMethod(original["method"], d, config) - transformed["location"] = - flattenMonitoringSloServiceLevelIndicatorBasicSliLocation(original["location"], d, config) - transformed["version"] = - flattenMonitoringSloServiceLevelIndicatorBasicSliVersion(original["version"], d, config) - transformed["latency"] = - flattenMonitoringSloServiceLevelIndicatorBasicSliLatency(original["latency"], d, config) - transformed["availability"] = - flattenMonitoringSloServiceLevelIndicatorBasicSliAvailability(original["availability"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorBasicSliMethod(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_monitoring_slo_schema.NewSet(resource_monitoring_slo_schema.HashString, v.([]interface{})) -} - -func flattenMonitoringSloServiceLevelIndicatorBasicSliLocation(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_monitoring_slo_schema.NewSet(resource_monitoring_slo_schema.HashString, v.([]interface{})) -} - -func flattenMonitoringSloServiceLevelIndicatorBasicSliVersion(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_monitoring_slo_schema.NewSet(resource_monitoring_slo_schema.HashString, v.([]interface{})) -} - -func flattenMonitoringSloServiceLevelIndicatorBasicSliLatency(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["threshold"] = - flattenMonitoringSloServiceLevelIndicatorBasicSliLatencyThreshold(original["threshold"], d, config) - return 
[]interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorBasicSliLatencyThreshold(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorBasicSliAvailability(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - transformed := make(map[string]interface{}) - transformed["enabled"] = true - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSli(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["good_total_ratio"] = - flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatio(original["goodTotalRatio"], d, config) - transformed["distribution_cut"] = - flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCut(original["distributionCut"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatio(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["good_service_filter"] = - flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioGoodServiceFilter(original["goodServiceFilter"], d, config) - transformed["bad_service_filter"] = - flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioBadServiceFilter(original["badServiceFilter"], d, config) - transformed["total_service_filter"] = - 
flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioTotalServiceFilter(original["totalServiceFilter"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioGoodServiceFilter(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioBadServiceFilter(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioTotalServiceFilter(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCut(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["distribution_filter"] = - flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutDistributionFilter(original["distributionFilter"], d, config) - transformed["range"] = - flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRange(original["range"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutDistributionFilter(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRange(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["min"] = - flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMin(original["min"], d, config) - transformed["max"] = - flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMax(original["max"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMin(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMax(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSli(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["window_period"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliWindowPeriod(original["windowPeriod"], d, config) - transformed["good_bad_metric_filter"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodBadMetricFilter(original["goodBadMetricFilter"], d, config) - transformed["good_total_ratio_threshold"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThreshold(original["goodTotalRatioThreshold"], d, config) - transformed["metric_mean_in_range"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRange(original["metricMeanInRange"], d, config) - transformed["metric_sum_in_range"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRange(original["metricSumInRange"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliWindowPeriod(v interface{}, d 
*resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodBadMetricFilter(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThreshold(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["threshold"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdThreshold(original["threshold"], d, config) - transformed["performance"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformance(original["performance"], d, config) - transformed["basic_sli_performance"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformance(original["basicSliPerformance"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdThreshold(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformance(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["good_total_ratio"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatio(original["goodTotalRatio"], d, config) - transformed["distribution_cut"] = - 
flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCut(original["distributionCut"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatio(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["good_service_filter"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioGoodServiceFilter(original["goodServiceFilter"], d, config) - transformed["bad_service_filter"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioBadServiceFilter(original["badServiceFilter"], d, config) - transformed["total_service_filter"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioTotalServiceFilter(original["totalServiceFilter"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioGoodServiceFilter(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioBadServiceFilter(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioTotalServiceFilter(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCut(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["distribution_filter"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutDistributionFilter(original["distributionFilter"], d, config) - transformed["range"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRange(original["range"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutDistributionFilter(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRange(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["min"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMin(original["min"], d, config) - transformed["max"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMax(original["max"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMin(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - 
-func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMax(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformance(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["method"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceMethod(original["method"], d, config) - transformed["location"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLocation(original["location"], d, config) - transformed["version"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceVersion(original["version"], d, config) - transformed["latency"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatency(original["latency"], d, config) - transformed["availability"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailability(original["availability"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceMethod(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_monitoring_slo_schema.NewSet(resource_monitoring_slo_schema.HashString, v.([]interface{})) -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLocation(v interface{}, d 
*resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_monitoring_slo_schema.NewSet(resource_monitoring_slo_schema.HashString, v.([]interface{})) -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceVersion(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_monitoring_slo_schema.NewSet(resource_monitoring_slo_schema.HashString, v.([]interface{})) -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatency(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["threshold"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatencyThreshold(original["threshold"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatencyThreshold(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailability(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - transformed := make(map[string]interface{}) - transformed["enabled"] = true - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRange(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) 
- if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["time_series"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeTimeSeries(original["timeSeries"], d, config) - transformed["range"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRange(original["range"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeTimeSeries(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRange(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["min"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMin(original["min"], d, config) - transformed["max"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMax(original["max"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMin(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMax(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRange(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["time_series"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeTimeSeries(original["timeSeries"], d, config) - transformed["range"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRange(original["range"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeTimeSeries(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRange(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["min"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMin(original["min"], d, config) - transformed["max"] = - flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMax(original["max"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMin(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMax(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringSloSloId(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandMonitoringSloDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloGoal(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} - -func flattenMonitoringSloRollingPeriodDays(v interface{}, d *resource_monitoring_slo_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - if v.(string) == "" { - return nil - } - dur, err := resource_monitoring_slo_time.ParseDuration(v.(string)) - if err != nil { - return nil - } - return int(dur / (resource_monitoring_slo_time.Hour * 24)) -} - -func expandMonitoringSloCalendarPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicator(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - transformed := make(map[string]interface{}) - transformedBasicSli, err := expandMonitoringSloServiceLevelIndicatorBasicSli(d.Get("basic_sli"), d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedBasicSli); val.IsValid() && !isEmptyValue(val) { - transformed["basicSli"] = transformedBasicSli - } - - transformedRequestBasedSli, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSli(d.Get("request_based_sli"), d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedRequestBasedSli); val.IsValid() && !isEmptyValue(val) { - transformed["requestBased"] = transformedRequestBasedSli - } - - transformedWindowsBasedSli, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSli(d.Get("windows_based_sli"), d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedWindowsBasedSli); val.IsValid() && !isEmptyValue(val) { - transformed["windowsBased"] = transformedWindowsBasedSli - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorBasicSli(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, 
nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMethod, err := expandMonitoringSloServiceLevelIndicatorBasicSliMethod(original["method"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { - transformed["method"] = transformedMethod - } - - transformedLocation, err := expandMonitoringSloServiceLevelIndicatorBasicSliLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - transformedVersion, err := expandMonitoringSloServiceLevelIndicatorBasicSliVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - transformedLatency, err := expandMonitoringSloServiceLevelIndicatorBasicSliLatency(original["latency"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedLatency); val.IsValid() && !isEmptyValue(val) { - transformed["latency"] = transformedLatency - } - - transformedAvailability, err := expandMonitoringSloServiceLevelIndicatorBasicSliAvailability(original["availability"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedAvailability); val.IsValid() && !isEmptyValue(val) { - transformed["availability"] = transformedAvailability - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorBasicSliMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_monitoring_slo_schema.Set).List() - return v, nil 
-} - -func expandMonitoringSloServiceLevelIndicatorBasicSliLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_monitoring_slo_schema.Set).List() - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorBasicSliVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_monitoring_slo_schema.Set).List() - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorBasicSliLatency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedThreshold, err := expandMonitoringSloServiceLevelIndicatorBasicSliLatencyThreshold(original["threshold"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedThreshold); val.IsValid() && !isEmptyValue(val) { - transformed["threshold"] = transformedThreshold - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorBasicSliLatencyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorBasicSliAvailability(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnabled, err := expandMonitoringSloServiceLevelIndicatorBasicSliAvailabilityEnabled(original["enabled"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { - transformed["enabled"] = transformedEnabled - } - - return transformed, nil 
-} - -func expandMonitoringSloServiceLevelIndicatorBasicSliAvailabilityEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorRequestBasedSli(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedGoodTotalRatio, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatio(original["good_total_ratio"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedGoodTotalRatio); val.IsValid() && !isEmptyValue(val) { - transformed["goodTotalRatio"] = transformedGoodTotalRatio - } - - transformedDistributionCut, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCut(original["distribution_cut"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedDistributionCut); val.IsValid() && !isEmptyValue(val) { - transformed["distributionCut"] = transformedDistributionCut - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatio(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedGoodServiceFilter, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioGoodServiceFilter(original["good_service_filter"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedGoodServiceFilter); val.IsValid() && !isEmptyValue(val) { - transformed["goodServiceFilter"] 
= transformedGoodServiceFilter - } - - transformedBadServiceFilter, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioBadServiceFilter(original["bad_service_filter"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedBadServiceFilter); val.IsValid() && !isEmptyValue(val) { - transformed["badServiceFilter"] = transformedBadServiceFilter - } - - transformedTotalServiceFilter, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioTotalServiceFilter(original["total_service_filter"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedTotalServiceFilter); val.IsValid() && !isEmptyValue(val) { - transformed["totalServiceFilter"] = transformedTotalServiceFilter - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioGoodServiceFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioBadServiceFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliGoodTotalRatioTotalServiceFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCut(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDistributionFilter, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutDistributionFilter(original["distribution_filter"], d, config) - if err != nil { - 
return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedDistributionFilter); val.IsValid() && !isEmptyValue(val) { - transformed["distributionFilter"] = transformedDistributionFilter - } - - transformedRange, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRange(original["range"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedRange); val.IsValid() && !isEmptyValue(val) { - transformed["range"] = transformedRange - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutDistributionFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMin, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMin(original["min"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedMin); val.IsValid() && !isEmptyValue(val) { - transformed["min"] = transformedMin - } - - transformedMax, err := expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMax(original["max"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedMax); val.IsValid() && !isEmptyValue(val) { - transformed["max"] = transformedMax - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandMonitoringSloServiceLevelIndicatorRequestBasedSliDistributionCutRangeMax(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSli(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWindowPeriod, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliWindowPeriod(original["window_period"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedWindowPeriod); val.IsValid() && !isEmptyValue(val) { - transformed["windowPeriod"] = transformedWindowPeriod - } - - transformedGoodBadMetricFilter, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodBadMetricFilter(original["good_bad_metric_filter"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedGoodBadMetricFilter); val.IsValid() && !isEmptyValue(val) { - transformed["goodBadMetricFilter"] = transformedGoodBadMetricFilter - } - - transformedGoodTotalRatioThreshold, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThreshold(original["good_total_ratio_threshold"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedGoodTotalRatioThreshold); val.IsValid() && !isEmptyValue(val) { - transformed["goodTotalRatioThreshold"] = transformedGoodTotalRatioThreshold - } - - transformedMetricMeanInRange, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRange(original["metric_mean_in_range"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedMetricMeanInRange); val.IsValid() 
&& !isEmptyValue(val) { - transformed["metricMeanInRange"] = transformedMetricMeanInRange - } - - transformedMetricSumInRange, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRange(original["metric_sum_in_range"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedMetricSumInRange); val.IsValid() && !isEmptyValue(val) { - transformed["metricSumInRange"] = transformedMetricSumInRange - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliWindowPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodBadMetricFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedThreshold, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdThreshold(original["threshold"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedThreshold); val.IsValid() && !isEmptyValue(val) { - transformed["threshold"] = transformedThreshold - } - - transformedPerformance, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformance(original["performance"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedPerformance); val.IsValid() && !isEmptyValue(val) { - transformed["performance"] = transformedPerformance - } - - transformedBasicSliPerformance, err 
:= expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformance(original["basic_sli_performance"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedBasicSliPerformance); val.IsValid() && !isEmptyValue(val) { - transformed["basicSliPerformance"] = transformedBasicSliPerformance - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedGoodTotalRatio, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatio(original["good_total_ratio"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedGoodTotalRatio); val.IsValid() && !isEmptyValue(val) { - transformed["goodTotalRatio"] = transformedGoodTotalRatio - } - - transformedDistributionCut, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCut(original["distribution_cut"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedDistributionCut); val.IsValid() && !isEmptyValue(val) { - transformed["distributionCut"] = transformedDistributionCut - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatio(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedGoodServiceFilter, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioGoodServiceFilter(original["good_service_filter"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedGoodServiceFilter); val.IsValid() && !isEmptyValue(val) { - transformed["goodServiceFilter"] = transformedGoodServiceFilter - } - - transformedBadServiceFilter, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioBadServiceFilter(original["bad_service_filter"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedBadServiceFilter); val.IsValid() && !isEmptyValue(val) { - transformed["badServiceFilter"] = transformedBadServiceFilter - } - - transformedTotalServiceFilter, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioTotalServiceFilter(original["total_service_filter"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedTotalServiceFilter); val.IsValid() && !isEmptyValue(val) { - transformed["totalServiceFilter"] = transformedTotalServiceFilter - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioGoodServiceFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioBadServiceFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceGoodTotalRatioTotalServiceFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCut(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDistributionFilter, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutDistributionFilter(original["distribution_filter"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedDistributionFilter); val.IsValid() && !isEmptyValue(val) { - transformed["distributionFilter"] = transformedDistributionFilter - } - - transformedRange, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRange(original["range"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedRange); val.IsValid() && !isEmptyValue(val) { - transformed["range"] = transformedRange - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutDistributionFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMin, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMin(original["min"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedMin); val.IsValid() && !isEmptyValue(val) { - transformed["min"] = transformedMin - } - - transformedMax, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMax(original["max"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedMax); val.IsValid() && !isEmptyValue(val) { - transformed["max"] = transformedMax - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdPerformanceDistributionCutRangeMax(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMethod, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceMethod(original["method"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedMethod); val.IsValid() && !isEmptyValue(val) { - transformed["method"] = transformedMethod - } 
- - transformedLocation, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - transformedVersion, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - transformedLatency, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatency(original["latency"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedLatency); val.IsValid() && !isEmptyValue(val) { - transformed["latency"] = transformedLatency - } - - transformedAvailability, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailability(original["availability"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedAvailability); val.IsValid() && !isEmptyValue(val) { - transformed["availability"] = transformedAvailability - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_monitoring_slo_schema.Set).List() - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLocation(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - v = v.(*resource_monitoring_slo_schema.Set).List() - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_monitoring_slo_schema.Set).List() - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatency(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedThreshold, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatencyThreshold(original["threshold"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedThreshold); val.IsValid() && !isEmptyValue(val) { - transformed["threshold"] = transformedThreshold - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceLatencyThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailability(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnabled, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailabilityEnabled(original["enabled"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_monitoring_slo_reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { - transformed["enabled"] = transformedEnabled - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliGoodTotalRatioThresholdBasicSliPerformanceAvailabilityEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTimeSeries, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeTimeSeries(original["time_series"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedTimeSeries); val.IsValid() && !isEmptyValue(val) { - transformed["timeSeries"] = transformedTimeSeries - } - - transformedRange, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRange(original["range"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedRange); val.IsValid() && !isEmptyValue(val) { - transformed["range"] = transformedRange - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeTimeSeries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedMin, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMin(original["min"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedMin); val.IsValid() && !isEmptyValue(val) { - transformed["min"] = transformedMin - } - - transformedMax, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMax(original["max"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedMax); val.IsValid() && !isEmptyValue(val) { - transformed["max"] = transformedMax - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricMeanInRangeRangeMax(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTimeSeries, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeTimeSeries(original["time_series"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedTimeSeries); val.IsValid() && !isEmptyValue(val) { - transformed["timeSeries"] = transformedTimeSeries - } - - transformedRange, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRange(original["range"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_monitoring_slo_reflect.ValueOf(transformedRange); val.IsValid() && !isEmptyValue(val) { - transformed["range"] = transformedRange - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeTimeSeries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMin, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMin(original["min"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedMin); val.IsValid() && !isEmptyValue(val) { - transformed["min"] = transformedMin - } - - transformedMax, err := expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMax(original["max"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_slo_reflect.ValueOf(transformedMax); val.IsValid() && !isEmptyValue(val) { - transformed["max"] = transformedMax - } - - return transformed, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloServiceLevelIndicatorWindowsBasedSliMetricSumInRangeRangeMax(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringSloSloId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceMonitoringSloEncoder(d *resource_monitoring_slo_schema.ResourceData, meta 
interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - delete(obj, "sloId") - Sli := obj["serviceLevelIndicator"].(map[string]interface{}) - if basicSli, ok := Sli["basicSli"].(map[string]interface{}); ok { - - if availability, ok := basicSli["availability"]; ok { - transAvailability := availability.(map[string]interface{}) - delete(transAvailability, "enabled") - basicSli["availability"] = transAvailability - } - } - - if windowBasedSli, ok := Sli["windowsBased"].(map[string]interface{}); ok { - if goodTotalRatioThreshold, ok := windowBasedSli["goodTotalRatioThreshold"].(map[string]interface{}); ok { - if basicSli, ok := goodTotalRatioThreshold["basicSliPerformance"].(map[string]interface{}); ok { - - if availability, ok := basicSli["availability"]; ok { - transAvailability := availability.(map[string]interface{}) - delete(transAvailability, "enabled") - basicSli["availability"] = transAvailability - } - } - } - } - - return obj, nil -} - -func resourceMonitoringUptimeCheckConfig() *resource_monitoring_uptime_check_config_schema.Resource { - return &resource_monitoring_uptime_check_config_schema.Resource{ - Create: resourceMonitoringUptimeCheckConfigCreate, - Read: resourceMonitoringUptimeCheckConfigRead, - Update: resourceMonitoringUptimeCheckConfigUpdate, - Delete: resourceMonitoringUptimeCheckConfigDelete, - - Importer: &resource_monitoring_uptime_check_config_schema.ResourceImporter{ - State: resourceMonitoringUptimeCheckConfigImport, - }, - - Timeouts: &resource_monitoring_uptime_check_config_schema.ResourceTimeout{ - Create: resource_monitoring_uptime_check_config_schema.DefaultTimeout(4 * resource_monitoring_uptime_check_config_time.Minute), - Update: resource_monitoring_uptime_check_config_schema.DefaultTimeout(4 * resource_monitoring_uptime_check_config_time.Minute), - Delete: resource_monitoring_uptime_check_config_schema.DefaultTimeout(4 * resource_monitoring_uptime_check_config_time.Minute), - }, - - Schema: 
map[string]*resource_monitoring_uptime_check_config_schema.Schema{ - "display_name": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Required: true, - Description: `A human-friendly name for the uptime check configuration. The display name should be unique within a Stackdriver Workspace in order to make it easier to identify; however, uniqueness is not enforced.`, - }, - "timeout": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Required: true, - Description: `The maximum amount of time to wait for the request to complete (must be between 1 and 60 seconds). Accepted formats https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration`, - }, - "content_matchers": { - Type: resource_monitoring_uptime_check_config_schema.TypeList, - Optional: true, - Description: `The expected content on the page the check is run against. Currently, only the first entry in the list is supported, and other entries will be ignored. The server will look for an exact match of the string in the page response's content. This field is optional and should only be specified if a content match is required.`, - Elem: &resource_monitoring_uptime_check_config_schema.Resource{ - Schema: map[string]*resource_monitoring_uptime_check_config_schema.Schema{ - "content": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Required: true, - Description: `String or regex content to match (max 1024 bytes)`, - }, - "matcher": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Optional: true, - ValidateFunc: resource_monitoring_uptime_check_config_validation.StringInSlice([]string{"CONTAINS_STRING", "NOT_CONTAINS_STRING", "MATCHES_REGEX", "NOT_MATCHES_REGEX", ""}, false), - Description: `The type of content matcher that will be applied to the server output, compared to the content string when the check is run. 
Default value: "CONTAINS_STRING" Possible values: ["CONTAINS_STRING", "NOT_CONTAINS_STRING", "MATCHES_REGEX", "NOT_MATCHES_REGEX"]`, - Default: "CONTAINS_STRING", - }, - }, - }, - }, - "http_check": { - Type: resource_monitoring_uptime_check_config_schema.TypeList, - Optional: true, - Description: `Contains information needed to make an HTTP or HTTPS check.`, - MaxItems: 1, - Elem: &resource_monitoring_uptime_check_config_schema.Resource{ - Schema: map[string]*resource_monitoring_uptime_check_config_schema.Schema{ - "auth_info": { - Type: resource_monitoring_uptime_check_config_schema.TypeList, - Optional: true, - Description: `The authentication information. Optional when creating an HTTP check; defaults to empty.`, - MaxItems: 1, - Elem: &resource_monitoring_uptime_check_config_schema.Resource{ - Schema: map[string]*resource_monitoring_uptime_check_config_schema.Schema{ - "password": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Required: true, - Description: `The password to authenticate.`, - Sensitive: true, - }, - "username": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Required: true, - Description: `The username to authenticate.`, - }, - }, - }, - AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, - }, - "body": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Optional: true, - Description: `The request body associated with the HTTP POST request. If contentType is URL_ENCODED, the body passed in must be URL-encoded. Users can provide a Content-Length header via the headers field or the API will do so. If the requestMethod is GET and body is not empty, the API will return an error. The maximum byte size is 1 megabyte. Note - As with all bytes fields JSON representations are base64 encoded. e.g. 
"foo=bar" in URL-encoded form is "foo%3Dbar" and in base64 encoding is "Zm9vJTI1M0RiYXI=".`, - }, - "content_type": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Optional: true, - ValidateFunc: resource_monitoring_uptime_check_config_validation.StringInSlice([]string{"TYPE_UNSPECIFIED", "URL_ENCODED", ""}, false), - Description: `The content type to use for the check. Possible values: ["TYPE_UNSPECIFIED", "URL_ENCODED"]`, - }, - "headers": { - Type: resource_monitoring_uptime_check_config_schema.TypeMap, - Computed: true, - Optional: true, - Description: `The list of headers to send as part of the uptime check request. If two headers have the same key and different values, they should be entered as a single header, with the value being a comma-separated list of all the desired values as described at https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). Entering two separate headers with the same key in a Create call will cause the first to be overwritten by the second. The maximum number of headers allowed is 100.`, - Elem: &resource_monitoring_uptime_check_config_schema.Schema{Type: resource_monitoring_uptime_check_config_schema.TypeString}, - AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, - }, - "mask_headers": { - Type: resource_monitoring_uptime_check_config_schema.TypeBool, - Optional: true, - Description: `Boolean specifying whether to encrypt the header information. Encryption should be specified for any headers related to authentication that you do not wish to be seen when retrieving the configuration. The server will be responsible for encrypting the headers. 
On Get/List calls, if mask_headers is set to True then the headers will be obscured with ******.`, - AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, - }, - "path": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Optional: true, - Description: `The path to the page to run the check against. Will be combined with the host (specified within the MonitoredResource) and port to construct the full URL. Optional (defaults to "/").`, - Default: "/", - AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, - }, - "port": { - Type: resource_monitoring_uptime_check_config_schema.TypeInt, - Computed: true, - Optional: true, - Description: `The port to the page to run the check against. Will be combined with host (specified within the MonitoredResource) and path to construct the full URL. Optional (defaults to 80 without SSL, or 443 with SSL).`, - AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, - }, - "request_method": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_monitoring_uptime_check_config_validation.StringInSlice([]string{"METHOD_UNSPECIFIED", "GET", "POST", ""}, false), - Description: `The HTTP request method to use for the check. If set to METHOD_UNSPECIFIED then requestMethod defaults to GET. 
Default value: "GET" Possible values: ["METHOD_UNSPECIFIED", "GET", "POST"]`, - Default: "GET", - }, - "use_ssl": { - Type: resource_monitoring_uptime_check_config_schema.TypeBool, - Optional: true, - Description: `If true, use HTTPS instead of HTTP to run the check.`, - AtLeastOneOf: []string{"http_check.0.auth_info", "http_check.0.port", "http_check.0.headers", "http_check.0.path", "http_check.0.use_ssl", "http_check.0.mask_headers"}, - }, - "validate_ssl": { - Type: resource_monitoring_uptime_check_config_schema.TypeBool, - Optional: true, - Description: `Boolean specifying whether to include SSL certificate validation as a part of the Uptime check. Only applies to checks where monitoredResource is set to uptime_url. If useSsl is false, setting validateSsl to true has no effect.`, - }, - }, - }, - ExactlyOneOf: []string{"http_check", "tcp_check"}, - }, - "monitored_resource": { - Type: resource_monitoring_uptime_check_config_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The monitored resource (https://cloud.google.com/monitoring/api/resources) associated with the configuration. The following monitored resource types are supported for uptime checks: uptime_url gce_instance gae_app aws_ec2_instance aws_elb_load_balancer`, - MaxItems: 1, - Elem: &resource_monitoring_uptime_check_config_schema.Resource{ - Schema: map[string]*resource_monitoring_uptime_check_config_schema.Schema{ - "labels": { - Type: resource_monitoring_uptime_check_config_schema.TypeMap, - Required: true, - ForceNew: true, - Description: `Values for all of the labels listed in the associated monitored resource descriptor. 
For example, Compute Engine VM instances use the labels "project_id", "instance_id", and "zone".`, - Elem: &resource_monitoring_uptime_check_config_schema.Schema{Type: resource_monitoring_uptime_check_config_schema.TypeString}, - }, - "type": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The monitored resource type. This field must match the type field of a MonitoredResourceDescriptor (https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.monitoredResourceDescriptors#MonitoredResourceDescriptor) object. For example, the type of a Compute Engine VM instance is gce_instance. For a list of types, see Monitoring resource types (https://cloud.google.com/monitoring/api/resources) and Logging resource types (https://cloud.google.com/logging/docs/api/v2/resource-list).`, - }, - }, - }, - ExactlyOneOf: []string{"monitored_resource", "resource_group"}, - }, - "period": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `How often, in seconds, the uptime check is performed. Currently, the only supported values are 60s (1 minute), 300s (5 minutes), 600s (10 minutes), and 900s (15 minutes). Optional, defaults to 300s.`, - Default: "300s", - }, - "resource_group": { - Type: resource_monitoring_uptime_check_config_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The group resource associated with the configuration.`, - MaxItems: 1, - Elem: &resource_monitoring_uptime_check_config_schema.Resource{ - Schema: map[string]*resource_monitoring_uptime_check_config_schema.Schema{ - "group_id": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The group of resources being monitored. 
Should be the 'name' of a group`, - AtLeastOneOf: []string{"resource_group.0.resource_type", "resource_group.0.group_id"}, - }, - "resource_type": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_monitoring_uptime_check_config_validation.StringInSlice([]string{"RESOURCE_TYPE_UNSPECIFIED", "INSTANCE", "AWS_ELB_LOAD_BALANCER", ""}, false), - Description: `The resource type of the group members. Possible values: ["RESOURCE_TYPE_UNSPECIFIED", "INSTANCE", "AWS_ELB_LOAD_BALANCER"]`, - AtLeastOneOf: []string{"resource_group.0.resource_type", "resource_group.0.group_id"}, - }, - }, - }, - ExactlyOneOf: []string{"monitored_resource", "resource_group"}, - }, - "selected_regions": { - Type: resource_monitoring_uptime_check_config_schema.TypeList, - Optional: true, - Description: `The list of regions from which the check will be run. Some regions contain one location, and others contain more than one. If this field is specified, enough regions to include a minimum of 3 locations must be provided, or an error message is returned. Not specifying this field will result in uptime checks running from all regions.`, - Elem: &resource_monitoring_uptime_check_config_schema.Schema{ - Type: resource_monitoring_uptime_check_config_schema.TypeString, - }, - }, - "tcp_check": { - Type: resource_monitoring_uptime_check_config_schema.TypeList, - Optional: true, - Description: `Contains information needed to make a TCP check.`, - MaxItems: 1, - Elem: &resource_monitoring_uptime_check_config_schema.Resource{ - Schema: map[string]*resource_monitoring_uptime_check_config_schema.Schema{ - "port": { - Type: resource_monitoring_uptime_check_config_schema.TypeInt, - Required: true, - Description: `The port to the page to run the check against. 
Will be combined with host (specified within the MonitoredResource) to construct the full URL.`, - }, - }, - }, - ExactlyOneOf: []string{"http_check", "tcp_check"}, - }, - "name": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Computed: true, - Description: `A unique resource name for this UptimeCheckConfig. The format is projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID].`, - }, - "uptime_check_id": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Computed: true, - Description: `The id of the uptime check`, - }, - "project": { - Type: resource_monitoring_uptime_check_config_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceMonitoringUptimeCheckConfigCreate(d *resource_monitoring_uptime_check_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandMonitoringUptimeCheckConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_monitoring_uptime_check_config_reflect.ValueOf(displayNameProp)) && (ok || !resource_monitoring_uptime_check_config_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - periodProp, err := expandMonitoringUptimeCheckConfigPeriod(d.Get("period"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("period"); !isEmptyValue(resource_monitoring_uptime_check_config_reflect.ValueOf(periodProp)) && (ok || !resource_monitoring_uptime_check_config_reflect.DeepEqual(v, periodProp)) { - obj["period"] = periodProp - } - timeoutProp, err := expandMonitoringUptimeCheckConfigTimeout(d.Get("timeout"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("timeout"); !isEmptyValue(resource_monitoring_uptime_check_config_reflect.ValueOf(timeoutProp)) && (ok || !resource_monitoring_uptime_check_config_reflect.DeepEqual(v, timeoutProp)) { - obj["timeout"] = timeoutProp - } - contentMatchersProp, err := expandMonitoringUptimeCheckConfigContentMatchers(d.Get("content_matchers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("content_matchers"); !isEmptyValue(resource_monitoring_uptime_check_config_reflect.ValueOf(contentMatchersProp)) && (ok || !resource_monitoring_uptime_check_config_reflect.DeepEqual(v, contentMatchersProp)) { - obj["contentMatchers"] = contentMatchersProp - } - selectedRegionsProp, err := expandMonitoringUptimeCheckConfigSelectedRegions(d.Get("selected_regions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("selected_regions"); !isEmptyValue(resource_monitoring_uptime_check_config_reflect.ValueOf(selectedRegionsProp)) && (ok || !resource_monitoring_uptime_check_config_reflect.DeepEqual(v, selectedRegionsProp)) { - obj["selectedRegions"] = selectedRegionsProp - } - httpCheckProp, err := expandMonitoringUptimeCheckConfigHttpCheck(d.Get("http_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("http_check"); !isEmptyValue(resource_monitoring_uptime_check_config_reflect.ValueOf(httpCheckProp)) && (ok || !resource_monitoring_uptime_check_config_reflect.DeepEqual(v, httpCheckProp)) { - obj["httpCheck"] = httpCheckProp - } - tcpCheckProp, err := expandMonitoringUptimeCheckConfigTcpCheck(d.Get("tcp_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tcp_check"); !isEmptyValue(resource_monitoring_uptime_check_config_reflect.ValueOf(tcpCheckProp)) && (ok || !resource_monitoring_uptime_check_config_reflect.DeepEqual(v, tcpCheckProp)) { - obj["tcpCheck"] = tcpCheckProp - } - resourceGroupProp, err := 
expandMonitoringUptimeCheckConfigResourceGroup(d.Get("resource_group"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("resource_group"); !isEmptyValue(resource_monitoring_uptime_check_config_reflect.ValueOf(resourceGroupProp)) && (ok || !resource_monitoring_uptime_check_config_reflect.DeepEqual(v, resourceGroupProp)) { - obj["resourceGroup"] = resourceGroupProp - } - monitoredResourceProp, err := expandMonitoringUptimeCheckConfigMonitoredResource(d.Get("monitored_resource"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("monitored_resource"); !isEmptyValue(resource_monitoring_uptime_check_config_reflect.ValueOf(monitoredResourceProp)) && (ok || !resource_monitoring_uptime_check_config_reflect.DeepEqual(v, monitoredResourceProp)) { - obj["monitoredResource"] = monitoredResourceProp - } - - lockName, err := replaceVars(d, config, "stackdriver/groups/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/projects/{{project}}/uptimeCheckConfigs") - if err != nil { - return err - } - - resource_monitoring_uptime_check_config_log.Printf("[DEBUG] Creating new UptimeCheckConfig: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error fetching project for UptimeCheckConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_uptime_check_config_schema.TimeoutCreate), isMonitoringConcurrentEditError) - if err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error creating UptimeCheckConfig: %s", err) - } - if err := d.Set("name", flattenMonitoringUptimeCheckConfigName(res["name"], 
d, config)); err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return resource_monitoring_uptime_check_config_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return resource_monitoring_uptime_check_config_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - resource_monitoring_uptime_check_config_log.Printf("[DEBUG] Finished creating UptimeCheckConfig %q: %#v", d.Id(), res) - - return resourceMonitoringUptimeCheckConfigRead(d, meta) -} - -func resourceMonitoringUptimeCheckConfigRead(d *resource_monitoring_uptime_check_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error fetching project for UptimeCheckConfig: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, 
resource_monitoring_uptime_check_config_fmt.Sprintf("MonitoringUptimeCheckConfig %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - - if err := d.Set("name", flattenMonitoringUptimeCheckConfigName(res["name"], d, config)); err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("uptime_check_id", flattenMonitoringUptimeCheckConfigUptimeCheckId(res["id"], d, config)); err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("display_name", flattenMonitoringUptimeCheckConfigDisplayName(res["displayName"], d, config)); err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("period", flattenMonitoringUptimeCheckConfigPeriod(res["period"], d, config)); err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("timeout", flattenMonitoringUptimeCheckConfigTimeout(res["timeout"], d, config)); err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("content_matchers", flattenMonitoringUptimeCheckConfigContentMatchers(res["contentMatchers"], d, config)); err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("selected_regions", flattenMonitoringUptimeCheckConfigSelectedRegions(res["selectedRegions"], d, config)); err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("http_check", flattenMonitoringUptimeCheckConfigHttpCheck(res["httpCheck"], d, config)); err != nil { - return 
resource_monitoring_uptime_check_config_fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("tcp_check", flattenMonitoringUptimeCheckConfigTcpCheck(res["tcpCheck"], d, config)); err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("resource_group", flattenMonitoringUptimeCheckConfigResourceGroup(res["resourceGroup"], d, config)); err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - if err := d.Set("monitored_resource", flattenMonitoringUptimeCheckConfigMonitoredResource(res["monitoredResource"], d, config)); err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error reading UptimeCheckConfig: %s", err) - } - - return nil -} - -func resourceMonitoringUptimeCheckConfigUpdate(d *resource_monitoring_uptime_check_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error fetching project for UptimeCheckConfig: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandMonitoringUptimeCheckConfigDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_monitoring_uptime_check_config_reflect.ValueOf(v)) && (ok || !resource_monitoring_uptime_check_config_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - timeoutProp, err := expandMonitoringUptimeCheckConfigTimeout(d.Get("timeout"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout"); 
!isEmptyValue(resource_monitoring_uptime_check_config_reflect.ValueOf(v)) && (ok || !resource_monitoring_uptime_check_config_reflect.DeepEqual(v, timeoutProp)) { - obj["timeout"] = timeoutProp - } - contentMatchersProp, err := expandMonitoringUptimeCheckConfigContentMatchers(d.Get("content_matchers"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("content_matchers"); !isEmptyValue(resource_monitoring_uptime_check_config_reflect.ValueOf(v)) && (ok || !resource_monitoring_uptime_check_config_reflect.DeepEqual(v, contentMatchersProp)) { - obj["contentMatchers"] = contentMatchersProp - } - selectedRegionsProp, err := expandMonitoringUptimeCheckConfigSelectedRegions(d.Get("selected_regions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("selected_regions"); !isEmptyValue(resource_monitoring_uptime_check_config_reflect.ValueOf(v)) && (ok || !resource_monitoring_uptime_check_config_reflect.DeepEqual(v, selectedRegionsProp)) { - obj["selectedRegions"] = selectedRegionsProp - } - httpCheckProp, err := expandMonitoringUptimeCheckConfigHttpCheck(d.Get("http_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("http_check"); !isEmptyValue(resource_monitoring_uptime_check_config_reflect.ValueOf(v)) && (ok || !resource_monitoring_uptime_check_config_reflect.DeepEqual(v, httpCheckProp)) { - obj["httpCheck"] = httpCheckProp - } - tcpCheckProp, err := expandMonitoringUptimeCheckConfigTcpCheck(d.Get("tcp_check"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tcp_check"); !isEmptyValue(resource_monitoring_uptime_check_config_reflect.ValueOf(v)) && (ok || !resource_monitoring_uptime_check_config_reflect.DeepEqual(v, tcpCheckProp)) { - obj["tcpCheck"] = tcpCheckProp - } - - lockName, err := replaceVars(d, config, "stackdriver/groups/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := 
replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - resource_monitoring_uptime_check_config_log.Printf("[DEBUG] Updating UptimeCheckConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("timeout") { - updateMask = append(updateMask, "timeout") - } - - if d.HasChange("content_matchers") { - updateMask = append(updateMask, "contentMatchers") - } - - if d.HasChange("selected_regions") { - updateMask = append(updateMask, "selectedRegions") - } - - if d.HasChange("http_check") { - updateMask = append(updateMask, "httpCheck") - } - - if d.HasChange("tcp_check") { - updateMask = append(updateMask, "tcpCheck") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_monitoring_uptime_check_config_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_uptime_check_config_schema.TimeoutUpdate), isMonitoringConcurrentEditError) - - if err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error updating UptimeCheckConfig %q: %s", d.Id(), err) - } else { - resource_monitoring_uptime_check_config_log.Printf("[DEBUG] Finished updating UptimeCheckConfig %q: %#v", d.Id(), res) - } - - return resourceMonitoringUptimeCheckConfigRead(d, meta) -} - -func resourceMonitoringUptimeCheckConfigDelete(d *resource_monitoring_uptime_check_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_monitoring_uptime_check_config_fmt.Errorf("Error fetching 
project for UptimeCheckConfig: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "stackdriver/groups/{{project}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{MonitoringBasePath}}v3/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_monitoring_uptime_check_config_log.Printf("[DEBUG] Deleting UptimeCheckConfig %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_monitoring_uptime_check_config_schema.TimeoutDelete), isMonitoringConcurrentEditError) - if err != nil { - return handleNotFoundError(err, d, "UptimeCheckConfig") - } - - resource_monitoring_uptime_check_config_log.Printf("[DEBUG] Finished deleting UptimeCheckConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceMonitoringUptimeCheckConfigImport(d *resource_monitoring_uptime_check_config_schema.ResourceData, meta interface{}) ([]*resource_monitoring_uptime_check_config_schema.ResourceData, error) { - - config := meta.(*Config) - - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*resource_monitoring_uptime_check_config_schema.ResourceData{d}, nil -} - -func flattenMonitoringUptimeCheckConfigName(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigUptimeCheckId(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - parts := resource_monitoring_uptime_check_config_strings.Split(d.Get("name").(string), "/") - return parts[len(parts)-1] -} - -func flattenMonitoringUptimeCheckConfigDisplayName(v interface{}, d 
*resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigPeriod(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigTimeout(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigContentMatchers(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "content": flattenMonitoringUptimeCheckConfigContentMatchersContent(original["content"], d, config), - "matcher": flattenMonitoringUptimeCheckConfigContentMatchersMatcher(original["matcher"], d, config), - }) - } - return transformed -} - -func flattenMonitoringUptimeCheckConfigContentMatchersContent(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigContentMatchersMatcher(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigSelectedRegions(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheck(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["request_method"] = - flattenMonitoringUptimeCheckConfigHttpCheckRequestMethod(original["requestMethod"], d, config) - transformed["content_type"] = - flattenMonitoringUptimeCheckConfigHttpCheckContentType(original["contentType"], d, config) - transformed["auth_info"] = - flattenMonitoringUptimeCheckConfigHttpCheckAuthInfo(original["authInfo"], d, config) - transformed["port"] = - flattenMonitoringUptimeCheckConfigHttpCheckPort(original["port"], d, config) - transformed["headers"] = - flattenMonitoringUptimeCheckConfigHttpCheckHeaders(original["headers"], d, config) - transformed["path"] = - flattenMonitoringUptimeCheckConfigHttpCheckPath(original["path"], d, config) - transformed["use_ssl"] = - flattenMonitoringUptimeCheckConfigHttpCheckUseSsl(original["useSsl"], d, config) - transformed["validate_ssl"] = - flattenMonitoringUptimeCheckConfigHttpCheckValidateSsl(original["validateSsl"], d, config) - transformed["mask_headers"] = - flattenMonitoringUptimeCheckConfigHttpCheckMaskHeaders(original["maskHeaders"], d, config) - transformed["body"] = - flattenMonitoringUptimeCheckConfigHttpCheckBody(original["body"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringUptimeCheckConfigHttpCheckRequestMethod(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckContentType(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckAuthInfo(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["password"] = - 
flattenMonitoringUptimeCheckConfigHttpCheckAuthInfoPassword(original["password"], d, config) - transformed["username"] = - flattenMonitoringUptimeCheckConfigHttpCheckAuthInfoUsername(original["username"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringUptimeCheckConfigHttpCheckAuthInfoPassword(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return d.Get("http_check.0.auth_info.0.password") -} - -func flattenMonitoringUptimeCheckConfigHttpCheckAuthInfoUsername(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckPort(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_monitoring_uptime_check_config_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckHeaders(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckPath(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckUseSsl(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckValidateSsl(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckMaskHeaders(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigHttpCheckBody(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigTcpCheck(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["port"] = - flattenMonitoringUptimeCheckConfigTcpCheckPort(original["port"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringUptimeCheckConfigTcpCheckPort(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_monitoring_uptime_check_config_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenMonitoringUptimeCheckConfigResourceGroup(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["resource_type"] = - flattenMonitoringUptimeCheckConfigResourceGroupResourceType(original["resourceType"], d, config) - transformed["group_id"] = - flattenMonitoringUptimeCheckConfigResourceGroupGroupId(original["groupId"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringUptimeCheckConfigResourceGroupResourceType(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigResourceGroupGroupId(v 
interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - project := d.Get("project").(string) - return resource_monitoring_uptime_check_config_fmt.Sprintf("projects/%s/groups/%s", project, v) -} - -func flattenMonitoringUptimeCheckConfigMonitoredResource(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["type"] = - flattenMonitoringUptimeCheckConfigMonitoredResourceType(original["type"], d, config) - transformed["labels"] = - flattenMonitoringUptimeCheckConfigMonitoredResourceLabels(original["labels"], d, config) - return []interface{}{transformed} -} - -func flattenMonitoringUptimeCheckConfigMonitoredResourceType(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenMonitoringUptimeCheckConfigMonitoredResourceLabels(v interface{}, d *resource_monitoring_uptime_check_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandMonitoringUptimeCheckConfigDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigContentMatchers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedContent, err := expandMonitoringUptimeCheckConfigContentMatchersContent(original["content"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedContent); val.IsValid() && !isEmptyValue(val) { - transformed["content"] = transformedContent - } - - transformedMatcher, err := expandMonitoringUptimeCheckConfigContentMatchersMatcher(original["matcher"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedMatcher); val.IsValid() && !isEmptyValue(val) { - transformed["matcher"] = transformedMatcher - } - - req = append(req, transformed) - } - return req, nil -} - -func expandMonitoringUptimeCheckConfigContentMatchersContent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigContentMatchersMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigSelectedRegions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequestMethod, err := expandMonitoringUptimeCheckConfigHttpCheckRequestMethod(original["request_method"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedRequestMethod); val.IsValid() && !isEmptyValue(val) { - transformed["requestMethod"] = transformedRequestMethod - } - - transformedContentType, err := 
expandMonitoringUptimeCheckConfigHttpCheckContentType(original["content_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedContentType); val.IsValid() && !isEmptyValue(val) { - transformed["contentType"] = transformedContentType - } - - transformedAuthInfo, err := expandMonitoringUptimeCheckConfigHttpCheckAuthInfo(original["auth_info"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedAuthInfo); val.IsValid() && !isEmptyValue(val) { - transformed["authInfo"] = transformedAuthInfo - } - - transformedPort, err := expandMonitoringUptimeCheckConfigHttpCheckPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedHeaders, err := expandMonitoringUptimeCheckConfigHttpCheckHeaders(original["headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["headers"] = transformedHeaders - } - - transformedPath, err := expandMonitoringUptimeCheckConfigHttpCheckPath(original["path"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { - transformed["path"] = transformedPath - } - - transformedUseSsl, err := expandMonitoringUptimeCheckConfigHttpCheckUseSsl(original["use_ssl"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedUseSsl); val.IsValid() && !isEmptyValue(val) { - transformed["useSsl"] = transformedUseSsl - } - - transformedValidateSsl, err := 
expandMonitoringUptimeCheckConfigHttpCheckValidateSsl(original["validate_ssl"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedValidateSsl); val.IsValid() && !isEmptyValue(val) { - transformed["validateSsl"] = transformedValidateSsl - } - - transformedMaskHeaders, err := expandMonitoringUptimeCheckConfigHttpCheckMaskHeaders(original["mask_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedMaskHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["maskHeaders"] = transformedMaskHeaders - } - - transformedBody, err := expandMonitoringUptimeCheckConfigHttpCheckBody(original["body"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedBody); val.IsValid() && !isEmptyValue(val) { - transformed["body"] = transformedBody - } - - return transformed, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckRequestMethod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckContentType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckAuthInfo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPassword, err := expandMonitoringUptimeCheckConfigHttpCheckAuthInfoPassword(original["password"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedPassword); val.IsValid() && !isEmptyValue(val) { - transformed["password"] = 
transformedPassword - } - - transformedUsername, err := expandMonitoringUptimeCheckConfigHttpCheckAuthInfoUsername(original["username"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedUsername); val.IsValid() && !isEmptyValue(val) { - transformed["username"] = transformedUsername - } - - return transformed, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckAuthInfoPassword(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckAuthInfoUsername(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckHeaders(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckUseSsl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckValidateSsl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckMaskHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigHttpCheckBody(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandMonitoringUptimeCheckConfigTcpCheck(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPort, err := expandMonitoringUptimeCheckConfigTcpCheckPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - return transformed, nil -} - -func expandMonitoringUptimeCheckConfigTcpCheckPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigResourceGroup(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedResourceType, err := expandMonitoringUptimeCheckConfigResourceGroupResourceType(original["resource_type"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedResourceType); val.IsValid() && !isEmptyValue(val) { - transformed["resourceType"] = transformedResourceType - } - - transformedGroupId, err := expandMonitoringUptimeCheckConfigResourceGroupGroupId(original["group_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedGroupId); val.IsValid() && !isEmptyValue(val) { - transformed["groupId"] = transformedGroupId - } - - return transformed, nil -} - -func expandMonitoringUptimeCheckConfigResourceGroupResourceType(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigResourceGroupGroupId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return GetResourceNameFromSelfLink(v.(string)), nil -} - -func expandMonitoringUptimeCheckConfigMonitoredResource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedType, err := expandMonitoringUptimeCheckConfigMonitoredResourceType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - transformedLabels, err := expandMonitoringUptimeCheckConfigMonitoredResourceLabels(original["labels"], d, config) - if err != nil { - return nil, err - } else if val := resource_monitoring_uptime_check_config_reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - return transformed, nil -} - -func expandMonitoringUptimeCheckConfigMonitoredResourceType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandMonitoringUptimeCheckConfigMonitoredResourceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceNetworkManagementConnectivityTest() *resource_network_management_connectivity_test_resource_schema.Resource { - return &resource_network_management_connectivity_test_resource_schema.Resource{ - Create: resourceNetworkManagementConnectivityTestCreate, - 
Read: resourceNetworkManagementConnectivityTestRead, - Update: resourceNetworkManagementConnectivityTestUpdate, - Delete: resourceNetworkManagementConnectivityTestDelete, - - Importer: &resource_network_management_connectivity_test_resource_schema.ResourceImporter{ - State: resourceNetworkManagementConnectivityTestImport, - }, - - Timeouts: &resource_network_management_connectivity_test_resource_schema.ResourceTimeout{ - Create: resource_network_management_connectivity_test_resource_schema.DefaultTimeout(4 * resource_network_management_connectivity_test_resource_time.Minute), - Update: resource_network_management_connectivity_test_resource_schema.DefaultTimeout(4 * resource_network_management_connectivity_test_resource_time.Minute), - Delete: resource_network_management_connectivity_test_resource_schema.DefaultTimeout(4 * resource_network_management_connectivity_test_resource_time.Minute), - }, - - Schema: map[string]*resource_network_management_connectivity_test_resource_schema.Schema{ - "destination": { - Type: resource_network_management_connectivity_test_resource_schema.TypeList, - Required: true, - Description: `Required. Destination specification of the Connectivity Test. - -You can use a combination of destination IP address, Compute -Engine VM instance, or VPC network to uniquely identify the -destination location. - -Even if the destination IP address is not unique, the source IP -location is unique. Usually, the analysis can infer the destination -endpoint from route information. - -If the destination you specify is a VM instance and the instance has -multiple network interfaces, then you must also specify either a -destination IP address or VPC network to identify the destination -interface. - -A reachability analysis proceeds even if the destination location -is ambiguous. 
However, the result can include endpoints that you -don't intend to test.`, - MaxItems: 1, - Elem: &resource_network_management_connectivity_test_resource_schema.Resource{ - Schema: map[string]*resource_network_management_connectivity_test_resource_schema.Schema{ - "instance": { - Type: resource_network_management_connectivity_test_resource_schema.TypeString, - Optional: true, - Description: `A Compute Engine instance URI.`, - }, - "ip_address": { - Type: resource_network_management_connectivity_test_resource_schema.TypeString, - Optional: true, - Description: `The IP address of the endpoint, which can be an external or -internal IP. An IPv6 address is only allowed when the test's -destination is a global load balancer VIP.`, - }, - "network": { - Type: resource_network_management_connectivity_test_resource_schema.TypeString, - Optional: true, - Description: `A Compute Engine network URI.`, - }, - "port": { - Type: resource_network_management_connectivity_test_resource_schema.TypeInt, - Optional: true, - Description: `The IP protocol port of the endpoint. Only applicable when -protocol is TCP or UDP.`, - }, - "project_id": { - Type: resource_network_management_connectivity_test_resource_schema.TypeString, - Optional: true, - Description: `Project ID where the endpoint is located. The Project ID can be -derived from the URI if you provide a VM instance or network URI. -The following are two cases where you must provide the project ID: -1. Only the IP address is specified, and the IP address is within -a GCP project. 2. When you are using Shared VPC and the IP address -that you provide is from the service project. 
In this case, the -network that the IP address resides in is defined in the host -project.`, - }, - }, - }, - }, - "name": { - Type: resource_network_management_connectivity_test_resource_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Unique name for the connectivity test.`, - }, - "source": { - Type: resource_network_management_connectivity_test_resource_schema.TypeList, - Required: true, - Description: `Required. Source specification of the Connectivity Test. - -You can use a combination of source IP address, virtual machine -(VM) instance, or Compute Engine network to uniquely identify the -source location. - -Examples: If the source IP address is an internal IP address within -a Google Cloud Virtual Private Cloud (VPC) network, then you must -also specify the VPC network. Otherwise, specify the VM instance, -which already contains its internal IP address and VPC network -information. - -If the source of the test is within an on-premises network, then -you must provide the destination VPC network. - -If the source endpoint is a Compute Engine VM instance with multiple -network interfaces, the instance itself is not sufficient to -identify the endpoint. So, you must also specify the source IP -address or VPC network. - -A reachability analysis proceeds even if the source location is -ambiguous. However, the test result may include endpoints that -you don't intend to test.`, - MaxItems: 1, - Elem: &resource_network_management_connectivity_test_resource_schema.Resource{ - Schema: map[string]*resource_network_management_connectivity_test_resource_schema.Schema{ - "instance": { - Type: resource_network_management_connectivity_test_resource_schema.TypeString, - Optional: true, - Description: `A Compute Engine instance URI.`, - }, - "ip_address": { - Type: resource_network_management_connectivity_test_resource_schema.TypeString, - Optional: true, - Description: `The IP address of the endpoint, which can be an external or -internal IP. 
An IPv6 address is only allowed when the test's -destination is a global load balancer VIP.`, - }, - "network": { - Type: resource_network_management_connectivity_test_resource_schema.TypeString, - Optional: true, - Description: `A Compute Engine network URI.`, - }, - "network_type": { - Type: resource_network_management_connectivity_test_resource_schema.TypeString, - Optional: true, - ValidateFunc: resource_network_management_connectivity_test_resource_validation.StringInSlice([]string{"GCP_NETWORK", "NON_GCP_NETWORK", ""}, false), - Description: `Type of the network where the endpoint is located. Possible values: ["GCP_NETWORK", "NON_GCP_NETWORK"]`, - }, - "port": { - Type: resource_network_management_connectivity_test_resource_schema.TypeInt, - Optional: true, - Description: `The IP protocol port of the endpoint. Only applicable when -protocol is TCP or UDP.`, - }, - "project_id": { - Type: resource_network_management_connectivity_test_resource_schema.TypeString, - Optional: true, - Description: `Project ID where the endpoint is located. The Project ID can be -derived from the URI if you provide a VM instance or network URI. -The following are two cases where you must provide the project ID: - -1. Only the IP address is specified, and the IP address is - within a GCP project. -2. When you are using Shared VPC and the IP address - that you provide is from the service project. In this case, - the network that the IP address resides in is defined in the - host project.`, - }, - }, - }, - }, - "description": { - Type: resource_network_management_connectivity_test_resource_schema.TypeString, - Optional: true, - Description: `The user-supplied description of the Connectivity Test. 
-Maximum of 512 characters.`, - }, - "labels": { - Type: resource_network_management_connectivity_test_resource_schema.TypeMap, - Optional: true, - Description: `Resource labels to represent user-provided metadata.`, - Elem: &resource_network_management_connectivity_test_resource_schema.Schema{Type: resource_network_management_connectivity_test_resource_schema.TypeString}, - }, - "protocol": { - Type: resource_network_management_connectivity_test_resource_schema.TypeString, - Optional: true, - Description: `IP Protocol of the test. When not provided, "TCP" is assumed.`, - Default: "TCP", - }, - "related_projects": { - Type: resource_network_management_connectivity_test_resource_schema.TypeList, - Optional: true, - Description: `Other projects that may be relevant for reachability analysis. -This is applicable to scenarios where a test can cross project -boundaries.`, - Elem: &resource_network_management_connectivity_test_resource_schema.Schema{ - Type: resource_network_management_connectivity_test_resource_schema.TypeString, - }, - }, - "project": { - Type: resource_network_management_connectivity_test_resource_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceNetworkManagementConnectivityTestCreate(d *resource_network_management_connectivity_test_resource_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNetworkManagementConnectivityTestName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_network_management_connectivity_test_resource_reflect.ValueOf(nameProp)) && (ok || !resource_network_management_connectivity_test_resource_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := 
expandNetworkManagementConnectivityTestDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_network_management_connectivity_test_resource_reflect.ValueOf(descriptionProp)) && (ok || !resource_network_management_connectivity_test_resource_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - sourceProp, err := expandNetworkManagementConnectivityTestSource(d.Get("source"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source"); !isEmptyValue(resource_network_management_connectivity_test_resource_reflect.ValueOf(sourceProp)) && (ok || !resource_network_management_connectivity_test_resource_reflect.DeepEqual(v, sourceProp)) { - obj["source"] = sourceProp - } - destinationProp, err := expandNetworkManagementConnectivityTestDestination(d.Get("destination"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("destination"); !isEmptyValue(resource_network_management_connectivity_test_resource_reflect.ValueOf(destinationProp)) && (ok || !resource_network_management_connectivity_test_resource_reflect.DeepEqual(v, destinationProp)) { - obj["destination"] = destinationProp - } - protocolProp, err := expandNetworkManagementConnectivityTestProtocol(d.Get("protocol"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("protocol"); !isEmptyValue(resource_network_management_connectivity_test_resource_reflect.ValueOf(protocolProp)) && (ok || !resource_network_management_connectivity_test_resource_reflect.DeepEqual(v, protocolProp)) { - obj["protocol"] = protocolProp - } - relatedProjectsProp, err := expandNetworkManagementConnectivityTestRelatedProjects(d.Get("related_projects"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("related_projects"); 
!isEmptyValue(resource_network_management_connectivity_test_resource_reflect.ValueOf(relatedProjectsProp)) && (ok || !resource_network_management_connectivity_test_resource_reflect.DeepEqual(v, relatedProjectsProp)) { - obj["relatedProjects"] = relatedProjectsProp - } - labelsProp, err := expandNetworkManagementConnectivityTestLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_network_management_connectivity_test_resource_reflect.ValueOf(labelsProp)) && (ok || !resource_network_management_connectivity_test_resource_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{NetworkManagementBasePath}}projects/{{project}}/locations/global/connectivityTests?testId={{name}}") - if err != nil { - return err - } - - resource_network_management_connectivity_test_resource_log.Printf("[DEBUG] Creating new ConnectivityTest: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_network_management_connectivity_test_resource_fmt.Errorf("Error fetching project for ConnectivityTest: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_network_management_connectivity_test_resource_schema.TimeoutCreate)) - if err != nil { - return resource_network_management_connectivity_test_resource_fmt.Errorf("Error creating ConnectivityTest: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/connectivityTests/{{name}}") - if err != nil { - return resource_network_management_connectivity_test_resource_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = networkManagementOperationWaitTimeWithResponse( - config, res, &opRes, 
project, "Creating ConnectivityTest", userAgent, - d.Timeout(resource_network_management_connectivity_test_resource_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_network_management_connectivity_test_resource_fmt.Errorf("Error waiting to create ConnectivityTest: %s", err) - } - - if err := d.Set("name", flattenNetworkManagementConnectivityTestName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "projects/{{project}}/locations/global/connectivityTests/{{name}}") - if err != nil { - return resource_network_management_connectivity_test_resource_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_network_management_connectivity_test_resource_log.Printf("[DEBUG] Finished creating ConnectivityTest %q: %#v", d.Id(), res) - - return resourceNetworkManagementConnectivityTestRead(d, meta) -} - -func resourceNetworkManagementConnectivityTestRead(d *resource_network_management_connectivity_test_resource_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{NetworkManagementBasePath}}projects/{{project}}/locations/global/connectivityTests/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_network_management_connectivity_test_resource_fmt.Errorf("Error fetching project for ConnectivityTest: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_network_management_connectivity_test_resource_fmt.Sprintf("NetworkManagementConnectivityTest %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - 
return resource_network_management_connectivity_test_resource_fmt.Errorf("Error reading ConnectivityTest: %s", err) - } - - if err := d.Set("name", flattenNetworkManagementConnectivityTestName(res["name"], d, config)); err != nil { - return resource_network_management_connectivity_test_resource_fmt.Errorf("Error reading ConnectivityTest: %s", err) - } - if err := d.Set("description", flattenNetworkManagementConnectivityTestDescription(res["description"], d, config)); err != nil { - return resource_network_management_connectivity_test_resource_fmt.Errorf("Error reading ConnectivityTest: %s", err) - } - if err := d.Set("source", flattenNetworkManagementConnectivityTestSource(res["source"], d, config)); err != nil { - return resource_network_management_connectivity_test_resource_fmt.Errorf("Error reading ConnectivityTest: %s", err) - } - if err := d.Set("destination", flattenNetworkManagementConnectivityTestDestination(res["destination"], d, config)); err != nil { - return resource_network_management_connectivity_test_resource_fmt.Errorf("Error reading ConnectivityTest: %s", err) - } - if err := d.Set("protocol", flattenNetworkManagementConnectivityTestProtocol(res["protocol"], d, config)); err != nil { - return resource_network_management_connectivity_test_resource_fmt.Errorf("Error reading ConnectivityTest: %s", err) - } - if err := d.Set("related_projects", flattenNetworkManagementConnectivityTestRelatedProjects(res["relatedProjects"], d, config)); err != nil { - return resource_network_management_connectivity_test_resource_fmt.Errorf("Error reading ConnectivityTest: %s", err) - } - if err := d.Set("labels", flattenNetworkManagementConnectivityTestLabels(res["labels"], d, config)); err != nil { - return resource_network_management_connectivity_test_resource_fmt.Errorf("Error reading ConnectivityTest: %s", err) - } - - return nil -} - -func resourceNetworkManagementConnectivityTestUpdate(d *resource_network_management_connectivity_test_resource_schema.ResourceData, 
meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_network_management_connectivity_test_resource_fmt.Errorf("Error fetching project for ConnectivityTest: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandNetworkManagementConnectivityTestDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_network_management_connectivity_test_resource_reflect.ValueOf(v)) && (ok || !resource_network_management_connectivity_test_resource_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - sourceProp, err := expandNetworkManagementConnectivityTestSource(d.Get("source"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source"); !isEmptyValue(resource_network_management_connectivity_test_resource_reflect.ValueOf(v)) && (ok || !resource_network_management_connectivity_test_resource_reflect.DeepEqual(v, sourceProp)) { - obj["source"] = sourceProp - } - destinationProp, err := expandNetworkManagementConnectivityTestDestination(d.Get("destination"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("destination"); !isEmptyValue(resource_network_management_connectivity_test_resource_reflect.ValueOf(v)) && (ok || !resource_network_management_connectivity_test_resource_reflect.DeepEqual(v, destinationProp)) { - obj["destination"] = destinationProp - } - protocolProp, err := expandNetworkManagementConnectivityTestProtocol(d.Get("protocol"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("protocol"); !isEmptyValue(resource_network_management_connectivity_test_resource_reflect.ValueOf(v)) && (ok || 
!resource_network_management_connectivity_test_resource_reflect.DeepEqual(v, protocolProp)) { - obj["protocol"] = protocolProp - } - relatedProjectsProp, err := expandNetworkManagementConnectivityTestRelatedProjects(d.Get("related_projects"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("related_projects"); !isEmptyValue(resource_network_management_connectivity_test_resource_reflect.ValueOf(v)) && (ok || !resource_network_management_connectivity_test_resource_reflect.DeepEqual(v, relatedProjectsProp)) { - obj["relatedProjects"] = relatedProjectsProp - } - labelsProp, err := expandNetworkManagementConnectivityTestLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_network_management_connectivity_test_resource_reflect.ValueOf(v)) && (ok || !resource_network_management_connectivity_test_resource_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{NetworkManagementBasePath}}projects/{{project}}/locations/global/connectivityTests/{{name}}") - if err != nil { - return err - } - - resource_network_management_connectivity_test_resource_log.Printf("[DEBUG] Updating ConnectivityTest %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("source") { - updateMask = append(updateMask, "source.ipAddress", - "source.port", - "source.instance", - "source.network", - "source.networkType", - "source.projectId") - } - - if d.HasChange("destination") { - updateMask = append(updateMask, "destination.ipAddress", - "destination.port", - "destination.instance", - "destination.network", - "destination.projectId") - } - - if d.HasChange("protocol") { - updateMask = append(updateMask, "protocol") - } - - if d.HasChange("related_projects") { - updateMask = append(updateMask, "relatedProjects") - } - - if 
d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_network_management_connectivity_test_resource_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_network_management_connectivity_test_resource_schema.TimeoutUpdate)) - - if err != nil { - return resource_network_management_connectivity_test_resource_fmt.Errorf("Error updating ConnectivityTest %q: %s", d.Id(), err) - } else { - resource_network_management_connectivity_test_resource_log.Printf("[DEBUG] Finished updating ConnectivityTest %q: %#v", d.Id(), res) - } - - err = networkManagementOperationWaitTime( - config, res, project, "Updating ConnectivityTest", userAgent, - d.Timeout(resource_network_management_connectivity_test_resource_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceNetworkManagementConnectivityTestRead(d, meta) -} - -func resourceNetworkManagementConnectivityTestDelete(d *resource_network_management_connectivity_test_resource_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_network_management_connectivity_test_resource_fmt.Errorf("Error fetching project for ConnectivityTest: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{NetworkManagementBasePath}}projects/{{project}}/locations/global/connectivityTests/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_network_management_connectivity_test_resource_log.Printf("[DEBUG] Deleting ConnectivityTest %q", d.Id()) - - if 
bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_network_management_connectivity_test_resource_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ConnectivityTest") - } - - err = networkManagementOperationWaitTime( - config, res, project, "Deleting ConnectivityTest", userAgent, - d.Timeout(resource_network_management_connectivity_test_resource_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_network_management_connectivity_test_resource_log.Printf("[DEBUG] Finished deleting ConnectivityTest %q: %#v", d.Id(), res) - return nil -} - -func resourceNetworkManagementConnectivityTestImport(d *resource_network_management_connectivity_test_resource_schema.ResourceData, meta interface{}) ([]*resource_network_management_connectivity_test_resource_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/connectivityTests/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/connectivityTests/{{name}}") - if err != nil { - return nil, resource_network_management_connectivity_test_resource_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_network_management_connectivity_test_resource_schema.ResourceData{d}, nil -} - -func flattenNetworkManagementConnectivityTestName(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenNetworkManagementConnectivityTestDescription(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - return v -} 
- -func flattenNetworkManagementConnectivityTestSource(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["ip_address"] = - flattenNetworkManagementConnectivityTestSourceIpAddress(original["ipAddress"], d, config) - transformed["port"] = - flattenNetworkManagementConnectivityTestSourcePort(original["port"], d, config) - transformed["instance"] = - flattenNetworkManagementConnectivityTestSourceInstance(original["instance"], d, config) - transformed["network"] = - flattenNetworkManagementConnectivityTestSourceNetwork(original["network"], d, config) - transformed["network_type"] = - flattenNetworkManagementConnectivityTestSourceNetworkType(original["networkType"], d, config) - transformed["project_id"] = - flattenNetworkManagementConnectivityTestSourceProjectId(original["projectId"], d, config) - return []interface{}{transformed} -} - -func flattenNetworkManagementConnectivityTestSourceIpAddress(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestSourcePort(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_network_management_connectivity_test_resource_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenNetworkManagementConnectivityTestSourceInstance(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenNetworkManagementConnectivityTestSourceNetwork(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestSourceNetworkType(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestSourceProjectId(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestDestination(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["ip_address"] = - flattenNetworkManagementConnectivityTestDestinationIpAddress(original["ipAddress"], d, config) - transformed["port"] = - flattenNetworkManagementConnectivityTestDestinationPort(original["port"], d, config) - transformed["instance"] = - flattenNetworkManagementConnectivityTestDestinationInstance(original["instance"], d, config) - transformed["network"] = - flattenNetworkManagementConnectivityTestDestinationNetwork(original["network"], d, config) - transformed["project_id"] = - flattenNetworkManagementConnectivityTestDestinationProjectId(original["projectId"], d, config) - return []interface{}{transformed} -} - -func flattenNetworkManagementConnectivityTestDestinationIpAddress(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestDestinationPort(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - - if 
strVal, ok := v.(string); ok { - if intVal, err := resource_network_management_connectivity_test_resource_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenNetworkManagementConnectivityTestDestinationInstance(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestDestinationNetwork(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestDestinationProjectId(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestProtocol(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestRelatedProjects(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkManagementConnectivityTestLabels(v interface{}, d *resource_network_management_connectivity_test_resource_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNetworkManagementConnectivityTestName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - - f, err := parseGlobalFieldValue("tests", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_network_management_connectivity_test_resource_fmt.Errorf("Invalid value for zone: %s", err) - } - return f.RelativeLink(), nil -} - -func expandNetworkManagementConnectivityTestDescription(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIpAddress, err := expandNetworkManagementConnectivityTestSourceIpAddress(original["ip_address"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_management_connectivity_test_resource_reflect.ValueOf(transformedIpAddress); val.IsValid() && !isEmptyValue(val) { - transformed["ipAddress"] = transformedIpAddress - } - - transformedPort, err := expandNetworkManagementConnectivityTestSourcePort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_management_connectivity_test_resource_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedInstance, err := expandNetworkManagementConnectivityTestSourceInstance(original["instance"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_management_connectivity_test_resource_reflect.ValueOf(transformedInstance); val.IsValid() && !isEmptyValue(val) { - transformed["instance"] = transformedInstance - } - - transformedNetwork, err := expandNetworkManagementConnectivityTestSourceNetwork(original["network"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_management_connectivity_test_resource_reflect.ValueOf(transformedNetwork); val.IsValid() && !isEmptyValue(val) { - transformed["network"] = transformedNetwork - } - - transformedNetworkType, err := expandNetworkManagementConnectivityTestSourceNetworkType(original["network_type"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_network_management_connectivity_test_resource_reflect.ValueOf(transformedNetworkType); val.IsValid() && !isEmptyValue(val) { - transformed["networkType"] = transformedNetworkType - } - - transformedProjectId, err := expandNetworkManagementConnectivityTestSourceProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_management_connectivity_test_resource_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - return transformed, nil -} - -func expandNetworkManagementConnectivityTestSourceIpAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestSourcePort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestSourceInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestSourceNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestSourceNetworkType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestSourceProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestDestination(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIpAddress, err := expandNetworkManagementConnectivityTestDestinationIpAddress(original["ip_address"], d, config) - 
if err != nil { - return nil, err - } else if val := resource_network_management_connectivity_test_resource_reflect.ValueOf(transformedIpAddress); val.IsValid() && !isEmptyValue(val) { - transformed["ipAddress"] = transformedIpAddress - } - - transformedPort, err := expandNetworkManagementConnectivityTestDestinationPort(original["port"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_management_connectivity_test_resource_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - transformedInstance, err := expandNetworkManagementConnectivityTestDestinationInstance(original["instance"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_management_connectivity_test_resource_reflect.ValueOf(transformedInstance); val.IsValid() && !isEmptyValue(val) { - transformed["instance"] = transformedInstance - } - - transformedNetwork, err := expandNetworkManagementConnectivityTestDestinationNetwork(original["network"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_management_connectivity_test_resource_reflect.ValueOf(transformedNetwork); val.IsValid() && !isEmptyValue(val) { - transformed["network"] = transformedNetwork - } - - transformedProjectId, err := expandNetworkManagementConnectivityTestDestinationProjectId(original["project_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_management_connectivity_test_resource_reflect.ValueOf(transformedProjectId); val.IsValid() && !isEmptyValue(val) { - transformed["projectId"] = transformedProjectId - } - - return transformed, nil -} - -func expandNetworkManagementConnectivityTestDestinationIpAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestDestinationPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestDestinationInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestDestinationNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestDestinationProjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestRelatedProjects(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkManagementConnectivityTestLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceNetworkServicesEdgeCacheKeyset() *resource_network_services_edge_cache_keyset_schema.Resource { - return &resource_network_services_edge_cache_keyset_schema.Resource{ - Create: resourceNetworkServicesEdgeCacheKeysetCreate, - Read: resourceNetworkServicesEdgeCacheKeysetRead, - Update: resourceNetworkServicesEdgeCacheKeysetUpdate, - Delete: resourceNetworkServicesEdgeCacheKeysetDelete, - - Importer: &resource_network_services_edge_cache_keyset_schema.ResourceImporter{ - State: resourceNetworkServicesEdgeCacheKeysetImport, - }, - - Timeouts: &resource_network_services_edge_cache_keyset_schema.ResourceTimeout{ - Create: resource_network_services_edge_cache_keyset_schema.DefaultTimeout(30 * resource_network_services_edge_cache_keyset_time.Minute), - Update: 
resource_network_services_edge_cache_keyset_schema.DefaultTimeout(30 * resource_network_services_edge_cache_keyset_time.Minute), - Delete: resource_network_services_edge_cache_keyset_schema.DefaultTimeout(30 * resource_network_services_edge_cache_keyset_time.Minute), - }, - - Schema: map[string]*resource_network_services_edge_cache_keyset_schema.Schema{ - "name": { - Type: resource_network_services_edge_cache_keyset_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource; provided by the client when the resource is created. -The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, -and all following characters must be a dash, underscore, letter or digit.`, - }, - "public_key": { - Type: resource_network_services_edge_cache_keyset_schema.TypeList, - Required: true, - Description: `An ordered list of Ed25519 public keys to use for validating signed requests. -You must specify at least one (1) key, and may have up to three (3) keys. - -Ed25519 public keys are not secret, and only allow Google to validate a request was signed by your corresponding private key. -You should ensure that the private key is kept secret, and that only authorized users can add public keys to a keyset.`, - MinItems: 1, - MaxItems: 3, - Elem: &resource_network_services_edge_cache_keyset_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_keyset_schema.Schema{ - "id": { - Type: resource_network_services_edge_cache_keyset_schema.TypeString, - Required: true, - Description: `The ID of the public key. The ID must be 1-63 characters long, and comply with RFC1035. 
-The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* -which means the first character must be a letter, and all following characters must be a dash, underscore, letter or digit.`, - }, - "value": { - Type: resource_network_services_edge_cache_keyset_schema.TypeString, - Required: true, - Description: `The base64-encoded value of the Ed25519 public key. The base64 encoding can be padded (44 bytes) or unpadded (43 bytes). -Representations or encodings of the public key other than this will be rejected with an error.`, - Sensitive: true, - }, - }, - }, - }, - "description": { - Type: resource_network_services_edge_cache_keyset_schema.TypeString, - Optional: true, - Description: `A human-readable description of the resource.`, - }, - "labels": { - Type: resource_network_services_edge_cache_keyset_schema.TypeMap, - Optional: true, - Description: `Set of label tags associated with the EdgeCache resource.`, - Elem: &resource_network_services_edge_cache_keyset_schema.Schema{Type: resource_network_services_edge_cache_keyset_schema.TypeString}, - }, - "project": { - Type: resource_network_services_edge_cache_keyset_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceNetworkServicesEdgeCacheKeysetCreate(d *resource_network_services_edge_cache_keyset_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandNetworkServicesEdgeCacheKeysetDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_network_services_edge_cache_keyset_reflect.ValueOf(descriptionProp)) && (ok || !resource_network_services_edge_cache_keyset_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = 
descriptionProp - } - labelsProp, err := expandNetworkServicesEdgeCacheKeysetLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_network_services_edge_cache_keyset_reflect.ValueOf(labelsProp)) && (ok || !resource_network_services_edge_cache_keyset_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - publicKeysProp, err := expandNetworkServicesEdgeCacheKeysetPublicKey(d.Get("public_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("public_key"); !isEmptyValue(resource_network_services_edge_cache_keyset_reflect.ValueOf(publicKeysProp)) && (ok || !resource_network_services_edge_cache_keyset_reflect.DeepEqual(v, publicKeysProp)) { - obj["publicKeys"] = publicKeysProp - } - - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheKeysets?edgeCacheKeysetId={{name}}") - if err != nil { - return err - } - - resource_network_services_edge_cache_keyset_log.Printf("[DEBUG] Creating new EdgeCacheKeyset: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_network_services_edge_cache_keyset_fmt.Errorf("Error fetching project for EdgeCacheKeyset: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_network_services_edge_cache_keyset_schema.TimeoutCreate)) - if err != nil { - return resource_network_services_edge_cache_keyset_fmt.Errorf("Error creating EdgeCacheKeyset: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/edgeCacheKeysets/{{name}}") - if err != nil { - return resource_network_services_edge_cache_keyset_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = 
networkServicesOperationWaitTime( - config, res, project, "Creating EdgeCacheKeyset", userAgent, - d.Timeout(resource_network_services_edge_cache_keyset_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_network_services_edge_cache_keyset_fmt.Errorf("Error waiting to create EdgeCacheKeyset: %s", err) - } - - resource_network_services_edge_cache_keyset_log.Printf("[DEBUG] Finished creating EdgeCacheKeyset %q: %#v", d.Id(), res) - - return resourceNetworkServicesEdgeCacheKeysetRead(d, meta) -} - -func resourceNetworkServicesEdgeCacheKeysetRead(d *resource_network_services_edge_cache_keyset_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheKeysets/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_network_services_edge_cache_keyset_fmt.Errorf("Error fetching project for EdgeCacheKeyset: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_network_services_edge_cache_keyset_fmt.Sprintf("NetworkServicesEdgeCacheKeyset %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_network_services_edge_cache_keyset_fmt.Errorf("Error reading EdgeCacheKeyset: %s", err) - } - - if err := d.Set("description", flattenNetworkServicesEdgeCacheKeysetDescription(res["description"], d, config)); err != nil { - return resource_network_services_edge_cache_keyset_fmt.Errorf("Error reading EdgeCacheKeyset: %s", err) - } - if err := d.Set("labels", 
flattenNetworkServicesEdgeCacheKeysetLabels(res["labels"], d, config)); err != nil { - return resource_network_services_edge_cache_keyset_fmt.Errorf("Error reading EdgeCacheKeyset: %s", err) - } - if err := d.Set("public_key", flattenNetworkServicesEdgeCacheKeysetPublicKey(res["publicKeys"], d, config)); err != nil { - return resource_network_services_edge_cache_keyset_fmt.Errorf("Error reading EdgeCacheKeyset: %s", err) - } - - return nil -} - -func resourceNetworkServicesEdgeCacheKeysetUpdate(d *resource_network_services_edge_cache_keyset_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_network_services_edge_cache_keyset_fmt.Errorf("Error fetching project for EdgeCacheKeyset: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandNetworkServicesEdgeCacheKeysetDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_network_services_edge_cache_keyset_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_keyset_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandNetworkServicesEdgeCacheKeysetLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_network_services_edge_cache_keyset_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_keyset_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - publicKeysProp, err := expandNetworkServicesEdgeCacheKeysetPublicKey(d.Get("public_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("public_key"); 
!isEmptyValue(resource_network_services_edge_cache_keyset_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_keyset_reflect.DeepEqual(v, publicKeysProp)) { - obj["publicKeys"] = publicKeysProp - } - - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheKeysets/{{name}}") - if err != nil { - return err - } - - resource_network_services_edge_cache_keyset_log.Printf("[DEBUG] Updating EdgeCacheKeyset %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("public_key") { - updateMask = append(updateMask, "publicKeys") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_network_services_edge_cache_keyset_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_network_services_edge_cache_keyset_schema.TimeoutUpdate)) - - if err != nil { - return resource_network_services_edge_cache_keyset_fmt.Errorf("Error updating EdgeCacheKeyset %q: %s", d.Id(), err) - } else { - resource_network_services_edge_cache_keyset_log.Printf("[DEBUG] Finished updating EdgeCacheKeyset %q: %#v", d.Id(), res) - } - - err = networkServicesOperationWaitTime( - config, res, project, "Updating EdgeCacheKeyset", userAgent, - d.Timeout(resource_network_services_edge_cache_keyset_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceNetworkServicesEdgeCacheKeysetRead(d, meta) -} - -func resourceNetworkServicesEdgeCacheKeysetDelete(d *resource_network_services_edge_cache_keyset_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_network_services_edge_cache_keyset_fmt.Errorf("Error fetching project for EdgeCacheKeyset: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheKeysets/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_network_services_edge_cache_keyset_log.Printf("[DEBUG] Deleting EdgeCacheKeyset %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_network_services_edge_cache_keyset_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "EdgeCacheKeyset") - } - - err = networkServicesOperationWaitTime( - config, res, project, "Deleting EdgeCacheKeyset", userAgent, - d.Timeout(resource_network_services_edge_cache_keyset_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_network_services_edge_cache_keyset_log.Printf("[DEBUG] Finished deleting EdgeCacheKeyset %q: %#v", d.Id(), res) - return nil -} - -func resourceNetworkServicesEdgeCacheKeysetImport(d *resource_network_services_edge_cache_keyset_schema.ResourceData, meta interface{}) ([]*resource_network_services_edge_cache_keyset_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/edgeCacheKeysets/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/edgeCacheKeysets/{{name}}") - if err != nil { - return nil, resource_network_services_edge_cache_keyset_fmt.Errorf("Error constructing id: %s", err) - } - 
d.SetId(id) - - return []*resource_network_services_edge_cache_keyset_schema.ResourceData{d}, nil -} - -func flattenNetworkServicesEdgeCacheKeysetDescription(v interface{}, d *resource_network_services_edge_cache_keyset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheKeysetLabels(v interface{}, d *resource_network_services_edge_cache_keyset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheKeysetPublicKey(v interface{}, d *resource_network_services_edge_cache_keyset_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "id": flattenNetworkServicesEdgeCacheKeysetPublicKeyId(original["id"], d, config), - "value": flattenNetworkServicesEdgeCacheKeysetPublicKeyValue(original["value"], d, config), - }) - } - return transformed -} - -func flattenNetworkServicesEdgeCacheKeysetPublicKeyId(v interface{}, d *resource_network_services_edge_cache_keyset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheKeysetPublicKeyValue(v interface{}, d *resource_network_services_edge_cache_keyset_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNetworkServicesEdgeCacheKeysetDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheKeysetLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func 
expandNetworkServicesEdgeCacheKeysetPublicKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedId, err := expandNetworkServicesEdgeCacheKeysetPublicKeyId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_keyset_reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedValue, err := expandNetworkServicesEdgeCacheKeysetPublicKeyValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_keyset_reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - req = append(req, transformed) - } - return req, nil -} - -func expandNetworkServicesEdgeCacheKeysetPublicKeyId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheKeysetPublicKeyValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceNetworkServicesEdgeCacheOrigin() *resource_network_services_edge_cache_origin_schema.Resource { - return &resource_network_services_edge_cache_origin_schema.Resource{ - Create: resourceNetworkServicesEdgeCacheOriginCreate, - Read: resourceNetworkServicesEdgeCacheOriginRead, - Update: resourceNetworkServicesEdgeCacheOriginUpdate, - Delete: resourceNetworkServicesEdgeCacheOriginDelete, - - Importer: &resource_network_services_edge_cache_origin_schema.ResourceImporter{ - State: resourceNetworkServicesEdgeCacheOriginImport, - }, - - Timeouts: &resource_network_services_edge_cache_origin_schema.ResourceTimeout{ - Create: 
resource_network_services_edge_cache_origin_schema.DefaultTimeout(60 * resource_network_services_edge_cache_origin_time.Minute), - Update: resource_network_services_edge_cache_origin_schema.DefaultTimeout(60 * resource_network_services_edge_cache_origin_time.Minute), - Delete: resource_network_services_edge_cache_origin_schema.DefaultTimeout(60 * resource_network_services_edge_cache_origin_time.Minute), - }, - - Schema: map[string]*resource_network_services_edge_cache_origin_schema.Schema{ - "name": { - Type: resource_network_services_edge_cache_origin_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource; provided by the client when the resource is created. -The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, -and all following characters must be a dash, underscore, letter or digit.`, - }, - "origin_address": { - Type: resource_network_services_edge_cache_origin_schema.TypeString, - Required: true, - Description: `A fully qualified domain name (FQDN) or IP address reachable over the public Internet, or the address of a Google Cloud Storage bucket. - -This address will be used as the origin for cache requests - e.g. FQDN: media-backend.example.com IPv4:35.218.1.1 IPv6:[2607:f8b0:4012:809::200e] Cloud Storage: gs://bucketname - -When providing an FQDN (hostname), it must be publicly resolvable (e.g. via Google public DNS) and IP addresses must be publicly routable. -If a Cloud Storage bucket is provided, it must be in the canonical "gs://bucketname" format. 
Other forms, such as "storage.googleapis.com", will be rejected.`, - }, - "description": { - Type: resource_network_services_edge_cache_origin_schema.TypeString, - Optional: true, - Description: `A human-readable description of the resource.`, - }, - "failover_origin": { - Type: resource_network_services_edge_cache_origin_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareResourceNames, - Description: `The Origin resource to try when the current origin cannot be reached. -After maxAttempts is reached, the configured failoverOrigin will be used to fulfil the request. - -The value of timeout.maxAttemptsTimeout dictates the timeout across all origins. -A reference to a Topic resource.`, - }, - "labels": { - Type: resource_network_services_edge_cache_origin_schema.TypeMap, - Optional: true, - Description: `Set of label tags associated with the EdgeCache resource.`, - Elem: &resource_network_services_edge_cache_origin_schema.Schema{Type: resource_network_services_edge_cache_origin_schema.TypeString}, - }, - "max_attempts": { - Type: resource_network_services_edge_cache_origin_schema.TypeInt, - Optional: true, - ValidateFunc: resource_network_services_edge_cache_origin_validation.IntBetween(0, 4), - Description: `The maximum number of attempts to cache fill from this origin. Another attempt is made when a cache fill fails with one of the retryConditions. - -Once maxAttempts to this origin have failed the failoverOrigin will be used, if one is specified. That failoverOrigin may specify its own maxAttempts, -retryConditions and failoverOrigin to control its own cache fill failures. - -The total number of allowed attempts to cache fill across this and failover origins is limited to four. -The total time allowed for cache fill attempts across this and failover origins can be controlled with maxAttemptsTimeout. - -The last valid response from an origin will be returned to the client. 
-If no origin returns a valid response, an HTTP 503 will be returned to the client. - -Defaults to 1. Must be a value greater than 0 and less than 4.`, - }, - "port": { - Type: resource_network_services_edge_cache_origin_schema.TypeInt, - Computed: true, - Optional: true, - Description: `The port to connect to the origin on. -Defaults to port 443 for HTTP2 and HTTPS protocols, and port 80 for HTTP.`, - }, - "protocol": { - Type: resource_network_services_edge_cache_origin_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_network_services_edge_cache_origin_validation.StringInSlice([]string{"HTTP2", "HTTPS", "HTTP", ""}, false), - Description: `The protocol to use to connect to the configured origin. Defaults to HTTP2, and it is strongly recommended that users use HTTP2 for both security & performance. - -When using HTTP2 or HTTPS as the protocol, a valid, publicly-signed, unexpired TLS (SSL) certificate must be presented by the origin server. Possible values: ["HTTP2", "HTTPS", "HTTP"]`, - }, - "retry_conditions": { - Type: resource_network_services_edge_cache_origin_schema.TypeList, - Computed: true, - Optional: true, - Description: `Specifies one or more retry conditions for the configured origin. - -If the failure mode during a connection attempt to the origin matches the configured retryCondition(s), -the origin request will be retried up to maxAttempts times. The failoverOrigin, if configured, will then be used to satisfy the request. - -The default retryCondition is "CONNECT_FAILURE". - -retryConditions apply to this origin, and not subsequent failoverOrigin(s), -which may specify their own retryConditions and maxAttempts. - -Valid values are: - -- CONNECT_FAILURE: Retry on failures connecting to origins, for example due to connection timeouts. 
-- HTTP_5XX: Retry if the origin responds with any 5xx response code, or if the origin does not respond at all, example: disconnects, reset, read timeout, connection failure, and refused streams. -- GATEWAY_ERROR: Similar to 5xx, but only applies to response codes 502, 503 or 504. -- RETRIABLE_4XX: Retry for retriable 4xx response codes, which include HTTP 409 (Conflict) and HTTP 429 (Too Many Requests) -- NOT_FOUND: Retry if the origin returns a HTTP 404 (Not Found). This can be useful when generating video content, and the segment is not available yet. Possible values: ["CONNECT_FAILURE", "HTTP_5XX", "GATEWAY_ERROR", "RETRIABLE_4XX", "NOT_FOUND"]`, - Elem: &resource_network_services_edge_cache_origin_schema.Schema{ - Type: resource_network_services_edge_cache_origin_schema.TypeString, - ValidateFunc: resource_network_services_edge_cache_origin_validation.StringInSlice([]string{"CONNECT_FAILURE", "HTTP_5XX", "GATEWAY_ERROR", "RETRIABLE_4XX", "NOT_FOUND"}, false), - }, - }, - "timeout": { - Type: resource_network_services_edge_cache_origin_schema.TypeList, - Optional: true, - Description: `The connection and HTTP timeout configuration for this origin.`, - MaxItems: 1, - Elem: &resource_network_services_edge_cache_origin_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_origin_schema.Schema{ - "connect_timeout": { - Type: resource_network_services_edge_cache_origin_schema.TypeString, - Optional: true, - Description: `The maximum duration to wait for the origin connection to be established, including DNS lookup, TLS handshake and TCP/QUIC connection establishment. - -Defaults to 5 seconds. 
The timeout must be a value between 1s and 15s.`, - AtLeastOneOf: []string{"timeout.0.connect_timeout", "timeout.0.max_attempts_timeout", "timeout.0.response_timeout"}, - }, - "max_attempts_timeout": { - Type: resource_network_services_edge_cache_origin_schema.TypeString, - Optional: true, - Description: `The maximum time across all connection attempts to the origin, including failover origins, before returning an error to the client. A HTTP 503 will be returned if the timeout is reached before a response is returned. - -Defaults to 5 seconds. The timeout must be a value between 1s and 15s.`, - AtLeastOneOf: []string{"timeout.0.connect_timeout", "timeout.0.max_attempts_timeout", "timeout.0.response_timeout"}, - }, - "response_timeout": { - Type: resource_network_services_edge_cache_origin_schema.TypeString, - Optional: true, - Description: `The maximum duration to wait for data to arrive when reading from the HTTP connection/stream. - -Defaults to 5 seconds. The timeout must be a value between 1s and 30s.`, - AtLeastOneOf: []string{"timeout.0.connect_timeout", "timeout.0.max_attempts_timeout", "timeout.0.response_timeout"}, - }, - }, - }, - }, - "project": { - Type: resource_network_services_edge_cache_origin_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceNetworkServicesEdgeCacheOriginCreate(d *resource_network_services_edge_cache_origin_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandNetworkServicesEdgeCacheOriginDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(descriptionProp)) && (ok || 
!resource_network_services_edge_cache_origin_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandNetworkServicesEdgeCacheOriginLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(labelsProp)) && (ok || !resource_network_services_edge_cache_origin_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - originAddressProp, err := expandNetworkServicesEdgeCacheOriginOriginAddress(d.Get("origin_address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("origin_address"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(originAddressProp)) && (ok || !resource_network_services_edge_cache_origin_reflect.DeepEqual(v, originAddressProp)) { - obj["originAddress"] = originAddressProp - } - protocolProp, err := expandNetworkServicesEdgeCacheOriginProtocol(d.Get("protocol"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("protocol"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(protocolProp)) && (ok || !resource_network_services_edge_cache_origin_reflect.DeepEqual(v, protocolProp)) { - obj["protocol"] = protocolProp - } - portProp, err := expandNetworkServicesEdgeCacheOriginPort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(portProp)) && (ok || !resource_network_services_edge_cache_origin_reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - maxAttemptsProp, err := expandNetworkServicesEdgeCacheOriginMaxAttempts(d.Get("max_attempts"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("max_attempts"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(maxAttemptsProp)) && (ok || 
!resource_network_services_edge_cache_origin_reflect.DeepEqual(v, maxAttemptsProp)) { - obj["maxAttempts"] = maxAttemptsProp - } - failoverOriginProp, err := expandNetworkServicesEdgeCacheOriginFailoverOrigin(d.Get("failover_origin"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("failover_origin"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(failoverOriginProp)) && (ok || !resource_network_services_edge_cache_origin_reflect.DeepEqual(v, failoverOriginProp)) { - obj["failoverOrigin"] = failoverOriginProp - } - retryConditionsProp, err := expandNetworkServicesEdgeCacheOriginRetryConditions(d.Get("retry_conditions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("retry_conditions"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(retryConditionsProp)) && (ok || !resource_network_services_edge_cache_origin_reflect.DeepEqual(v, retryConditionsProp)) { - obj["retryConditions"] = retryConditionsProp - } - timeoutProp, err := expandNetworkServicesEdgeCacheOriginTimeout(d.Get("timeout"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(timeoutProp)) && (ok || !resource_network_services_edge_cache_origin_reflect.DeepEqual(v, timeoutProp)) { - obj["timeout"] = timeoutProp - } - - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheOrigins?edgeCacheOriginId={{name}}") - if err != nil { - return err - } - - resource_network_services_edge_cache_origin_log.Printf("[DEBUG] Creating new EdgeCacheOrigin: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_network_services_edge_cache_origin_fmt.Errorf("Error fetching project for EdgeCacheOrigin: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == 
nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_network_services_edge_cache_origin_schema.TimeoutCreate)) - if err != nil { - return resource_network_services_edge_cache_origin_fmt.Errorf("Error creating EdgeCacheOrigin: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/edgeCacheOrigins/{{name}}") - if err != nil { - return resource_network_services_edge_cache_origin_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = networkServicesOperationWaitTime( - config, res, project, "Creating EdgeCacheOrigin", userAgent, - d.Timeout(resource_network_services_edge_cache_origin_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_network_services_edge_cache_origin_fmt.Errorf("Error waiting to create EdgeCacheOrigin: %s", err) - } - - resource_network_services_edge_cache_origin_log.Printf("[DEBUG] Finished creating EdgeCacheOrigin %q: %#v", d.Id(), res) - - return resourceNetworkServicesEdgeCacheOriginRead(d, meta) -} - -func resourceNetworkServicesEdgeCacheOriginRead(d *resource_network_services_edge_cache_origin_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheOrigins/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_network_services_edge_cache_origin_fmt.Errorf("Error fetching project for EdgeCacheOrigin: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, 
resource_network_services_edge_cache_origin_fmt.Sprintf("NetworkServicesEdgeCacheOrigin %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_network_services_edge_cache_origin_fmt.Errorf("Error reading EdgeCacheOrigin: %s", err) - } - - if err := d.Set("description", flattenNetworkServicesEdgeCacheOriginDescription(res["description"], d, config)); err != nil { - return resource_network_services_edge_cache_origin_fmt.Errorf("Error reading EdgeCacheOrigin: %s", err) - } - if err := d.Set("labels", flattenNetworkServicesEdgeCacheOriginLabels(res["labels"], d, config)); err != nil { - return resource_network_services_edge_cache_origin_fmt.Errorf("Error reading EdgeCacheOrigin: %s", err) - } - if err := d.Set("origin_address", flattenNetworkServicesEdgeCacheOriginOriginAddress(res["originAddress"], d, config)); err != nil { - return resource_network_services_edge_cache_origin_fmt.Errorf("Error reading EdgeCacheOrigin: %s", err) - } - if err := d.Set("protocol", flattenNetworkServicesEdgeCacheOriginProtocol(res["protocol"], d, config)); err != nil { - return resource_network_services_edge_cache_origin_fmt.Errorf("Error reading EdgeCacheOrigin: %s", err) - } - if err := d.Set("port", flattenNetworkServicesEdgeCacheOriginPort(res["port"], d, config)); err != nil { - return resource_network_services_edge_cache_origin_fmt.Errorf("Error reading EdgeCacheOrigin: %s", err) - } - if err := d.Set("max_attempts", flattenNetworkServicesEdgeCacheOriginMaxAttempts(res["maxAttempts"], d, config)); err != nil { - return resource_network_services_edge_cache_origin_fmt.Errorf("Error reading EdgeCacheOrigin: %s", err) - } - if err := d.Set("failover_origin", flattenNetworkServicesEdgeCacheOriginFailoverOrigin(res["failoverOrigin"], d, config)); err != nil { - return resource_network_services_edge_cache_origin_fmt.Errorf("Error reading EdgeCacheOrigin: %s", err) - } - if err := d.Set("retry_conditions", 
flattenNetworkServicesEdgeCacheOriginRetryConditions(res["retryConditions"], d, config)); err != nil { - return resource_network_services_edge_cache_origin_fmt.Errorf("Error reading EdgeCacheOrigin: %s", err) - } - - return nil -} - -func resourceNetworkServicesEdgeCacheOriginUpdate(d *resource_network_services_edge_cache_origin_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_network_services_edge_cache_origin_fmt.Errorf("Error fetching project for EdgeCacheOrigin: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandNetworkServicesEdgeCacheOriginDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_origin_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandNetworkServicesEdgeCacheOriginLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_origin_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - originAddressProp, err := expandNetworkServicesEdgeCacheOriginOriginAddress(d.Get("origin_address"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("origin_address"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_origin_reflect.DeepEqual(v, originAddressProp)) { - obj["originAddress"] = originAddressProp - } - protocolProp, err 
:= expandNetworkServicesEdgeCacheOriginProtocol(d.Get("protocol"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("protocol"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_origin_reflect.DeepEqual(v, protocolProp)) { - obj["protocol"] = protocolProp - } - portProp, err := expandNetworkServicesEdgeCacheOriginPort(d.Get("port"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("port"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_origin_reflect.DeepEqual(v, portProp)) { - obj["port"] = portProp - } - maxAttemptsProp, err := expandNetworkServicesEdgeCacheOriginMaxAttempts(d.Get("max_attempts"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("max_attempts"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_origin_reflect.DeepEqual(v, maxAttemptsProp)) { - obj["maxAttempts"] = maxAttemptsProp - } - failoverOriginProp, err := expandNetworkServicesEdgeCacheOriginFailoverOrigin(d.Get("failover_origin"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("failover_origin"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_origin_reflect.DeepEqual(v, failoverOriginProp)) { - obj["failoverOrigin"] = failoverOriginProp - } - retryConditionsProp, err := expandNetworkServicesEdgeCacheOriginRetryConditions(d.Get("retry_conditions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("retry_conditions"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_origin_reflect.DeepEqual(v, retryConditionsProp)) { - obj["retryConditions"] = retryConditionsProp - } - 
timeoutProp, err := expandNetworkServicesEdgeCacheOriginTimeout(d.Get("timeout"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("timeout"); !isEmptyValue(resource_network_services_edge_cache_origin_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_origin_reflect.DeepEqual(v, timeoutProp)) { - obj["timeout"] = timeoutProp - } - - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheOrigins/{{name}}") - if err != nil { - return err - } - - resource_network_services_edge_cache_origin_log.Printf("[DEBUG] Updating EdgeCacheOrigin %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("origin_address") { - updateMask = append(updateMask, "originAddress") - } - - if d.HasChange("protocol") { - updateMask = append(updateMask, "protocol") - } - - if d.HasChange("port") { - updateMask = append(updateMask, "port") - } - - if d.HasChange("max_attempts") { - updateMask = append(updateMask, "maxAttempts") - } - - if d.HasChange("failover_origin") { - updateMask = append(updateMask, "failoverOrigin") - } - - if d.HasChange("retry_conditions") { - updateMask = append(updateMask, "retryConditions") - } - - if d.HasChange("timeout") { - updateMask = append(updateMask, "timeout") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_network_services_edge_cache_origin_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_network_services_edge_cache_origin_schema.TimeoutUpdate)) - - if err != nil { - return resource_network_services_edge_cache_origin_fmt.Errorf("Error 
updating EdgeCacheOrigin %q: %s", d.Id(), err) - } else { - resource_network_services_edge_cache_origin_log.Printf("[DEBUG] Finished updating EdgeCacheOrigin %q: %#v", d.Id(), res) - } - - err = networkServicesOperationWaitTime( - config, res, project, "Updating EdgeCacheOrigin", userAgent, - d.Timeout(resource_network_services_edge_cache_origin_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceNetworkServicesEdgeCacheOriginRead(d, meta) -} - -func resourceNetworkServicesEdgeCacheOriginDelete(d *resource_network_services_edge_cache_origin_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_network_services_edge_cache_origin_fmt.Errorf("Error fetching project for EdgeCacheOrigin: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheOrigins/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_network_services_edge_cache_origin_log.Printf("[DEBUG] Deleting EdgeCacheOrigin %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_network_services_edge_cache_origin_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "EdgeCacheOrigin") - } - - err = networkServicesOperationWaitTime( - config, res, project, "Deleting EdgeCacheOrigin", userAgent, - d.Timeout(resource_network_services_edge_cache_origin_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_network_services_edge_cache_origin_log.Printf("[DEBUG] Finished deleting EdgeCacheOrigin %q: %#v", d.Id(), res) - return nil -} - -func 
resourceNetworkServicesEdgeCacheOriginImport(d *resource_network_services_edge_cache_origin_schema.ResourceData, meta interface{}) ([]*resource_network_services_edge_cache_origin_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/edgeCacheOrigins/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/edgeCacheOrigins/{{name}}") - if err != nil { - return nil, resource_network_services_edge_cache_origin_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_network_services_edge_cache_origin_schema.ResourceData{d}, nil -} - -func flattenNetworkServicesEdgeCacheOriginDescription(v interface{}, d *resource_network_services_edge_cache_origin_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheOriginLabels(v interface{}, d *resource_network_services_edge_cache_origin_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheOriginOriginAddress(v interface{}, d *resource_network_services_edge_cache_origin_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheOriginProtocol(v interface{}, d *resource_network_services_edge_cache_origin_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheOriginPort(v interface{}, d *resource_network_services_edge_cache_origin_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_network_services_edge_cache_origin_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenNetworkServicesEdgeCacheOriginMaxAttempts(v 
interface{}, d *resource_network_services_edge_cache_origin_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_network_services_edge_cache_origin_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenNetworkServicesEdgeCacheOriginFailoverOrigin(v interface{}, d *resource_network_services_edge_cache_origin_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheOriginRetryConditions(v interface{}, d *resource_network_services_edge_cache_origin_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNetworkServicesEdgeCacheOriginDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheOriginLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandNetworkServicesEdgeCacheOriginOriginAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheOriginProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheOriginPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheOriginMaxAttempts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheOriginFailoverOrigin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - 
-func expandNetworkServicesEdgeCacheOriginRetryConditions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheOriginTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedConnectTimeout, err := expandNetworkServicesEdgeCacheOriginTimeoutConnectTimeout(original["connect_timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_origin_reflect.ValueOf(transformedConnectTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["connectTimeout"] = transformedConnectTimeout - } - - transformedMaxAttemptsTimeout, err := expandNetworkServicesEdgeCacheOriginTimeoutMaxAttemptsTimeout(original["max_attempts_timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_origin_reflect.ValueOf(transformedMaxAttemptsTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["maxAttemptsTimeout"] = transformedMaxAttemptsTimeout - } - - transformedResponseTimeout, err := expandNetworkServicesEdgeCacheOriginTimeoutResponseTimeout(original["response_timeout"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_origin_reflect.ValueOf(transformedResponseTimeout); val.IsValid() && !isEmptyValue(val) { - transformed["responseTimeout"] = transformedResponseTimeout - } - - return transformed, nil -} - -func expandNetworkServicesEdgeCacheOriginTimeoutConnectTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheOriginTimeoutMaxAttemptsTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return 
v, nil -} - -func expandNetworkServicesEdgeCacheOriginTimeoutResponseTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceNetworkServicesEdgeCacheService() *resource_network_services_edge_cache_service_schema.Resource { - return &resource_network_services_edge_cache_service_schema.Resource{ - Create: resourceNetworkServicesEdgeCacheServiceCreate, - Read: resourceNetworkServicesEdgeCacheServiceRead, - Update: resourceNetworkServicesEdgeCacheServiceUpdate, - Delete: resourceNetworkServicesEdgeCacheServiceDelete, - - Importer: &resource_network_services_edge_cache_service_schema.ResourceImporter{ - State: resourceNetworkServicesEdgeCacheServiceImport, - }, - - Timeouts: &resource_network_services_edge_cache_service_schema.ResourceTimeout{ - Create: resource_network_services_edge_cache_service_schema.DefaultTimeout(30 * resource_network_services_edge_cache_service_time.Minute), - Update: resource_network_services_edge_cache_service_schema.DefaultTimeout(30 * resource_network_services_edge_cache_service_time.Minute), - Delete: resource_network_services_edge_cache_service_schema.DefaultTimeout(30 * resource_network_services_edge_cache_service_time.Minute), - }, - - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "name": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the resource; provided by the client when the resource is created. 
-The name must be 1-64 characters long, and match the regular expression [a-zA-Z][a-zA-Z0-9_-]* which means the first character must be a letter, -and all following characters must be a dash, underscore, letter or digit.`, - }, - "routing": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Required: true, - Description: `Defines how requests are routed, modified, cached and/or which origin content is filled from.`, - MaxItems: 1, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "host_rule": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Required: true, - Description: `The list of hostRules to match against. These rules define which hostnames the EdgeCacheService will match against, and which route configurations apply.`, - MinItems: 1, - MaxItems: 5, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "hosts": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Required: true, - Description: `The list of host patterns to match. - -Host patterns must be valid hostnames with optional port numbers in the format host:port. * matches any string of ([a-z0-9-.]*). -The only accepted ports are :80 and :443. 
- -Hosts are matched against the HTTP Host header, or for HTTP/2 and HTTP/3, the ":authority" header, from the incoming request.`, - MinItems: 1, - MaxItems: 10, - Elem: &resource_network_services_edge_cache_service_schema.Schema{ - Type: resource_network_services_edge_cache_service_schema.TypeString, - }, - }, - "path_matcher": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Required: true, - Description: `The name of the pathMatcher associated with this hostRule.`, - }, - "description": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `A human-readable description of the hostRule.`, - }, - }, - }, - }, - "path_matcher": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Required: true, - Description: `The list of pathMatchers referenced via name by hostRules. PathMatcher is used to match the path portion of the URL when a HostRule matches the URL's host portion.`, - MinItems: 1, - MaxItems: 10, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "name": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Required: true, - Description: `The name to which this PathMatcher is referred by the HostRule.`, - }, - "route_rule": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Required: true, - Description: `The routeRules to match against. 
routeRules support advanced routing behaviour, and can match on paths, headers and query parameters, as well as status codes and HTTP methods.`, - MinItems: 1, - MaxItems: 64, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "match_rule": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Required: true, - Description: `The list of criteria for matching attributes of a request to this routeRule. This list has OR semantics: the request matches this routeRule when any of the matchRules are satisfied. However predicates -within a given matchRule have AND semantics. All predicates within a matchRule must match for the request to match the rule.`, - MinItems: 1, - MaxItems: 5, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "full_path_match": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `For satisfying the matchRule condition, the path of the request must exactly match the value specified in fullPathMatch after removing any query parameters and anchor that may be part of the original URL.`, - }, - "header_match": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `Specifies a list of header match criteria, all of which must match corresponding headers in the request.`, - MinItems: 1, - MaxItems: 3, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "header_name": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Required: true, - Description: `The header name to match on.`, - }, - "exact_match": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - 
Description: `The value of the header should exactly match contents of exactMatch.`, - }, - "invert_match": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Computed: true, - Optional: true, - Description: `If set to false (default), the headerMatch is considered a match if the match criteria above are met. -If set to true, the headerMatch is considered a match if the match criteria above are NOT met.`, - }, - "prefix_match": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `The value of the header must start with the contents of prefixMatch.`, - }, - "present_match": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Optional: true, - Description: `A header with the contents of headerName must exist. The match takes place whether or not the request's header has a value.`, - }, - "suffix_match": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `The value of the header must end with the contents of suffixMatch.`, - }, - }, - }, - }, - "ignore_case": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Computed: true, - Optional: true, - Description: `Specifies that prefixMatch and fullPathMatch matches are case sensitive.`, - }, - "path_template_match": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `For satisfying the matchRule condition, the path of the request -must match the wildcard pattern specified in pathTemplateMatch -after removing any query parameters and anchor that may be part -of the original URL. - -pathTemplateMatch must be between 1 and 255 characters -(inclusive). 
The pattern specified by pathTemplateMatch may -have at most 5 wildcard operators and at most 5 variable -captures in total.`, - }, - "prefix_match": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `For satisfying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /.`, - }, - "query_parameter_match": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `Specifies a list of query parameter match criteria, all of which must match corresponding query parameters in the request.`, - MinItems: 1, - MaxItems: 5, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "name": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Required: true, - Description: `The name of the query parameter to match. The query parameter must exist in the request, in the absence of which the request match fails.`, - }, - "exact_match": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `The queryParameterMatch matches if the value of the parameter exactly matches the contents of exactMatch.`, - }, - "present_match": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Optional: true, - Description: `Specifies that the queryParameterMatch matches if the request contains the query parameter, irrespective of whether the parameter has a value or not.`, - }, - }, - }, - }, - }, - }, - }, - "priority": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Required: true, - Description: `The priority of this route rule, where 1 is the highest priority. - -You cannot configure two or more routeRules with the same priority. Priority for each rule must be set to a number between 1 and 999 inclusive. 
- -Priority numbers can have gaps, which enable you to add or remove rules in the future without affecting the rest of the rules. For example, 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers -to which you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the future without any impact on existing rules.`, - }, - "description": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `A human-readable description of the routeRule.`, - }, - "header_action": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `The header actions, including adding & removing headers, for requests that match this route.`, - MaxItems: 1, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "request_header_to_add": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `Describes a header to add.`, - MinItems: 1, - MaxItems: 5, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "header_name": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Required: true, - Description: `The name of the header to add.`, - }, - "header_value": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Computed: true, - Optional: true, - Description: `Whether to replace all existing headers with the same name.`, - }, - }, - }, - }, - "request_header_to_remove": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from 
the request prior to forwarding the request to the origin.`, - MinItems: 1, - MaxItems: 10, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "header_name": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Required: true, - Description: `The name of the header to remove.`, - }, - }, - }, - }, - "response_header_to_add": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `Headers to add to the response prior to sending it back to the client. - -Response headers are only sent to the client, and do not have an effect on the cache serving the response.`, - MinItems: 1, - MaxItems: 5, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "header_name": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Required: true, - Description: `The name of the header to add.`, - }, - "header_value": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Required: true, - Description: `The value of the header to add.`, - }, - "replace": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Computed: true, - Optional: true, - Description: `Whether to replace all existing headers with the same name.`, - }, - }, - }, - }, - "response_header_to_remove": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `A list of header names for headers that need to be removed from the request prior to forwarding the request to the origin.`, - MinItems: 1, - MaxItems: 10, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "header_name": { - Type: 
resource_network_services_edge_cache_service_schema.TypeString, - Required: true, - Description: `Headers to remove from the response prior to sending it back to the client. - -Response headers are only sent to the client, and do not have an effect on the cache serving the response.`, - }, - }, - }, - }, - }, - }, - }, - "origin": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Origin resource that requests to this route should fetch from when a matching response is not in cache. Origins can be defined as short names ("my-origin") or fully-qualified resource URLs - e.g. "networkservices.googleapis.com/projects/my-project/global/edgecacheorigins/my-origin" - -Only one of origin or urlRedirect can be set.`, - }, - "route_action": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `In response to a matching path, the routeAction performs advanced routing actions like URL rewrites, header transformations, etc. 
prior to forwarding the request to the selected origin.`, - MaxItems: 1, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "cdn_policy": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `The policy to use for defining caching and signed request behaviour for requests that match this route.`, - MaxItems: 1, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "cache_key_policy": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `Defines the request parameters that contribute to the cache key.`, - MaxItems: 1, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "exclude_host": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Computed: true, - Optional: true, - Description: `If true, requests to different hosts will be cached separately. - -Note: this should only be enabled if hosts share the same origin and content Removing the host from the cache key may inadvertently result in different objects being cached than intended, depending on which route the first user matched.`, - }, - "exclude_query_string": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Optional: true, - Description: `If true, exclude query string parameters from the cache key - -If false (the default), include the query string parameters in -the cache key according to includeQueryParameters and -excludeQueryParameters. 
If neither includeQueryParameters nor -excludeQueryParameters is set, the entire query string will be -included.`, - }, - "excluded_query_parameters": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `Names of query string parameters to exclude from cache keys. All other parameters will be included. - -Either specify includedQueryParameters or excludedQueryParameters, not both. '&' and '=' will be percent encoded and not treated as delimiters.`, - MaxItems: 10, - Elem: &resource_network_services_edge_cache_service_schema.Schema{ - Type: resource_network_services_edge_cache_service_schema.TypeString, - }, - }, - "include_protocol": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Computed: true, - Optional: true, - Description: `If true, http and https requests will be cached separately.`, - }, - "included_header_names": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `Names of HTTP request headers to include in cache keys. The value of the header field will be used as part of the cache key. - -- Header names must be valid HTTP RFC 7230 header field values. -- Header field names are case insensitive -- To include the HTTP method, use ":method" - -Note that specifying several headers, and/or headers that have a large range of values (e.g. per-user) will dramatically impact the cache hit rate, and may result in a higher eviction rate and reduced performance.`, - MaxItems: 5, - Elem: &resource_network_services_edge_cache_service_schema.Schema{ - Type: resource_network_services_edge_cache_service_schema.TypeString, - }, - }, - "included_query_parameters": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `Names of query string parameters to include in cache keys. All other parameters will be excluded. - -Either specify includedQueryParameters or excludedQueryParameters, not both. 
'&' and '=' will be percent encoded and not treated as delimiters.`, - MaxItems: 10, - Elem: &resource_network_services_edge_cache_service_schema.Schema{ - Type: resource_network_services_edge_cache_service_schema.TypeString, - }, - }, - }, - }, - }, - "cache_mode": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_network_services_edge_cache_service_validation.StringInSlice([]string{"CACHE_ALL_STATIC", "USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "BYPASS_CACHE", ""}, false), - Description: `Cache modes allow users to control the behaviour of the cache, what content it should cache automatically, whether to respect origin headers, or whether to unconditionally cache all responses. - -For all cache modes, Cache-Control headers will be passed to the client. Use clientTtl to override what is sent to the client. Possible values: ["CACHE_ALL_STATIC", "USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", "BYPASS_CACHE"]`, - }, - "client_ttl": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `Specifies a separate client (e.g. browser client) TTL, separate from the TTL used by the edge caches. Leaving this empty will use the same cache TTL for both the CDN and the client-facing response. - -- The TTL must be > 0 and <= 86400s (1 day) -- The clientTtl cannot be larger than the defaultTtl (if set) -- Fractions of a second are not allowed. -- Omit this field to use the defaultTtl, or the max-age set by the origin, as the client-facing TTL. - -When the cache mode is set to "USE_ORIGIN_HEADERS" or "BYPASS_CACHE", you must omit this field. -A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, - }, - "default_ttl": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Computed: true, - Optional: true, - Description: `Specifies the default TTL for cached content served by this origin for responses that do not have an existing valid TTL (max-age or s-max-age). - -Defaults to 3600s (1 hour). - -- The TTL must be >= 0 and <= 2592000s (1 month) -- Setting a TTL of "0" means "always revalidate" (equivalent to must-revalidate) -- The value of defaultTTL cannot be set to a value greater than that of maxTTL. -- Fractions of a second are not allowed. -- When the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will overwrite the TTL set in all responses. - -Note that infrequently accessed objects may be evicted from the cache before the defined TTL. Objects that expire will be revalidated with the origin. - -When the cache mode is set to "USE_ORIGIN_HEADERS" or "BYPASS_CACHE", you must omit this field. - -A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, - }, - "max_ttl": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Computed: true, - Optional: true, - Description: `Specifies the maximum allowed TTL for cached content served by this origin. - -Defaults to 86400s (1 day). - -Cache directives that attempt to set a max-age or s-maxage higher than this, or an Expires header more than maxTtl seconds in the future will be capped at the value of maxTTL, as if it were the value of an s-maxage Cache-Control directive. - -- The TTL must be >= 0 and <= 2592000s (1 month) -- Setting a TTL of "0" means "always revalidate" -- The value of maxTtl must be equal to or greater than defaultTtl. -- Fractions of a second are not allowed. -- When the cache mode is set to "USE_ORIGIN_HEADERS", "FORCE_CACHE_ALL", or "BYPASS_CACHE", you must omit this field. - -A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, - }, - "negative_caching": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Optional: true, - Description: `Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. This can reduce the load on your origin and improve end-user experience by reducing response latency. - -By default, the CDNPolicy will apply the following default TTLs to these status codes: - -- HTTP 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m -- HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal Reasons): 120s -- HTTP 405 (Method Not Found), 414 (URI Too Long), 501 (Not Implemented): 60s - -These defaults can be overridden in negativeCachingPolicy`, - }, - "negative_caching_policy": { - Type: resource_network_services_edge_cache_service_schema.TypeMap, - Optional: true, - Description: `Sets a cache TTL for the specified HTTP status code. negativeCaching must be enabled to configure negativeCachingPolicy. - -- Omitting the policy and leaving negativeCaching enabled will use the default TTLs for each status code, defined in negativeCaching. -- TTLs must be >= 0 (where 0 is "always revalidate") and <= 86400s (1 day) - -Note that when specifying an explicit negativeCachingPolicy, you should take care to specify a cache TTL for all response codes that you wish to cache. 
The CDNPolicy will not apply any default negative caching when a policy exists.`, - Elem: &resource_network_services_edge_cache_service_schema.Schema{Type: resource_network_services_edge_cache_service_schema.TypeString}, - }, - "signed_request_keyset": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Computed: true, - Optional: true, - Description: `The EdgeCacheKeyset containing the set of public keys used to validate signed requests at the edge.`, - }, - "signed_request_mode": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_network_services_edge_cache_service_validation.StringInSlice([]string{"DISABLED", "REQUIRE_SIGNATURES", ""}, false), - Description: `Whether to enforce signed requests. The default value is DISABLED, which means all content is public, and does not authorize access. - -You must also set a signedRequestKeyset to enable signed requests. - -When set to REQUIRE_SIGNATURES, all matching requests will have their signature validated. Requests that were not signed with the corresponding private key, or that are otherwise invalid (expired, do not match the signature, IP address, or header) will be rejected with a HTTP 403 and (if enabled) logged. Possible values: ["DISABLED", "REQUIRE_SIGNATURES"]`, - }, - }, - }, - }, - "cors_policy": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `CORSPolicy defines Cross-Origin-Resource-Sharing configuration, including which CORS response headers will be set.`, - MaxItems: 1, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "max_age": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Required: true, - Description: `Specifies how long results of a preflight request can be cached by a client in seconds. 
Note that many browser clients enforce a maximum TTL of 600s (10 minutes). - -- Setting the value to -1 forces a pre-flight check for all requests (not recommended) -- A maximum TTL of 86400s can be set, but note that (as above) some clients may force pre-flight checks at a more regular interval. -- This translates to the Access-Control-Max-Age header. - -A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, - }, - "allow_credentials": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Optional: true, - Description: `In response to a preflight request, setting this to true indicates that the actual request can include user credentials. - -This translates to the Access-Control-Allow-Credentials response header.`, - }, - "allow_headers": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Allow-Headers response header.`, - MaxItems: 5, - Elem: &resource_network_services_edge_cache_service_schema.Schema{ - Type: resource_network_services_edge_cache_service_schema.TypeString, - }, - }, - "allow_methods": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Allow-Methods response header.`, - MaxItems: 5, - Elem: &resource_network_services_edge_cache_service_schema.Schema{ - Type: resource_network_services_edge_cache_service_schema.TypeString, - }, - }, - "allow_origins": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `Specifies the list of origins that will be allowed to do CORS requests. 
- -This translates to the Access-Control-Allow-Origin response header.`, - MaxItems: 5, - Elem: &resource_network_services_edge_cache_service_schema.Schema{ - Type: resource_network_services_edge_cache_service_schema.TypeString, - }, - }, - "disabled": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Optional: true, - Description: `If true, specifies the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect.`, - }, - "expose_headers": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `Specifies the content for the Access-Control-Allow-Headers response header.`, - MaxItems: 5, - Elem: &resource_network_services_edge_cache_service_schema.Schema{ - Type: resource_network_services_edge_cache_service_schema.TypeString, - }, - }, - }, - }, - }, - "url_rewrite": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `The URL rewrite configuration for requests that match this route.`, - MaxItems: 1, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "host_rewrite": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `Prior to forwarding the request to the selected origin, the request's host header is replaced with contents of hostRewrite.`, - }, - "path_prefix_rewrite": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `Prior to forwarding the request to the selected origin, the matching portion of the request's path is replaced by pathPrefixRewrite.`, - }, - "path_template_rewrite": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `Prior to forwarding the request to the selected origin, if the -request matched a 
pathTemplateMatch, the matching portion of the -request's path is replaced re-written using the pattern specified -by pathTemplateRewrite. - -pathTemplateRewrite must be between 1 and 255 characters -(inclusive), must start with a '/', and must only use variables -captured by the route's pathTemplate matchers. - -pathTemplateRewrite may only be used when all of a route's -MatchRules specify pathTemplate. - -Only one of pathPrefixRewrite and pathTemplateRewrite may be -specified.`, - }, - }, - }, - }, - }, - }, - }, - "url_redirect": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `The URL redirect configuration for requests that match this route.`, - MaxItems: 1, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "host_redirect": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `The host that will be used in the redirect response instead of the one that was supplied in the request.`, - }, - "https_redirect": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Computed: true, - Optional: true, - Description: `If set to true, the URL scheme in the redirected request is set to https. If set to false, the URL scheme of the redirected request will remain the same as that of the request. - -This can only be set if there is at least one (1) edgeSslCertificate set on the service.`, - }, - "path_redirect": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `The path that will be used in the redirect response instead of the one that was supplied in the request. - -pathRedirect cannot be supplied together with prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the original request will be used for the redirect. 
- -The path value must be between 1 and 1024 characters.`, - }, - "prefix_redirect": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `The prefix that replaces the prefixMatch specified in the routeRule, retaining the remaining portion of the URL before redirecting the request. - -prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or neither. If neither is supplied, the path of the original request will be used for the redirect.`, - }, - "redirect_response_code": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_network_services_edge_cache_service_validation.StringInSlice([]string{"MOVED_PERMANENTLY_DEFAULT", "FOUND", "SEE_OTHER", "TEMPORARY_REDIRECT", "PERMANENT_REDIRECT", ""}, false), - Description: `The HTTP Status code to use for this RedirectAction. - -The supported values are: - -- 'MOVED_PERMANENTLY_DEFAULT', which is the default value and corresponds to 301. -- 'FOUND', which corresponds to 302. -- 'SEE_OTHER' which corresponds to 303. -- 'TEMPORARY_REDIRECT', which corresponds to 307. in this case, the request method will be retained. -- 'PERMANENT_REDIRECT', which corresponds to 308. in this case, the request method will be retained. Possible values: ["MOVED_PERMANENTLY_DEFAULT", "FOUND", "SEE_OTHER", "TEMPORARY_REDIRECT", "PERMANENT_REDIRECT"]`, - }, - "strip_query": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Computed: true, - Optional: true, - Description: `If set to true, any accompanying query portion of the original URL is removed prior to redirecting the request. 
If set to false, the query portion of the original URL is retained.`, - }, - }, - }, - }, - }, - }, - }, - "description": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `A human-readable description of the resource.`, - }, - }, - }, - }, - }, - }, - }, - "description": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `A human-readable description of the resource.`, - }, - "disable_quic": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Computed: true, - Optional: true, - Description: `HTTP/3 (IETF QUIC) and Google QUIC are enabled by default.`, - }, - "edge_security_policy": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `Resource URL that points at the Cloud Armor edge security policy that is applied on each request against the EdgeCacheService.`, - }, - "edge_ssl_certificates": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `URLs to sslCertificate resources that are used to authenticate connections between users and the EdgeCacheService. 
- -Note that only "global" certificates with a "scope" of "EDGE_CACHE" can be attached to an EdgeCacheService.`, - MaxItems: 5, - Elem: &resource_network_services_edge_cache_service_schema.Schema{ - Type: resource_network_services_edge_cache_service_schema.TypeString, - }, - }, - "labels": { - Type: resource_network_services_edge_cache_service_schema.TypeMap, - Optional: true, - Description: `Set of label tags associated with the EdgeCache resource.`, - Elem: &resource_network_services_edge_cache_service_schema.Schema{Type: resource_network_services_edge_cache_service_schema.TypeString}, - }, - "log_config": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Optional: true, - Description: `Specifies the logging options for the traffic served by this service. If logging is enabled, logs will be exported to Cloud Logging.`, - MaxItems: 1, - Elem: &resource_network_services_edge_cache_service_schema.Resource{ - Schema: map[string]*resource_network_services_edge_cache_service_schema.Schema{ - "enable": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Computed: true, - Optional: true, - Description: `Specifies whether to enable logging for traffic served by this service.`, - }, - "sample_rate": { - Type: resource_network_services_edge_cache_service_schema.TypeFloat, - Optional: true, - Description: `Configures the sampling rate of requests, where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. The default value is 1.0, and the value of the field must be in [0, 1]. - -This field can only be specified if logging is enabled for this service.`, - }, - }, - }, - }, - "require_tls": { - Type: resource_network_services_edge_cache_service_schema.TypeBool, - Computed: true, - Optional: true, - Description: `Require TLS (HTTPS) for all clients connecting to this service. - -Clients who connect over HTTP (port 80) will receive a HTTP 301 to the same URL over HTTPS (port 443). 
-You must have at least one (1) edgeSslCertificate specified to enable this.`, - }, - "ssl_policy": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Description: `URL of the SslPolicy resource that will be associated with the EdgeCacheService. - -If not set, the EdgeCacheService has no SSL policy configured, and will default to the "COMPATIBLE" policy.`, - }, - "ipv4_addresses": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Computed: true, - Description: `The IPv4 addresses associated with this service. Addresses are static for the lifetime of the service.`, - Elem: &resource_network_services_edge_cache_service_schema.Schema{ - Type: resource_network_services_edge_cache_service_schema.TypeString, - }, - }, - "ipv6_addresses": { - Type: resource_network_services_edge_cache_service_schema.TypeList, - Computed: true, - Description: `The IPv6 addresses associated with this service. Addresses are static for the lifetime of the service.`, - Elem: &resource_network_services_edge_cache_service_schema.Schema{ - Type: resource_network_services_edge_cache_service_schema.TypeString, - }, - }, - "project": { - Type: resource_network_services_edge_cache_service_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceNetworkServicesEdgeCacheServiceCreate(d *resource_network_services_edge_cache_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandNetworkServicesEdgeCacheServiceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(descriptionProp)) && (ok || 
!resource_network_services_edge_cache_service_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandNetworkServicesEdgeCacheServiceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(labelsProp)) && (ok || !resource_network_services_edge_cache_service_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - disableQuicProp, err := expandNetworkServicesEdgeCacheServiceDisableQuic(d.Get("disable_quic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disable_quic"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(disableQuicProp)) && (ok || !resource_network_services_edge_cache_service_reflect.DeepEqual(v, disableQuicProp)) { - obj["disableQuic"] = disableQuicProp - } - requireTlsProp, err := expandNetworkServicesEdgeCacheServiceRequireTls(d.Get("require_tls"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("require_tls"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(requireTlsProp)) && (ok || !resource_network_services_edge_cache_service_reflect.DeepEqual(v, requireTlsProp)) { - obj["requireTls"] = requireTlsProp - } - edgeSslCertificatesProp, err := expandNetworkServicesEdgeCacheServiceEdgeSslCertificates(d.Get("edge_ssl_certificates"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("edge_ssl_certificates"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(edgeSslCertificatesProp)) && (ok || !resource_network_services_edge_cache_service_reflect.DeepEqual(v, edgeSslCertificatesProp)) { - obj["edgeSslCertificates"] = edgeSslCertificatesProp - } - sslPolicyProp, err := expandNetworkServicesEdgeCacheServiceSslPolicy(d.Get("ssl_policy"), d, config) - if err != nil { - return err - } else if v, ok 
:= d.GetOkExists("ssl_policy"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(sslPolicyProp)) && (ok || !resource_network_services_edge_cache_service_reflect.DeepEqual(v, sslPolicyProp)) { - obj["sslPolicy"] = sslPolicyProp - } - routingProp, err := expandNetworkServicesEdgeCacheServiceRouting(d.Get("routing"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("routing"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(routingProp)) && (ok || !resource_network_services_edge_cache_service_reflect.DeepEqual(v, routingProp)) { - obj["routing"] = routingProp - } - logConfigProp, err := expandNetworkServicesEdgeCacheServiceLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(logConfigProp)) && (ok || !resource_network_services_edge_cache_service_reflect.DeepEqual(v, logConfigProp)) { - obj["logConfig"] = logConfigProp - } - edgeSecurityPolicyProp, err := expandNetworkServicesEdgeCacheServiceEdgeSecurityPolicy(d.Get("edge_security_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("edge_security_policy"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(edgeSecurityPolicyProp)) && (ok || !resource_network_services_edge_cache_service_reflect.DeepEqual(v, edgeSecurityPolicyProp)) { - obj["edgeSecurityPolicy"] = edgeSecurityPolicyProp - } - - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheServices?edgeCacheServiceId={{name}}") - if err != nil { - return err - } - - resource_network_services_edge_cache_service_log.Printf("[DEBUG] Creating new EdgeCacheService: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error 
fetching project for EdgeCacheService: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_network_services_edge_cache_service_schema.TimeoutCreate)) - if err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error creating EdgeCacheService: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/edgeCacheServices/{{name}}") - if err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = networkServicesOperationWaitTime( - config, res, project, "Creating EdgeCacheService", userAgent, - d.Timeout(resource_network_services_edge_cache_service_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_network_services_edge_cache_service_fmt.Errorf("Error waiting to create EdgeCacheService: %s", err) - } - - resource_network_services_edge_cache_service_log.Printf("[DEBUG] Finished creating EdgeCacheService %q: %#v", d.Id(), res) - - return resourceNetworkServicesEdgeCacheServiceRead(d, meta) -} - -func resourceNetworkServicesEdgeCacheServiceRead(d *resource_network_services_edge_cache_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheServices/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error fetching project for EdgeCacheService: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - 
billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_network_services_edge_cache_service_fmt.Sprintf("NetworkServicesEdgeCacheService %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error reading EdgeCacheService: %s", err) - } - - if err := d.Set("description", flattenNetworkServicesEdgeCacheServiceDescription(res["description"], d, config)); err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error reading EdgeCacheService: %s", err) - } - if err := d.Set("labels", flattenNetworkServicesEdgeCacheServiceLabels(res["labels"], d, config)); err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error reading EdgeCacheService: %s", err) - } - if err := d.Set("disable_quic", flattenNetworkServicesEdgeCacheServiceDisableQuic(res["disableQuic"], d, config)); err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error reading EdgeCacheService: %s", err) - } - if err := d.Set("require_tls", flattenNetworkServicesEdgeCacheServiceRequireTls(res["requireTls"], d, config)); err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error reading EdgeCacheService: %s", err) - } - if err := d.Set("edge_ssl_certificates", flattenNetworkServicesEdgeCacheServiceEdgeSslCertificates(res["edgeSslCertificates"], d, config)); err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error reading EdgeCacheService: %s", err) - } - if err := d.Set("ssl_policy", flattenNetworkServicesEdgeCacheServiceSslPolicy(res["sslPolicy"], d, config)); err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error reading EdgeCacheService: %s", err) - } - if err := d.Set("ipv4_addresses", flattenNetworkServicesEdgeCacheServiceIpv4Addresses(res["ipv4Addresses"], d, 
config)); err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error reading EdgeCacheService: %s", err) - } - if err := d.Set("ipv6_addresses", flattenNetworkServicesEdgeCacheServiceIpv6Addresses(res["ipv6Addresses"], d, config)); err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error reading EdgeCacheService: %s", err) - } - if err := d.Set("routing", flattenNetworkServicesEdgeCacheServiceRouting(res["routing"], d, config)); err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error reading EdgeCacheService: %s", err) - } - if err := d.Set("log_config", flattenNetworkServicesEdgeCacheServiceLogConfig(res["logConfig"], d, config)); err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error reading EdgeCacheService: %s", err) - } - if err := d.Set("edge_security_policy", flattenNetworkServicesEdgeCacheServiceEdgeSecurityPolicy(res["edgeSecurityPolicy"], d, config)); err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error reading EdgeCacheService: %s", err) - } - - return nil -} - -func resourceNetworkServicesEdgeCacheServiceUpdate(d *resource_network_services_edge_cache_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error fetching project for EdgeCacheService: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandNetworkServicesEdgeCacheServiceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(v)) && (ok || 
!resource_network_services_edge_cache_service_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandNetworkServicesEdgeCacheServiceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_service_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - disableQuicProp, err := expandNetworkServicesEdgeCacheServiceDisableQuic(d.Get("disable_quic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disable_quic"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_service_reflect.DeepEqual(v, disableQuicProp)) { - obj["disableQuic"] = disableQuicProp - } - requireTlsProp, err := expandNetworkServicesEdgeCacheServiceRequireTls(d.Get("require_tls"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("require_tls"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_service_reflect.DeepEqual(v, requireTlsProp)) { - obj["requireTls"] = requireTlsProp - } - edgeSslCertificatesProp, err := expandNetworkServicesEdgeCacheServiceEdgeSslCertificates(d.Get("edge_ssl_certificates"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("edge_ssl_certificates"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_service_reflect.DeepEqual(v, edgeSslCertificatesProp)) { - obj["edgeSslCertificates"] = edgeSslCertificatesProp - } - sslPolicyProp, err := expandNetworkServicesEdgeCacheServiceSslPolicy(d.Get("ssl_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ssl_policy"); 
!isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_service_reflect.DeepEqual(v, sslPolicyProp)) { - obj["sslPolicy"] = sslPolicyProp - } - routingProp, err := expandNetworkServicesEdgeCacheServiceRouting(d.Get("routing"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("routing"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_service_reflect.DeepEqual(v, routingProp)) { - obj["routing"] = routingProp - } - logConfigProp, err := expandNetworkServicesEdgeCacheServiceLogConfig(d.Get("log_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("log_config"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_service_reflect.DeepEqual(v, logConfigProp)) { - obj["logConfig"] = logConfigProp - } - edgeSecurityPolicyProp, err := expandNetworkServicesEdgeCacheServiceEdgeSecurityPolicy(d.Get("edge_security_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("edge_security_policy"); !isEmptyValue(resource_network_services_edge_cache_service_reflect.ValueOf(v)) && (ok || !resource_network_services_edge_cache_service_reflect.DeepEqual(v, edgeSecurityPolicyProp)) { - obj["edgeSecurityPolicy"] = edgeSecurityPolicyProp - } - - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheServices/{{name}}") - if err != nil { - return err - } - - resource_network_services_edge_cache_service_log.Printf("[DEBUG] Updating EdgeCacheService %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("disable_quic") { - updateMask = 
append(updateMask, "disableQuic") - } - - if d.HasChange("require_tls") { - updateMask = append(updateMask, "requireTls") - } - - if d.HasChange("edge_ssl_certificates") { - updateMask = append(updateMask, "edgeSslCertificates") - } - - if d.HasChange("ssl_policy") { - updateMask = append(updateMask, "sslPolicy") - } - - if d.HasChange("routing") { - updateMask = append(updateMask, "routing") - } - - if d.HasChange("log_config") { - updateMask = append(updateMask, "logConfig") - } - - if d.HasChange("edge_security_policy") { - updateMask = append(updateMask, "edgeSecurityPolicy") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_network_services_edge_cache_service_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_network_services_edge_cache_service_schema.TimeoutUpdate)) - - if err != nil { - return resource_network_services_edge_cache_service_fmt.Errorf("Error updating EdgeCacheService %q: %s", d.Id(), err) - } else { - resource_network_services_edge_cache_service_log.Printf("[DEBUG] Finished updating EdgeCacheService %q: %#v", d.Id(), res) - } - - err = networkServicesOperationWaitTime( - config, res, project, "Updating EdgeCacheService", userAgent, - d.Timeout(resource_network_services_edge_cache_service_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceNetworkServicesEdgeCacheServiceRead(d, meta) -} - -func resourceNetworkServicesEdgeCacheServiceDelete(d *resource_network_services_edge_cache_service_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return 
resource_network_services_edge_cache_service_fmt.Errorf("Error fetching project for EdgeCacheService: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{NetworkServicesBasePath}}projects/{{project}}/locations/global/edgeCacheServices/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_network_services_edge_cache_service_log.Printf("[DEBUG] Deleting EdgeCacheService %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_network_services_edge_cache_service_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "EdgeCacheService") - } - - err = networkServicesOperationWaitTime( - config, res, project, "Deleting EdgeCacheService", userAgent, - d.Timeout(resource_network_services_edge_cache_service_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_network_services_edge_cache_service_log.Printf("[DEBUG] Finished deleting EdgeCacheService %q: %#v", d.Id(), res) - return nil -} - -func resourceNetworkServicesEdgeCacheServiceImport(d *resource_network_services_edge_cache_service_schema.ResourceData, meta interface{}) ([]*resource_network_services_edge_cache_service_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/global/edgeCacheServices/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/global/edgeCacheServices/{{name}}") - if err != nil { - return nil, resource_network_services_edge_cache_service_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_network_services_edge_cache_service_schema.ResourceData{d}, nil -} - -func flattenNetworkServicesEdgeCacheServiceDescription(v 
interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceLabels(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceDisableQuic(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRequireTls(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceEdgeSslCertificates(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceSslPolicy(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceIpv4Addresses(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceIpv6Addresses(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRouting(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host_rule"] = - flattenNetworkServicesEdgeCacheServiceRoutingHostRule(original["hostRules"], d, config) - transformed["path_matcher"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcher(original["pathMatchers"], 
d, config) - return []interface{}{transformed} -} - -func flattenNetworkServicesEdgeCacheServiceRoutingHostRule(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "description": flattenNetworkServicesEdgeCacheServiceRoutingHostRuleDescription(original["description"], d, config), - "hosts": flattenNetworkServicesEdgeCacheServiceRoutingHostRuleHosts(original["hosts"], d, config), - "path_matcher": flattenNetworkServicesEdgeCacheServiceRoutingHostRulePathMatcher(original["pathMatcher"], d, config), - }) - } - return transformed -} - -func flattenNetworkServicesEdgeCacheServiceRoutingHostRuleDescription(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingHostRuleHosts(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingHostRulePathMatcher(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcher(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": 
flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherName(original["name"], d, config), - "description": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherDescription(original["description"], d, config), - "route_rule": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRule(original["routeRules"], d, config), - }) - } - return transformed -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherName(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherDescription(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRule(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "priority": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRulePriority(original["priority"], d, config), - "description": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleDescription(original["description"], d, config), - "match_rule": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRule(original["matchRules"], d, config), - "header_action": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderAction(original["headerAction"], d, config), - "route_action": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction(original["routeAction"], d, config), - "origin": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleOrigin(original["origin"], d, config), - 
"url_redirect": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirect(original["urlRedirect"], d, config), - }) - } - return transformed -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRulePriority(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleDescription(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRule(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "ignore_case": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleIgnoreCase(original["ignoreCase"], d, config), - "header_match": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatch(original["headerMatches"], d, config), - "query_parameter_match": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatch(original["queryParameterMatches"], d, config), - "prefix_match": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePrefixMatch(original["prefixMatch"], d, config), - "path_template_match": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePathTemplateMatch(original["pathTemplateMatch"], d, config), - "full_path_match": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleFullPathMatch(original["fullPathMatch"], d, config), - }) - } - return transformed -} - -func 
flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleIgnoreCase(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatch(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchHeaderName(original["headerName"], d, config), - "present_match": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPresentMatch(original["presentMatch"], d, config), - "exact_match": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchExactMatch(original["exactMatch"], d, config), - "prefix_match": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPrefixMatch(original["prefixMatch"], d, config), - "suffix_match": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchSuffixMatch(original["suffixMatch"], d, config), - "invert_match": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchInvertMatch(original["invertMatch"], d, config), - }) - } - return transformed -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchHeaderName(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPresentMatch(v interface{}, d 
*resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchExactMatch(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPrefixMatch(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchSuffixMatch(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchInvertMatch(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatch(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchName(original["name"], d, config), - "present_match": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchPresentMatch(original["presentMatch"], d, config), - "exact_match": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchExactMatch(original["exactMatch"], d, config), - }) - } 
- return transformed -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchName(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchPresentMatch(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchExactMatch(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePrefixMatch(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePathTemplateMatch(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleFullPathMatch(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderAction(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["request_header_to_add"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAdd(original["requestHeadersToAdd"], d, config) - transformed["response_header_to_add"] 
= - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAdd(original["responseHeadersToAdd"], d, config) - transformed["request_header_to_remove"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemove(original["requestHeadersToRemove"], d, config) - transformed["response_header_to_remove"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemove(original["responseHeadersToRemove"], d, config) - return []interface{}{transformed} -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAdd(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderName(original["headerName"], d, config), - "header_value": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderName(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderValue(v interface{}, d 
*resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddReplace(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAdd(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderName(original["headerName"], d, config), - "header_value": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderValue(original["headerValue"], d, config), - "replace": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddReplace(original["replace"], d, config), - }) - } - return transformed -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderName(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderValue(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddReplace(v interface{}, d 
*resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemove(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemoveHeaderName(original["headerName"], d, config), - }) - } - return transformed -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemoveHeaderName(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemove(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "header_name": flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemoveHeaderName(original["headerName"], d, config), - }) - } - return transformed -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemoveHeaderName(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cdn_policy"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicy(original["cdnPolicy"], d, config) - transformed["url_rewrite"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewrite(original["urlRewrite"], d, config) - transformed["cors_policy"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicy(original["corsPolicy"], d, config) - return []interface{}{transformed} -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicy(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cache_mode"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheMode(original["cacheMode"], d, config) - transformed["client_ttl"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyClientTtl(original["clientTtl"], d, config) - transformed["default_ttl"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyDefaultTtl(original["defaultTtl"], d, config) - transformed["max_ttl"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyMaxTtl(original["maxTtl"], d, config) - transformed["cache_key_policy"] = - 
flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicy(original["cacheKeyPolicy"], d, config) - transformed["negative_caching"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCaching(original["negativeCaching"], d, config) - transformed["negative_caching_policy"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCachingPolicy(original["negativeCachingPolicy"], d, config) - transformed["signed_request_mode"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestMode(original["signedRequestMode"], d, config) - transformed["signed_request_keyset"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestKeyset(original["signedRequestKeyset"], d, config) - return []interface{}{transformed} -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheMode(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyClientTtl(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyDefaultTtl(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyMaxTtl(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicy(v interface{}, d 
*resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["include_protocol"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludeProtocol(original["includeProtocol"], d, config) - transformed["exclude_query_string"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeQueryString(original["excludeQueryString"], d, config) - transformed["exclude_host"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeHost(original["excludeHost"], d, config) - transformed["included_query_parameters"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedQueryParameters(original["includedQueryParameters"], d, config) - transformed["excluded_query_parameters"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludedQueryParameters(original["excludedQueryParameters"], d, config) - transformed["included_header_names"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedHeaderNames(original["includedHeaderNames"], d, config) - return []interface{}{transformed} -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeQueryString(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeHost(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedQueryParameters(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludedQueryParameters(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedHeaderNames(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCaching(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCachingPolicy(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestMode(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestKeyset(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewrite(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["path_prefix_rewrite"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathPrefixRewrite(original["pathPrefixRewrite"], d, config) - transformed["host_rewrite"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewriteHostRewrite(original["hostRewrite"], d, config) - transformed["path_template_rewrite"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathTemplateRewrite(original["pathTemplateRewrite"], d, config) - return []interface{}{transformed} -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewriteHostRewrite(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathTemplateRewrite(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicy(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - 
return nil - } - transformed := make(map[string]interface{}) - transformed["max_age"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyMaxAge(original["maxAge"], d, config) - transformed["allow_credentials"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowCredentials(original["allowCredentials"], d, config) - transformed["allow_origins"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowOrigins(original["allowOrigins"], d, config) - transformed["allow_methods"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowMethods(original["allowMethods"], d, config) - transformed["allow_headers"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowHeaders(original["allowHeaders"], d, config) - transformed["expose_headers"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyExposeHeaders(original["exposeHeaders"], d, config) - transformed["disabled"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyDisabled(original["disabled"], d, config) - return []interface{}{transformed} -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyMaxAge(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowMethods(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyDisabled(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleOrigin(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirect(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host_redirect"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHostRedirect(original["hostRedirect"], d, config) - transformed["path_redirect"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPathRedirect(original["pathRedirect"], d, config) - transformed["prefix_redirect"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPrefixRedirect(original["prefixRedirect"], d, config) - 
transformed["redirect_response_code"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectRedirectResponseCode(original["redirectResponseCode"], d, config) - transformed["https_redirect"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHttpsRedirect(original["httpsRedirect"], d, config) - transformed["strip_query"] = - flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectStripQuery(original["stripQuery"], d, config) - return []interface{}{transformed} -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHostRedirect(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPathRedirect(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPrefixRedirect(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectRedirectResponseCode(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHttpsRedirect(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectStripQuery(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceLogConfig(v interface{}, d 
*resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enable"] = - flattenNetworkServicesEdgeCacheServiceLogConfigEnable(original["enable"], d, config) - transformed["sample_rate"] = - flattenNetworkServicesEdgeCacheServiceLogConfigSampleRate(original["sampleRate"], d, config) - return []interface{}{transformed} -} - -func flattenNetworkServicesEdgeCacheServiceLogConfigEnable(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceLogConfigSampleRate(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNetworkServicesEdgeCacheServiceEdgeSecurityPolicy(v interface{}, d *resource_network_services_edge_cache_service_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNetworkServicesEdgeCacheServiceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandNetworkServicesEdgeCacheServiceDisableQuic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRequireTls(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceEdgeSslCertificates(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceSslPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRouting(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHostRule, err := expandNetworkServicesEdgeCacheServiceRoutingHostRule(original["host_rule"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedHostRule); val.IsValid() && !isEmptyValue(val) { - transformed["hostRules"] = transformedHostRule - } - - transformedPathMatcher, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcher(original["path_matcher"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedPathMatcher); val.IsValid() && !isEmptyValue(val) { - transformed["pathMatchers"] = transformedPathMatcher - } - - return transformed, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingHostRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDescription, err := expandNetworkServicesEdgeCacheServiceRoutingHostRuleDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = 
transformedDescription - } - - transformedHosts, err := expandNetworkServicesEdgeCacheServiceRoutingHostRuleHosts(original["hosts"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedHosts); val.IsValid() && !isEmptyValue(val) { - transformed["hosts"] = transformedHosts - } - - transformedPathMatcher, err := expandNetworkServicesEdgeCacheServiceRoutingHostRulePathMatcher(original["path_matcher"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedPathMatcher); val.IsValid() && !isEmptyValue(val) { - transformed["pathMatcher"] = transformedPathMatcher - } - - req = append(req, transformed) - } - return req, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingHostRuleDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingHostRuleHosts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingHostRulePathMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcher(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - 
transformedDescription, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedRouteRule, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRule(original["route_rule"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedRouteRule); val.IsValid() && !isEmptyValue(val) { - transformed["routeRules"] = transformedRouteRule - } - - req = append(req, transformed) - } - return req, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPriority, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRulePriority(original["priority"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedPriority); val.IsValid() && !isEmptyValue(val) { - transformed["priority"] = transformedPriority - } - - transformedDescription, err := 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedMatchRule, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRule(original["match_rule"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedMatchRule); val.IsValid() && !isEmptyValue(val) { - transformed["matchRules"] = transformedMatchRule - } - - transformedHeaderAction, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderAction(original["header_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedHeaderAction); val.IsValid() && !isEmptyValue(val) { - transformed["headerAction"] = transformedHeaderAction - } - - transformedRouteAction, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction(original["route_action"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedRouteAction); val.IsValid() && !isEmptyValue(val) { - transformed["routeAction"] = transformedRouteAction - } - - transformedOrigin, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleOrigin(original["origin"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedOrigin); val.IsValid() && !isEmptyValue(val) { - transformed["origin"] = transformedOrigin - } - - transformedUrlRedirect, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirect(original["url_redirect"], d, 
config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedUrlRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["urlRedirect"] = transformedUrlRedirect - } - - req = append(req, transformed) - } - return req, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRulePriority(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIgnoreCase, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleIgnoreCase(original["ignore_case"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedIgnoreCase); val.IsValid() && !isEmptyValue(val) { - transformed["ignoreCase"] = transformedIgnoreCase - } - - transformedHeaderMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatch(original["header_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedHeaderMatch); val.IsValid() && !isEmptyValue(val) { - transformed["headerMatches"] = transformedHeaderMatch - } - - transformedQueryParameterMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatch(original["query_parameter_match"], d, 
config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedQueryParameterMatch); val.IsValid() && !isEmptyValue(val) { - transformed["queryParameterMatches"] = transformedQueryParameterMatch - } - - transformedPrefixMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePrefixMatch(original["prefix_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !isEmptyValue(val) { - transformed["prefixMatch"] = transformedPrefixMatch - } - - transformedPathTemplateMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePathTemplateMatch(original["path_template_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedPathTemplateMatch); val.IsValid() && !isEmptyValue(val) { - transformed["pathTemplateMatch"] = transformedPathTemplateMatch - } - - transformedFullPathMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleFullPathMatch(original["full_path_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedFullPathMatch); val.IsValid() && !isEmptyValue(val) { - transformed["fullPathMatch"] = transformedFullPathMatch - } - - req = append(req, transformed) - } - return req, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleIgnoreCase(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, 
raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedPresentMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPresentMatch(original["present_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedPresentMatch); val.IsValid() && !isEmptyValue(val) { - transformed["presentMatch"] = transformedPresentMatch - } - - transformedExactMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchExactMatch(original["exact_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedExactMatch); val.IsValid() && !isEmptyValue(val) { - transformed["exactMatch"] = transformedExactMatch - } - - transformedPrefixMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPrefixMatch(original["prefix_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedPrefixMatch); val.IsValid() && !isEmptyValue(val) { - transformed["prefixMatch"] = transformedPrefixMatch - } - - transformedSuffixMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchSuffixMatch(original["suffix_match"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_network_services_edge_cache_service_reflect.ValueOf(transformedSuffixMatch); val.IsValid() && !isEmptyValue(val) { - transformed["suffixMatch"] = transformedSuffixMatch - } - - transformedInvertMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchInvertMatch(original["invert_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedInvertMatch); val.IsValid() && !isEmptyValue(val) { - transformed["invertMatch"] = transformedInvertMatch - } - - req = append(req, transformed) - } - return req, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPresentMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchExactMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchPrefixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchSuffixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleHeaderMatchInvertMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatch(v interface{}, d TerraformResourceData, 
config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - transformed["name"] = transformedName - } - - transformedPresentMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchPresentMatch(original["present_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedPresentMatch); val.IsValid() && !isEmptyValue(val) { - transformed["presentMatch"] = transformedPresentMatch - } - - transformedExactMatch, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchExactMatch(original["exact_match"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedExactMatch); val.IsValid() && !isEmptyValue(val) { - transformed["exactMatch"] = transformedExactMatch - } - - req = append(req, transformed) - } - return req, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchPresentMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleQueryParameterMatchExactMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePrefixMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRulePathTemplateMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleMatchRuleFullPathMatch(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderAction(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRequestHeaderToAdd, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAdd(original["request_header_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedRequestHeaderToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToAdd"] = transformedRequestHeaderToAdd - } - - transformedResponseHeaderToAdd, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAdd(original["response_header_to_add"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedResponseHeaderToAdd); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToAdd"] = 
transformedResponseHeaderToAdd - } - - transformedRequestHeaderToRemove, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemove(original["request_header_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedRequestHeaderToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["requestHeadersToRemove"] = transformedRequestHeaderToRemove - } - - transformedResponseHeaderToRemove, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemove(original["response_header_to_remove"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedResponseHeaderToRemove); val.IsValid() && !isEmptyValue(val) { - transformed["responseHeadersToRemove"] = transformedResponseHeaderToRemove - } - - return transformed, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderValue(original["header_value"], d, config) - if err != nil { - 
return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAdd(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_network_services_edge_cache_service_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - transformedHeaderValue, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderValue(original["header_value"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedHeaderValue); val.IsValid() && !isEmptyValue(val) { - transformed["headerValue"] = transformedHeaderValue - } - - transformedReplace, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddReplace(original["replace"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedReplace); val.IsValid() && !isEmptyValue(val) { - transformed["replace"] = transformedReplace - } - - req = append(req, transformed) - } - return req, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddHeaderValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToAddReplace(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original 
:= raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemoveHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionRequestHeaderToRemoveHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemove(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHeaderName, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemoveHeaderName(original["header_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedHeaderName); val.IsValid() && !isEmptyValue(val) { - transformed["headerName"] = transformedHeaderName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleHeaderActionResponseHeaderToRemoveHeaderName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteAction(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCdnPolicy, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicy(original["cdn_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedCdnPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["cdnPolicy"] = transformedCdnPolicy - } - - transformedUrlRewrite, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewrite(original["url_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedUrlRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["urlRewrite"] = transformedUrlRewrite - } - - transformedCorsPolicy, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicy(original["cors_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedCorsPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["corsPolicy"] = transformedCorsPolicy - } - - return transformed, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCacheMode, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheMode(original["cache_mode"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_network_services_edge_cache_service_reflect.ValueOf(transformedCacheMode); val.IsValid() && !isEmptyValue(val) { - transformed["cacheMode"] = transformedCacheMode - } - - transformedClientTtl, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyClientTtl(original["client_ttl"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedClientTtl); val.IsValid() && !isEmptyValue(val) { - transformed["clientTtl"] = transformedClientTtl - } - - transformedDefaultTtl, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyDefaultTtl(original["default_ttl"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedDefaultTtl); val.IsValid() && !isEmptyValue(val) { - transformed["defaultTtl"] = transformedDefaultTtl - } - - transformedMaxTtl, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyMaxTtl(original["max_ttl"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedMaxTtl); val.IsValid() && !isEmptyValue(val) { - transformed["maxTtl"] = transformedMaxTtl - } - - transformedCacheKeyPolicy, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicy(original["cache_key_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedCacheKeyPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["cacheKeyPolicy"] = transformedCacheKeyPolicy - } - - transformedNegativeCaching, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCaching(original["negative_caching"], d, config) - if err != nil { - return nil, err - } else if val 
:= resource_network_services_edge_cache_service_reflect.ValueOf(transformedNegativeCaching); val.IsValid() && !isEmptyValue(val) { - transformed["negativeCaching"] = transformedNegativeCaching - } - - transformedNegativeCachingPolicy, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCachingPolicy(original["negative_caching_policy"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedNegativeCachingPolicy); val.IsValid() && !isEmptyValue(val) { - transformed["negativeCachingPolicy"] = transformedNegativeCachingPolicy - } - - transformedSignedRequestMode, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestMode(original["signed_request_mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedSignedRequestMode); val.IsValid() && !isEmptyValue(val) { - transformed["signedRequestMode"] = transformedSignedRequestMode - } - - transformedSignedRequestKeyset, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestKeyset(original["signed_request_keyset"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedSignedRequestKeyset); val.IsValid() && !isEmptyValue(val) { - transformed["signedRequestKeyset"] = transformedSignedRequestKeyset - } - - return transformed, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyClientTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyDefaultTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyMaxTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIncludeProtocol, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludeProtocol(original["include_protocol"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedIncludeProtocol); val.IsValid() && !isEmptyValue(val) { - transformed["includeProtocol"] = transformedIncludeProtocol - } - - transformedExcludeQueryString, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeQueryString(original["exclude_query_string"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedExcludeQueryString); val.IsValid() && !isEmptyValue(val) { - transformed["excludeQueryString"] = transformedExcludeQueryString - } - - transformedExcludeHost, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeHost(original["exclude_host"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedExcludeHost); 
val.IsValid() && !isEmptyValue(val) { - transformed["excludeHost"] = transformedExcludeHost - } - - transformedIncludedQueryParameters, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedQueryParameters(original["included_query_parameters"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedIncludedQueryParameters); val.IsValid() && !isEmptyValue(val) { - transformed["includedQueryParameters"] = transformedIncludedQueryParameters - } - - transformedExcludedQueryParameters, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludedQueryParameters(original["excluded_query_parameters"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedExcludedQueryParameters); val.IsValid() && !isEmptyValue(val) { - transformed["excludedQueryParameters"] = transformedExcludedQueryParameters - } - - transformedIncludedHeaderNames, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedHeaderNames(original["included_header_names"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedIncludedHeaderNames); val.IsValid() && !isEmptyValue(val) { - transformed["includedHeaderNames"] = transformedIncludedHeaderNames - } - - return transformed, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludeProtocol(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeQueryString(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludeHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedQueryParameters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyExcludedQueryParameters(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyCacheKeyPolicyIncludedHeaderNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCaching(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicyNegativeCachingPolicy(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCdnPolicySignedRequestKeyset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPathPrefixRewrite, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathPrefixRewrite(original["path_prefix_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedPathPrefixRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["pathPrefixRewrite"] = transformedPathPrefixRewrite - } - - transformedHostRewrite, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewriteHostRewrite(original["host_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedHostRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["hostRewrite"] = transformedHostRewrite - } - - transformedPathTemplateRewrite, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathTemplateRewrite(original["path_template_rewrite"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedPathTemplateRewrite); val.IsValid() && !isEmptyValue(val) { - transformed["pathTemplateRewrite"] = transformedPathTemplateRewrite - } - - return transformed, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathPrefixRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewriteHostRewrite(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionUrlRewritePathTemplateRewrite(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMaxAge, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyMaxAge(original["max_age"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedMaxAge); val.IsValid() && !isEmptyValue(val) { - transformed["maxAge"] = transformedMaxAge - } - - transformedAllowCredentials, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowCredentials(original["allow_credentials"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedAllowCredentials); val.IsValid() && !isEmptyValue(val) { - transformed["allowCredentials"] = transformedAllowCredentials - } - - transformedAllowOrigins, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowOrigins(original["allow_origins"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedAllowOrigins); val.IsValid() && !isEmptyValue(val) { - transformed["allowOrigins"] = transformedAllowOrigins - } - - transformedAllowMethods, err := 
expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowMethods(original["allow_methods"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedAllowMethods); val.IsValid() && !isEmptyValue(val) { - transformed["allowMethods"] = transformedAllowMethods - } - - transformedAllowHeaders, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowHeaders(original["allow_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedAllowHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["allowHeaders"] = transformedAllowHeaders - } - - transformedExposeHeaders, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyExposeHeaders(original["expose_headers"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedExposeHeaders); val.IsValid() && !isEmptyValue(val) { - transformed["exposeHeaders"] = transformedExposeHeaders - } - - transformedDisabled, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyDisabled(original["disabled"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) { - transformed["disabled"] = transformedDisabled - } - - return transformed, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyMaxAge(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowCredentials(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowOrigins(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowMethods(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyAllowHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyExposeHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleRouteActionCorsPolicyDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleOrigin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHostRedirect, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHostRedirect(original["host_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedHostRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["hostRedirect"] = transformedHostRedirect - } - - 
transformedPathRedirect, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPathRedirect(original["path_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedPathRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["pathRedirect"] = transformedPathRedirect - } - - transformedPrefixRedirect, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPrefixRedirect(original["prefix_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedPrefixRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["prefixRedirect"] = transformedPrefixRedirect - } - - transformedRedirectResponseCode, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectRedirectResponseCode(original["redirect_response_code"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedRedirectResponseCode); val.IsValid() && !isEmptyValue(val) { - transformed["redirectResponseCode"] = transformedRedirectResponseCode - } - - transformedHttpsRedirect, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHttpsRedirect(original["https_redirect"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedHttpsRedirect); val.IsValid() && !isEmptyValue(val) { - transformed["httpsRedirect"] = transformedHttpsRedirect - } - - transformedStripQuery, err := expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectStripQuery(original["strip_query"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedStripQuery); val.IsValid() && 
!isEmptyValue(val) { - transformed["stripQuery"] = transformedStripQuery - } - - return transformed, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHostRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPathRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectPrefixRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectRedirectResponseCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectHttpsRedirect(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceRoutingPathMatcherRouteRuleUrlRedirectStripQuery(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceLogConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnable, err := expandNetworkServicesEdgeCacheServiceLogConfigEnable(original["enable"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedEnable); val.IsValid() && !isEmptyValue(val) { - transformed["enable"] = transformedEnable - } - - transformedSampleRate, err := 
expandNetworkServicesEdgeCacheServiceLogConfigSampleRate(original["sample_rate"], d, config) - if err != nil { - return nil, err - } else if val := resource_network_services_edge_cache_service_reflect.ValueOf(transformedSampleRate); val.IsValid() && !isEmptyValue(val) { - transformed["sampleRate"] = transformedSampleRate - } - - return transformed, nil -} - -func expandNetworkServicesEdgeCacheServiceLogConfigEnable(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceLogConfigSampleRate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNetworkServicesEdgeCacheServiceEdgeSecurityPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceNotebooksEnvironment() *resource_notebooks_environment_schema.Resource { - return &resource_notebooks_environment_schema.Resource{ - Create: resourceNotebooksEnvironmentCreate, - Read: resourceNotebooksEnvironmentRead, - Update: resourceNotebooksEnvironmentUpdate, - Delete: resourceNotebooksEnvironmentDelete, - - Importer: &resource_notebooks_environment_schema.ResourceImporter{ - State: resourceNotebooksEnvironmentImport, - }, - - Timeouts: &resource_notebooks_environment_schema.ResourceTimeout{ - Create: resource_notebooks_environment_schema.DefaultTimeout(4 * resource_notebooks_environment_time.Minute), - Update: resource_notebooks_environment_schema.DefaultTimeout(4 * resource_notebooks_environment_time.Minute), - Delete: resource_notebooks_environment_schema.DefaultTimeout(4 * resource_notebooks_environment_time.Minute), - }, - - Schema: map[string]*resource_notebooks_environment_schema.Schema{ - "location": { - Type: resource_notebooks_environment_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the zone where the machine resides.`, - }, - "name": { 
- Type: resource_notebooks_environment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name specified for the Environment instance. -Format: projects/{project_id}/locations/{location}/environments/{environmentId}`, - }, - "container_image": { - Type: resource_notebooks_environment_schema.TypeList, - Optional: true, - Description: `Use a container image to start the notebook instance.`, - MaxItems: 1, - Elem: &resource_notebooks_environment_schema.Resource{ - Schema: map[string]*resource_notebooks_environment_schema.Schema{ - "repository": { - Type: resource_notebooks_environment_schema.TypeString, - Required: true, - Description: `The path to the container image repository. -For example: gcr.io/{project_id}/{imageName}`, - }, - "tag": { - Type: resource_notebooks_environment_schema.TypeString, - Optional: true, - Description: `The tag of the container image. If not specified, this defaults to the latest tag.`, - }, - }, - }, - ExactlyOneOf: []string{"vm_image", "container_image"}, - }, - "description": { - Type: resource_notebooks_environment_schema.TypeString, - Optional: true, - Description: `A brief description of this environment.`, - }, - "display_name": { - Type: resource_notebooks_environment_schema.TypeString, - Optional: true, - Description: `Display name of this environment for the UI.`, - }, - "post_startup_script": { - Type: resource_notebooks_environment_schema.TypeString, - Optional: true, - Description: `Path to a Bash script that automatically runs after a notebook instance fully boots up. -The path must be a URL or Cloud Storage path. 
Example: "gs://path-to-file/file-name"`, - }, - "vm_image": { - Type: resource_notebooks_environment_schema.TypeList, - Optional: true, - Description: `Use a Compute Engine VM image to start the notebook instance.`, - MaxItems: 1, - Elem: &resource_notebooks_environment_schema.Resource{ - Schema: map[string]*resource_notebooks_environment_schema.Schema{ - "project": { - Type: resource_notebooks_environment_schema.TypeString, - Required: true, - Description: `The name of the Google Cloud project that this VM image belongs to. -Format: projects/{project_id}`, - }, - "image_family": { - Type: resource_notebooks_environment_schema.TypeString, - Optional: true, - Description: `Use this VM image family to find the image; the newest image in this family will be used.`, - }, - "image_name": { - Type: resource_notebooks_environment_schema.TypeString, - Optional: true, - Description: `Use VM image name to find the image.`, - }, - }, - }, - ExactlyOneOf: []string{"vm_image", "container_image"}, - }, - "create_time": { - Type: resource_notebooks_environment_schema.TypeString, - Computed: true, - Description: `Instance creation time`, - }, - "project": { - Type: resource_notebooks_environment_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceNotebooksEnvironmentCreate(d *resource_notebooks_environment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandNotebooksEnvironmentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_notebooks_environment_reflect.ValueOf(displayNameProp)) && (ok || !resource_notebooks_environment_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - 
descriptionProp, err := expandNotebooksEnvironmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_notebooks_environment_reflect.ValueOf(descriptionProp)) && (ok || !resource_notebooks_environment_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - postStartupScriptProp, err := expandNotebooksEnvironmentPostStartupScript(d.Get("post_startup_script"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("post_startup_script"); !isEmptyValue(resource_notebooks_environment_reflect.ValueOf(postStartupScriptProp)) && (ok || !resource_notebooks_environment_reflect.DeepEqual(v, postStartupScriptProp)) { - obj["postStartupScript"] = postStartupScriptProp - } - vmImageProp, err := expandNotebooksEnvironmentVmImage(d.Get("vm_image"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vm_image"); !isEmptyValue(resource_notebooks_environment_reflect.ValueOf(vmImageProp)) && (ok || !resource_notebooks_environment_reflect.DeepEqual(v, vmImageProp)) { - obj["vmImage"] = vmImageProp - } - containerImageProp, err := expandNotebooksEnvironmentContainerImage(d.Get("container_image"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("container_image"); !isEmptyValue(resource_notebooks_environment_reflect.ValueOf(containerImageProp)) && (ok || !resource_notebooks_environment_reflect.DeepEqual(v, containerImageProp)) { - obj["containerImage"] = containerImageProp - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/environments?environmentId={{name}}") - if err != nil { - return err - } - - resource_notebooks_environment_log.Printf("[DEBUG] Creating new Environment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_notebooks_environment_fmt.Errorf("Error 
fetching project for Environment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_notebooks_environment_schema.TimeoutCreate)) - if err != nil { - return resource_notebooks_environment_fmt.Errorf("Error creating Environment: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/environments/{{name}}") - if err != nil { - return resource_notebooks_environment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = notebooksOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Environment", userAgent, - d.Timeout(resource_notebooks_environment_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_notebooks_environment_fmt.Errorf("Error waiting to create Environment: %s", err) - } - - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/environments/{{name}}") - if err != nil { - return resource_notebooks_environment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_notebooks_environment_log.Printf("[DEBUG] Finished creating Environment %q: %#v", d.Id(), res) - - return resourceNotebooksEnvironmentRead(d, meta) -} - -func resourceNotebooksEnvironmentRead(d *resource_notebooks_environment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/environments/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_notebooks_environment_fmt.Errorf("Error fetching project for Environment: %s", 
err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_notebooks_environment_fmt.Sprintf("NotebooksEnvironment %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_notebooks_environment_fmt.Errorf("Error reading Environment: %s", err) - } - - if err := d.Set("display_name", flattenNotebooksEnvironmentDisplayName(res["displayName"], d, config)); err != nil { - return resource_notebooks_environment_fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("description", flattenNotebooksEnvironmentDescription(res["description"], d, config)); err != nil { - return resource_notebooks_environment_fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("post_startup_script", flattenNotebooksEnvironmentPostStartupScript(res["postStartupScript"], d, config)); err != nil { - return resource_notebooks_environment_fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("create_time", flattenNotebooksEnvironmentCreateTime(res["createTime"], d, config)); err != nil { - return resource_notebooks_environment_fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("vm_image", flattenNotebooksEnvironmentVmImage(res["vmImage"], d, config)); err != nil { - return resource_notebooks_environment_fmt.Errorf("Error reading Environment: %s", err) - } - if err := d.Set("container_image", flattenNotebooksEnvironmentContainerImage(res["containerImage"], d, config)); err != nil { - return resource_notebooks_environment_fmt.Errorf("Error reading Environment: %s", err) - } - - return nil -} - -func resourceNotebooksEnvironmentUpdate(d *resource_notebooks_environment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_notebooks_environment_fmt.Errorf("Error fetching project for Environment: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandNotebooksEnvironmentDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_notebooks_environment_reflect.ValueOf(v)) && (ok || !resource_notebooks_environment_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - descriptionProp, err := expandNotebooksEnvironmentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_notebooks_environment_reflect.ValueOf(v)) && (ok || !resource_notebooks_environment_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - postStartupScriptProp, err := expandNotebooksEnvironmentPostStartupScript(d.Get("post_startup_script"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("post_startup_script"); !isEmptyValue(resource_notebooks_environment_reflect.ValueOf(v)) && (ok || !resource_notebooks_environment_reflect.DeepEqual(v, postStartupScriptProp)) { - obj["postStartupScript"] = postStartupScriptProp - } - vmImageProp, err := expandNotebooksEnvironmentVmImage(d.Get("vm_image"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vm_image"); !isEmptyValue(resource_notebooks_environment_reflect.ValueOf(v)) && (ok || !resource_notebooks_environment_reflect.DeepEqual(v, vmImageProp)) { - obj["vmImage"] = vmImageProp - } - containerImageProp, err := expandNotebooksEnvironmentContainerImage(d.Get("container_image"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("container_image"); !isEmptyValue(resource_notebooks_environment_reflect.ValueOf(v)) && (ok || !resource_notebooks_environment_reflect.DeepEqual(v, containerImageProp)) { - obj["containerImage"] = containerImageProp - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/environments/{{name}}") - if err != nil { - return err - } - - resource_notebooks_environment_log.Printf("[DEBUG] Updating Environment %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_notebooks_environment_schema.TimeoutUpdate)) - - if err != nil { - return resource_notebooks_environment_fmt.Errorf("Error updating Environment %q: %s", d.Id(), err) - } else { - resource_notebooks_environment_log.Printf("[DEBUG] Finished updating Environment %q: %#v", d.Id(), res) - } - - err = notebooksOperationWaitTime( - config, res, project, "Updating Environment", userAgent, - d.Timeout(resource_notebooks_environment_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceNotebooksEnvironmentRead(d, meta) -} - -func resourceNotebooksEnvironmentDelete(d *resource_notebooks_environment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_notebooks_environment_fmt.Errorf("Error fetching project for Environment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/environments/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_notebooks_environment_log.Printf("[DEBUG] Deleting Environment %q", d.Id()) - - if 
bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_notebooks_environment_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Environment") - } - - err = notebooksOperationWaitTime( - config, res, project, "Deleting Environment", userAgent, - d.Timeout(resource_notebooks_environment_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_notebooks_environment_log.Printf("[DEBUG] Finished deleting Environment %q: %#v", d.Id(), res) - return nil -} - -func resourceNotebooksEnvironmentImport(d *resource_notebooks_environment_schema.ResourceData, meta interface{}) ([]*resource_notebooks_environment_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/environments/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/environments/{{name}}") - if err != nil { - return nil, resource_notebooks_environment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_notebooks_environment_schema.ResourceData{d}, nil -} - -func flattenNotebooksEnvironmentDisplayName(v interface{}, d *resource_notebooks_environment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksEnvironmentDescription(v interface{}, d *resource_notebooks_environment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksEnvironmentPostStartupScript(v interface{}, d *resource_notebooks_environment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksEnvironmentCreateTime(v interface{}, d *resource_notebooks_environment_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenNotebooksEnvironmentVmImage(v interface{}, d *resource_notebooks_environment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project"] = - flattenNotebooksEnvironmentVmImageProject(original["project"], d, config) - transformed["image_name"] = - flattenNotebooksEnvironmentVmImageImageName(original["imageName"], d, config) - transformed["image_family"] = - flattenNotebooksEnvironmentVmImageImageFamily(original["imageFamily"], d, config) - return []interface{}{transformed} -} - -func flattenNotebooksEnvironmentVmImageProject(v interface{}, d *resource_notebooks_environment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksEnvironmentVmImageImageName(v interface{}, d *resource_notebooks_environment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksEnvironmentVmImageImageFamily(v interface{}, d *resource_notebooks_environment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksEnvironmentContainerImage(v interface{}, d *resource_notebooks_environment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["repository"] = - flattenNotebooksEnvironmentContainerImageRepository(original["repository"], d, config) - transformed["tag"] = - flattenNotebooksEnvironmentContainerImageTag(original["tag"], d, config) - return []interface{}{transformed} -} - -func flattenNotebooksEnvironmentContainerImageRepository(v interface{}, d *resource_notebooks_environment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksEnvironmentContainerImageTag(v 
interface{}, d *resource_notebooks_environment_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNotebooksEnvironmentDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksEnvironmentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksEnvironmentPostStartupScript(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksEnvironmentVmImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProject, err := expandNotebooksEnvironmentVmImageProject(original["project"], d, config) - if err != nil { - return nil, err - } else if val := resource_notebooks_environment_reflect.ValueOf(transformedProject); val.IsValid() && !isEmptyValue(val) { - transformed["project"] = transformedProject - } - - transformedImageName, err := expandNotebooksEnvironmentVmImageImageName(original["image_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_notebooks_environment_reflect.ValueOf(transformedImageName); val.IsValid() && !isEmptyValue(val) { - transformed["imageName"] = transformedImageName - } - - transformedImageFamily, err := expandNotebooksEnvironmentVmImageImageFamily(original["image_family"], d, config) - if err != nil { - return nil, err - } else if val := resource_notebooks_environment_reflect.ValueOf(transformedImageFamily); val.IsValid() && !isEmptyValue(val) { - transformed["imageFamily"] = transformedImageFamily - } - - return transformed, nil -} - -func expandNotebooksEnvironmentVmImageProject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
return v, nil -} - -func expandNotebooksEnvironmentVmImageImageName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksEnvironmentVmImageImageFamily(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksEnvironmentContainerImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRepository, err := expandNotebooksEnvironmentContainerImageRepository(original["repository"], d, config) - if err != nil { - return nil, err - } else if val := resource_notebooks_environment_reflect.ValueOf(transformedRepository); val.IsValid() && !isEmptyValue(val) { - transformed["repository"] = transformedRepository - } - - transformedTag, err := expandNotebooksEnvironmentContainerImageTag(original["tag"], d, config) - if err != nil { - return nil, err - } else if val := resource_notebooks_environment_reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - return transformed, nil -} - -func expandNotebooksEnvironmentContainerImageRepository(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksEnvironmentContainerImageTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -const notebooksInstanceGoogleProvidedLabel = "goog-caip-notebook" - -func NotebooksInstanceLabelDiffSuppress(k, old, new string, d *resource_notebooks_instance_schema.ResourceData) bool { - - if resource_notebooks_instance_strings.Contains(k, notebooksInstanceGoogleProvidedLabel) && new == "" { - return true - } - - if resource_notebooks_instance_strings.Contains(k, "labels.%") { - return 
true - } - - return false -} - -func resourceNotebooksInstance() *resource_notebooks_instance_schema.Resource { - return &resource_notebooks_instance_schema.Resource{ - Create: resourceNotebooksInstanceCreate, - Read: resourceNotebooksInstanceRead, - Update: resourceNotebooksInstanceUpdate, - Delete: resourceNotebooksInstanceDelete, - - Importer: &resource_notebooks_instance_schema.ResourceImporter{ - State: resourceNotebooksInstanceImport, - }, - - Timeouts: &resource_notebooks_instance_schema.ResourceTimeout{ - Create: resource_notebooks_instance_schema.DefaultTimeout(15 * resource_notebooks_instance_time.Minute), - Update: resource_notebooks_instance_schema.DefaultTimeout(15 * resource_notebooks_instance_time.Minute), - Delete: resource_notebooks_instance_schema.DefaultTimeout(15 * resource_notebooks_instance_time.Minute), - }, - - Schema: map[string]*resource_notebooks_instance_schema.Schema{ - "location": { - Type: resource_notebooks_instance_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to the zone where the machine resides.`, - }, - "machine_type": { - Type: resource_notebooks_instance_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to a machine type which defines VM kind.`, - }, - "name": { - Type: resource_notebooks_instance_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name specified for the Notebook instance.`, - }, - "accelerator_config": { - Type: resource_notebooks_instance_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The hardware accelerator used on this instance. 
If you use accelerators, -make sure that your configuration has enough vCPUs and memory to support the -machineType you have selected.`, - MaxItems: 1, - Elem: &resource_notebooks_instance_schema.Resource{ - Schema: map[string]*resource_notebooks_instance_schema.Schema{ - "core_count": { - Type: resource_notebooks_instance_schema.TypeInt, - Required: true, - ForceNew: true, - Description: `Count of cores of this accelerator.`, - }, - "type": { - Type: resource_notebooks_instance_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_notebooks_instance_validation.StringInSlice([]string{"ACCELERATOR_TYPE_UNSPECIFIED", "NVIDIA_TESLA_K80", "NVIDIA_TESLA_P100", "NVIDIA_TESLA_V100", "NVIDIA_TESLA_P4", "NVIDIA_TESLA_T4", "NVIDIA_TESLA_T4_VWS", "NVIDIA_TESLA_P100_VWS", "NVIDIA_TESLA_P4_VWS", "NVIDIA_TESLA_A100", "TPU_V2", "TPU_V3"}, false), - Description: `Type of this accelerator. Possible values: ["ACCELERATOR_TYPE_UNSPECIFIED", "NVIDIA_TESLA_K80", "NVIDIA_TESLA_P100", "NVIDIA_TESLA_V100", "NVIDIA_TESLA_P4", "NVIDIA_TESLA_T4", "NVIDIA_TESLA_T4_VWS", "NVIDIA_TESLA_P100_VWS", "NVIDIA_TESLA_P4_VWS", "NVIDIA_TESLA_A100", "TPU_V2", "TPU_V3"]`, - }, - }, - }, - }, - "boot_disk_size_gb": { - Type: resource_notebooks_instance_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The size of the boot disk in GB attached to this instance, -up to a maximum of 64000 GB (64 TB). The minimum recommended value is 100 GB. -If not specified, this defaults to 100.`, - }, - "boot_disk_type": { - Type: resource_notebooks_instance_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_notebooks_instance_validation.StringInSlice([]string{"DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", ""}, false), - Description: `Possible disk types for notebook instances. 
Possible values: ["DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED"]`, - }, - "container_image": { - Type: resource_notebooks_instance_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Use a container image to start the notebook instance.`, - MaxItems: 1, - Elem: &resource_notebooks_instance_schema.Resource{ - Schema: map[string]*resource_notebooks_instance_schema.Schema{ - "repository": { - Type: resource_notebooks_instance_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The path to the container image repository. -For example: gcr.io/{project_id}/{imageName}`, - }, - "tag": { - Type: resource_notebooks_instance_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The tag of the container image. If not specified, this defaults to the latest tag.`, - }, - }, - }, - ExactlyOneOf: []string{"vm_image", "container_image"}, - }, - "custom_gpu_driver_path": { - Type: resource_notebooks_instance_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specify a custom Cloud Storage path where the GPU driver is stored. -If not specified, we'll automatically choose from official GPU drivers.`, - }, - "data_disk_size_gb": { - Type: resource_notebooks_instance_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `The size of the data disk in GB attached to this instance, -up to a maximum of 64000 GB (64 TB). -You can choose the size of the data disk based on how big your notebooks and data are. -If not specified, this defaults to 100.`, - }, - "data_disk_type": { - Type: resource_notebooks_instance_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_notebooks_instance_validation.StringInSlice([]string{"DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED", ""}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("DISK_TYPE_UNSPECIFIED"), - Description: `Possible disk types for notebook instances. 
Possible values: ["DISK_TYPE_UNSPECIFIED", "PD_STANDARD", "PD_SSD", "PD_BALANCED"]`, - }, - "disk_encryption": { - Type: resource_notebooks_instance_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_notebooks_instance_validation.StringInSlice([]string{"DISK_ENCRYPTION_UNSPECIFIED", "GMEK", "CMEK", ""}, false), - DiffSuppressFunc: emptyOrDefaultStringSuppress("DISK_ENCRYPTION_UNSPECIFIED"), - Description: `Disk encryption method used on the boot and data disks, defaults to GMEK. Possible values: ["DISK_ENCRYPTION_UNSPECIFIED", "GMEK", "CMEK"]`, - }, - "install_gpu_driver": { - Type: resource_notebooks_instance_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether the end user authorizes Google Cloud to install GPU driver -on this instance. If this field is empty or set to false, the GPU driver -won't be installed. Only applicable to instances with GPUs.`, - }, - "instance_owners": { - Type: resource_notebooks_instance_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The list of owners of this instance after creation. -Format: alias@example.com. -Currently supports one owner only. -If not specified, all of the service account users of -your VM instance's service account can use the instance.`, - Elem: &resource_notebooks_instance_schema.Schema{ - Type: resource_notebooks_instance_schema.TypeString, - }, - }, - "kms_key": { - Type: resource_notebooks_instance_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The KMS key used to encrypt the disks, only applicable if diskEncryption is CMEK. -Format: projects/{project_id}/locations/{location}/keyRings/{key_ring_id}/cryptoKeys/{key_id}`, - }, - "labels": { - Type: resource_notebooks_instance_schema.TypeMap, - Computed: true, - Optional: true, - DiffSuppressFunc: NotebooksInstanceLabelDiffSuppress, - Description: `Labels to apply to this instance. These can be later modified by the setLabels method. 
-An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &resource_notebooks_instance_schema.Schema{Type: resource_notebooks_instance_schema.TypeString}, - }, - "metadata": { - Type: resource_notebooks_instance_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Custom metadata to apply to this instance. -An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &resource_notebooks_instance_schema.Schema{Type: resource_notebooks_instance_schema.TypeString}, - }, - "network": { - Type: resource_notebooks_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the VPC that this instance is in. -Format: projects/{project_id}/global/networks/{network_id}`, - }, - "no_proxy_access": { - Type: resource_notebooks_instance_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The notebook instance will not register with the proxy..`, - }, - "no_public_ip": { - Type: resource_notebooks_instance_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `No public IP will be assigned to this instance.`, - }, - "no_remove_data_disk": { - Type: resource_notebooks_instance_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `If true, the data disk will not be auto deleted when deleting the instance.`, - }, - "post_startup_script": { - Type: resource_notebooks_instance_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Path to a Bash script that automatically runs after a -notebook instance fully boots up. 
The path must be a URL -or Cloud Storage path (gs://path-to-file/file-name).`, - }, - "service_account": { - Type: resource_notebooks_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The service account on this instance, giving access to other -Google Cloud services. You can use any service account within -the same project, but you must have the service account user -permission to use the instance. If not specified, -the Compute Engine default service account is used.`, - }, - "service_account_scopes": { - Type: resource_notebooks_instance_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Optional. The URIs of service account scopes to be included in Compute Engine instances. -If not specified, the following scopes are defined: -- https://www.googleapis.com/auth/cloud-platform -- https://www.googleapis.com/auth/userinfo.email`, - Elem: &resource_notebooks_instance_schema.Schema{ - Type: resource_notebooks_instance_schema.TypeString, - }, - }, - "shielded_instance_config": { - Type: resource_notebooks_instance_schema.TypeList, - Computed: true, - Optional: true, - ForceNew: true, - Description: `A set of Shielded Instance options. Check [Images using supported Shielded VM features] -Not all combinations are valid`, - MaxItems: 1, - Elem: &resource_notebooks_instance_schema.Resource{ - Schema: map[string]*resource_notebooks_instance_schema.Schema{ - "enable_integrity_monitoring": { - Type: resource_notebooks_instance_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Defines whether the instance has integrity monitoring enabled. Enables monitoring and attestation of the -boot integrity of the instance. The attestation is performed against the integrity policy baseline. -This baseline is initially derived from the implicitly trusted boot image when the instance is created. 
-Enabled by default.`, - Default: true, - }, - "enable_secure_boot": { - Type: resource_notebooks_instance_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Defines whether the instance has Secure Boot enabled. Secure Boot helps ensure that the system only runs -authentic software by verifying the digital signature of all boot components, and halting the boot process -if signature verification fails. -Disabled by default.`, - }, - "enable_vtpm": { - Type: resource_notebooks_instance_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Defines whether the instance has the vTPM enabled. -Enabled by default.`, - Default: true, - }, - }, - }, - }, - "subnet": { - Type: resource_notebooks_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the subnet that this instance is in. -Format: projects/{project_id}/regions/{region}/subnetworks/{subnetwork_id}`, - }, - "tags": { - Type: resource_notebooks_instance_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The Compute Engine tags to add to runtime.`, - Elem: &resource_notebooks_instance_schema.Schema{ - Type: resource_notebooks_instance_schema.TypeString, - }, - }, - "vm_image": { - Type: resource_notebooks_instance_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Use a Compute Engine VM image to start the notebook instance.`, - MaxItems: 1, - Elem: &resource_notebooks_instance_schema.Resource{ - Schema: map[string]*resource_notebooks_instance_schema.Schema{ - "project": { - Type: resource_notebooks_instance_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the Google Cloud project that this VM image belongs to. 
-Format: projects/{project_id}`, - }, - "image_family": { - Type: resource_notebooks_instance_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Use this VM image family to find the image; the newest image in this family will be used.`, - }, - "image_name": { - Type: resource_notebooks_instance_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Use VM image name to find the image.`, - }, - }, - }, - ExactlyOneOf: []string{"vm_image", "container_image"}, - }, - "create_time": { - Type: resource_notebooks_instance_schema.TypeString, - Computed: true, - Optional: true, - Description: `Instance creation time`, - }, - "proxy_uri": { - Type: resource_notebooks_instance_schema.TypeString, - Computed: true, - Description: `The proxy endpoint that is used to access the Jupyter notebook.`, - }, - "state": { - Type: resource_notebooks_instance_schema.TypeString, - Computed: true, - Description: `The state of this instance.`, - }, - "update_time": { - Type: resource_notebooks_instance_schema.TypeString, - Computed: true, - Optional: true, - Description: `Instance update time.`, - }, - "project": { - Type: resource_notebooks_instance_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceNotebooksInstanceCreate(d *resource_notebooks_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - machineTypeProp, err := expandNotebooksInstanceMachineType(d.Get("machine_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("machine_type"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(machineTypeProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, machineTypeProp)) { - obj["machineType"] = machineTypeProp - } - postStartupScriptProp, err := 
expandNotebooksInstancePostStartupScript(d.Get("post_startup_script"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("post_startup_script"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(postStartupScriptProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, postStartupScriptProp)) { - obj["postStartupScript"] = postStartupScriptProp - } - instanceOwnersProp, err := expandNotebooksInstanceInstanceOwners(d.Get("instance_owners"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance_owners"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(instanceOwnersProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, instanceOwnersProp)) { - obj["instanceOwners"] = instanceOwnersProp - } - serviceAccountProp, err := expandNotebooksInstanceServiceAccount(d.Get("service_account"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_account"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(serviceAccountProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, serviceAccountProp)) { - obj["serviceAccount"] = serviceAccountProp - } - serviceAccountScopesProp, err := expandNotebooksInstanceServiceAccountScopes(d.Get("service_account_scopes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_account_scopes"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(serviceAccountScopesProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, serviceAccountScopesProp)) { - obj["serviceAccountScopes"] = serviceAccountScopesProp - } - acceleratorConfigProp, err := expandNotebooksInstanceAcceleratorConfig(d.Get("accelerator_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("accelerator_config"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(acceleratorConfigProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, 
acceleratorConfigProp)) { - obj["acceleratorConfig"] = acceleratorConfigProp - } - shieldedInstanceConfigProp, err := expandNotebooksInstanceShieldedInstanceConfig(d.Get("shielded_instance_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("shielded_instance_config"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(shieldedInstanceConfigProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, shieldedInstanceConfigProp)) { - obj["shieldedInstanceConfig"] = shieldedInstanceConfigProp - } - installGpuDriverProp, err := expandNotebooksInstanceInstallGpuDriver(d.Get("install_gpu_driver"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("install_gpu_driver"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(installGpuDriverProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, installGpuDriverProp)) { - obj["installGpuDriver"] = installGpuDriverProp - } - customGpuDriverPathProp, err := expandNotebooksInstanceCustomGpuDriverPath(d.Get("custom_gpu_driver_path"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("custom_gpu_driver_path"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(customGpuDriverPathProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, customGpuDriverPathProp)) { - obj["customGpuDriverPath"] = customGpuDriverPathProp - } - bootDiskTypeProp, err := expandNotebooksInstanceBootDiskType(d.Get("boot_disk_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("boot_disk_type"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(bootDiskTypeProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, bootDiskTypeProp)) { - obj["bootDiskType"] = bootDiskTypeProp - } - bootDiskSizeGbProp, err := expandNotebooksInstanceBootDiskSizeGb(d.Get("boot_disk_size_gb"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("boot_disk_size_gb"); 
!isEmptyValue(resource_notebooks_instance_reflect.ValueOf(bootDiskSizeGbProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, bootDiskSizeGbProp)) { - obj["bootDiskSizeGb"] = bootDiskSizeGbProp - } - dataDiskTypeProp, err := expandNotebooksInstanceDataDiskType(d.Get("data_disk_type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("data_disk_type"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(dataDiskTypeProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, dataDiskTypeProp)) { - obj["dataDiskType"] = dataDiskTypeProp - } - dataDiskSizeGbProp, err := expandNotebooksInstanceDataDiskSizeGb(d.Get("data_disk_size_gb"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("data_disk_size_gb"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(dataDiskSizeGbProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, dataDiskSizeGbProp)) { - obj["dataDiskSizeGb"] = dataDiskSizeGbProp - } - noRemoveDataDiskProp, err := expandNotebooksInstanceNoRemoveDataDisk(d.Get("no_remove_data_disk"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("no_remove_data_disk"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(noRemoveDataDiskProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, noRemoveDataDiskProp)) { - obj["noRemoveDataDisk"] = noRemoveDataDiskProp - } - diskEncryptionProp, err := expandNotebooksInstanceDiskEncryption(d.Get("disk_encryption"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("disk_encryption"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(diskEncryptionProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, diskEncryptionProp)) { - obj["diskEncryption"] = diskEncryptionProp - } - kmsKeyProp, err := expandNotebooksInstanceKmsKey(d.Get("kms_key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kms_key"); 
!isEmptyValue(resource_notebooks_instance_reflect.ValueOf(kmsKeyProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, kmsKeyProp)) { - obj["kmsKey"] = kmsKeyProp - } - noPublicIpProp, err := expandNotebooksInstanceNoPublicIp(d.Get("no_public_ip"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("no_public_ip"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(noPublicIpProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, noPublicIpProp)) { - obj["noPublicIp"] = noPublicIpProp - } - noProxyAccessProp, err := expandNotebooksInstanceNoProxyAccess(d.Get("no_proxy_access"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("no_proxy_access"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(noProxyAccessProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, noProxyAccessProp)) { - obj["noProxyAccess"] = noProxyAccessProp - } - networkProp, err := expandNotebooksInstanceNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(networkProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - subnetProp, err := expandNotebooksInstanceSubnet(d.Get("subnet"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("subnet"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(subnetProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, subnetProp)) { - obj["subnet"] = subnetProp - } - labelsProp, err := expandNotebooksInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(labelsProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - tagsProp, err := 
expandNotebooksInstanceTags(d.Get("tags"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tags"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(tagsProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, tagsProp)) { - obj["tags"] = tagsProp - } - metadataProp, err := expandNotebooksInstanceMetadata(d.Get("metadata"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(metadataProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, metadataProp)) { - obj["metadata"] = metadataProp - } - vmImageProp, err := expandNotebooksInstanceVmImage(d.Get("vm_image"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("vm_image"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(vmImageProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, vmImageProp)) { - obj["vmImage"] = vmImageProp - } - containerImageProp, err := expandNotebooksInstanceContainerImage(d.Get("container_image"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("container_image"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(containerImageProp)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, containerImageProp)) { - obj["containerImage"] = containerImageProp - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/instances?instanceId={{name}}") - if err != nil { - return err - } - - resource_notebooks_instance_log.Printf("[DEBUG] Creating new Instance: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_notebooks_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, 
"POST", billingProject, url, userAgent, obj, d.Timeout(resource_notebooks_instance_schema.TimeoutCreate)) - if err != nil { - return resource_notebooks_instance_fmt.Errorf("Error creating Instance: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{name}}") - if err != nil { - return resource_notebooks_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = notebooksOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Instance", userAgent, - d.Timeout(resource_notebooks_instance_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_notebooks_instance_fmt.Errorf("Error waiting to create Instance: %s", err) - } - - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{name}}") - if err != nil { - return resource_notebooks_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_notebooks_instance_log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) - - return resourceNotebooksInstanceRead(d, meta) -} - -func resourceNotebooksInstanceRead(d *resource_notebooks_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_notebooks_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, 
resource_notebooks_instance_fmt.Sprintf("NotebooksInstance %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - - if err := d.Set("machine_type", flattenNotebooksInstanceMachineType(res["machineType"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("post_startup_script", flattenNotebooksInstancePostStartupScript(res["postStartupScript"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("proxy_uri", flattenNotebooksInstanceProxyUri(res["proxyUri"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("service_account", flattenNotebooksInstanceServiceAccount(res["serviceAccount"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("service_account_scopes", flattenNotebooksInstanceServiceAccountScopes(res["serviceAccountScopes"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("accelerator_config", flattenNotebooksInstanceAcceleratorConfig(res["acceleratorConfig"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("shielded_instance_config", flattenNotebooksInstanceShieldedInstanceConfig(res["shieldedInstanceConfig"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("state", flattenNotebooksInstanceState(res["state"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("install_gpu_driver", 
flattenNotebooksInstanceInstallGpuDriver(res["installGpuDriver"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("custom_gpu_driver_path", flattenNotebooksInstanceCustomGpuDriverPath(res["customGpuDriverPath"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("data_disk_type", flattenNotebooksInstanceDataDiskType(res["dataDiskType"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("disk_encryption", flattenNotebooksInstanceDiskEncryption(res["diskEncryption"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("kms_key", flattenNotebooksInstanceKmsKey(res["kmsKey"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("no_public_ip", flattenNotebooksInstanceNoPublicIp(res["noPublicIp"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("no_proxy_access", flattenNotebooksInstanceNoProxyAccess(res["noProxyAccess"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("network", flattenNotebooksInstanceNetwork(res["network"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("subnet", flattenNotebooksInstanceSubnet(res["subnet"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("labels", flattenNotebooksInstanceLabels(res["labels"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err 
:= d.Set("tags", flattenNotebooksInstanceTags(res["tags"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("create_time", flattenNotebooksInstanceCreateTime(res["createTime"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("update_time", flattenNotebooksInstanceUpdateTime(res["updateTime"], d, config)); err != nil { - return resource_notebooks_instance_fmt.Errorf("Error reading Instance: %s", err) - } - - return nil -} - -func resourceNotebooksInstanceUpdate(d *resource_notebooks_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_notebooks_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("labels") { - obj := make(map[string]interface{}) - - labelsProp, err := expandNotebooksInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_notebooks_instance_reflect.ValueOf(v)) && (ok || !resource_notebooks_instance_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}:setLabels") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_notebooks_instance_schema.TimeoutUpdate)) - if err != nil { - return resource_notebooks_instance_fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) - } else { 
- resource_notebooks_instance_log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) - } - - err = notebooksOperationWaitTime( - config, res, project, "Updating Instance", userAgent, - d.Timeout(resource_notebooks_instance_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceNotebooksInstanceRead(d, meta) -} - -func resourceNotebooksInstanceDelete(d *resource_notebooks_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_notebooks_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_notebooks_instance_log.Printf("[DEBUG] Deleting Instance %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_notebooks_instance_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Instance") - } - - err = notebooksOperationWaitTime( - config, res, project, "Deleting Instance", userAgent, - d.Timeout(resource_notebooks_instance_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_notebooks_instance_log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) - return nil -} - -func resourceNotebooksInstanceImport(d *resource_notebooks_instance_schema.ResourceData, meta interface{}) ([]*resource_notebooks_instance_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - 
"projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/instances/{{name}}") - if err != nil { - return nil, resource_notebooks_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_notebooks_instance_schema.ResourceData{d}, nil -} - -func flattenNotebooksInstanceMachineType(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenNotebooksInstancePostStartupScript(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceProxyUri(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceServiceAccount(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceServiceAccountScopes(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceAcceleratorConfig(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["type"] = - flattenNotebooksInstanceAcceleratorConfigType(original["type"], d, config) - transformed["core_count"] = - flattenNotebooksInstanceAcceleratorConfigCoreCount(original["coreCount"], d, config) - return []interface{}{transformed} -} - -func flattenNotebooksInstanceAcceleratorConfigType(v interface{}, d 
*resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceAcceleratorConfigCoreCount(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_notebooks_instance_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenNotebooksInstanceShieldedInstanceConfig(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enable_integrity_monitoring"] = - flattenNotebooksInstanceShieldedInstanceConfigEnableIntegrityMonitoring(original["enableIntegrityMonitoring"], d, config) - transformed["enable_secure_boot"] = - flattenNotebooksInstanceShieldedInstanceConfigEnableSecureBoot(original["enableSecureBoot"], d, config) - transformed["enable_vtpm"] = - flattenNotebooksInstanceShieldedInstanceConfigEnableVtpm(original["enableVtpm"], d, config) - return []interface{}{transformed} -} - -func flattenNotebooksInstanceShieldedInstanceConfigEnableIntegrityMonitoring(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceShieldedInstanceConfigEnableSecureBoot(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceShieldedInstanceConfigEnableVtpm(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceState(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v 
-} - -func flattenNotebooksInstanceInstallGpuDriver(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceCustomGpuDriverPath(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceDataDiskType(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceDiskEncryption(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceKmsKey(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceNoPublicIp(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceNoProxyAccess(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceNetwork(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceSubnet(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceLabels(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceTags(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceCreateTime(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNotebooksInstanceUpdateTime(v interface{}, d *resource_notebooks_instance_schema.ResourceData, config *Config) interface{} { - 
return v -} - -func expandNotebooksInstanceMachineType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstancePostStartupScript(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceInstanceOwners(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceServiceAccountScopes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceAcceleratorConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedType, err := expandNotebooksInstanceAcceleratorConfigType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := resource_notebooks_instance_reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - transformedCoreCount, err := expandNotebooksInstanceAcceleratorConfigCoreCount(original["core_count"], d, config) - if err != nil { - return nil, err - } else if val := resource_notebooks_instance_reflect.ValueOf(transformedCoreCount); val.IsValid() && !isEmptyValue(val) { - transformed["coreCount"] = transformedCoreCount - } - - return transformed, nil -} - -func expandNotebooksInstanceAcceleratorConfigType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceAcceleratorConfigCoreCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) 
{ - return v, nil -} - -func expandNotebooksInstanceShieldedInstanceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnableIntegrityMonitoring, err := expandNotebooksInstanceShieldedInstanceConfigEnableIntegrityMonitoring(original["enable_integrity_monitoring"], d, config) - if err != nil { - return nil, err - } else if val := resource_notebooks_instance_reflect.ValueOf(transformedEnableIntegrityMonitoring); val.IsValid() && !isEmptyValue(val) { - transformed["enableIntegrityMonitoring"] = transformedEnableIntegrityMonitoring - } - - transformedEnableSecureBoot, err := expandNotebooksInstanceShieldedInstanceConfigEnableSecureBoot(original["enable_secure_boot"], d, config) - if err != nil { - return nil, err - } else if val := resource_notebooks_instance_reflect.ValueOf(transformedEnableSecureBoot); val.IsValid() && !isEmptyValue(val) { - transformed["enableSecureBoot"] = transformedEnableSecureBoot - } - - transformedEnableVtpm, err := expandNotebooksInstanceShieldedInstanceConfigEnableVtpm(original["enable_vtpm"], d, config) - if err != nil { - return nil, err - } else if val := resource_notebooks_instance_reflect.ValueOf(transformedEnableVtpm); val.IsValid() && !isEmptyValue(val) { - transformed["enableVtpm"] = transformedEnableVtpm - } - - return transformed, nil -} - -func expandNotebooksInstanceShieldedInstanceConfigEnableIntegrityMonitoring(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceShieldedInstanceConfigEnableSecureBoot(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceShieldedInstanceConfigEnableVtpm(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceInstallGpuDriver(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceCustomGpuDriverPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceBootDiskType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceBootDiskSizeGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceDataDiskType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceDataDiskSizeGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceNoRemoveDataDisk(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceDiskEncryption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceKmsKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceNoPublicIp(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceNoProxyAccess(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceSubnet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceLabels(v interface{}, d TerraformResourceData, config *Config) 
(map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandNotebooksInstanceTags(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceMetadata(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandNotebooksInstanceVmImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedProject, err := expandNotebooksInstanceVmImageProject(original["project"], d, config) - if err != nil { - return nil, err - } else if val := resource_notebooks_instance_reflect.ValueOf(transformedProject); val.IsValid() && !isEmptyValue(val) { - transformed["project"] = transformedProject - } - - transformedImageFamily, err := expandNotebooksInstanceVmImageImageFamily(original["image_family"], d, config) - if err != nil { - return nil, err - } else if val := resource_notebooks_instance_reflect.ValueOf(transformedImageFamily); val.IsValid() && !isEmptyValue(val) { - transformed["imageFamily"] = transformedImageFamily - } - - transformedImageName, err := expandNotebooksInstanceVmImageImageName(original["image_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_notebooks_instance_reflect.ValueOf(transformedImageName); val.IsValid() && !isEmptyValue(val) { - transformed["imageName"] = transformedImageName - } - - return transformed, nil -} - -func expandNotebooksInstanceVmImageProject(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceVmImageImageFamily(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceVmImageImageName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceContainerImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRepository, err := expandNotebooksInstanceContainerImageRepository(original["repository"], d, config) - if err != nil { - return nil, err - } else if val := resource_notebooks_instance_reflect.ValueOf(transformedRepository); val.IsValid() && !isEmptyValue(val) { - transformed["repository"] = transformedRepository - } - - transformedTag, err := expandNotebooksInstanceContainerImageTag(original["tag"], d, config) - if err != nil { - return nil, err - } else if val := resource_notebooks_instance_reflect.ValueOf(transformedTag); val.IsValid() && !isEmptyValue(val) { - transformed["tag"] = transformedTag - } - - return transformed, nil -} - -func expandNotebooksInstanceContainerImageRepository(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNotebooksInstanceContainerImageTag(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceNotebooksLocation() *resource_notebooks_location_schema.Resource { - return &resource_notebooks_location_schema.Resource{ - Create: resourceNotebooksLocationCreate, - Read: resourceNotebooksLocationRead, - Update: resourceNotebooksLocationUpdate, - Delete: resourceNotebooksLocationDelete, - - Importer: 
&resource_notebooks_location_schema.ResourceImporter{ - State: resourceNotebooksLocationImport, - }, - - Timeouts: &resource_notebooks_location_schema.ResourceTimeout{ - Create: resource_notebooks_location_schema.DefaultTimeout(4 * resource_notebooks_location_time.Minute), - Update: resource_notebooks_location_schema.DefaultTimeout(4 * resource_notebooks_location_time.Minute), - Delete: resource_notebooks_location_schema.DefaultTimeout(4 * resource_notebooks_location_time.Minute), - }, - - Schema: map[string]*resource_notebooks_location_schema.Schema{ - "name": { - Type: resource_notebooks_location_schema.TypeString, - Optional: true, - Description: `Name of the Location resource.`, - }, - "project": { - Type: resource_notebooks_location_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_notebooks_location_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceNotebooksLocationCreate(d *resource_notebooks_location_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandNotebooksLocationName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_notebooks_location_reflect.ValueOf(nameProp)) && (ok || !resource_notebooks_location_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations") - if err != nil { - return err - } - - resource_notebooks_location_log.Printf("[DEBUG] Creating new Location: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_notebooks_location_fmt.Errorf("Error fetching project for Location: %s", err) - } - billingProject = project - - if bp, err 
:= getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_notebooks_location_schema.TimeoutCreate)) - if err != nil { - return resource_notebooks_location_fmt.Errorf("Error creating Location: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{name}}") - if err != nil { - return resource_notebooks_location_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = notebooksOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Location", userAgent, - d.Timeout(resource_notebooks_location_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_notebooks_location_fmt.Errorf("Error waiting to create Location: %s", err) - } - - if err := d.Set("name", flattenNotebooksLocationName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{name}}") - if err != nil { - return resource_notebooks_location_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_notebooks_location_log.Printf("[DEBUG] Finished creating Location %q: %#v", d.Id(), res) - - return resourceNotebooksLocationRead(d, meta) -} - -func resourceNotebooksLocationRead(d *resource_notebooks_location_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_notebooks_location_fmt.Errorf("Error fetching project for Location: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - 
billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_notebooks_location_fmt.Sprintf("NotebooksLocation %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_notebooks_location_fmt.Errorf("Error reading Location: %s", err) - } - - if err := d.Set("name", flattenNotebooksLocationName(res["name"], d, config)); err != nil { - return resource_notebooks_location_fmt.Errorf("Error reading Location: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_notebooks_location_fmt.Errorf("Error reading Location: %s", err) - } - - return nil -} - -func resourceNotebooksLocationUpdate(d *resource_notebooks_location_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_notebooks_location_fmt.Errorf("Error fetching project for Location: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - nameProp, err := expandNotebooksLocationName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_notebooks_location_reflect.ValueOf(v)) && (ok || !resource_notebooks_location_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{name}}") - if err != nil { - return err - } - - resource_notebooks_location_log.Printf("[DEBUG] Updating Location %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, 
d.Timeout(resource_notebooks_location_schema.TimeoutUpdate)) - - if err != nil { - return resource_notebooks_location_fmt.Errorf("Error updating Location %q: %s", d.Id(), err) - } else { - resource_notebooks_location_log.Printf("[DEBUG] Finished updating Location %q: %#v", d.Id(), res) - } - - err = notebooksOperationWaitTime( - config, res, project, "Updating Location", userAgent, - d.Timeout(resource_notebooks_location_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceNotebooksLocationRead(d, meta) -} - -func resourceNotebooksLocationDelete(d *resource_notebooks_location_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_notebooks_location_fmt.Errorf("Error fetching project for Location: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{NotebooksBasePath}}projects/{{project}}/locations/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_notebooks_location_log.Printf("[DEBUG] Deleting Location %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_notebooks_location_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Location") - } - - err = notebooksOperationWaitTime( - config, res, project, "Deleting Location", userAgent, - d.Timeout(resource_notebooks_location_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_notebooks_location_log.Printf("[DEBUG] Finished deleting Location %q: %#v", d.Id(), res) - return nil -} - -func resourceNotebooksLocationImport(d *resource_notebooks_location_schema.ResourceData, meta interface{}) 
([]*resource_notebooks_location_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{name}}") - if err != nil { - return nil, resource_notebooks_location_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_notebooks_location_schema.ResourceData{d}, nil -} - -func flattenNotebooksLocationName(v interface{}, d *resource_notebooks_location_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func expandNotebooksLocationName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceOrgPolicyPolicy() *resource_org_policy_policy_schema.Resource { - return &resource_org_policy_policy_schema.Resource{ - Create: resourceOrgPolicyPolicyCreate, - Read: resourceOrgPolicyPolicyRead, - Update: resourceOrgPolicyPolicyUpdate, - Delete: resourceOrgPolicyPolicyDelete, - - Importer: &resource_org_policy_policy_schema.ResourceImporter{ - State: resourceOrgPolicyPolicyImport, - }, - - Timeouts: &resource_org_policy_policy_schema.ResourceTimeout{ - Create: resource_org_policy_policy_schema.DefaultTimeout(10 * resource_org_policy_policy_time.Minute), - Update: resource_org_policy_policy_schema.DefaultTimeout(10 * resource_org_policy_policy_time.Minute), - Delete: resource_org_policy_policy_schema.DefaultTimeout(10 * resource_org_policy_policy_time.Minute), - }, - - Schema: map[string]*resource_org_policy_policy_schema.Schema{ - "name": { - Type: resource_org_policy_policy_schema.TypeString, - Required: true, - ForceNew: true, - Description: "Immutable. The resource name of the Policy. 
Must be one of the following forms, where constraint_name is the name of the constraint which this Policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, \"projects/123/policies/compute.disableSerialPortAccess\". Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number.", - }, - - "parent": { - Type: resource_org_policy_policy_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The parent of the resource.", - }, - - "spec": { - Type: resource_org_policy_policy_schema.TypeList, - Optional: true, - Description: "Basic information about the Organization Policy.", - MaxItems: 1, - Elem: OrgPolicyPolicySpecSchema(), - }, - }, - } -} - -func OrgPolicyPolicySpecSchema() *resource_org_policy_policy_schema.Resource { - return &resource_org_policy_policy_schema.Resource{ - Schema: map[string]*resource_org_policy_policy_schema.Schema{ - "inherit_from_parent": { - Type: resource_org_policy_policy_schema.TypeBool, - Optional: true, - Description: "Determines the inheritance behavior for this `Policy`. If `inherit_from_parent` is true, PolicyRules set higher up in the hierarchy (up to the closest root) are inherited and present in the effective policy. If it is false, then no rules are inherited, and this Policy becomes the new root for evaluation. This field can be set only for Policies which configure list constraints.", - }, - - "reset": { - Type: resource_org_policy_policy_schema.TypeBool, - Optional: true, - Description: "Ignores policies set above this resource and restores the `constraint_default` enforcement behavior of the specific `Constraint` at this resource. This field can be set in policies for either list or boolean constraints. 
If set, `rules` must be empty and `inherit_from_parent` must be set to false.", - }, - - "rules": { - Type: resource_org_policy_policy_schema.TypeList, - Optional: true, - Description: "Up to 10 PolicyRules are allowed. In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set `enforced` to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence.", - Elem: OrgPolicyPolicySpecRulesSchema(), - }, - - "etag": { - Type: resource_org_policy_policy_schema.TypeString, - Computed: true, - Description: "An opaque tag indicating the current version of the `Policy`, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the `Policy` is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current `Policy` to use when executing a read-modify-write loop. When the `Policy` is returned from a `GetEffectivePolicy` request, the `etag` will be unset.", - }, - - "update_time": { - Type: resource_org_policy_policy_schema.TypeString, - Computed: true, - Description: "Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that `Policy`.", - }, - }, - } -} - -func OrgPolicyPolicySpecRulesSchema() *resource_org_policy_policy_schema.Resource { - return &resource_org_policy_policy_schema.Resource{ - Schema: map[string]*resource_org_policy_policy_schema.Schema{ - "allow_all": { - Type: resource_org_policy_policy_schema.TypeString, - Optional: true, - Description: "Setting this to true means that all values are allowed. 
This field can be set only in Policies for list constraints.", - }, - - "condition": { - Type: resource_org_policy_policy_schema.TypeList, - Optional: true, - Description: "A condition which determines whether this rule is used in the evaluation of the policy. When set, the `expression` field in the `Expr' must include from 1 to 10 subexpressions, joined by the \"||\" or \"&&\" operators. Each subexpression must be of the form \"resource.matchTag('/tag_key_short_name, 'tag_value_short_name')\". or \"resource.matchTagId('tagKeys/key_id', 'tagValues/value_id')\". where key_name and value_name are the resource names for Label Keys and Values. These names are available from the Tag Manager Service. An example expression is: \"resource.matchTag('123456789/environment, 'prod')\". or \"resource.matchTagId('tagKeys/123', 'tagValues/456')\".", - MaxItems: 1, - Elem: OrgPolicyPolicySpecRulesConditionSchema(), - }, - - "deny_all": { - Type: resource_org_policy_policy_schema.TypeString, - Optional: true, - Description: "Setting this to true means that all values are denied. This field can be set only in Policies for list constraints.", - }, - - "enforce": { - Type: resource_org_policy_policy_schema.TypeString, - Optional: true, - Description: "If `true`, then the `Policy` is enforced. If `false`, then any configuration is acceptable. This field can be set only in Policies for boolean constraints.", - }, - - "values": { - Type: resource_org_policy_policy_schema.TypeList, - Optional: true, - Description: "List of values to be used for this PolicyRule. 
This field can be set only in Policies for list constraints.", - MaxItems: 1, - Elem: OrgPolicyPolicySpecRulesValuesSchema(), - }, - }, - } -} - -func OrgPolicyPolicySpecRulesConditionSchema() *resource_org_policy_policy_schema.Resource { - return &resource_org_policy_policy_schema.Resource{ - Schema: map[string]*resource_org_policy_policy_schema.Schema{ - "description": { - Type: resource_org_policy_policy_schema.TypeString, - Optional: true, - Description: "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", - }, - - "expression": { - Type: resource_org_policy_policy_schema.TypeString, - Optional: true, - Description: "Textual representation of an expression in Common Expression Language syntax.", - }, - - "location": { - Type: resource_org_policy_policy_schema.TypeString, - Optional: true, - Description: "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", - }, - - "title": { - Type: resource_org_policy_policy_schema.TypeString, - Optional: true, - Description: "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. 
in UIs which allow to enter the expression.", - }, - }, - } -} - -func OrgPolicyPolicySpecRulesValuesSchema() *resource_org_policy_policy_schema.Resource { - return &resource_org_policy_policy_schema.Resource{ - Schema: map[string]*resource_org_policy_policy_schema.Schema{ - "allowed_values": { - Type: resource_org_policy_policy_schema.TypeList, - Optional: true, - Description: "List of values allowed at this resource.", - Elem: &resource_org_policy_policy_schema.Schema{Type: resource_org_policy_policy_schema.TypeString}, - }, - - "denied_values": { - Type: resource_org_policy_policy_schema.TypeList, - Optional: true, - Description: "List of values denied at this resource.", - Elem: &resource_org_policy_policy_schema.Schema{Type: resource_org_policy_policy_schema.TypeString}, - }, - }, - } -} - -func resourceOrgPolicyPolicyCreate(d *resource_org_policy_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_org_policy_policy_orgpolicyorgpolicy.Policy{ - Name: resource_org_policy_policy_dcldcl.String(d.Get("name").(string)), - Parent: resource_org_policy_policy_dcldcl.String(d.Get("parent").(string)), - Spec: expandOrgPolicyPolicySpec(d.Get("spec")), - } - - id, err := obj.ID() - if err != nil { - return resource_org_policy_policy_fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - createDirective := CreateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLOrgPolicyClient(config, userAgent, billingProject, d.Timeout(resource_org_policy_policy_schema.TimeoutCreate)) - res, err := client.ApplyPolicy(resource_org_policy_policy_context.Background(), obj, createDirective...) 
- - if _, ok := err.(resource_org_policy_policy_dcldcl.DiffAfterApplyError); ok { - resource_org_policy_policy_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_org_policy_policy_fmt.Errorf("Error creating Policy: %s", err) - } - - resource_org_policy_policy_log.Printf("[DEBUG] Finished creating Policy %q: %#v", d.Id(), res) - - return resourceOrgPolicyPolicyRead(d, meta) -} - -func resourceOrgPolicyPolicyRead(d *resource_org_policy_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_org_policy_policy_orgpolicyorgpolicy.Policy{ - Name: resource_org_policy_policy_dcldcl.String(d.Get("name").(string)), - Parent: resource_org_policy_policy_dcldcl.String(d.Get("parent").(string)), - Spec: expandOrgPolicyPolicySpec(d.Get("spec")), - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLOrgPolicyClient(config, userAgent, billingProject, d.Timeout(resource_org_policy_policy_schema.TimeoutRead)) - res, err := client.GetPolicy(resource_org_policy_policy_context.Background(), obj) - if err != nil { - resourceName := resource_org_policy_policy_fmt.Sprintf("OrgPolicyPolicy %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("name", res.Name); err != nil { - return resource_org_policy_policy_fmt.Errorf("error setting name in state: %s", err) - } - if err = d.Set("parent", res.Parent); err != nil { - return resource_org_policy_policy_fmt.Errorf("error setting parent in state: %s", err) - } - if err = d.Set("spec", flattenOrgPolicyPolicySpec(res.Spec)); err != nil { - return resource_org_policy_policy_fmt.Errorf("error setting spec in state: %s", err) - } - - return nil -} - -func resourceOrgPolicyPolicyUpdate(d 
*resource_org_policy_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_org_policy_policy_orgpolicyorgpolicy.Policy{ - Name: resource_org_policy_policy_dcldcl.String(d.Get("name").(string)), - Parent: resource_org_policy_policy_dcldcl.String(d.Get("parent").(string)), - Spec: expandOrgPolicyPolicySpec(d.Get("spec")), - } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLOrgPolicyClient(config, userAgent, billingProject, d.Timeout(resource_org_policy_policy_schema.TimeoutUpdate)) - res, err := client.ApplyPolicy(resource_org_policy_policy_context.Background(), obj, directive...) - - if _, ok := err.(resource_org_policy_policy_dcldcl.DiffAfterApplyError); ok { - resource_org_policy_policy_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_org_policy_policy_fmt.Errorf("Error updating Policy: %s", err) - } - - resource_org_policy_policy_log.Printf("[DEBUG] Finished creating Policy %q: %#v", d.Id(), res) - - return resourceOrgPolicyPolicyRead(d, meta) -} - -func resourceOrgPolicyPolicyDelete(d *resource_org_policy_policy_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - obj := &resource_org_policy_policy_orgpolicyorgpolicy.Policy{ - Name: resource_org_policy_policy_dcldcl.String(d.Get("name").(string)), - Parent: resource_org_policy_policy_dcldcl.String(d.Get("parent").(string)), - Spec: expandOrgPolicyPolicySpec(d.Get("spec")), - } - - resource_org_policy_policy_log.Printf("[DEBUG] Deleting Policy %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject 
= bp - } - client := NewDCLOrgPolicyClient(config, userAgent, billingProject, d.Timeout(resource_org_policy_policy_schema.TimeoutDelete)) - if err := client.DeletePolicy(resource_org_policy_policy_context.Background(), obj); err != nil { - return resource_org_policy_policy_fmt.Errorf("Error deleting Policy: %s", err) - } - - resource_org_policy_policy_log.Printf("[DEBUG] Finished deleting Policy %q", d.Id()) - return nil -} - -func resourceOrgPolicyPolicyImport(d *resource_org_policy_policy_schema.ResourceData, meta interface{}) ([]*resource_org_policy_policy_schema.ResourceData, error) { - config := meta.(*Config) - - if err := resourceOrgPolicyPolicyCustomImport(d, config); err != nil { - return nil, resource_org_policy_policy_fmt.Errorf("error encountered in import: %v", err) - } - - return []*resource_org_policy_policy_schema.ResourceData{d}, nil -} - -func expandOrgPolicyPolicySpec(o interface{}) *resource_org_policy_policy_orgpolicyorgpolicy.PolicySpec { - if o == nil { - return resource_org_policy_policy_orgpolicyorgpolicy.EmptyPolicySpec - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_org_policy_policy_orgpolicyorgpolicy.EmptyPolicySpec - } - obj := objArr[0].(map[string]interface{}) - return &resource_org_policy_policy_orgpolicyorgpolicy.PolicySpec{ - InheritFromParent: resource_org_policy_policy_dcldcl.Bool(obj["inherit_from_parent"].(bool)), - Reset: resource_org_policy_policy_dcldcl.Bool(obj["reset"].(bool)), - Rules: expandOrgPolicyPolicySpecRulesArray(obj["rules"]), - } -} - -func flattenOrgPolicyPolicySpec(obj *resource_org_policy_policy_orgpolicyorgpolicy.PolicySpec) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "inherit_from_parent": obj.InheritFromParent, - "reset": obj.Reset, - "rules": flattenOrgPolicyPolicySpecRulesArray(obj.Rules), - "etag": obj.Etag, - "update_time": obj.UpdateTime, - } - - return []interface{}{transformed} - -} - -func 
expandOrgPolicyPolicySpecRulesArray(o interface{}) []resource_org_policy_policy_orgpolicyorgpolicy.PolicySpecRules { - if o == nil { - return make([]resource_org_policy_policy_orgpolicyorgpolicy.PolicySpecRules, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 { - return make([]resource_org_policy_policy_orgpolicyorgpolicy.PolicySpecRules, 0) - } - - items := make([]resource_org_policy_policy_orgpolicyorgpolicy.PolicySpecRules, 0, len(objs)) - for _, item := range objs { - i := expandOrgPolicyPolicySpecRules(item) - items = append(items, *i) - } - - return items -} - -func expandOrgPolicyPolicySpecRules(o interface{}) *resource_org_policy_policy_orgpolicyorgpolicy.PolicySpecRules { - if o == nil { - return resource_org_policy_policy_orgpolicyorgpolicy.EmptyPolicySpecRules - } - - obj := o.(map[string]interface{}) - return &resource_org_policy_policy_orgpolicyorgpolicy.PolicySpecRules{ - AllowAll: expandEnumBool(obj["allow_all"].(string)), - Condition: expandOrgPolicyPolicySpecRulesCondition(obj["condition"]), - DenyAll: expandEnumBool(obj["deny_all"].(string)), - Enforce: expandEnumBool(obj["enforce"].(string)), - Values: expandOrgPolicyPolicySpecRulesValues(obj["values"]), - } -} - -func flattenOrgPolicyPolicySpecRulesArray(objs []resource_org_policy_policy_orgpolicyorgpolicy.PolicySpecRules) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenOrgPolicyPolicySpecRules(&item) - items = append(items, i) - } - - return items -} - -func flattenOrgPolicyPolicySpecRules(obj *resource_org_policy_policy_orgpolicyorgpolicy.PolicySpecRules) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "allow_all": flattenEnumBool(obj.AllowAll), - "condition": flattenOrgPolicyPolicySpecRulesCondition(obj.Condition), - "deny_all": flattenEnumBool(obj.DenyAll), - "enforce": flattenEnumBool(obj.Enforce), - "values": 
flattenOrgPolicyPolicySpecRulesValues(obj.Values), - } - - return transformed - -} - -func expandOrgPolicyPolicySpecRulesCondition(o interface{}) *resource_org_policy_policy_orgpolicyorgpolicy.PolicySpecRulesCondition { - if o == nil { - return resource_org_policy_policy_orgpolicyorgpolicy.EmptyPolicySpecRulesCondition - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_org_policy_policy_orgpolicyorgpolicy.EmptyPolicySpecRulesCondition - } - obj := objArr[0].(map[string]interface{}) - return &resource_org_policy_policy_orgpolicyorgpolicy.PolicySpecRulesCondition{ - Description: resource_org_policy_policy_dcldcl.String(obj["description"].(string)), - Expression: resource_org_policy_policy_dcldcl.String(obj["expression"].(string)), - Location: resource_org_policy_policy_dcldcl.String(obj["location"].(string)), - Title: resource_org_policy_policy_dcldcl.String(obj["title"].(string)), - } -} - -func flattenOrgPolicyPolicySpecRulesCondition(obj *resource_org_policy_policy_orgpolicyorgpolicy.PolicySpecRulesCondition) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "description": obj.Description, - "expression": obj.Expression, - "location": obj.Location, - "title": obj.Title, - } - - return []interface{}{transformed} - -} - -func expandOrgPolicyPolicySpecRulesValues(o interface{}) *resource_org_policy_policy_orgpolicyorgpolicy.PolicySpecRulesValues { - if o == nil { - return resource_org_policy_policy_orgpolicyorgpolicy.EmptyPolicySpecRulesValues - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_org_policy_policy_orgpolicyorgpolicy.EmptyPolicySpecRulesValues - } - obj := objArr[0].(map[string]interface{}) - return &resource_org_policy_policy_orgpolicyorgpolicy.PolicySpecRulesValues{ - AllowedValues: expandStringArray(obj["allowed_values"]), - DeniedValues: expandStringArray(obj["denied_values"]), - } -} - -func flattenOrgPolicyPolicySpecRulesValues(obj 
*resource_org_policy_policy_orgpolicyorgpolicy.PolicySpecRulesValues) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "allowed_values": obj.AllowedValues, - "denied_values": obj.DeniedValues, - } - - return []interface{}{transformed} - -} - -func resourceAccessApprovalOrganizationSettings() *resource_organization_access_approval_settings_schema.Resource { - return &resource_organization_access_approval_settings_schema.Resource{ - Create: resourceAccessApprovalOrganizationSettingsCreate, - Read: resourceAccessApprovalOrganizationSettingsRead, - Update: resourceAccessApprovalOrganizationSettingsUpdate, - Delete: resourceAccessApprovalOrganizationSettingsDelete, - - Importer: &resource_organization_access_approval_settings_schema.ResourceImporter{ - State: resourceAccessApprovalOrganizationSettingsImport, - }, - - Timeouts: &resource_organization_access_approval_settings_schema.ResourceTimeout{ - Create: resource_organization_access_approval_settings_schema.DefaultTimeout(4 * resource_organization_access_approval_settings_time.Minute), - Update: resource_organization_access_approval_settings_schema.DefaultTimeout(4 * resource_organization_access_approval_settings_time.Minute), - Delete: resource_organization_access_approval_settings_schema.DefaultTimeout(4 * resource_organization_access_approval_settings_time.Minute), - }, - - Schema: map[string]*resource_organization_access_approval_settings_schema.Schema{ - "enrolled_services": { - Type: resource_organization_access_approval_settings_schema.TypeSet, - Required: true, - Description: `A list of Google Cloud Services for which the given resource has Access Approval enrolled. -Access requests for the resource given by name against any of these services contained here will be required -to have explicit approval. Enrollment can be done for individual services. 
- -A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded.`, - Elem: accessapprovalOrganizationSettingsEnrolledServicesSchema(), - Set: accessApprovalEnrolledServicesHash, - }, - "organization_id": { - Type: resource_organization_access_approval_settings_schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the organization of the access approval settings.`, - }, - "notification_emails": { - Type: resource_organization_access_approval_settings_schema.TypeSet, - Computed: true, - Optional: true, - Description: `A list of email addresses to which notifications relating to approval requests should be sent. -Notifications relating to a resource will be sent to all emails in the settings of ancestor -resources of that resource. A maximum of 50 email addresses are allowed.`, - MaxItems: 50, - Elem: &resource_organization_access_approval_settings_schema.Schema{ - Type: resource_organization_access_approval_settings_schema.TypeString, - }, - Set: resource_organization_access_approval_settings_schema.HashString, - }, - "enrolled_ancestor": { - Type: resource_organization_access_approval_settings_schema.TypeBool, - Computed: true, - Description: `This field will always be unset for the organization since organizations do not have ancestors.`, - }, - "name": { - Type: resource_organization_access_approval_settings_schema.TypeString, - Computed: true, - Description: `The resource name of the settings. 
Format is "organizations/{organization_id}/accessApprovalSettings"`, - }, - }, - UseJSONNumber: true, - } -} - -func accessapprovalOrganizationSettingsEnrolledServicesSchema() *resource_organization_access_approval_settings_schema.Resource { - return &resource_organization_access_approval_settings_schema.Resource{ - Schema: map[string]*resource_organization_access_approval_settings_schema.Schema{ - "cloud_product": { - Type: resource_organization_access_approval_settings_schema.TypeString, - Required: true, - Description: `The product for which Access Approval will be enrolled. Allowed values are listed (case-sensitive): - all - appengine.googleapis.com - bigquery.googleapis.com - bigtable.googleapis.com - cloudkms.googleapis.com - compute.googleapis.com - dataflow.googleapis.com - iam.googleapis.com - pubsub.googleapis.com - storage.googleapis.com`, - }, - "enrollment_level": { - Type: resource_organization_access_approval_settings_schema.TypeString, - Optional: true, - ValidateFunc: resource_organization_access_approval_settings_validation.StringInSlice([]string{"BLOCK_ALL", ""}, false), - Description: `The enrollment level of the service. 
Default value: "BLOCK_ALL" Possible values: ["BLOCK_ALL"]`, - Default: "BLOCK_ALL", - }, - }, - } -} - -func resourceAccessApprovalOrganizationSettingsCreate(d *resource_organization_access_approval_settings_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - notificationEmailsProp, err := expandAccessApprovalOrganizationSettingsNotificationEmails(d.Get("notification_emails"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_emails"); !isEmptyValue(resource_organization_access_approval_settings_reflect.ValueOf(notificationEmailsProp)) && (ok || !resource_organization_access_approval_settings_reflect.DeepEqual(v, notificationEmailsProp)) { - obj["notificationEmails"] = notificationEmailsProp - } - enrolledServicesProp, err := expandAccessApprovalOrganizationSettingsEnrolledServices(d.Get("enrolled_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enrolled_services"); !isEmptyValue(resource_organization_access_approval_settings_reflect.ValueOf(enrolledServicesProp)) && (ok || !resource_organization_access_approval_settings_reflect.DeepEqual(v, enrolledServicesProp)) { - obj["enrolledServices"] = enrolledServicesProp - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}organizations/{{organization_id}}/accessApprovalSettings") - if err != nil { - return err - } - - resource_organization_access_approval_settings_log.Printf("[DEBUG] Creating new OrganizationSettings: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - updateMask := []string{} - - if d.HasChange("notification_emails") { - updateMask = append(updateMask, "notificationEmails") - } - - if d.HasChange("enrolled_services") { - updateMask = append(updateMask, "enrolledServices") - 
} - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_organization_access_approval_settings_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_organization_access_approval_settings_schema.TimeoutCreate)) - if err != nil { - return resource_organization_access_approval_settings_fmt.Errorf("Error creating OrganizationSettings: %s", err) - } - if err := d.Set("name", flattenAccessApprovalOrganizationSettingsName(res["name"], d, config)); err != nil { - return resource_organization_access_approval_settings_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "organizations/{{organization_id}}/accessApprovalSettings") - if err != nil { - return resource_organization_access_approval_settings_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_organization_access_approval_settings_log.Printf("[DEBUG] Finished creating OrganizationSettings %q: %#v", d.Id(), res) - - return resourceAccessApprovalOrganizationSettingsRead(d, meta) -} - -func resourceAccessApprovalOrganizationSettingsRead(d *resource_organization_access_approval_settings_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}organizations/{{organization_id}}/accessApprovalSettings") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_organization_access_approval_settings_fmt.Sprintf("AccessApprovalOrganizationSettings %q", d.Id())) - } - - if err := d.Set("name", 
flattenAccessApprovalOrganizationSettingsName(res["name"], d, config)); err != nil { - return resource_organization_access_approval_settings_fmt.Errorf("Error reading OrganizationSettings: %s", err) - } - if err := d.Set("notification_emails", flattenAccessApprovalOrganizationSettingsNotificationEmails(res["notificationEmails"], d, config)); err != nil { - return resource_organization_access_approval_settings_fmt.Errorf("Error reading OrganizationSettings: %s", err) - } - if err := d.Set("enrolled_services", flattenAccessApprovalOrganizationSettingsEnrolledServices(res["enrolledServices"], d, config)); err != nil { - return resource_organization_access_approval_settings_fmt.Errorf("Error reading OrganizationSettings: %s", err) - } - if err := d.Set("enrolled_ancestor", flattenAccessApprovalOrganizationSettingsEnrolledAncestor(res["enrolledAncestor"], d, config)); err != nil { - return resource_organization_access_approval_settings_fmt.Errorf("Error reading OrganizationSettings: %s", err) - } - - return nil -} - -func resourceAccessApprovalOrganizationSettingsUpdate(d *resource_organization_access_approval_settings_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - notificationEmailsProp, err := expandAccessApprovalOrganizationSettingsNotificationEmails(d.Get("notification_emails"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_emails"); !isEmptyValue(resource_organization_access_approval_settings_reflect.ValueOf(v)) && (ok || !resource_organization_access_approval_settings_reflect.DeepEqual(v, notificationEmailsProp)) { - obj["notificationEmails"] = notificationEmailsProp - } - enrolledServicesProp, err := expandAccessApprovalOrganizationSettingsEnrolledServices(d.Get("enrolled_services"), d, config) - if err != nil { - return err - } 
else if v, ok := d.GetOkExists("enrolled_services"); !isEmptyValue(resource_organization_access_approval_settings_reflect.ValueOf(v)) && (ok || !resource_organization_access_approval_settings_reflect.DeepEqual(v, enrolledServicesProp)) { - obj["enrolledServices"] = enrolledServicesProp - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}organizations/{{organization_id}}/accessApprovalSettings") - if err != nil { - return err - } - - resource_organization_access_approval_settings_log.Printf("[DEBUG] Updating OrganizationSettings %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("notification_emails") { - updateMask = append(updateMask, "notificationEmails") - } - - if d.HasChange("enrolled_services") { - updateMask = append(updateMask, "enrolledServices") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_organization_access_approval_settings_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_organization_access_approval_settings_schema.TimeoutUpdate)) - - if err != nil { - return resource_organization_access_approval_settings_fmt.Errorf("Error updating OrganizationSettings %q: %s", d.Id(), err) - } else { - resource_organization_access_approval_settings_log.Printf("[DEBUG] Finished updating OrganizationSettings %q: %#v", d.Id(), res) - } - - return resourceAccessApprovalOrganizationSettingsRead(d, meta) -} - -func resourceAccessApprovalOrganizationSettingsDelete(d *resource_organization_access_approval_settings_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["notificationEmails"] = []string{} - obj["enrolledServices"] = 
[]string{} - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}organizations/{{organization_id}}/accessApprovalSettings") - if err != nil { - return err - } - - resource_organization_access_approval_settings_log.Printf("[DEBUG] Emptying OrganizationSettings %q: %#v", d.Id(), obj) - updateMask := []string{} - - updateMask = append(updateMask, "notificationEmails") - updateMask = append(updateMask, "enrolledServices") - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_organization_access_approval_settings_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - res, err := sendRequestWithTimeout(config, "PATCH", "", url, userAgent, obj, d.Timeout(resource_organization_access_approval_settings_schema.TimeoutUpdate)) - - if err != nil { - return resource_organization_access_approval_settings_fmt.Errorf("Error emptying OrganizationSettings %q: %s", d.Id(), err) - } else { - resource_organization_access_approval_settings_log.Printf("[DEBUG] Finished emptying OrganizationSettings %q: %#v", d.Id(), res) - } - - return nil -} - -func resourceAccessApprovalOrganizationSettingsImport(d *resource_organization_access_approval_settings_schema.ResourceData, meta interface{}) ([]*resource_organization_access_approval_settings_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "organizations/(?P[^/]+)/accessApprovalSettings", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "organizations/{{organization_id}}/accessApprovalSettings") - if err != nil { - return nil, resource_organization_access_approval_settings_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_organization_access_approval_settings_schema.ResourceData{d}, nil -} - -func flattenAccessApprovalOrganizationSettingsName(v interface{}, d *resource_organization_access_approval_settings_schema.ResourceData, config *Config) interface{} { - 
return v -} - -func flattenAccessApprovalOrganizationSettingsNotificationEmails(v interface{}, d *resource_organization_access_approval_settings_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_organization_access_approval_settings_schema.NewSet(resource_organization_access_approval_settings_schema.HashString, v.([]interface{})) -} - -func flattenAccessApprovalOrganizationSettingsEnrolledServices(v interface{}, d *resource_organization_access_approval_settings_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_organization_access_approval_settings_schema.NewSet(accessApprovalEnrolledServicesHash, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "cloud_product": flattenAccessApprovalOrganizationSettingsEnrolledServicesCloudProduct(original["cloudProduct"], d, config), - "enrollment_level": flattenAccessApprovalOrganizationSettingsEnrolledServicesEnrollmentLevel(original["enrollmentLevel"], d, config), - }) - } - return transformed -} - -func flattenAccessApprovalOrganizationSettingsEnrolledServicesCloudProduct(v interface{}, d *resource_organization_access_approval_settings_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalOrganizationSettingsEnrolledServicesEnrollmentLevel(v interface{}, d *resource_organization_access_approval_settings_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalOrganizationSettingsEnrolledAncestor(v interface{}, d *resource_organization_access_approval_settings_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAccessApprovalOrganizationSettingsNotificationEmails(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = 
v.(*resource_organization_access_approval_settings_schema.Set).List() - return v, nil -} - -func expandAccessApprovalOrganizationSettingsEnrolledServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_organization_access_approval_settings_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCloudProduct, err := expandAccessApprovalOrganizationSettingsEnrolledServicesCloudProduct(original["cloud_product"], d, config) - if err != nil { - return nil, err - } else if val := resource_organization_access_approval_settings_reflect.ValueOf(transformedCloudProduct); val.IsValid() && !isEmptyValue(val) { - transformed["cloudProduct"] = transformedCloudProduct - } - - transformedEnrollmentLevel, err := expandAccessApprovalOrganizationSettingsEnrolledServicesEnrollmentLevel(original["enrollment_level"], d, config) - if err != nil { - return nil, err - } else if val := resource_organization_access_approval_settings_reflect.ValueOf(transformedEnrollmentLevel); val.IsValid() && !isEmptyValue(val) { - transformed["enrollmentLevel"] = transformedEnrollmentLevel - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessApprovalOrganizationSettingsEnrolledServicesCloudProduct(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessApprovalOrganizationSettingsEnrolledServicesEnrollmentLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceOSConfigPatchDeployment() *resource_os_config_patch_deployment_schema.Resource { - return &resource_os_config_patch_deployment_schema.Resource{ - Create: resourceOSConfigPatchDeploymentCreate, - Read: resourceOSConfigPatchDeploymentRead, - Delete: 
resourceOSConfigPatchDeploymentDelete, - - Importer: &resource_os_config_patch_deployment_schema.ResourceImporter{ - State: resourceOSConfigPatchDeploymentImport, - }, - - Timeouts: &resource_os_config_patch_deployment_schema.ResourceTimeout{ - Create: resource_os_config_patch_deployment_schema.DefaultTimeout(4 * resource_os_config_patch_deployment_time.Minute), - Delete: resource_os_config_patch_deployment_schema.DefaultTimeout(4 * resource_os_config_patch_deployment_time.Minute), - }, - - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "instance_filter": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Required: true, - ForceNew: true, - Description: `VM instances to patch.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "all": { - Type: resource_os_config_patch_deployment_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Target all VM instances in the project. If true, no other criteria is permitted.`, - AtLeastOneOf: []string{"instance_filter.0.all", "instance_filter.0.group_labels", "instance_filter.0.zones", "instance_filter.0.instances", "instance_filter.0.instance_name_prefixes"}, - }, - "group_labels": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Targets VM instances matching ANY of these GroupLabels. 
This allows targeting of disparate groups of VM instances.`, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "labels": { - Type: resource_os_config_patch_deployment_schema.TypeMap, - Required: true, - ForceNew: true, - Description: `Compute Engine instance labels that must be present for a VM instance to be targeted by this filter`, - Elem: &resource_os_config_patch_deployment_schema.Schema{Type: resource_os_config_patch_deployment_schema.TypeString}, - }, - }, - }, - AtLeastOneOf: []string{"instance_filter.0.all", "instance_filter.0.group_labels", "instance_filter.0.zones", "instance_filter.0.instances", "instance_filter.0.instance_name_prefixes"}, - }, - "instance_name_prefixes": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Targets VMs whose name starts with one of these prefixes. Similar to labels, this is another way to group -VMs when targeting configs, for example prefix="prod-".`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeString, - }, - AtLeastOneOf: []string{"instance_filter.0.all", "instance_filter.0.group_labels", "instance_filter.0.zones", "instance_filter.0.instances", "instance_filter.0.instance_name_prefixes"}, - }, - "instances": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Targets any of the VM instances specified. 
Instances are specified by their URI in the 'form zones/{{zone}}/instances/{{instance_name}}', -'projects/{{project_id}}/zones/{{zone}}/instances/{{instance_name}}', or -'https://www.googleapis.com/compute/v1/projects/{{project_id}}/zones/{{zone}}/instances/{{instance_name}}'`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeString, - }, - AtLeastOneOf: []string{"instance_filter.0.all", "instance_filter.0.group_labels", "instance_filter.0.zones", "instance_filter.0.instances", "instance_filter.0.instance_name_prefixes"}, - }, - "zones": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Targets VM instances in ANY of these zones. Leave empty to target VM instances in any zone.`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeString, - }, - AtLeastOneOf: []string{"instance_filter.0.all", "instance_filter.0.group_labels", "instance_filter.0.zones", "instance_filter.0.instances", "instance_filter.0.instance_name_prefixes"}, - }, - }, - }, - }, - "patch_deployment_id": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`(?:(?:[-a-z0-9]{1,63}\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))`), - Description: `A name for the patch deployment in the project. When creating a name the following rules apply: -* Must contain only lowercase letters, numbers, and hyphens. -* Must start with a letter. -* Must be between 1-63 characters. -* Must end with a number or a letter. -* Must be unique within the project.`, - }, - "description": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Description of the patch deployment. 
Length of the description is limited to 1024 characters.`, - }, - "duration": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Duration of the patch. After the duration ends, the patch times out. -A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s"`, - }, - "one_time_schedule": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Schedule a one-time execution.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "execute_time": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The desired patch job execution time. A timestamp in RFC3339 UTC "Zulu" format, -accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - ExactlyOneOf: []string{"one_time_schedule", "recurring_schedule"}, - }, - "patch_config": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Patch configuration that is applied.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "apt": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Apt update settings. Use this setting to override the default apt patch rules.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "excludes": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `List of packages to exclude from update. 
These packages will be excluded.`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeString, - }, - AtLeastOneOf: []string{"patch_config.0.apt.0.type", "patch_config.0.apt.0.excludes", "patch_config.0.apt.0.exclusive_packages"}, - }, - "exclusive_packages": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `An exclusive list of packages to be updated. These are the only packages that will be updated. -If these packages are not installed, they will be ignored. This field cannot be specified with -any other patch configuration fields.`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeString, - }, - AtLeastOneOf: []string{"patch_config.0.apt.0.type", "patch_config.0.apt.0.excludes", "patch_config.0.apt.0.exclusive_packages"}, - }, - "type": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.StringInSlice([]string{"DIST", "UPGRADE", ""}, false), - Description: `By changing the type to DIST, the patching is performed using apt-get dist-upgrade instead. Possible values: ["DIST", "UPGRADE"]`, - AtLeastOneOf: []string{"patch_config.0.apt.0.type", "patch_config.0.apt.0.excludes", "patch_config.0.apt.0.exclusive_packages"}, - }, - }, - }, - AtLeastOneOf: []string{"patch_config.0.reboot_config", "patch_config.0.apt", "patch_config.0.yum", "patch_config.0.goo", "patch_config.0.zypper", "patch_config.0.windows_update", "patch_config.0.pre_step", "patch_config.0.post_step"}, - }, - "goo": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `goo update settings. 
Use this setting to override the default goo patch rules.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "enabled": { - Type: resource_os_config_patch_deployment_schema.TypeBool, - Required: true, - ForceNew: true, - Description: `goo update settings. Use this setting to override the default goo patch rules.`, - }, - }, - }, - AtLeastOneOf: []string{"patch_config.0.reboot_config", "patch_config.0.apt", "patch_config.0.yum", "patch_config.0.goo", "patch_config.0.zypper", "patch_config.0.windows_update", "patch_config.0.pre_step", "patch_config.0.post_step"}, - }, - "post_step": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The ExecStep to run after the patch update.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "linux_exec_step_config": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The ExecStepConfig for all Linux VMs targeted by the PatchJob.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "allowed_success_codes": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Defaults to [0]. 
A list of possible return values that the execution can return to indicate a success.`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeInt, - }, - }, - "gcs_object": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A Cloud Storage object containing the executable.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "bucket": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Bucket of the Cloud Storage object.`, - }, - "generation_number": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Generation number of the Cloud Storage object. This is used to ensure that the ExecStep specified by this PatchJob does not change.`, - }, - "object": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the Cloud Storage object.`, - }, - }, - }, - ExactlyOneOf: []string{"patch_config.0.post_step.0.linux_exec_step_config.0.local_path", "patch_config.0.post_step.0.linux_exec_step_config.0.gcs_object"}, - }, - "interpreter": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.StringInSlice([]string{"SHELL", "POWERSHELL", ""}, false), - Description: `The script interpreter to use to run the script. If no interpreter is specified the script will -be executed directly, which will likely only succeed for scripts with shebang lines. 
Possible values: ["SHELL", "POWERSHELL"]`, - }, - "local_path": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An absolute path to the executable on the VM.`, - ExactlyOneOf: []string{"patch_config.0.post_step.0.linux_exec_step_config.0.local_path", "patch_config.0.post_step.0.linux_exec_step_config.0.gcs_object"}, - }, - }, - }, - AtLeastOneOf: []string{"patch_config.0.post_step.0.linux_exec_step_config", "patch_config.0.post_step.0.windows_exec_step_config"}, - }, - "windows_exec_step_config": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The ExecStepConfig for all Windows VMs targeted by the PatchJob.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "allowed_success_codes": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Defaults to [0]. A list of possible return values that the execution can return to indicate a success.`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeInt, - }, - }, - "gcs_object": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A Cloud Storage object containing the executable.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "bucket": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Bucket of the Cloud Storage object.`, - }, - "generation_number": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Generation number of the Cloud Storage object. 
This is used to ensure that the ExecStep specified by this PatchJob does not change.`, - }, - "object": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the Cloud Storage object.`, - }, - }, - }, - ExactlyOneOf: []string{"patch_config.0.post_step.0.windows_exec_step_config.0.local_path", "patch_config.0.post_step.0.windows_exec_step_config.0.gcs_object"}, - }, - "interpreter": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.StringInSlice([]string{"SHELL", "POWERSHELL", ""}, false), - Description: `The script interpreter to use to run the script. If no interpreter is specified the script will -be executed directly, which will likely only succeed for scripts with shebang lines. Possible values: ["SHELL", "POWERSHELL"]`, - }, - "local_path": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An absolute path to the executable on the VM.`, - ExactlyOneOf: []string{"patch_config.0.post_step.0.windows_exec_step_config.0.local_path", "patch_config.0.post_step.0.windows_exec_step_config.0.gcs_object"}, - }, - }, - }, - AtLeastOneOf: []string{"patch_config.0.post_step.0.linux_exec_step_config", "patch_config.0.post_step.0.windows_exec_step_config"}, - }, - }, - }, - AtLeastOneOf: []string{"patch_config.0.reboot_config", "patch_config.0.apt", "patch_config.0.yum", "patch_config.0.goo", "patch_config.0.zypper", "patch_config.0.windows_update", "patch_config.0.pre_step", "patch_config.0.post_step"}, - }, - "pre_step": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The ExecStep to run before the patch update.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ 
- "linux_exec_step_config": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The ExecStepConfig for all Linux VMs targeted by the PatchJob.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "allowed_success_codes": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Defaults to [0]. A list of possible return values that the execution can return to indicate a success.`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeInt, - }, - }, - "gcs_object": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A Cloud Storage object containing the executable.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "bucket": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Bucket of the Cloud Storage object.`, - }, - "generation_number": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Generation number of the Cloud Storage object. 
This is used to ensure that the ExecStep specified by this PatchJob does not change.`, - }, - "object": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the Cloud Storage object.`, - }, - }, - }, - ExactlyOneOf: []string{"patch_config.0.pre_step.0.linux_exec_step_config.0.local_path", "patch_config.0.pre_step.0.linux_exec_step_config.0.gcs_object"}, - }, - "interpreter": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.StringInSlice([]string{"SHELL", "POWERSHELL", ""}, false), - Description: `The script interpreter to use to run the script. If no interpreter is specified the script will -be executed directly, which will likely only succeed for scripts with shebang lines. Possible values: ["SHELL", "POWERSHELL"]`, - }, - "local_path": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An absolute path to the executable on the VM.`, - ExactlyOneOf: []string{"patch_config.0.pre_step.0.linux_exec_step_config.0.local_path", "patch_config.0.pre_step.0.linux_exec_step_config.0.gcs_object"}, - }, - }, - }, - AtLeastOneOf: []string{"patch_config.0.pre_step.0.linux_exec_step_config", "patch_config.0.pre_step.0.windows_exec_step_config"}, - }, - "windows_exec_step_config": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The ExecStepConfig for all Windows VMs targeted by the PatchJob.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "allowed_success_codes": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Defaults to [0]. 
A list of possible return values that the execution can return to indicate a success.`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeInt, - }, - }, - "gcs_object": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `A Cloud Storage object containing the executable.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "bucket": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Bucket of the Cloud Storage object.`, - }, - "generation_number": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Generation number of the Cloud Storage object. This is used to ensure that the ExecStep specified by this PatchJob does not change.`, - }, - "object": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Name of the Cloud Storage object.`, - }, - }, - }, - ExactlyOneOf: []string{"patch_config.0.pre_step.0.windows_exec_step_config.0.local_path", "patch_config.0.pre_step.0.windows_exec_step_config.0.gcs_object"}, - }, - "interpreter": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.StringInSlice([]string{"SHELL", "POWERSHELL", ""}, false), - Description: `The script interpreter to use to run the script. If no interpreter is specified the script will -be executed directly, which will likely only succeed for scripts with shebang lines. 
Possible values: ["SHELL", "POWERSHELL"]`, - }, - "local_path": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `An absolute path to the executable on the VM.`, - ExactlyOneOf: []string{"patch_config.0.pre_step.0.windows_exec_step_config.0.local_path", "patch_config.0.pre_step.0.windows_exec_step_config.0.gcs_object"}, - }, - }, - }, - AtLeastOneOf: []string{"patch_config.0.pre_step.0.linux_exec_step_config", "patch_config.0.pre_step.0.windows_exec_step_config"}, - }, - }, - }, - AtLeastOneOf: []string{"patch_config.0.reboot_config", "patch_config.0.apt", "patch_config.0.yum", "patch_config.0.goo", "patch_config.0.zypper", "patch_config.0.windows_update", "patch_config.0.pre_step", "patch_config.0.post_step"}, - }, - "reboot_config": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.StringInSlice([]string{"DEFAULT", "ALWAYS", "NEVER", ""}, false), - Description: `Post-patch reboot settings. Possible values: ["DEFAULT", "ALWAYS", "NEVER"]`, - AtLeastOneOf: []string{"patch_config.0.reboot_config", "patch_config.0.apt", "patch_config.0.yum", "patch_config.0.goo", "patch_config.0.zypper", "patch_config.0.windows_update", "patch_config.0.pre_step", "patch_config.0.post_step"}, - }, - "windows_update": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Windows update settings. Use this setting to override the default Windows patch rules.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "classifications": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Only apply updates of these windows update classifications. If empty, all updates are applied. 
Possible values: ["CRITICAL", "SECURITY", "DEFINITION", "DRIVER", "FEATURE_PACK", "SERVICE_PACK", "TOOL", "UPDATE_ROLLUP", "UPDATE"]`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeString, - ValidateFunc: resource_os_config_patch_deployment_validation.StringInSlice([]string{"CRITICAL", "SECURITY", "DEFINITION", "DRIVER", "FEATURE_PACK", "SERVICE_PACK", "TOOL", "UPDATE_ROLLUP", "UPDATE"}, false), - }, - ExactlyOneOf: []string{"patch_config.0.windows_update.0.classifications", "patch_config.0.windows_update.0.excludes", "patch_config.0.windows_update.0.exclusive_patches"}, - }, - "excludes": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `List of KBs to exclude from update.`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeString, - }, - ExactlyOneOf: []string{"patch_config.0.windows_update.0.classifications", "patch_config.0.windows_update.0.excludes", "patch_config.0.windows_update.0.exclusive_patches"}, - }, - "exclusive_patches": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `An exclusive list of kbs to be updated. These are the only patches that will be updated. 
-This field must not be used with other patch configurations.`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeString, - }, - ExactlyOneOf: []string{"patch_config.0.windows_update.0.classifications", "patch_config.0.windows_update.0.excludes", "patch_config.0.windows_update.0.exclusive_patches"}, - }, - }, - }, - AtLeastOneOf: []string{"patch_config.0.reboot_config", "patch_config.0.apt", "patch_config.0.yum", "patch_config.0.goo", "patch_config.0.zypper", "patch_config.0.windows_update", "patch_config.0.pre_step", "patch_config.0.post_step"}, - }, - "yum": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Yum update settings. Use this setting to override the default yum patch rules.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "excludes": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `List of packages to exclude from update. These packages will be excluded.`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeString, - }, - AtLeastOneOf: []string{"patch_config.0.yum.0.security", "patch_config.0.yum.0.minimal", "patch_config.0.yum.0.excludes", "patch_config.0.yum.0.exclusive_packages"}, - }, - "exclusive_packages": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `An exclusive list of packages to be updated. These are the only packages that will be updated. -If these packages are not installed, they will be ignored. 
This field cannot be specified with -any other patch configuration fields.`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeString, - }, - AtLeastOneOf: []string{"patch_config.0.yum.0.security", "patch_config.0.yum.0.minimal", "patch_config.0.yum.0.excludes", "patch_config.0.yum.0.exclusive_packages"}, - }, - "minimal": { - Type: resource_os_config_patch_deployment_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Will cause patch to run yum update-minimal instead.`, - AtLeastOneOf: []string{"patch_config.0.yum.0.security", "patch_config.0.yum.0.minimal", "patch_config.0.yum.0.excludes", "patch_config.0.yum.0.exclusive_packages"}, - }, - "security": { - Type: resource_os_config_patch_deployment_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Adds the --security flag to yum update. Not supported on all platforms.`, - AtLeastOneOf: []string{"patch_config.0.yum.0.security", "patch_config.0.yum.0.minimal", "patch_config.0.yum.0.excludes", "patch_config.0.yum.0.exclusive_packages"}, - }, - }, - }, - AtLeastOneOf: []string{"patch_config.0.reboot_config", "patch_config.0.apt", "patch_config.0.yum", "patch_config.0.goo", "patch_config.0.zypper", "patch_config.0.windows_update", "patch_config.0.pre_step", "patch_config.0.post_step"}, - }, - "zypper": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `zypper update settings. Use this setting to override the default zypper patch rules.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "categories": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Install only patches with these categories. 
Common categories include security, recommended, and feature.`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeString, - }, - AtLeastOneOf: []string{"patch_config.0.zypper.0.with_optional", "patch_config.0.zypper.0.with_update", "patch_config.0.zypper.0.categories", "patch_config.0.zypper.0.severities", "patch_config.0.zypper.0.excludes", "patch_config.0.zypper.0.exclusive_patches"}, - }, - "excludes": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `List of packages to exclude from update.`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeString, - }, - AtLeastOneOf: []string{"patch_config.0.zypper.0.with_optional", "patch_config.0.zypper.0.with_update", "patch_config.0.zypper.0.categories", "patch_config.0.zypper.0.severities", "patch_config.0.zypper.0.excludes", "patch_config.0.zypper.0.exclusive_patches"}, - }, - "exclusive_patches": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `An exclusive list of patches to be updated. These are the only patches that will be installed using 'zypper patch patch:' command. -This field must not be used with any other patch configuration fields.`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeString, - }, - AtLeastOneOf: []string{"patch_config.0.zypper.0.with_optional", "patch_config.0.zypper.0.with_update", "patch_config.0.zypper.0.categories", "patch_config.0.zypper.0.severities", "patch_config.0.zypper.0.excludes", "patch_config.0.zypper.0.exclusive_patches"}, - }, - "severities": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Install only patches with these severities. 
Common severities include critical, important, moderate, and low.`, - Elem: &resource_os_config_patch_deployment_schema.Schema{ - Type: resource_os_config_patch_deployment_schema.TypeString, - }, - AtLeastOneOf: []string{"patch_config.0.zypper.0.with_optional", "patch_config.0.zypper.0.with_update", "patch_config.0.zypper.0.categories", "patch_config.0.zypper.0.severities", "patch_config.0.zypper.0.excludes", "patch_config.0.zypper.0.exclusive_patches"}, - }, - "with_optional": { - Type: resource_os_config_patch_deployment_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Adds the --with-optional flag to zypper patch.`, - AtLeastOneOf: []string{"patch_config.0.zypper.0.with_optional", "patch_config.0.zypper.0.with_update", "patch_config.0.zypper.0.categories", "patch_config.0.zypper.0.severities", "patch_config.0.zypper.0.excludes", "patch_config.0.zypper.0.exclusive_patches"}, - }, - "with_update": { - Type: resource_os_config_patch_deployment_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Adds the --with-update flag, to zypper patch.`, - AtLeastOneOf: []string{"patch_config.0.zypper.0.with_optional", "patch_config.0.zypper.0.with_update", "patch_config.0.zypper.0.categories", "patch_config.0.zypper.0.severities", "patch_config.0.zypper.0.excludes", "patch_config.0.zypper.0.exclusive_patches"}, - }, - }, - }, - AtLeastOneOf: []string{"patch_config.0.reboot_config", "patch_config.0.apt", "patch_config.0.yum", "patch_config.0.goo", "patch_config.0.zypper", "patch_config.0.windows_update", "patch_config.0.pre_step", "patch_config.0.post_step"}, - }, - }, - }, - }, - "recurring_schedule": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Schedule recurring executions.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "time_of_day": { - Type: 
resource_os_config_patch_deployment_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Time of the day to run a recurring deployment.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "hours": { - Type: resource_os_config_patch_deployment_schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.IntBetween(0, 23), - Description: `Hours of day in 24 hour format. Should be from 0 to 23. -An API may choose to allow the value "24:00:00" for scenarios like business closing time.`, - AtLeastOneOf: []string{"recurring_schedule.0.time_of_day.0.hours", "recurring_schedule.0.time_of_day.0.minutes", "recurring_schedule.0.time_of_day.0.seconds", "recurring_schedule.0.time_of_day.0.nanos"}, - }, - "minutes": { - Type: resource_os_config_patch_deployment_schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.IntBetween(0, 59), - Description: `Minutes of hour of day. Must be from 0 to 59.`, - AtLeastOneOf: []string{"recurring_schedule.0.time_of_day.0.hours", "recurring_schedule.0.time_of_day.0.minutes", "recurring_schedule.0.time_of_day.0.seconds", "recurring_schedule.0.time_of_day.0.nanos"}, - }, - "nanos": { - Type: resource_os_config_patch_deployment_schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.IntBetween(0, 999999999), - Description: `Fractions of seconds in nanoseconds. 
Must be from 0 to 999,999,999.`, - AtLeastOneOf: []string{"recurring_schedule.0.time_of_day.0.hours", "recurring_schedule.0.time_of_day.0.minutes", "recurring_schedule.0.time_of_day.0.seconds", "recurring_schedule.0.time_of_day.0.nanos"}, - }, - "seconds": { - Type: resource_os_config_patch_deployment_schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.IntBetween(0, 60), - Description: `Seconds of minutes of the time. Must normally be from 0 to 59. An API may allow the value 60 if it allows leap-seconds.`, - AtLeastOneOf: []string{"recurring_schedule.0.time_of_day.0.hours", "recurring_schedule.0.time_of_day.0.minutes", "recurring_schedule.0.time_of_day.0.seconds", "recurring_schedule.0.time_of_day.0.nanos"}, - }, - }, - }, - }, - "time_zone": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Defines the time zone that timeOfDay is relative to. The rules for daylight saving time are -determined by the chosen time zone.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "id": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - Description: `IANA Time Zone Database time zone, e.g. "America/New_York".`, - }, - "version": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `IANA Time Zone Database version number, e.g. "2019a".`, - }, - }, - }, - }, - "end_time": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The end time at which a recurring patch deployment schedule is no longer active. -A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. 
Example: "2014-10-02T15:01:23.045123456Z".`, - }, - "monthly": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Schedule with monthly executions.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "month_day": { - Type: resource_os_config_patch_deployment_schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.IntBetween(-1, 31), - Description: `One day of the month. 1-31 indicates the 1st to the 31st day. -1 indicates the last day of the month. -Months without the target day will be skipped. For example, a schedule to run "every month on the 31st" -will not run in February, April, June, etc.`, - ExactlyOneOf: []string{"recurring_schedule.0.monthly.0.week_day_of_month", "recurring_schedule.0.monthly.0.month_day"}, - }, - "week_day_of_month": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Week day in a month.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "day_of_week": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.StringInSlice([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}, false), - Description: `A day of the week. Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, - }, - "week_ordinal": { - Type: resource_os_config_patch_deployment_schema.TypeInt, - Required: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.IntBetween(-1, 4), - Description: `Week number in a month. 1-4 indicates the 1st to 4th week of the month. 
-1 indicates the last week of the month.`, - }, - }, - }, - ExactlyOneOf: []string{"recurring_schedule.0.monthly.0.week_day_of_month", "recurring_schedule.0.monthly.0.month_day"}, - }, - }, - }, - ExactlyOneOf: []string{"recurring_schedule.0.weekly", "recurring_schedule.0.monthly"}, - }, - "start_time": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The time that the recurring schedule becomes effective. Defaults to createTime of the patch deployment. -A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".`, - }, - "weekly": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Schedule with weekly executions.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "day_of_week": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.StringInSlice([]string{"MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"}, false), - Description: `IANA Time Zone Database time zone, e.g. "America/New_York". Possible values: ["MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY", "SUNDAY"]`, - }, - }, - }, - ExactlyOneOf: []string{"recurring_schedule.0.weekly", "recurring_schedule.0.monthly"}, - }, - "last_execute_time": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Computed: true, - Description: `The time the last patch job ran successfully. -A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".`, - }, - "next_execute_time": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Computed: true, - Description: `The time the next patch job is scheduled to run. 
-A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".`, - }, - }, - }, - ExactlyOneOf: []string{"one_time_schedule", "recurring_schedule"}, - }, - "rollout": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Rollout strategy of the patch job.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "disruption_budget": { - Type: resource_os_config_patch_deployment_schema.TypeList, - Required: true, - ForceNew: true, - Description: `The maximum number (or percentage) of VMs per zone to disrupt at any given moment. The number of VMs calculated from multiplying the percentage by the total number of VMs in a zone is rounded up. -During patching, a VM is considered disrupted from the time the agent is notified to begin until patching has completed. This disruption time includes the time to complete reboot and any post-patch steps. -A VM contributes to the disruption budget if its patching operation fails either when applying the patches, running pre or post patch steps, or if it fails to respond with a success notification before timing out. VMs that are not running or do not have an active agent do not count toward this disruption budget. -For zone-by-zone rollouts, if the disruption budget in a zone is exceeded, the patch job stops, because continuing to the next zone requires completion of the patch process in the previous zone. -For example, if the disruption budget has a fixed value of 10, and 8 VMs fail to patch in the current zone, the patch job continues to patch 2 VMs at a time until the zone is completed. When that zone is completed successfully, patching begins with 10 VMs at a time in the next zone. 
If 10 VMs in the next zone fail to patch, the patch job stops.`, - MaxItems: 1, - Elem: &resource_os_config_patch_deployment_schema.Resource{ - Schema: map[string]*resource_os_config_patch_deployment_schema.Schema{ - "fixed": { - Type: resource_os_config_patch_deployment_schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.IntAtLeast(1), - Description: `Specifies a fixed value.`, - ExactlyOneOf: []string{"rollout.0.disruption_budget.0.fixed", "rollout.0.disruption_budget.0.percentage"}, - }, - "percentage": { - Type: resource_os_config_patch_deployment_schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.IntBetween(0, 100), - Description: `Specifies the relative value defined as a percentage, which will be multiplied by a reference value.`, - ExactlyOneOf: []string{"rollout.0.disruption_budget.0.fixed", "rollout.0.disruption_budget.0.percentage"}, - }, - }, - }, - }, - "mode": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_os_config_patch_deployment_validation.StringInSlice([]string{"ZONE_BY_ZONE", "CONCURRENT_ZONES"}, false), - Description: `Mode of the patch rollout. Possible values: ["ZONE_BY_ZONE", "CONCURRENT_ZONES"]`, - }, - }, - }, - }, - "create_time": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Computed: true, - Description: `Time the patch deployment was created. Timestamp is in RFC3339 text format. -A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".`, - }, - "last_execute_time": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Computed: true, - Description: `The last time a patch job was started by this deployment. Timestamp is in RFC3339 text format. -A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. 
Example: "2014-10-02T15:01:23.045123456Z".`, - }, - "name": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Computed: true, - Description: `Unique name for the patch deployment resource in a project. -The patch deployment name is in the form: projects/{project_id}/patchDeployments/{patchDeploymentId}.`, - }, - "update_time": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Computed: true, - Description: `Time the patch deployment was last updated. Timestamp is in RFC3339 text format. -A timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds. Example: "2014-10-02T15:01:23.045123456Z".`, - }, - "project": { - Type: resource_os_config_patch_deployment_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceOSConfigPatchDeploymentCreate(d *resource_os_config_patch_deployment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandOSConfigPatchDeploymentDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_os_config_patch_deployment_reflect.ValueOf(descriptionProp)) && (ok || !resource_os_config_patch_deployment_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - instanceFilterProp, err := expandOSConfigPatchDeploymentInstanceFilter(d.Get("instance_filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance_filter"); !isEmptyValue(resource_os_config_patch_deployment_reflect.ValueOf(instanceFilterProp)) && (ok || !resource_os_config_patch_deployment_reflect.DeepEqual(v, instanceFilterProp)) { - obj["instanceFilter"] = instanceFilterProp - } - patchConfigProp, err := 
expandOSConfigPatchDeploymentPatchConfig(d.Get("patch_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("patch_config"); !isEmptyValue(resource_os_config_patch_deployment_reflect.ValueOf(patchConfigProp)) && (ok || !resource_os_config_patch_deployment_reflect.DeepEqual(v, patchConfigProp)) { - obj["patchConfig"] = patchConfigProp - } - durationProp, err := expandOSConfigPatchDeploymentDuration(d.Get("duration"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("duration"); !isEmptyValue(resource_os_config_patch_deployment_reflect.ValueOf(durationProp)) && (ok || !resource_os_config_patch_deployment_reflect.DeepEqual(v, durationProp)) { - obj["duration"] = durationProp - } - oneTimeScheduleProp, err := expandOSConfigPatchDeploymentOneTimeSchedule(d.Get("one_time_schedule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("one_time_schedule"); !isEmptyValue(resource_os_config_patch_deployment_reflect.ValueOf(oneTimeScheduleProp)) && (ok || !resource_os_config_patch_deployment_reflect.DeepEqual(v, oneTimeScheduleProp)) { - obj["oneTimeSchedule"] = oneTimeScheduleProp - } - recurringScheduleProp, err := expandOSConfigPatchDeploymentRecurringSchedule(d.Get("recurring_schedule"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("recurring_schedule"); !isEmptyValue(resource_os_config_patch_deployment_reflect.ValueOf(recurringScheduleProp)) && (ok || !resource_os_config_patch_deployment_reflect.DeepEqual(v, recurringScheduleProp)) { - obj["recurringSchedule"] = recurringScheduleProp - } - rolloutProp, err := expandOSConfigPatchDeploymentRollout(d.Get("rollout"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("rollout"); !isEmptyValue(resource_os_config_patch_deployment_reflect.ValueOf(rolloutProp)) && (ok || !resource_os_config_patch_deployment_reflect.DeepEqual(v, rolloutProp)) { - obj["rollout"] = rolloutProp - } - - obj, 
err = resourceOSConfigPatchDeploymentEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{OSConfigBasePath}}projects/{{project}}/patchDeployments?patchDeploymentId={{patch_deployment_id}}") - if err != nil { - return err - } - - resource_os_config_patch_deployment_log.Printf("[DEBUG] Creating new PatchDeployment: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error fetching project for PatchDeployment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_os_config_patch_deployment_schema.TimeoutCreate)) - if err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error creating PatchDeployment: %s", err) - } - if err := d.Set("name", flattenOSConfigPatchDeploymentName(res["name"], d, config)); err != nil { - return resource_os_config_patch_deployment_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return resource_os_config_patch_deployment_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return resource_os_config_patch_deployment_fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - resource_os_config_patch_deployment_log.Printf("[DEBUG] Finished creating PatchDeployment %q: %#v", d.Id(), res) - - return resourceOSConfigPatchDeploymentRead(d, meta) -} - -func resourceOSConfigPatchDeploymentRead(d *resource_os_config_patch_deployment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{OSConfigBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error fetching project for PatchDeployment: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_os_config_patch_deployment_fmt.Sprintf("OSConfigPatchDeployment %q", d.Id())) - } - - res, err = resourceOSConfigPatchDeploymentDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_os_config_patch_deployment_log.Printf("[DEBUG] Removing OSConfigPatchDeployment because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error reading PatchDeployment: %s", err) - } - - if err := d.Set("name", flattenOSConfigPatchDeploymentName(res["name"], d, config)); err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error reading PatchDeployment: %s", err) - } - if err := d.Set("description", 
flattenOSConfigPatchDeploymentDescription(res["description"], d, config)); err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error reading PatchDeployment: %s", err) - } - if err := d.Set("instance_filter", flattenOSConfigPatchDeploymentInstanceFilter(res["instanceFilter"], d, config)); err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error reading PatchDeployment: %s", err) - } - if err := d.Set("patch_config", flattenOSConfigPatchDeploymentPatchConfig(res["patchConfig"], d, config)); err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error reading PatchDeployment: %s", err) - } - if err := d.Set("duration", flattenOSConfigPatchDeploymentDuration(res["duration"], d, config)); err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error reading PatchDeployment: %s", err) - } - if err := d.Set("create_time", flattenOSConfigPatchDeploymentCreateTime(res["createTime"], d, config)); err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error reading PatchDeployment: %s", err) - } - if err := d.Set("update_time", flattenOSConfigPatchDeploymentUpdateTime(res["updateTime"], d, config)); err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error reading PatchDeployment: %s", err) - } - if err := d.Set("last_execute_time", flattenOSConfigPatchDeploymentLastExecuteTime(res["lastExecuteTime"], d, config)); err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error reading PatchDeployment: %s", err) - } - if err := d.Set("one_time_schedule", flattenOSConfigPatchDeploymentOneTimeSchedule(res["oneTimeSchedule"], d, config)); err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error reading PatchDeployment: %s", err) - } - if err := d.Set("recurring_schedule", flattenOSConfigPatchDeploymentRecurringSchedule(res["recurringSchedule"], d, config)); err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error reading 
PatchDeployment: %s", err) - } - if err := d.Set("rollout", flattenOSConfigPatchDeploymentRollout(res["rollout"], d, config)); err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error reading PatchDeployment: %s", err) - } - - return nil -} - -func resourceOSConfigPatchDeploymentDelete(d *resource_os_config_patch_deployment_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_os_config_patch_deployment_fmt.Errorf("Error fetching project for PatchDeployment: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{OSConfigBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_os_config_patch_deployment_log.Printf("[DEBUG] Deleting PatchDeployment %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_os_config_patch_deployment_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "PatchDeployment") - } - - resource_os_config_patch_deployment_log.Printf("[DEBUG] Finished deleting PatchDeployment %q: %#v", d.Id(), res) - return nil -} - -func resourceOSConfigPatchDeploymentImport(d *resource_os_config_patch_deployment_schema.ResourceData, meta interface{}) ([]*resource_os_config_patch_deployment_schema.ResourceData, error) { - - config := meta.(*Config) - - if err := parseImportId([]string{"(?P[^ ]+) (?P[^ ]+)", "(?P[^ ]+)"}, d, config); err != nil { - return nil, err - } - - return []*resource_os_config_patch_deployment_schema.ResourceData{d}, nil -} - -func flattenOSConfigPatchDeploymentName(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config 
*Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentDescription(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentInstanceFilter(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["all"] = - flattenOSConfigPatchDeploymentInstanceFilterAll(original["all"], d, config) - transformed["group_labels"] = - flattenOSConfigPatchDeploymentInstanceFilterGroupLabels(original["groupLabels"], d, config) - transformed["zones"] = - flattenOSConfigPatchDeploymentInstanceFilterZones(original["zones"], d, config) - transformed["instances"] = - flattenOSConfigPatchDeploymentInstanceFilterInstances(original["instances"], d, config) - transformed["instance_name_prefixes"] = - flattenOSConfigPatchDeploymentInstanceFilterInstanceNamePrefixes(original["instanceNamePrefixes"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentInstanceFilterAll(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentInstanceFilterGroupLabels(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "labels": flattenOSConfigPatchDeploymentInstanceFilterGroupLabelsLabels(original["labels"], d, config), - }) - } - return transformed -} - -func 
flattenOSConfigPatchDeploymentInstanceFilterGroupLabelsLabels(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentInstanceFilterZones(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentInstanceFilterInstances(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentInstanceFilterInstanceNamePrefixes(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfig(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["reboot_config"] = - flattenOSConfigPatchDeploymentPatchConfigRebootConfig(original["rebootConfig"], d, config) - transformed["apt"] = - flattenOSConfigPatchDeploymentPatchConfigApt(original["apt"], d, config) - transformed["yum"] = - flattenOSConfigPatchDeploymentPatchConfigYum(original["yum"], d, config) - transformed["goo"] = - flattenOSConfigPatchDeploymentPatchConfigGoo(original["goo"], d, config) - transformed["zypper"] = - flattenOSConfigPatchDeploymentPatchConfigZypper(original["zypper"], d, config) - transformed["windows_update"] = - flattenOSConfigPatchDeploymentPatchConfigWindowsUpdate(original["windowsUpdate"], d, config) - transformed["pre_step"] = - flattenOSConfigPatchDeploymentPatchConfigPreStep(original["preStep"], d, config) - transformed["post_step"] = - flattenOSConfigPatchDeploymentPatchConfigPostStep(original["postStep"], d, config) - return []interface{}{transformed} -} - -func 
flattenOSConfigPatchDeploymentPatchConfigRebootConfig(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigApt(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["type"] = - flattenOSConfigPatchDeploymentPatchConfigAptType(original["type"], d, config) - transformed["excludes"] = - flattenOSConfigPatchDeploymentPatchConfigAptExcludes(original["excludes"], d, config) - transformed["exclusive_packages"] = - flattenOSConfigPatchDeploymentPatchConfigAptExclusivePackages(original["exclusivePackages"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentPatchConfigAptType(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigAptExcludes(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigAptExclusivePackages(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigYum(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["security"] = - flattenOSConfigPatchDeploymentPatchConfigYumSecurity(original["security"], d, config) - transformed["minimal"] = - flattenOSConfigPatchDeploymentPatchConfigYumMinimal(original["minimal"], d, config) - 
transformed["excludes"] = - flattenOSConfigPatchDeploymentPatchConfigYumExcludes(original["excludes"], d, config) - transformed["exclusive_packages"] = - flattenOSConfigPatchDeploymentPatchConfigYumExclusivePackages(original["exclusivePackages"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentPatchConfigYumSecurity(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigYumMinimal(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigYumExcludes(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigYumExclusivePackages(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigGoo(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["enabled"] = - flattenOSConfigPatchDeploymentPatchConfigGooEnabled(original["enabled"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentPatchConfigGooEnabled(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigZypper(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["with_optional"] = - flattenOSConfigPatchDeploymentPatchConfigZypperWithOptional(original["withOptional"], d, config) - transformed["with_update"] = - flattenOSConfigPatchDeploymentPatchConfigZypperWithUpdate(original["withUpdate"], d, config) - transformed["categories"] = - flattenOSConfigPatchDeploymentPatchConfigZypperCategories(original["categories"], d, config) - transformed["severities"] = - flattenOSConfigPatchDeploymentPatchConfigZypperSeverities(original["severities"], d, config) - transformed["excludes"] = - flattenOSConfigPatchDeploymentPatchConfigZypperExcludes(original["excludes"], d, config) - transformed["exclusive_patches"] = - flattenOSConfigPatchDeploymentPatchConfigZypperExclusivePatches(original["exclusivePatches"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentPatchConfigZypperWithOptional(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigZypperWithUpdate(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigZypperCategories(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigZypperSeverities(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigZypperExcludes(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigZypperExclusivePatches(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenOSConfigPatchDeploymentPatchConfigWindowsUpdate(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["classifications"] = - flattenOSConfigPatchDeploymentPatchConfigWindowsUpdateClassifications(original["classifications"], d, config) - transformed["excludes"] = - flattenOSConfigPatchDeploymentPatchConfigWindowsUpdateExcludes(original["excludes"], d, config) - transformed["exclusive_patches"] = - flattenOSConfigPatchDeploymentPatchConfigWindowsUpdateExclusivePatches(original["exclusivePatches"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentPatchConfigWindowsUpdateClassifications(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigWindowsUpdateExcludes(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigWindowsUpdateExclusivePatches(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPreStep(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["linux_exec_step_config"] = - flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig(original["linuxExecStepConfig"], d, config) - transformed["windows_exec_step_config"] = - 
flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig(original["windowsExecStepConfig"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allowed_success_codes"] = - flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigAllowedSuccessCodes(original["allowedSuccessCodes"], d, config) - transformed["interpreter"] = - flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreter(original["interpreter"], d, config) - transformed["local_path"] = - flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigLocalPath(original["localPath"], d, config) - transformed["gcs_object"] = - flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject(original["gcsObject"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigAllowedSuccessCodes(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreter(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigLocalPath(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - 
original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["bucket"] = - flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectBucket(original["bucket"], d, config) - transformed["object"] = - flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectObject(original["object"], d, config) - transformed["generation_number"] = - flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectGenerationNumber(original["generationNumber"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectBucket(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectObject(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectGenerationNumber(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allowed_success_codes"] = - flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigAllowedSuccessCodes(original["allowedSuccessCodes"], d, config) - transformed["interpreter"] = - flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreter(original["interpreter"], d, config) - transformed["local_path"] = - 
flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigLocalPath(original["localPath"], d, config) - transformed["gcs_object"] = - flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject(original["gcsObject"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigAllowedSuccessCodes(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreter(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigLocalPath(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["bucket"] = - flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectBucket(original["bucket"], d, config) - transformed["object"] = - flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectObject(original["object"], d, config) - transformed["generation_number"] = - flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectGenerationNumber(original["generationNumber"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectBucket(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - 
-func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectObject(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectGenerationNumber(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPostStep(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["linux_exec_step_config"] = - flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig(original["linuxExecStepConfig"], d, config) - transformed["windows_exec_step_config"] = - flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig(original["windowsExecStepConfig"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allowed_success_codes"] = - flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigAllowedSuccessCodes(original["allowedSuccessCodes"], d, config) - transformed["interpreter"] = - flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreter(original["interpreter"], d, config) - transformed["local_path"] = - flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigLocalPath(original["localPath"], d, config) - transformed["gcs_object"] = - 
flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject(original["gcsObject"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigAllowedSuccessCodes(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreter(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigLocalPath(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["bucket"] = - flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectBucket(original["bucket"], d, config) - transformed["object"] = - flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectObject(original["object"], d, config) - transformed["generation_number"] = - flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectGenerationNumber(original["generationNumber"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectBucket(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectObject(v interface{}, d 
*resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectGenerationNumber(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allowed_success_codes"] = - flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigAllowedSuccessCodes(original["allowedSuccessCodes"], d, config) - transformed["interpreter"] = - flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreter(original["interpreter"], d, config) - transformed["local_path"] = - flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigLocalPath(original["localPath"], d, config) - transformed["gcs_object"] = - flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject(original["gcsObject"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigAllowedSuccessCodes(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreter(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigLocalPath(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["bucket"] = - flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectBucket(original["bucket"], d, config) - transformed["object"] = - flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectObject(original["object"], d, config) - transformed["generation_number"] = - flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectGenerationNumber(original["generationNumber"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectBucket(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectObject(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectGenerationNumber(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentDuration(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentCreateTime(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentUpdateTime(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return 
v -} - -func flattenOSConfigPatchDeploymentLastExecuteTime(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentOneTimeSchedule(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["execute_time"] = - flattenOSConfigPatchDeploymentOneTimeScheduleExecuteTime(original["executeTime"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentOneTimeScheduleExecuteTime(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentRecurringSchedule(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["time_zone"] = - flattenOSConfigPatchDeploymentRecurringScheduleTimeZone(original["timeZone"], d, config) - transformed["start_time"] = - flattenOSConfigPatchDeploymentRecurringScheduleStartTime(original["startTime"], d, config) - transformed["end_time"] = - flattenOSConfigPatchDeploymentRecurringScheduleEndTime(original["endTime"], d, config) - transformed["time_of_day"] = - flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDay(original["timeOfDay"], d, config) - transformed["last_execute_time"] = - flattenOSConfigPatchDeploymentRecurringScheduleLastExecuteTime(original["lastExecuteTime"], d, config) - transformed["next_execute_time"] = - flattenOSConfigPatchDeploymentRecurringScheduleNextExecuteTime(original["nextExecuteTime"], d, config) - transformed["weekly"] = - 
flattenOSConfigPatchDeploymentRecurringScheduleWeekly(original["weekly"], d, config) - transformed["monthly"] = - flattenOSConfigPatchDeploymentRecurringScheduleMonthly(original["monthly"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentRecurringScheduleTimeZone(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["id"] = - flattenOSConfigPatchDeploymentRecurringScheduleTimeZoneId(original["id"], d, config) - transformed["version"] = - flattenOSConfigPatchDeploymentRecurringScheduleTimeZoneVersion(original["version"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentRecurringScheduleTimeZoneId(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentRecurringScheduleTimeZoneVersion(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentRecurringScheduleStartTime(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentRecurringScheduleEndTime(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDay(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["hours"] = - 
flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDayHours(original["hours"], d, config) - transformed["minutes"] = - flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDayMinutes(original["minutes"], d, config) - transformed["seconds"] = - flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDaySeconds(original["seconds"], d, config) - transformed["nanos"] = - flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDayNanos(original["nanos"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDayHours(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_os_config_patch_deployment_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDayMinutes(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_os_config_patch_deployment_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDaySeconds(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_os_config_patch_deployment_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenOSConfigPatchDeploymentRecurringScheduleTimeOfDayNanos(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) 
interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_os_config_patch_deployment_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenOSConfigPatchDeploymentRecurringScheduleLastExecuteTime(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentRecurringScheduleNextExecuteTime(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentRecurringScheduleWeekly(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["day_of_week"] = - flattenOSConfigPatchDeploymentRecurringScheduleWeeklyDayOfWeek(original["dayOfWeek"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentRecurringScheduleWeeklyDayOfWeek(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentRecurringScheduleMonthly(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["week_day_of_month"] = - flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth(original["weekDayOfMonth"], d, config) - transformed["month_day"] = - flattenOSConfigPatchDeploymentRecurringScheduleMonthlyMonthDay(original["monthDay"], d, config) - return []interface{}{transformed} -} - 
-func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["week_ordinal"] = - flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthWeekOrdinal(original["weekOrdinal"], d, config) - transformed["day_of_week"] = - flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeek(original["dayOfWeek"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthWeekOrdinal(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_os_config_patch_deployment_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeek(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentRecurringScheduleMonthlyMonthDay(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_os_config_patch_deployment_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenOSConfigPatchDeploymentRollout(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := 
v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["mode"] = - flattenOSConfigPatchDeploymentRolloutMode(original["mode"], d, config) - transformed["disruption_budget"] = - flattenOSConfigPatchDeploymentRolloutDisruptionBudget(original["disruptionBudget"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentRolloutMode(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSConfigPatchDeploymentRolloutDisruptionBudget(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["fixed"] = - flattenOSConfigPatchDeploymentRolloutDisruptionBudgetFixed(original["fixed"], d, config) - transformed["percentage"] = - flattenOSConfigPatchDeploymentRolloutDisruptionBudgetPercentage(original["percent"], d, config) - return []interface{}{transformed} -} - -func flattenOSConfigPatchDeploymentRolloutDisruptionBudgetFixed(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_os_config_patch_deployment_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenOSConfigPatchDeploymentRolloutDisruptionBudgetPercentage(v interface{}, d *resource_os_config_patch_deployment_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_os_config_patch_deployment_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal 
:= int(floatVal) - return intVal - } - - return v -} - -func expandOSConfigPatchDeploymentDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentInstanceFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAll, err := expandOSConfigPatchDeploymentInstanceFilterAll(original["all"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedAll); val.IsValid() && !isEmptyValue(val) { - transformed["all"] = transformedAll - } - - transformedGroupLabels, err := expandOSConfigPatchDeploymentInstanceFilterGroupLabels(original["group_labels"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedGroupLabels); val.IsValid() && !isEmptyValue(val) { - transformed["groupLabels"] = transformedGroupLabels - } - - transformedZones, err := expandOSConfigPatchDeploymentInstanceFilterZones(original["zones"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedZones); val.IsValid() && !isEmptyValue(val) { - transformed["zones"] = transformedZones - } - - transformedInstances, err := expandOSConfigPatchDeploymentInstanceFilterInstances(original["instances"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedInstances); val.IsValid() && !isEmptyValue(val) { - transformed["instances"] = transformedInstances - } - - transformedInstanceNamePrefixes, err := expandOSConfigPatchDeploymentInstanceFilterInstanceNamePrefixes(original["instance_name_prefixes"], d, config) - if err 
!= nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedInstanceNamePrefixes); val.IsValid() && !isEmptyValue(val) { - transformed["instanceNamePrefixes"] = transformedInstanceNamePrefixes - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentInstanceFilterAll(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentInstanceFilterGroupLabels(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLabels, err := expandOSConfigPatchDeploymentInstanceFilterGroupLabelsLabels(original["labels"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { - transformed["labels"] = transformedLabels - } - - req = append(req, transformed) - } - return req, nil -} - -func expandOSConfigPatchDeploymentInstanceFilterGroupLabelsLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandOSConfigPatchDeploymentInstanceFilterZones(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentInstanceFilterInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentInstanceFilterInstanceNamePrefixes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} 
- -func expandOSConfigPatchDeploymentPatchConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRebootConfig, err := expandOSConfigPatchDeploymentPatchConfigRebootConfig(original["reboot_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedRebootConfig); val.IsValid() && !isEmptyValue(val) { - transformed["rebootConfig"] = transformedRebootConfig - } - - transformedApt, err := expandOSConfigPatchDeploymentPatchConfigApt(original["apt"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedApt); val.IsValid() && !isEmptyValue(val) { - transformed["apt"] = transformedApt - } - - transformedYum, err := expandOSConfigPatchDeploymentPatchConfigYum(original["yum"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedYum); val.IsValid() && !isEmptyValue(val) { - transformed["yum"] = transformedYum - } - - transformedGoo, err := expandOSConfigPatchDeploymentPatchConfigGoo(original["goo"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedGoo); val.IsValid() && !isEmptyValue(val) { - transformed["goo"] = transformedGoo - } - - transformedZypper, err := expandOSConfigPatchDeploymentPatchConfigZypper(original["zypper"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedZypper); val.IsValid() && !isEmptyValue(val) { - transformed["zypper"] = transformedZypper - } - - transformedWindowsUpdate, err := 
expandOSConfigPatchDeploymentPatchConfigWindowsUpdate(original["windows_update"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedWindowsUpdate); val.IsValid() && !isEmptyValue(val) { - transformed["windowsUpdate"] = transformedWindowsUpdate - } - - transformedPreStep, err := expandOSConfigPatchDeploymentPatchConfigPreStep(original["pre_step"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedPreStep); val.IsValid() && !isEmptyValue(val) { - transformed["preStep"] = transformedPreStep - } - - transformedPostStep, err := expandOSConfigPatchDeploymentPatchConfigPostStep(original["post_step"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedPostStep); val.IsValid() && !isEmptyValue(val) { - transformed["postStep"] = transformedPostStep - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentPatchConfigRebootConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigApt(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedType, err := expandOSConfigPatchDeploymentPatchConfigAptType(original["type"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedType); val.IsValid() && !isEmptyValue(val) { - transformed["type"] = transformedType - } - - transformedExcludes, err := expandOSConfigPatchDeploymentPatchConfigAptExcludes(original["excludes"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_os_config_patch_deployment_reflect.ValueOf(transformedExcludes); val.IsValid() && !isEmptyValue(val) { - transformed["excludes"] = transformedExcludes - } - - transformedExclusivePackages, err := expandOSConfigPatchDeploymentPatchConfigAptExclusivePackages(original["exclusive_packages"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedExclusivePackages); val.IsValid() && !isEmptyValue(val) { - transformed["exclusivePackages"] = transformedExclusivePackages - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentPatchConfigAptType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigAptExcludes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigAptExclusivePackages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigYum(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSecurity, err := expandOSConfigPatchDeploymentPatchConfigYumSecurity(original["security"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedSecurity); val.IsValid() && !isEmptyValue(val) { - transformed["security"] = transformedSecurity - } - - transformedMinimal, err := expandOSConfigPatchDeploymentPatchConfigYumMinimal(original["minimal"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedMinimal); val.IsValid() && !isEmptyValue(val) { - 
transformed["minimal"] = transformedMinimal - } - - transformedExcludes, err := expandOSConfigPatchDeploymentPatchConfigYumExcludes(original["excludes"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedExcludes); val.IsValid() && !isEmptyValue(val) { - transformed["excludes"] = transformedExcludes - } - - transformedExclusivePackages, err := expandOSConfigPatchDeploymentPatchConfigYumExclusivePackages(original["exclusive_packages"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedExclusivePackages); val.IsValid() && !isEmptyValue(val) { - transformed["exclusivePackages"] = transformedExclusivePackages - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentPatchConfigYumSecurity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigYumMinimal(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigYumExcludes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigYumExclusivePackages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigGoo(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedEnabled, err := expandOSConfigPatchDeploymentPatchConfigGooEnabled(original["enabled"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_os_config_patch_deployment_reflect.ValueOf(transformedEnabled); val.IsValid() && !isEmptyValue(val) { - transformed["enabled"] = transformedEnabled - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentPatchConfigGooEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigZypper(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWithOptional, err := expandOSConfigPatchDeploymentPatchConfigZypperWithOptional(original["with_optional"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedWithOptional); val.IsValid() && !isEmptyValue(val) { - transformed["withOptional"] = transformedWithOptional - } - - transformedWithUpdate, err := expandOSConfigPatchDeploymentPatchConfigZypperWithUpdate(original["with_update"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedWithUpdate); val.IsValid() && !isEmptyValue(val) { - transformed["withUpdate"] = transformedWithUpdate - } - - transformedCategories, err := expandOSConfigPatchDeploymentPatchConfigZypperCategories(original["categories"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedCategories); val.IsValid() && !isEmptyValue(val) { - transformed["categories"] = transformedCategories - } - - transformedSeverities, err := expandOSConfigPatchDeploymentPatchConfigZypperSeverities(original["severities"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedSeverities); 
val.IsValid() && !isEmptyValue(val) { - transformed["severities"] = transformedSeverities - } - - transformedExcludes, err := expandOSConfigPatchDeploymentPatchConfigZypperExcludes(original["excludes"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedExcludes); val.IsValid() && !isEmptyValue(val) { - transformed["excludes"] = transformedExcludes - } - - transformedExclusivePatches, err := expandOSConfigPatchDeploymentPatchConfigZypperExclusivePatches(original["exclusive_patches"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedExclusivePatches); val.IsValid() && !isEmptyValue(val) { - transformed["exclusivePatches"] = transformedExclusivePatches - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentPatchConfigZypperWithOptional(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigZypperWithUpdate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigZypperCategories(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigZypperSeverities(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigZypperExcludes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigZypperExclusivePatches(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigWindowsUpdate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := 
v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedClassifications, err := expandOSConfigPatchDeploymentPatchConfigWindowsUpdateClassifications(original["classifications"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedClassifications); val.IsValid() && !isEmptyValue(val) { - transformed["classifications"] = transformedClassifications - } - - transformedExcludes, err := expandOSConfigPatchDeploymentPatchConfigWindowsUpdateExcludes(original["excludes"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedExcludes); val.IsValid() && !isEmptyValue(val) { - transformed["excludes"] = transformedExcludes - } - - transformedExclusivePatches, err := expandOSConfigPatchDeploymentPatchConfigWindowsUpdateExclusivePatches(original["exclusive_patches"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedExclusivePatches); val.IsValid() && !isEmptyValue(val) { - transformed["exclusivePatches"] = transformedExclusivePatches - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentPatchConfigWindowsUpdateClassifications(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigWindowsUpdateExcludes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigWindowsUpdateExclusivePatches(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPreStep(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := 
v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLinuxExecStepConfig, err := expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig(original["linux_exec_step_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedLinuxExecStepConfig); val.IsValid() && !isEmptyValue(val) { - transformed["linuxExecStepConfig"] = transformedLinuxExecStepConfig - } - - transformedWindowsExecStepConfig, err := expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig(original["windows_exec_step_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedWindowsExecStepConfig); val.IsValid() && !isEmptyValue(val) { - transformed["windowsExecStepConfig"] = transformedWindowsExecStepConfig - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowedSuccessCodes, err := expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigAllowedSuccessCodes(original["allowed_success_codes"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedAllowedSuccessCodes); val.IsValid() && !isEmptyValue(val) { - transformed["allowedSuccessCodes"] = transformedAllowedSuccessCodes - } - - transformedInterpreter, err := expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreter(original["interpreter"], d, config) - if err != nil { - return nil, err - } else if 
val := resource_os_config_patch_deployment_reflect.ValueOf(transformedInterpreter); val.IsValid() && !isEmptyValue(val) { - transformed["interpreter"] = transformedInterpreter - } - - transformedLocalPath, err := expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigLocalPath(original["local_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedLocalPath); val.IsValid() && !isEmptyValue(val) { - transformed["localPath"] = transformedLocalPath - } - - transformedGcsObject, err := expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject(original["gcs_object"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedGcsObject); val.IsValid() && !isEmptyValue(val) { - transformed["gcsObject"] = transformedGcsObject - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigAllowedSuccessCodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigLocalPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBucket, err := expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectBucket(original["bucket"], d, config) - if err 
!= nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedBucket); val.IsValid() && !isEmptyValue(val) { - transformed["bucket"] = transformedBucket - } - - transformedObject, err := expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectObject(original["object"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedObject); val.IsValid() && !isEmptyValue(val) { - transformed["object"] = transformedObject - } - - transformedGenerationNumber, err := expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectGenerationNumber(original["generation_number"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedGenerationNumber); val.IsValid() && !isEmptyValue(val) { - transformed["generationNumber"] = transformedGenerationNumber - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectGenerationNumber(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowedSuccessCodes, err := 
expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigAllowedSuccessCodes(original["allowed_success_codes"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedAllowedSuccessCodes); val.IsValid() && !isEmptyValue(val) { - transformed["allowedSuccessCodes"] = transformedAllowedSuccessCodes - } - - transformedInterpreter, err := expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreter(original["interpreter"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedInterpreter); val.IsValid() && !isEmptyValue(val) { - transformed["interpreter"] = transformedInterpreter - } - - transformedLocalPath, err := expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigLocalPath(original["local_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedLocalPath); val.IsValid() && !isEmptyValue(val) { - transformed["localPath"] = transformedLocalPath - } - - transformedGcsObject, err := expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject(original["gcs_object"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedGcsObject); val.IsValid() && !isEmptyValue(val) { - transformed["gcsObject"] = transformedGcsObject - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigAllowedSuccessCodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigLocalPath(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBucket, err := expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectBucket(original["bucket"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedBucket); val.IsValid() && !isEmptyValue(val) { - transformed["bucket"] = transformedBucket - } - - transformedObject, err := expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectObject(original["object"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedObject); val.IsValid() && !isEmptyValue(val) { - transformed["object"] = transformedObject - } - - transformedGenerationNumber, err := expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectGenerationNumber(original["generation_number"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedGenerationNumber); val.IsValid() && !isEmptyValue(val) { - transformed["generationNumber"] = transformedGenerationNumber - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandOSConfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectGenerationNumber(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPostStep(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLinuxExecStepConfig, err := expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig(original["linux_exec_step_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedLinuxExecStepConfig); val.IsValid() && !isEmptyValue(val) { - transformed["linuxExecStepConfig"] = transformedLinuxExecStepConfig - } - - transformedWindowsExecStepConfig, err := expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig(original["windows_exec_step_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedWindowsExecStepConfig); val.IsValid() && !isEmptyValue(val) { - transformed["windowsExecStepConfig"] = transformedWindowsExecStepConfig - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowedSuccessCodes, err := expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigAllowedSuccessCodes(original["allowed_success_codes"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_os_config_patch_deployment_reflect.ValueOf(transformedAllowedSuccessCodes); val.IsValid() && !isEmptyValue(val) { - transformed["allowedSuccessCodes"] = transformedAllowedSuccessCodes - } - - transformedInterpreter, err := expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreter(original["interpreter"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedInterpreter); val.IsValid() && !isEmptyValue(val) { - transformed["interpreter"] = transformedInterpreter - } - - transformedLocalPath, err := expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigLocalPath(original["local_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedLocalPath); val.IsValid() && !isEmptyValue(val) { - transformed["localPath"] = transformedLocalPath - } - - transformedGcsObject, err := expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject(original["gcs_object"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedGcsObject); val.IsValid() && !isEmptyValue(val) { - transformed["gcsObject"] = transformedGcsObject - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigAllowedSuccessCodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigLocalPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBucket, err := expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectBucket(original["bucket"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedBucket); val.IsValid() && !isEmptyValue(val) { - transformed["bucket"] = transformedBucket - } - - transformedObject, err := expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectObject(original["object"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedObject); val.IsValid() && !isEmptyValue(val) { - transformed["object"] = transformedObject - } - - transformedGenerationNumber, err := expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectGenerationNumber(original["generation_number"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedGenerationNumber); val.IsValid() && !isEmptyValue(val) { - transformed["generationNumber"] = transformedGenerationNumber - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectGenerationNumber(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil 
-} - -func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowedSuccessCodes, err := expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigAllowedSuccessCodes(original["allowed_success_codes"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedAllowedSuccessCodes); val.IsValid() && !isEmptyValue(val) { - transformed["allowedSuccessCodes"] = transformedAllowedSuccessCodes - } - - transformedInterpreter, err := expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreter(original["interpreter"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedInterpreter); val.IsValid() && !isEmptyValue(val) { - transformed["interpreter"] = transformedInterpreter - } - - transformedLocalPath, err := expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigLocalPath(original["local_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedLocalPath); val.IsValid() && !isEmptyValue(val) { - transformed["localPath"] = transformedLocalPath - } - - transformedGcsObject, err := expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject(original["gcs_object"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedGcsObject); val.IsValid() && !isEmptyValue(val) { - transformed["gcsObject"] = transformedGcsObject - } - - return transformed, nil -} - -func 
expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigAllowedSuccessCodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigLocalPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBucket, err := expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectBucket(original["bucket"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedBucket); val.IsValid() && !isEmptyValue(val) { - transformed["bucket"] = transformedBucket - } - - transformedObject, err := expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectObject(original["object"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedObject); val.IsValid() && !isEmptyValue(val) { - transformed["object"] = transformedObject - } - - transformedGenerationNumber, err := expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectGenerationNumber(original["generation_number"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedGenerationNumber); val.IsValid() && !isEmptyValue(val) { - 
transformed["generationNumber"] = transformedGenerationNumber - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectGenerationNumber(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentOneTimeSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExecuteTime, err := expandOSConfigPatchDeploymentOneTimeScheduleExecuteTime(original["execute_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedExecuteTime); val.IsValid() && !isEmptyValue(val) { - transformed["executeTime"] = transformedExecuteTime - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentOneTimeScheduleExecuteTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRecurringSchedule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedTimeZone, err := expandOSConfigPatchDeploymentRecurringScheduleTimeZone(original["time_zone"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedTimeZone); val.IsValid() && !isEmptyValue(val) { - transformed["timeZone"] = transformedTimeZone - } - - transformedStartTime, err := expandOSConfigPatchDeploymentRecurringScheduleStartTime(original["start_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedStartTime); val.IsValid() && !isEmptyValue(val) { - transformed["startTime"] = transformedStartTime - } - - transformedEndTime, err := expandOSConfigPatchDeploymentRecurringScheduleEndTime(original["end_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedEndTime); val.IsValid() && !isEmptyValue(val) { - transformed["endTime"] = transformedEndTime - } - - transformedTimeOfDay, err := expandOSConfigPatchDeploymentRecurringScheduleTimeOfDay(original["time_of_day"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedTimeOfDay); val.IsValid() && !isEmptyValue(val) { - transformed["timeOfDay"] = transformedTimeOfDay - } - - transformedLastExecuteTime, err := expandOSConfigPatchDeploymentRecurringScheduleLastExecuteTime(original["last_execute_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedLastExecuteTime); val.IsValid() && !isEmptyValue(val) { - transformed["lastExecuteTime"] = transformedLastExecuteTime - } - - transformedNextExecuteTime, err := expandOSConfigPatchDeploymentRecurringScheduleNextExecuteTime(original["next_execute_time"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_os_config_patch_deployment_reflect.ValueOf(transformedNextExecuteTime); val.IsValid() && !isEmptyValue(val) { - transformed["nextExecuteTime"] = transformedNextExecuteTime - } - - transformedWeekly, err := expandOSConfigPatchDeploymentRecurringScheduleWeekly(original["weekly"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedWeekly); val.IsValid() && !isEmptyValue(val) { - transformed["weekly"] = transformedWeekly - } - - transformedMonthly, err := expandOSConfigPatchDeploymentRecurringScheduleMonthly(original["monthly"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedMonthly); val.IsValid() && !isEmptyValue(val) { - transformed["monthly"] = transformedMonthly - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleTimeZone(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedId, err := expandOSConfigPatchDeploymentRecurringScheduleTimeZoneId(original["id"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedId); val.IsValid() && !isEmptyValue(val) { - transformed["id"] = transformedId - } - - transformedVersion, err := expandOSConfigPatchDeploymentRecurringScheduleTimeZoneVersion(original["version"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { - transformed["version"] = transformedVersion - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleTimeZoneId(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleTimeZoneVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleStartTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleEndTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleTimeOfDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedHours, err := expandOSConfigPatchDeploymentRecurringScheduleTimeOfDayHours(original["hours"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedHours); val.IsValid() && !isEmptyValue(val) { - transformed["hours"] = transformedHours - } - - transformedMinutes, err := expandOSConfigPatchDeploymentRecurringScheduleTimeOfDayMinutes(original["minutes"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedMinutes); val.IsValid() && !isEmptyValue(val) { - transformed["minutes"] = transformedMinutes - } - - transformedSeconds, err := expandOSConfigPatchDeploymentRecurringScheduleTimeOfDaySeconds(original["seconds"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedSeconds); val.IsValid() && !isEmptyValue(val) { - transformed["seconds"] = transformedSeconds - } - - transformedNanos, err := expandOSConfigPatchDeploymentRecurringScheduleTimeOfDayNanos(original["nanos"], d, 
config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedNanos); val.IsValid() && !isEmptyValue(val) { - transformed["nanos"] = transformedNanos - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleTimeOfDayHours(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleTimeOfDayMinutes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleTimeOfDaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleTimeOfDayNanos(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleLastExecuteTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleNextExecuteTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleWeekly(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDayOfWeek, err := expandOSConfigPatchDeploymentRecurringScheduleWeeklyDayOfWeek(original["day_of_week"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedDayOfWeek); val.IsValid() && !isEmptyValue(val) { - transformed["dayOfWeek"] = transformedDayOfWeek - } - - return transformed, nil -} - -func 
expandOSConfigPatchDeploymentRecurringScheduleWeeklyDayOfWeek(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleMonthly(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWeekDayOfMonth, err := expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth(original["week_day_of_month"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedWeekDayOfMonth); val.IsValid() && !isEmptyValue(val) { - transformed["weekDayOfMonth"] = transformedWeekDayOfMonth - } - - transformedMonthDay, err := expandOSConfigPatchDeploymentRecurringScheduleMonthlyMonthDay(original["month_day"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedMonthDay); val.IsValid() && !isEmptyValue(val) { - transformed["monthDay"] = transformedMonthDay - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedWeekOrdinal, err := expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthWeekOrdinal(original["week_ordinal"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedWeekOrdinal); val.IsValid() && !isEmptyValue(val) { - transformed["weekOrdinal"] = transformedWeekOrdinal - } - - transformedDayOfWeek, err := 
expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeek(original["day_of_week"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedDayOfWeek); val.IsValid() && !isEmptyValue(val) { - transformed["dayOfWeek"] = transformedDayOfWeek - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthWeekOrdinal(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeek(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRecurringScheduleMonthlyMonthDay(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRollout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMode, err := expandOSConfigPatchDeploymentRolloutMode(original["mode"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedMode); val.IsValid() && !isEmptyValue(val) { - transformed["mode"] = transformedMode - } - - transformedDisruptionBudget, err := expandOSConfigPatchDeploymentRolloutDisruptionBudget(original["disruption_budget"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedDisruptionBudget); val.IsValid() && !isEmptyValue(val) { - transformed["disruptionBudget"] = transformedDisruptionBudget - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentRolloutMode(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRolloutDisruptionBudget(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFixed, err := expandOSConfigPatchDeploymentRolloutDisruptionBudgetFixed(original["fixed"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedFixed); val.IsValid() && !isEmptyValue(val) { - transformed["fixed"] = transformedFixed - } - - transformedPercentage, err := expandOSConfigPatchDeploymentRolloutDisruptionBudgetPercentage(original["percentage"], d, config) - if err != nil { - return nil, err - } else if val := resource_os_config_patch_deployment_reflect.ValueOf(transformedPercentage); val.IsValid() && !isEmptyValue(val) { - transformed["percent"] = transformedPercentage - } - - return transformed, nil -} - -func expandOSConfigPatchDeploymentRolloutDisruptionBudgetFixed(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSConfigPatchDeploymentRolloutDisruptionBudgetPercentage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceOSConfigPatchDeploymentEncoder(d *resource_os_config_patch_deployment_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - if obj["recurringSchedule"] != nil { - schedule := obj["recurringSchedule"].(map[string]interface{}) - if schedule["monthly"] != nil { - obj["recurringSchedule"].(map[string]interface{})["frequency"] = "MONTHLY" - } else if schedule["weekly"] != nil { - obj["recurringSchedule"].(map[string]interface{})["frequency"] = "WEEKLY" - } - } - - if 
obj["patchConfig"] != nil { - patchConfig := obj["patchConfig"].(map[string]interface{}) - if patchConfig["goo"] != nil { - goo := patchConfig["goo"].(map[string]interface{}) - - if goo["enabled"] == true { - delete(goo, "enabled") - patchConfig["goo"] = goo - } else { - delete(patchConfig, "goo") - } - - obj["patchConfig"] = patchConfig - } - } - - return obj, nil -} - -func resourceOSConfigPatchDeploymentDecoder(d *resource_os_config_patch_deployment_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if res["patchConfig"] != nil { - patchConfig := res["patchConfig"].(map[string]interface{}) - if patchConfig["goo"] != nil { - patchConfig["goo"].(map[string]interface{})["enabled"] = true - res["patchConfig"] = patchConfig - } - } - - return res, nil -} - -func resourceOSLoginSSHPublicKey() *resource_os_login_ssh_public_key_schema.Resource { - return &resource_os_login_ssh_public_key_schema.Resource{ - Create: resourceOSLoginSSHPublicKeyCreate, - Read: resourceOSLoginSSHPublicKeyRead, - Update: resourceOSLoginSSHPublicKeyUpdate, - Delete: resourceOSLoginSSHPublicKeyDelete, - - Importer: &resource_os_login_ssh_public_key_schema.ResourceImporter{ - State: resourceOSLoginSSHPublicKeyImport, - }, - - Timeouts: &resource_os_login_ssh_public_key_schema.ResourceTimeout{ - Create: resource_os_login_ssh_public_key_schema.DefaultTimeout(4 * resource_os_login_ssh_public_key_time.Minute), - Update: resource_os_login_ssh_public_key_schema.DefaultTimeout(4 * resource_os_login_ssh_public_key_time.Minute), - Delete: resource_os_login_ssh_public_key_schema.DefaultTimeout(4 * resource_os_login_ssh_public_key_time.Minute), - }, - - Schema: map[string]*resource_os_login_ssh_public_key_schema.Schema{ - "key": { - Type: resource_os_login_ssh_public_key_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Public key text in SSH format, defined by RFC4253 section 6.6.`, - }, - "user": { - Type: 
resource_os_login_ssh_public_key_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The user email.`, - }, - "expiration_time_usec": { - Type: resource_os_login_ssh_public_key_schema.TypeString, - Optional: true, - Description: `An expiration time in microseconds since epoch.`, - }, - "project": { - Type: resource_os_login_ssh_public_key_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The project ID of the Google Cloud Platform project.`, - }, - "fingerprint": { - Type: resource_os_login_ssh_public_key_schema.TypeString, - Computed: true, - Description: `The SHA-256 fingerprint of the SSH public key.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceOSLoginSSHPublicKeyCreate(d *resource_os_login_ssh_public_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - keyProp, err := expandOSLoginSSHPublicKeyKey(d.Get("key"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("key"); !isEmptyValue(resource_os_login_ssh_public_key_reflect.ValueOf(keyProp)) && (ok || !resource_os_login_ssh_public_key_reflect.DeepEqual(v, keyProp)) { - obj["key"] = keyProp - } - expirationTimeUsecProp, err := expandOSLoginSSHPublicKeyExpirationTimeUsec(d.Get("expiration_time_usec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("expiration_time_usec"); !isEmptyValue(resource_os_login_ssh_public_key_reflect.ValueOf(expirationTimeUsecProp)) && (ok || !resource_os_login_ssh_public_key_reflect.DeepEqual(v, expirationTimeUsecProp)) { - obj["expirationTimeUsec"] = expirationTimeUsecProp - } - - url, err := replaceVars(d, config, "{{OSLoginBasePath}}users/{{user}}:importSshPublicKey") - if err != nil { - return err - } - - resource_os_login_ssh_public_key_log.Printf("[DEBUG] Creating new SSHPublicKey: %#v", obj) - billingProject := 
"" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - if p, ok := d.GetOk("project"); ok { - url, err = addQueryParams(url, map[string]string{"projectId": p.(string)}) - if err != nil { - return err - } - } - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_os_login_ssh_public_key_schema.TimeoutCreate)) - if err != nil { - return resource_os_login_ssh_public_key_fmt.Errorf("Error creating SSHPublicKey: %s", err) - } - - id, err := replaceVars(d, config, "users/{{user}}/sshPublicKeys/{{fingerprint}}") - if err != nil { - return resource_os_login_ssh_public_key_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - loginProfile, ok := res["loginProfile"] - if !ok { - return resource_os_login_ssh_public_key_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - - sshPublicKeys := loginProfile.(map[string]interface{})["sshPublicKeys"] - for _, sshPublicKey := range sshPublicKeys.(map[string]interface{}) { - if sshPublicKey.(map[string]interface{})["key"].(string) == d.Get("key") { - if err := d.Set("fingerprint", sshPublicKey.(map[string]interface{})["fingerprint"].(string)); err != nil { - return resource_os_login_ssh_public_key_fmt.Errorf("Error setting fingerprint: %s", err) - } - break - } - } - - id, err = replaceVars(d, config, "users/{{user}}/sshPublicKeys/{{fingerprint}}") - if err != nil { - return resource_os_login_ssh_public_key_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_os_login_ssh_public_key_log.Printf("[DEBUG] Finished creating SSHPublicKey %q: %#v", d.Id(), res) - - return resourceOSLoginSSHPublicKeyRead(d, meta) -} - -func resourceOSLoginSSHPublicKeyRead(d *resource_os_login_ssh_public_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, 
err := replaceVars(d, config, "{{OSLoginBasePath}}users/{{user}}/sshPublicKeys/{{fingerprint}}/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_os_login_ssh_public_key_fmt.Sprintf("OSLoginSSHPublicKey %q", d.Id())) - } - - if err := d.Set("key", flattenOSLoginSSHPublicKeyKey(res["key"], d, config)); err != nil { - return resource_os_login_ssh_public_key_fmt.Errorf("Error reading SSHPublicKey: %s", err) - } - if err := d.Set("expiration_time_usec", flattenOSLoginSSHPublicKeyExpirationTimeUsec(res["expirationTimeUsec"], d, config)); err != nil { - return resource_os_login_ssh_public_key_fmt.Errorf("Error reading SSHPublicKey: %s", err) - } - if err := d.Set("fingerprint", flattenOSLoginSSHPublicKeyFingerprint(res["fingerprint"], d, config)); err != nil { - return resource_os_login_ssh_public_key_fmt.Errorf("Error reading SSHPublicKey: %s", err) - } - - return nil -} - -func resourceOSLoginSSHPublicKeyUpdate(d *resource_os_login_ssh_public_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - expirationTimeUsecProp, err := expandOSLoginSSHPublicKeyExpirationTimeUsec(d.Get("expiration_time_usec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("expiration_time_usec"); !isEmptyValue(resource_os_login_ssh_public_key_reflect.ValueOf(v)) && (ok || !resource_os_login_ssh_public_key_reflect.DeepEqual(v, expirationTimeUsecProp)) { - obj["expirationTimeUsec"] = expirationTimeUsecProp - } - - url, err := replaceVars(d, config, "{{OSLoginBasePath}}users/{{user}}/sshPublicKeys/{{fingerprint}}/{{name}}") - if err != 
nil { - return err - } - - resource_os_login_ssh_public_key_log.Printf("[DEBUG] Updating SSHPublicKey %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("expiration_time_usec") { - updateMask = append(updateMask, "expirationTimeUsec") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_os_login_ssh_public_key_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_os_login_ssh_public_key_schema.TimeoutUpdate)) - - if err != nil { - return resource_os_login_ssh_public_key_fmt.Errorf("Error updating SSHPublicKey %q: %s", d.Id(), err) - } else { - resource_os_login_ssh_public_key_log.Printf("[DEBUG] Finished updating SSHPublicKey %q: %#v", d.Id(), res) - } - - return resourceOSLoginSSHPublicKeyRead(d, meta) -} - -func resourceOSLoginSSHPublicKeyDelete(d *resource_os_login_ssh_public_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{OSLoginBasePath}}users/{{user}}/sshPublicKeys/{{fingerprint}}/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_os_login_ssh_public_key_log.Printf("[DEBUG] Deleting SSHPublicKey %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_os_login_ssh_public_key_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "SSHPublicKey") - } - - resource_os_login_ssh_public_key_log.Printf("[DEBUG] Finished deleting SSHPublicKey %q: %#v", d.Id(), res) - return nil -} - -func 
resourceOSLoginSSHPublicKeyImport(d *resource_os_login_ssh_public_key_schema.ResourceData, meta interface{}) ([]*resource_os_login_ssh_public_key_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "users/(?P[^/]+)/sshPublicKeys/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "users/{{user}}/sshPublicKeys/{{fingerprint}}") - if err != nil { - return nil, resource_os_login_ssh_public_key_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_os_login_ssh_public_key_schema.ResourceData{d}, nil -} - -func flattenOSLoginSSHPublicKeyKey(v interface{}, d *resource_os_login_ssh_public_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSLoginSSHPublicKeyExpirationTimeUsec(v interface{}, d *resource_os_login_ssh_public_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenOSLoginSSHPublicKeyFingerprint(v interface{}, d *resource_os_login_ssh_public_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandOSLoginSSHPublicKeyKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandOSLoginSSHPublicKeyExpirationTimeUsec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourcePrivatecaCaPool() *resource_privateca_ca_pool_schema.Resource { - return &resource_privateca_ca_pool_schema.Resource{ - Create: resourcePrivatecaCaPoolCreate, - Read: resourcePrivatecaCaPoolRead, - Update: resourcePrivatecaCaPoolUpdate, - Delete: resourcePrivatecaCaPoolDelete, - - Importer: &resource_privateca_ca_pool_schema.ResourceImporter{ - State: resourcePrivatecaCaPoolImport, - }, - - Timeouts: &resource_privateca_ca_pool_schema.ResourceTimeout{ - Create: resource_privateca_ca_pool_schema.DefaultTimeout(4 * resource_privateca_ca_pool_time.Minute), 
- Update: resource_privateca_ca_pool_schema.DefaultTimeout(4 * resource_privateca_ca_pool_time.Minute), - Delete: resource_privateca_ca_pool_schema.DefaultTimeout(4 * resource_privateca_ca_pool_time.Minute), - }, - - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "location": { - Type: resource_privateca_ca_pool_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Location of the CaPool. A full list of valid locations can be found by -running 'gcloud privateca locations list'.`, - }, - "name": { - Type: resource_privateca_ca_pool_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name for this CaPool.`, - }, - "tier": { - Type: resource_privateca_ca_pool_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_privateca_ca_pool_validation.StringInSlice([]string{"ENTERPRISE", "DEVOPS"}, false), - Description: `The Tier of this CaPool. Possible values: ["ENTERPRISE", "DEVOPS"]`, - }, - "issuance_policy": { - Type: resource_privateca_ca_pool_schema.TypeList, - Optional: true, - Description: `The IssuancePolicy to control how Certificates will be issued from this CaPool.`, - MaxItems: 1, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "allowed_issuance_modes": { - Type: resource_privateca_ca_pool_schema.TypeList, - Optional: true, - Description: `IssuanceModes specifies the allowed ways in which Certificates may be requested from this CaPool.`, - MaxItems: 1, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "allow_config_based_issuance": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Required: true, - Description: `When true, allows callers to create Certificates by specifying a CertificateConfig.`, - }, - "allow_csr_based_issuance": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Required: true, - Description: `When true, allows 
callers to create Certificates by specifying a CSR.`, - }, - }, - }, - }, - "allowed_key_types": { - Type: resource_privateca_ca_pool_schema.TypeList, - Optional: true, - Description: `If any AllowedKeyType is specified, then the certificate request's public key must match one of the key types listed here. -Otherwise, any key may be used.`, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "elliptic_curve": { - Type: resource_privateca_ca_pool_schema.TypeList, - Optional: true, - Description: `Represents an allowed Elliptic Curve key type.`, - MaxItems: 1, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "signature_algorithm": { - Type: resource_privateca_ca_pool_schema.TypeString, - Required: true, - ValidateFunc: resource_privateca_ca_pool_validation.StringInSlice([]string{"ECDSA_P256", "ECDSA_P384", "EDDSA_25519"}, false), - Description: `The algorithm used. Possible values: ["ECDSA_P256", "ECDSA_P384", "EDDSA_25519"]`, - }, - }, - }, - }, - "rsa": { - Type: resource_privateca_ca_pool_schema.TypeList, - Optional: true, - Description: `Describes an RSA key that may be used in a Certificate issued from a CaPool.`, - MaxItems: 1, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "max_modulus_size": { - Type: resource_privateca_ca_pool_schema.TypeString, - Optional: true, - Description: `The maximum allowed RSA modulus size, in bits. If this is not set, or if set to zero, the -service will not enforce an explicit upper bound on RSA modulus sizes.`, - }, - "min_modulus_size": { - Type: resource_privateca_ca_pool_schema.TypeString, - Optional: true, - Description: `The minimum allowed RSA modulus size, in bits. 
If this is not set, or if set to zero, the -service-level min RSA modulus size will continue to apply.`, - }, - }, - }, - }, - }, - }, - }, - "baseline_values": { - Type: resource_privateca_ca_pool_schema.TypeList, - Optional: true, - Description: `A set of X.509 values that will be applied to all certificates issued through this CaPool. If a certificate request -includes conflicting values for the same properties, they will be overwritten by the values defined here. If a certificate -request uses a CertificateTemplate that defines conflicting predefinedValues for the same properties, the certificate -issuance request will fail.`, - MaxItems: 1, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "ca_options": { - Type: resource_privateca_ca_pool_schema.TypeList, - Required: true, - Description: `Describes values that are relevant in a CA certificate.`, - MaxItems: 1, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "is_ca": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Optional: true, - Description: `Refers to the "CA" X.509 extension, which is a boolean value. When this value is missing, -the extension will be omitted from the CA certificate.`, - }, - "max_issuer_path_length": { - Type: resource_privateca_ca_pool_schema.TypeInt, - Optional: true, - Description: `Refers to the path length restriction X.509 extension. For a CA certificate, this value describes the depth of -subordinate CA certificates that are allowed. If this value is less than 0, the request will fail. 
If this -value is missing, the max path length will be omitted from the CA certificate.`, - }, - }, - }, - }, - "key_usage": { - Type: resource_privateca_ca_pool_schema.TypeList, - Required: true, - Description: `Indicates the intended use for keys that correspond to a certificate.`, - MaxItems: 1, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "base_key_usage": { - Type: resource_privateca_ca_pool_schema.TypeList, - Required: true, - Description: `Describes high-level ways in which a key may be used.`, - MaxItems: 1, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "cert_sign": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Optional: true, - Description: `The key may be used to sign certificates.`, - }, - "content_commitment": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Optional: true, - Description: `The key may be used for cryptographic commitments. 
Note that this may also be referred to as "non-repudiation".`, - }, - "crl_sign": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Optional: true, - Description: `The key may be used sign certificate revocation lists.`, - }, - "data_encipherment": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Optional: true, - Description: `The key may be used to encipher data.`, - }, - "decipher_only": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Optional: true, - Description: `The key may be used to decipher only.`, - }, - "digital_signature": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Optional: true, - Description: `The key may be used for digital signatures.`, - }, - "encipher_only": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Optional: true, - Description: `The key may be used to encipher only.`, - }, - "key_agreement": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Optional: true, - Description: `The key may be used in a key agreement protocol.`, - }, - "key_encipherment": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Optional: true, - Description: `The key may be used to encipher other keys.`, - }, - }, - }, - }, - "extended_key_usage": { - Type: resource_privateca_ca_pool_schema.TypeList, - Required: true, - Description: `Describes high-level ways in which a key may be used.`, - MaxItems: 1, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "client_auth": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Optional: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.2. Officially described as "TLS WWW client authentication", though regularly used for non-WWW TLS.`, - }, - "code_signing": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Optional: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.3. 
Officially described as "Signing of downloadable executable code client authentication".`, - }, - "email_protection": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Optional: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially described as "Email protection".`, - }, - "ocsp_signing": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Optional: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially described as "Signing OCSP responses".`, - }, - "server_auth": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Optional: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially described as "TLS WWW server authentication", though regularly used for non-WWW TLS.`, - }, - "time_stamping": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Optional: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially described as "Binding the hash of an object to a time".`, - }, - }, - }, - }, - "unknown_extended_key_usages": { - Type: resource_privateca_ca_pool_schema.TypeList, - Optional: true, - Description: `An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages.`, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "object_id_path": { - Type: resource_privateca_ca_pool_schema.TypeList, - Required: true, - Description: `An ObjectId specifies an object identifier (OID). 
These provide context and describe types in ASN.1 messages.`, - Elem: &resource_privateca_ca_pool_schema.Schema{ - Type: resource_privateca_ca_pool_schema.TypeInt, - }, - }, - }, - }, - }, - }, - }, - }, - "additional_extensions": { - Type: resource_privateca_ca_pool_schema.TypeList, - Optional: true, - Description: `Specifies an X.509 extension, which may be used in different parts of X.509 objects like certificates, CSRs, and CRLs.`, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "critical": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Required: true, - Description: `Indicates whether or not this extension is critical (i.e., if the client does not know how to -handle this extension, the client should consider this to be an error).`, - }, - "object_id": { - Type: resource_privateca_ca_pool_schema.TypeList, - Required: true, - Description: `Describes values that are relevant in a CA certificate.`, - MaxItems: 1, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "object_id_path": { - Type: resource_privateca_ca_pool_schema.TypeList, - Required: true, - Description: `An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages.`, - Elem: &resource_privateca_ca_pool_schema.Schema{ - Type: resource_privateca_ca_pool_schema.TypeInt, - }, - }, - }, - }, - }, - "value": { - Type: resource_privateca_ca_pool_schema.TypeString, - Required: true, - Description: `The value of this X.509 extension. 
A base64-encoded string.`, - }, - }, - }, - }, - "aia_ocsp_servers": { - Type: resource_privateca_ca_pool_schema.TypeList, - Optional: true, - Description: `Describes Online Certificate Status Protocol (OCSP) endpoint addresses that appear in the -"Authority Information Access" extension in the certificate.`, - Elem: &resource_privateca_ca_pool_schema.Schema{ - Type: resource_privateca_ca_pool_schema.TypeString, - }, - }, - "policy_ids": { - Type: resource_privateca_ca_pool_schema.TypeList, - Optional: true, - Description: `Describes the X.509 certificate policy object identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4.`, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "object_id_path": { - Type: resource_privateca_ca_pool_schema.TypeList, - Required: true, - Description: `An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages.`, - Elem: &resource_privateca_ca_pool_schema.Schema{ - Type: resource_privateca_ca_pool_schema.TypeInt, - }, - }, - }, - }, - }, - }, - }, - }, - "identity_constraints": { - Type: resource_privateca_ca_pool_schema.TypeList, - Optional: true, - Description: `Describes constraints on identities that may appear in Certificates issued through this CaPool. -If this is omitted, then this CaPool will not add restrictions on a certificate's identity.`, - MaxItems: 1, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "allow_subject_alt_names_passthrough": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Required: true, - Description: `If this is set, the SubjectAltNames extension may be copied from a certificate request into the signed certificate. 
-Otherwise, the requested SubjectAltNames will be discarded.`, - }, - "allow_subject_passthrough": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Required: true, - Description: `If this is set, the Subject field may be copied from a certificate request into the signed certificate. -Otherwise, the requested Subject will be discarded.`, - }, - "cel_expression": { - Type: resource_privateca_ca_pool_schema.TypeList, - Optional: true, - Description: `A CEL expression that may be used to validate the resolved X.509 Subject and/or Subject Alternative Name before a -certificate is signed. To see the full allowed syntax and some examples, -see https://cloud.google.com/certificate-authority-service/docs/cel-guide`, - MaxItems: 1, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "expression": { - Type: resource_privateca_ca_pool_schema.TypeString, - Required: true, - Description: `Textual representation of an expression in Common Expression Language syntax.`, - }, - "description": { - Type: resource_privateca_ca_pool_schema.TypeString, - Optional: true, - Description: `Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.`, - }, - "location": { - Type: resource_privateca_ca_pool_schema.TypeString, - Optional: true, - Description: `String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.`, - }, - "title": { - Type: resource_privateca_ca_pool_schema.TypeString, - Optional: true, - Description: `Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.`, - }, - }, - }, - }, - }, - }, - }, - "maximum_lifetime": { - Type: resource_privateca_ca_pool_schema.TypeString, - Optional: true, - Description: `The maximum lifetime allowed for issued Certificates. 
Note that if the issuing CertificateAuthority -expires before a Certificate's requested maximumLifetime, the effective lifetime will be explicitly truncated to match it.`, - }, - }, - }, - }, - "labels": { - Type: resource_privateca_ca_pool_schema.TypeMap, - Optional: true, - Description: `Labels with user-defined metadata. - -An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": -"1.3kg", "count": "3" }.`, - Elem: &resource_privateca_ca_pool_schema.Schema{Type: resource_privateca_ca_pool_schema.TypeString}, - }, - "publishing_options": { - Type: resource_privateca_ca_pool_schema.TypeList, - Optional: true, - DiffSuppressFunc: emptyOrUnsetBlockDiffSuppress, - Description: `The PublishingOptions to follow when issuing Certificates from any CertificateAuthority in this CaPool.`, - MaxItems: 1, - Elem: &resource_privateca_ca_pool_schema.Resource{ - Schema: map[string]*resource_privateca_ca_pool_schema.Schema{ - "publish_ca_cert": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Required: true, - Description: `When true, publishes each CertificateAuthority's CA certificate and includes its URL in the "Authority Information Access" -X.509 extension in all issued Certificates. If this is false, the CA certificate will not be published and the corresponding -X.509 extension will not be written in issued certificates.`, - }, - "publish_crl": { - Type: resource_privateca_ca_pool_schema.TypeBool, - Required: true, - Description: `When true, publishes each CertificateAuthority's CRL and includes its URL in the "CRL Distribution Points" X.509 extension -in all issued Certificates. If this is false, CRLs will not be published and the corresponding X.509 extension will not -be written in issued certificates. CRLs will expire 7 days from their creation. However, we will rebuild daily. 
CRLs are -also rebuilt shortly after a certificate is revoked.`, - }, - }, - }, - }, - "project": { - Type: resource_privateca_ca_pool_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourcePrivatecaCaPoolCreate(d *resource_privateca_ca_pool_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - tierProp, err := expandPrivatecaCaPoolTier(d.Get("tier"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tier"); !isEmptyValue(resource_privateca_ca_pool_reflect.ValueOf(tierProp)) && (ok || !resource_privateca_ca_pool_reflect.DeepEqual(v, tierProp)) { - obj["tier"] = tierProp - } - issuancePolicyProp, err := expandPrivatecaCaPoolIssuancePolicy(d.Get("issuance_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("issuance_policy"); !isEmptyValue(resource_privateca_ca_pool_reflect.ValueOf(issuancePolicyProp)) && (ok || !resource_privateca_ca_pool_reflect.DeepEqual(v, issuancePolicyProp)) { - obj["issuancePolicy"] = issuancePolicyProp - } - publishingOptionsProp, err := expandPrivatecaCaPoolPublishingOptions(d.Get("publishing_options"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("publishing_options"); !isEmptyValue(resource_privateca_ca_pool_reflect.ValueOf(publishingOptionsProp)) && (ok || !resource_privateca_ca_pool_reflect.DeepEqual(v, publishingOptionsProp)) { - obj["publishingOptions"] = publishingOptionsProp - } - labelsProp, err := expandPrivatecaCaPoolLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_privateca_ca_pool_reflect.ValueOf(labelsProp)) && (ok || !resource_privateca_ca_pool_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - 
url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools?caPoolId={{name}}") - if err != nil { - return err - } - - resource_privateca_ca_pool_log.Printf("[DEBUG] Creating new CaPool: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_privateca_ca_pool_fmt.Errorf("Error fetching project for CaPool: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_privateca_ca_pool_schema.TimeoutCreate)) - if err != nil { - return resource_privateca_ca_pool_fmt.Errorf("Error creating CaPool: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{name}}") - if err != nil { - return resource_privateca_ca_pool_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = privatecaOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating CaPool", userAgent, - d.Timeout(resource_privateca_ca_pool_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_privateca_ca_pool_fmt.Errorf("Error waiting to create CaPool: %s", err) - } - - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{name}}") - if err != nil { - return resource_privateca_ca_pool_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_privateca_ca_pool_log.Printf("[DEBUG] Finished creating CaPool %q: %#v", d.Id(), res) - - return resourcePrivatecaCaPoolRead(d, meta) -} - -func resourcePrivatecaCaPoolRead(d *resource_privateca_ca_pool_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_privateca_ca_pool_fmt.Errorf("Error fetching project for CaPool: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_privateca_ca_pool_fmt.Sprintf("PrivatecaCaPool %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_privateca_ca_pool_fmt.Errorf("Error reading CaPool: %s", err) - } - - if err := d.Set("tier", flattenPrivatecaCaPoolTier(res["tier"], d, config)); err != nil { - return resource_privateca_ca_pool_fmt.Errorf("Error reading CaPool: %s", err) - } - if err := d.Set("issuance_policy", flattenPrivatecaCaPoolIssuancePolicy(res["issuancePolicy"], d, config)); err != nil { - return resource_privateca_ca_pool_fmt.Errorf("Error reading CaPool: %s", err) - } - if err := d.Set("publishing_options", flattenPrivatecaCaPoolPublishingOptions(res["publishingOptions"], d, config)); err != nil { - return resource_privateca_ca_pool_fmt.Errorf("Error reading CaPool: %s", err) - } - if err := d.Set("labels", flattenPrivatecaCaPoolLabels(res["labels"], d, config)); err != nil { - return resource_privateca_ca_pool_fmt.Errorf("Error reading CaPool: %s", err) - } - - return nil -} - -func resourcePrivatecaCaPoolUpdate(d *resource_privateca_ca_pool_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_privateca_ca_pool_fmt.Errorf("Error fetching project for CaPool: %s", err) - } - 
billingProject = project - - obj := make(map[string]interface{}) - issuancePolicyProp, err := expandPrivatecaCaPoolIssuancePolicy(d.Get("issuance_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("issuance_policy"); !isEmptyValue(resource_privateca_ca_pool_reflect.ValueOf(v)) && (ok || !resource_privateca_ca_pool_reflect.DeepEqual(v, issuancePolicyProp)) { - obj["issuancePolicy"] = issuancePolicyProp - } - publishingOptionsProp, err := expandPrivatecaCaPoolPublishingOptions(d.Get("publishing_options"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("publishing_options"); !isEmptyValue(resource_privateca_ca_pool_reflect.ValueOf(v)) && (ok || !resource_privateca_ca_pool_reflect.DeepEqual(v, publishingOptionsProp)) { - obj["publishingOptions"] = publishingOptionsProp - } - labelsProp, err := expandPrivatecaCaPoolLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_privateca_ca_pool_reflect.ValueOf(v)) && (ok || !resource_privateca_ca_pool_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{name}}") - if err != nil { - return err - } - - resource_privateca_ca_pool_log.Printf("[DEBUG] Updating CaPool %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("issuance_policy") { - updateMask = append(updateMask, "issuancePolicy") - } - - if d.HasChange("publishing_options") { - updateMask = append(updateMask, "publishingOptions") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_privateca_ca_pool_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := 
sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_privateca_ca_pool_schema.TimeoutUpdate)) - - if err != nil { - return resource_privateca_ca_pool_fmt.Errorf("Error updating CaPool %q: %s", d.Id(), err) - } else { - resource_privateca_ca_pool_log.Printf("[DEBUG] Finished updating CaPool %q: %#v", d.Id(), res) - } - - err = privatecaOperationWaitTime( - config, res, project, "Updating CaPool", userAgent, - d.Timeout(resource_privateca_ca_pool_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourcePrivatecaCaPoolRead(d, meta) -} - -func resourcePrivatecaCaPoolDelete(d *resource_privateca_ca_pool_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_privateca_ca_pool_fmt.Errorf("Error fetching project for CaPool: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_privateca_ca_pool_log.Printf("[DEBUG] Deleting CaPool %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_privateca_ca_pool_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "CaPool") - } - - err = privatecaOperationWaitTime( - config, res, project, "Deleting CaPool", userAgent, - d.Timeout(resource_privateca_ca_pool_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_privateca_ca_pool_log.Printf("[DEBUG] Finished deleting CaPool %q: %#v", d.Id(), res) - return nil -} - -func resourcePrivatecaCaPoolImport(d 
*resource_privateca_ca_pool_schema.ResourceData, meta interface{}) ([]*resource_privateca_ca_pool_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/caPools/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{name}}") - if err != nil { - return nil, resource_privateca_ca_pool_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_privateca_ca_pool_schema.ResourceData{d}, nil -} - -func flattenPrivatecaCaPoolTier(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCaPoolIssuancePolicy(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allowed_key_types"] = - flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypes(original["allowedKeyTypes"], d, config) - transformed["maximum_lifetime"] = - flattenPrivatecaCaPoolIssuancePolicyMaximumLifetime(original["maximumLifetime"], d, config) - transformed["allowed_issuance_modes"] = - flattenPrivatecaCaPoolIssuancePolicyAllowedIssuanceModes(original["allowedIssuanceModes"], d, config) - transformed["identity_constraints"] = - flattenPrivatecaCaPoolIssuancePolicyIdentityConstraints(original["identityConstraints"], d, config) - transformed["baseline_values"] = - flattenPrivatecaCaPoolIssuancePolicyBaselineValues(original["baselineValues"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypes(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - if v == nil { - 
return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "rsa": flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsa(original["rsa"], d, config), - "elliptic_curve": flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurve(original["ellipticCurve"], d, config), - }) - } - return transformed -} - -func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsa(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["min_modulus_size"] = - flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMinModulusSize(original["minModulusSize"], d, config) - transformed["max_modulus_size"] = - flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMaxModulusSize(original["maxModulusSize"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMinModulusSize(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMaxModulusSize(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurve(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["signature_algorithm"] = - 
flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurveSignatureAlgorithm(original["signatureAlgorithm"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurveSignatureAlgorithm(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCaPoolIssuancePolicyMaximumLifetime(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCaPoolIssuancePolicyAllowedIssuanceModes(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allow_csr_based_issuance"] = - flattenPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowCsrBasedIssuance(original["allowCsrBasedIssuance"], d, config) - transformed["allow_config_based_issuance"] = - flattenPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowConfigBasedIssuance(original["allowConfigBasedIssuance"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowCsrBasedIssuance(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowConfigBasedIssuance(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraints(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["allow_subject_passthrough"] = - flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsAllowSubjectPassthrough(original["allowSubjectPassthrough"], d, config) - transformed["allow_subject_alt_names_passthrough"] = - flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsAllowSubjectAltNamesPassthrough(original["allowSubjectAltNamesPassthrough"], d, config) - transformed["cel_expression"] = - flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpression(original["celExpression"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsAllowSubjectPassthrough(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsAllowSubjectAltNamesPassthrough(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpression(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["expression"] = - flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionExpression(original["expression"], d, config) - transformed["title"] = - flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionTitle(original["title"], d, config) - transformed["description"] = - flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionDescription(original["description"], d, config) - transformed["location"] = - flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionLocation(original["location"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionExpression(v interface{}, 
d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionTitle(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionDescription(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionLocation(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCaPoolIssuancePolicyBaselineValues(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - if v == nil { - v = make(map[string]interface{}) - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["additional_extensions"] = - flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensions(original["additionalExtensions"], d, config) - transformed["policy_ids"] = - flattenPrivatecaCertificateConfigX509ConfigPolicyIds(original["policyIds"], d, config) - transformed["aia_ocsp_servers"] = flattenPrivatecaCertificateConfigX509ConfigAiaOcspServers(original["aiaOcspServers"], d, config) - transformed["ca_options"] = - flattenPrivatecaCertificateConfigX509ConfigCaOptions(original["caOptions"], d, config) - transformed["key_usage"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsage(original["keyUsage"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCaPoolPublishingOptions(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["publish_ca_cert"] = - 
flattenPrivatecaCaPoolPublishingOptionsPublishCaCert(original["publishCaCert"], d, config) - transformed["publish_crl"] = - flattenPrivatecaCaPoolPublishingOptionsPublishCrl(original["publishCrl"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCaPoolPublishingOptionsPublishCaCert(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCaPoolPublishingOptionsPublishCrl(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCaPoolLabels(v interface{}, d *resource_privateca_ca_pool_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandPrivatecaCaPoolTier(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowedKeyTypes, err := expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypes(original["allowed_key_types"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedAllowedKeyTypes); val.IsValid() && !isEmptyValue(val) { - transformed["allowedKeyTypes"] = transformedAllowedKeyTypes - } - - transformedMaximumLifetime, err := expandPrivatecaCaPoolIssuancePolicyMaximumLifetime(original["maximum_lifetime"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedMaximumLifetime); val.IsValid() && !isEmptyValue(val) { - transformed["maximumLifetime"] = transformedMaximumLifetime - } - - transformedAllowedIssuanceModes, err := 
expandPrivatecaCaPoolIssuancePolicyAllowedIssuanceModes(original["allowed_issuance_modes"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedAllowedIssuanceModes); val.IsValid() && !isEmptyValue(val) { - transformed["allowedIssuanceModes"] = transformedAllowedIssuanceModes - } - - transformedIdentityConstraints, err := expandPrivatecaCaPoolIssuancePolicyIdentityConstraints(original["identity_constraints"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedIdentityConstraints); val.IsValid() && !isEmptyValue(val) { - transformed["identityConstraints"] = transformedIdentityConstraints - } - - transformedBaselineValues, err := expandPrivatecaCaPoolIssuancePolicyBaselineValues(original["baseline_values"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedBaselineValues); val.IsValid() && !isEmptyValue(val) { - transformed["baselineValues"] = transformedBaselineValues - } - - return transformed, nil -} - -func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedRsa, err := expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsa(original["rsa"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedRsa); val.IsValid() && !isEmptyValue(val) { - transformed["rsa"] = transformedRsa - } - - transformedEllipticCurve, err := expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurve(original["elliptic_curve"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_privateca_ca_pool_reflect.ValueOf(transformedEllipticCurve); val.IsValid() && !isEmptyValue(val) { - transformed["ellipticCurve"] = transformedEllipticCurve - } - - req = append(req, transformed) - } - return req, nil -} - -func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsa(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMinModulusSize, err := expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMinModulusSize(original["min_modulus_size"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedMinModulusSize); val.IsValid() && !isEmptyValue(val) { - transformed["minModulusSize"] = transformedMinModulusSize - } - - transformedMaxModulusSize, err := expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMaxModulusSize(original["max_modulus_size"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedMaxModulusSize); val.IsValid() && !isEmptyValue(val) { - transformed["maxModulusSize"] = transformedMaxModulusSize - } - - return transformed, nil -} - -func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMinModulusSize(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesRsaMaxModulusSize(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurve(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedSignatureAlgorithm, err := expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurveSignatureAlgorithm(original["signature_algorithm"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedSignatureAlgorithm); val.IsValid() && !isEmptyValue(val) { - transformed["signatureAlgorithm"] = transformedSignatureAlgorithm - } - - return transformed, nil -} - -func expandPrivatecaCaPoolIssuancePolicyAllowedKeyTypesEllipticCurveSignatureAlgorithm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyMaximumLifetime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyAllowedIssuanceModes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowCsrBasedIssuance, err := expandPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowCsrBasedIssuance(original["allow_csr_based_issuance"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedAllowCsrBasedIssuance); val.IsValid() && !isEmptyValue(val) { - transformed["allowCsrBasedIssuance"] = transformedAllowCsrBasedIssuance - } - - transformedAllowConfigBasedIssuance, err := expandPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowConfigBasedIssuance(original["allow_config_based_issuance"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedAllowConfigBasedIssuance); val.IsValid() && !isEmptyValue(val) { - transformed["allowConfigBasedIssuance"] = transformedAllowConfigBasedIssuance - } - 
- return transformed, nil -} - -func expandPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowCsrBasedIssuance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyAllowedIssuanceModesAllowConfigBasedIssuance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyIdentityConstraints(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowSubjectPassthrough, err := expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsAllowSubjectPassthrough(original["allow_subject_passthrough"], d, config) - if err != nil { - return nil, err - } else { - transformed["allowSubjectPassthrough"] = transformedAllowSubjectPassthrough - } - - transformedAllowSubjectAltNamesPassthrough, err := expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsAllowSubjectAltNamesPassthrough(original["allow_subject_alt_names_passthrough"], d, config) - if err != nil { - return nil, err - } else { - transformed["allowSubjectAltNamesPassthrough"] = transformedAllowSubjectAltNamesPassthrough - } - - transformedCelExpression, err := expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpression(original["cel_expression"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedCelExpression); val.IsValid() && !isEmptyValue(val) { - transformed["celExpression"] = transformedCelExpression - } - - return transformed, nil -} - -func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsAllowSubjectPassthrough(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsAllowSubjectAltNamesPassthrough(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedExpression, err := expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionExpression(original["expression"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedExpression); val.IsValid() && !isEmptyValue(val) { - transformed["expression"] = transformedExpression - } - - transformedTitle, err := expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionTitle(original["title"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedTitle); val.IsValid() && !isEmptyValue(val) { - transformed["title"] = transformedTitle - } - - transformedDescription, err := expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionDescription(original["description"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedDescription); val.IsValid() && !isEmptyValue(val) { - transformed["description"] = transformedDescription - } - - transformedLocation, err := expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - return transformed, nil -} - -func 
expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionExpression(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionTitle(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyIdentityConstraintsCelExpressionLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValues(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAdditionalExtensions, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesAdditionalExtensions(original["additional_extensions"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedAdditionalExtensions); val.IsValid() && !isEmptyValue(val) { - transformed["additionalExtensions"] = transformedAdditionalExtensions - } - - transformedPolicyIds, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesPolicyIds(original["policy_ids"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedPolicyIds); val.IsValid() && !isEmptyValue(val) { - transformed["policyIds"] = transformedPolicyIds - } - - transformedAiaOcspServers, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesAiaOcspServers(original["aia_ocsp_servers"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_privateca_ca_pool_reflect.ValueOf(transformedAiaOcspServers); val.IsValid() && !isEmptyValue(val) { - transformed["aiaOcspServers"] = transformedAiaOcspServers - } - - transformedCaOptions, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesCaOptions(original["ca_options"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedCaOptions); val.IsValid() && !isEmptyValue(val) { - transformed["caOptions"] = transformedCaOptions - } - - transformedKeyUsage, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsage(original["key_usage"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedKeyUsage); val.IsValid() && !isEmptyValue(val) { - transformed["keyUsage"] = transformedKeyUsage - } - - return transformed, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesAdditionalExtensions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCritical, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesAdditionalExtensionsCritical(original["critical"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedCritical); val.IsValid() && !isEmptyValue(val) { - transformed["critical"] = transformedCritical - } - - transformedValue, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesAdditionalExtensionsValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - transformedObjectId, err := 
expandPrivatecaCaPoolIssuancePolicyBaselineValuesAdditionalExtensionsObjectId(original["object_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedObjectId); val.IsValid() && !isEmptyValue(val) { - transformed["objectId"] = transformedObjectId - } - - req = append(req, transformed) - } - return req, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesAdditionalExtensionsCritical(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesAdditionalExtensionsValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesAdditionalExtensionsObjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedObjectIdPath, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesAdditionalExtensionsObjectIdObjectIdPath(original["object_id_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedObjectIdPath); val.IsValid() && !isEmptyValue(val) { - transformed["objectIdPath"] = transformedObjectIdPath - } - - return transformed, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesAdditionalExtensionsObjectIdObjectIdPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesPolicyIds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedObjectIdPath, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesPolicyIdsObjectIdPath(original["object_id_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedObjectIdPath); val.IsValid() && !isEmptyValue(val) { - transformed["objectIdPath"] = transformedObjectIdPath - } - - req = append(req, transformed) - } - return req, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesPolicyIdsObjectIdPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesAiaOcspServers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesCaOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIsCa, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesCaOptionsIsCa(original["is_ca"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedIsCa); val.IsValid() && !isEmptyValue(val) { - transformed["isCa"] = transformedIsCa - } - - transformedMaxIssuerPathLength, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesCaOptionsMaxIssuerPathLength(original["max_issuer_path_length"], d, config) - if err != nil { - return nil, err - } else { - transformed["maxIssuerPathLength"] = transformedMaxIssuerPathLength - } - - return transformed, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesCaOptionsIsCa(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandPrivatecaCaPoolIssuancePolicyBaselineValuesCaOptionsMaxIssuerPathLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBaseKeyUsage, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsage(original["base_key_usage"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedBaseKeyUsage); val.IsValid() && !isEmptyValue(val) { - transformed["baseKeyUsage"] = transformedBaseKeyUsage - } - - transformedExtendedKeyUsage, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsage(original["extended_key_usage"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedExtendedKeyUsage); val.IsValid() && !isEmptyValue(val) { - transformed["extendedKeyUsage"] = transformedExtendedKeyUsage - } - - transformedUnknownExtendedKeyUsages, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageUnknownExtendedKeyUsages(original["unknown_extended_key_usages"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedUnknownExtendedKeyUsages); val.IsValid() && !isEmptyValue(val) { - transformed["unknownExtendedKeyUsages"] = transformedUnknownExtendedKeyUsages - } - - return transformed, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDigitalSignature, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageDigitalSignature(original["digital_signature"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedDigitalSignature); val.IsValid() && !isEmptyValue(val) { - transformed["digitalSignature"] = transformedDigitalSignature - } - - transformedContentCommitment, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageContentCommitment(original["content_commitment"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedContentCommitment); val.IsValid() && !isEmptyValue(val) { - transformed["contentCommitment"] = transformedContentCommitment - } - - transformedKeyEncipherment, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageKeyEncipherment(original["key_encipherment"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedKeyEncipherment); val.IsValid() && !isEmptyValue(val) { - transformed["keyEncipherment"] = transformedKeyEncipherment - } - - transformedDataEncipherment, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageDataEncipherment(original["data_encipherment"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedDataEncipherment); val.IsValid() && !isEmptyValue(val) { - transformed["dataEncipherment"] = transformedDataEncipherment - } - - transformedKeyAgreement, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageKeyAgreement(original["key_agreement"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedKeyAgreement); val.IsValid() && !isEmptyValue(val) 
{ - transformed["keyAgreement"] = transformedKeyAgreement - } - - transformedCertSign, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageCertSign(original["cert_sign"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedCertSign); val.IsValid() && !isEmptyValue(val) { - transformed["certSign"] = transformedCertSign - } - - transformedCrlSign, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageCrlSign(original["crl_sign"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedCrlSign); val.IsValid() && !isEmptyValue(val) { - transformed["crlSign"] = transformedCrlSign - } - - transformedEncipherOnly, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageEncipherOnly(original["encipher_only"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedEncipherOnly); val.IsValid() && !isEmptyValue(val) { - transformed["encipherOnly"] = transformedEncipherOnly - } - - transformedDecipherOnly, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageDecipherOnly(original["decipher_only"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedDecipherOnly); val.IsValid() && !isEmptyValue(val) { - transformed["decipherOnly"] = transformedDecipherOnly - } - - return transformed, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageDigitalSignature(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageContentCommitment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageKeyEncipherment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageDataEncipherment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageKeyAgreement(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageCertSign(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageCrlSign(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageEncipherOnly(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageBaseKeyUsageDecipherOnly(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServerAuth, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsageServerAuth(original["server_auth"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedServerAuth); val.IsValid() && !isEmptyValue(val) { - transformed["serverAuth"] = 
transformedServerAuth - } - - transformedClientAuth, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsageClientAuth(original["client_auth"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedClientAuth); val.IsValid() && !isEmptyValue(val) { - transformed["clientAuth"] = transformedClientAuth - } - - transformedCodeSigning, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsageCodeSigning(original["code_signing"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedCodeSigning); val.IsValid() && !isEmptyValue(val) { - transformed["codeSigning"] = transformedCodeSigning - } - - transformedEmailProtection, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsageEmailProtection(original["email_protection"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedEmailProtection); val.IsValid() && !isEmptyValue(val) { - transformed["emailProtection"] = transformedEmailProtection - } - - transformedTimeStamping, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsageTimeStamping(original["time_stamping"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedTimeStamping); val.IsValid() && !isEmptyValue(val) { - transformed["timeStamping"] = transformedTimeStamping - } - - transformedOcspSigning, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsageOcspSigning(original["ocsp_signing"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedOcspSigning); val.IsValid() && !isEmptyValue(val) { - transformed["ocspSigning"] = transformedOcspSigning - } - - return transformed, nil -} - -func 
expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsageServerAuth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsageClientAuth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsageCodeSigning(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsageEmailProtection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsageTimeStamping(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageExtendedKeyUsageOcspSigning(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageUnknownExtendedKeyUsages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedObjectIdPath, err := expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageUnknownExtendedKeyUsagesObjectIdPath(original["object_id_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedObjectIdPath); val.IsValid() && !isEmptyValue(val) { - transformed["objectIdPath"] = transformedObjectIdPath - } - - req = append(req, transformed) - } - return req, nil -} - -func 
expandPrivatecaCaPoolIssuancePolicyBaselineValuesKeyUsageUnknownExtendedKeyUsagesObjectIdPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolPublishingOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPublishCaCert, err := expandPrivatecaCaPoolPublishingOptionsPublishCaCert(original["publish_ca_cert"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedPublishCaCert); val.IsValid() && !isEmptyValue(val) { - transformed["publishCaCert"] = transformedPublishCaCert - } - - transformedPublishCrl, err := expandPrivatecaCaPoolPublishingOptionsPublishCrl(original["publish_crl"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_ca_pool_reflect.ValueOf(transformedPublishCrl); val.IsValid() && !isEmptyValue(val) { - transformed["publishCrl"] = transformedPublishCrl - } - - return transformed, nil -} - -func expandPrivatecaCaPoolPublishingOptionsPublishCaCert(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolPublishingOptionsPublishCrl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCaPoolLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourcePrivatecaCertificate() *resource_privateca_certificate_schema.Resource { - return &resource_privateca_certificate_schema.Resource{ - Create: 
resourcePrivatecaCertificateCreate, - Read: resourcePrivatecaCertificateRead, - Delete: resourcePrivatecaCertificateDelete, - - Importer: &resource_privateca_certificate_schema.ResourceImporter{ - State: resourcePrivatecaCertificateImport, - }, - - Timeouts: &resource_privateca_certificate_schema.ResourceTimeout{ - Create: resource_privateca_certificate_schema.DefaultTimeout(4 * resource_privateca_certificate_time.Minute), - Delete: resource_privateca_certificate_schema.DefaultTimeout(4 * resource_privateca_certificate_time.Minute), - }, - - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "location": { - Type: resource_privateca_certificate_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Location of the Certificate. A full list of valid locations can be found by -running 'gcloud privateca locations list'.`, - }, - "name": { - Type: resource_privateca_certificate_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name for this Certificate.`, - }, - "pool": { - Type: resource_privateca_certificate_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the CaPool this Certificate belongs to.`, - }, - "certificate_template": { - Type: resource_privateca_certificate_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareResourceNames, - Description: `The resource name for a CertificateTemplate used to issue this certificate, -in the format 'projects/*/locations/*/certificateTemplates/*'. If this is specified, -the caller must have the necessary permission to use this template. If this is -omitted, no template will be used. 
This template must be in the same location -as the Certificate.`, - }, - "certificate_authority": { - Type: resource_privateca_certificate_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Certificate Authority name.`, - }, - "config": { - Type: resource_privateca_certificate_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The config used to create a self-signed X.509 certificate or CSR.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "public_key": { - Type: resource_privateca_certificate_schema.TypeList, - Required: true, - ForceNew: true, - Description: `A PublicKey describes a public key.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "format": { - Type: resource_privateca_certificate_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_privateca_certificate_validation.StringInSlice([]string{"KEY_TYPE_UNSPECIFIED", "PEM"}, false), - Description: `The format of the public key. Currently, only PEM format is supported. Possible values: ["KEY_TYPE_UNSPECIFIED", "PEM"]`, - }, - "key": { - Type: resource_privateca_certificate_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Required. A public key. When this is specified in a request, the padding and encoding can be any of the options described by the respective 'KeyType' value. When this is generated by the service, it will always be an RFC 5280 SubjectPublicKeyInfo structure containing an algorithm identifier and a key. 
A base64-encoded string.`, - }, - }, - }, - }, - "subject_config": { - Type: resource_privateca_certificate_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Specifies some of the values in a certificate that are related to the subject.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "subject": { - Type: resource_privateca_certificate_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Contains distinguished name fields such as the location and organization.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "common_name": { - Type: resource_privateca_certificate_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The common name of the distinguished name.`, - }, - "organization": { - Type: resource_privateca_certificate_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The organization of the subject.`, - }, - "country_code": { - Type: resource_privateca_certificate_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The country code of the subject.`, - }, - "locality": { - Type: resource_privateca_certificate_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The locality or city of the subject.`, - }, - "organizational_unit": { - Type: resource_privateca_certificate_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The organizational unit of the subject.`, - }, - "postal_code": { - Type: resource_privateca_certificate_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The postal code of the subject.`, - }, - "province": { - Type: resource_privateca_certificate_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The province, territory, or regional state of the subject.`, - }, - "street_address": { - Type: 
resource_privateca_certificate_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The street address of the subject.`, - }, - }, - }, - }, - "subject_alt_name": { - Type: resource_privateca_certificate_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The subject alternative name fields.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "dns_names": { - Type: resource_privateca_certificate_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Contains only valid, fully-qualified host names.`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeString, - }, - AtLeastOneOf: []string{"config.0.subject_config.0.subject_alt_name.0.dns_names", "config.0.subject_config.0.subject_alt_name.0.uris", "config.0.subject_config.0.subject_alt_name.0.email_addresses", "config.0.subject_config.0.subject_alt_name.0.ip_addresses"}, - }, - "email_addresses": { - Type: resource_privateca_certificate_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Contains only valid RFC 2822 E-mail addresses.`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeString, - }, - AtLeastOneOf: []string{"config.0.subject_config.0.subject_alt_name.0.dns_names", "config.0.subject_config.0.subject_alt_name.0.uris", "config.0.subject_config.0.subject_alt_name.0.email_addresses", "config.0.subject_config.0.subject_alt_name.0.ip_addresses"}, - }, - "ip_addresses": { - Type: resource_privateca_certificate_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Contains only valid 32-bit IPv4 addresses or RFC 4291 IPv6 addresses.`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeString, - }, - AtLeastOneOf: []string{"config.0.subject_config.0.subject_alt_name.0.dns_names", 
"config.0.subject_config.0.subject_alt_name.0.uris", "config.0.subject_config.0.subject_alt_name.0.email_addresses", "config.0.subject_config.0.subject_alt_name.0.ip_addresses"}, - }, - "uris": { - Type: resource_privateca_certificate_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Contains only valid RFC 3986 URIs.`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeString, - }, - AtLeastOneOf: []string{"config.0.subject_config.0.subject_alt_name.0.dns_names", "config.0.subject_config.0.subject_alt_name.0.uris", "config.0.subject_config.0.subject_alt_name.0.email_addresses", "config.0.subject_config.0.subject_alt_name.0.ip_addresses"}, - }, - }, - }, - }, - }, - }, - }, - "x509_config": { - Type: resource_privateca_certificate_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Describes how some of the technical X.509 fields in a certificate should be populated.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "key_usage": { - Type: resource_privateca_certificate_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Indicates the intended use for keys that correspond to a certificate.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "base_key_usage": { - Type: resource_privateca_certificate_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Describes high-level ways in which a key may be used.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "cert_sign": { - Type: resource_privateca_certificate_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used to sign certificates.`, - }, - "content_commitment": { - Type: 
resource_privateca_certificate_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used for cryptographic commitments. Note that this may also be referred to as "non-repudiation".`, - }, - "crl_sign": { - Type: resource_privateca_certificate_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used sign certificate revocation lists.`, - }, - "data_encipherment": { - Type: resource_privateca_certificate_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used to encipher data.`, - }, - "decipher_only": { - Type: resource_privateca_certificate_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used to decipher only.`, - }, - "digital_signature": { - Type: resource_privateca_certificate_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used for digital signatures.`, - }, - "encipher_only": { - Type: resource_privateca_certificate_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used to encipher only.`, - }, - "key_agreement": { - Type: resource_privateca_certificate_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used in a key agreement protocol.`, - }, - "key_encipherment": { - Type: resource_privateca_certificate_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used to encipher other keys.`, - }, - }, - }, - }, - "extended_key_usage": { - Type: resource_privateca_certificate_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Describes high-level ways in which a key may be used.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "client_auth": { - Type: resource_privateca_certificate_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.2. 
Officially described as "TLS WWW client authentication", though regularly used for non-WWW TLS.`, - }, - "code_signing": { - Type: resource_privateca_certificate_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially described as "Signing of downloadable executable code client authentication".`, - }, - "email_protection": { - Type: resource_privateca_certificate_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially described as "Email protection".`, - }, - "ocsp_signing": { - Type: resource_privateca_certificate_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially described as "Signing OCSP responses".`, - }, - "server_auth": { - Type: resource_privateca_certificate_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially described as "TLS WWW server authentication", though regularly used for non-WWW TLS.`, - }, - "time_stamping": { - Type: resource_privateca_certificate_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially described as "Binding the hash of an object to a time".`, - }, - }, - }, - }, - "unknown_extended_key_usages": { - Type: resource_privateca_certificate_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "object_id_path": { - Type: resource_privateca_certificate_schema.TypeList, - Required: true, - ForceNew: true, - Description: `An ObjectId specifies an object identifier (OID). 
These provide context and describe types in ASN.1 messages.`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeInt, - }, - }, - }, - }, - }, - }, - }, - }, - "additional_extensions": { - Type: resource_privateca_certificate_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Specifies an X.509 extension, which may be used in different parts of X.509 objects like certificates, CSRs, and CRLs.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "critical": { - Type: resource_privateca_certificate_schema.TypeBool, - Required: true, - ForceNew: true, - Description: `Indicates whether or not this extension is critical (i.e., if the client does not know how to -handle this extension, the client should consider this to be an error).`, - }, - "object_id": { - Type: resource_privateca_certificate_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Describes values that are relevant in a CA certificate.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "object_id_path": { - Type: resource_privateca_certificate_schema.TypeList, - Required: true, - ForceNew: true, - Description: `An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages.`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeInt, - }, - }, - }, - }, - }, - "value": { - Type: resource_privateca_certificate_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The value of this X.509 extension. 
A base64-encoded string.`, - }, - }, - }, - }, - "aia_ocsp_servers": { - Type: resource_privateca_certificate_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Describes Online Certificate Status Protocol (OCSP) endpoint addresses that appear in the -"Authority Information Access" extension in the certificate.`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeString, - }, - }, - "ca_options": { - Type: resource_privateca_certificate_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Describes values that are relevant in a CA certificate.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "is_ca": { - Type: resource_privateca_certificate_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Refers to the "CA" X.509 extension, which is a boolean value. When this value is missing, -the extension will be omitted from the CA certificate.`, - }, - "max_issuer_path_length": { - Type: resource_privateca_certificate_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `Refers to the path length restriction X.509 extension. For a CA certificate, this value describes the depth of -subordinate CA certificates that are allowed. If this value is less than 0, the request will fail. 
If this -value is missing, the max path length will be omitted from the CA certificate.`, - }, - }, - }, - }, - "policy_ids": { - Type: resource_privateca_certificate_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Describes the X.509 certificate policy object identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "object_id_path": { - Type: resource_privateca_certificate_schema.TypeList, - Required: true, - ForceNew: true, - Description: `An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages.`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeInt, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - ExactlyOneOf: []string{"pem_csr", "config"}, - }, - "labels": { - Type: resource_privateca_certificate_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Labels with user-defined metadata to apply to this resource.`, - Elem: &resource_privateca_certificate_schema.Schema{Type: resource_privateca_certificate_schema.TypeString}, - }, - "lifetime": { - Type: resource_privateca_certificate_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The desired lifetime of the CA certificate. Used to create the "notBeforeTime" and -"notAfterTime" fields inside an X.509 certificate. A duration in seconds with up to nine -fractional digits, terminated by 's'. Example: "3.5s".`, - Default: "315360000s", - }, - "pem_csr": { - Type: resource_privateca_certificate_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Immutable. A pem-encoded X.509 certificate signing request (CSR).`, - ExactlyOneOf: []string{"pem_csr", "config"}, - }, - "certificate_description": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Output only. 
Details regarding the revocation of this Certificate. This Certificate is considered revoked if and only if this field is present.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "aia_issuing_certificate_urls": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Describes lists of issuer CA certificate URLs that appear in the "Authority Information Access" extension in the certificate.`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeString, - }, - }, - "authority_key_id": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Identifies the subjectKeyId of the parent certificate, per https://tools.ietf.org/html/rfc5280#section-4.2.1.1`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "key_id": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `Optional. The value of this KeyId encoded in lowercase hexadecimal. 
This is most likely the 160 bit SHA-1 hash of the public key.`, - }, - }, - }, - }, - "cert_fingerprint": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `The hash of the x.509 certificate.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "sha256_hash": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `The SHA 256 hash, encoded in hexadecimal, of the DER x509 certificate.`, - }, - }, - }, - }, - "config_values": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Describes some of the technical fields in a certificate.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "key_usage": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Indicates the intended use for keys that correspond to a certificate.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "base_key_usage": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Describes high-level ways in which a key may be used.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "key_usage_options": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Describes high-level ways in which a key may be used.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "cert_sign": { - Type: resource_privateca_certificate_schema.TypeBool, - Computed: true, - Description: `The key may be used to sign certificates.`, - }, - "content_commitment": { - Type: resource_privateca_certificate_schema.TypeBool, - 
Computed: true, - Description: `The key may be used for cryptographic commitments. Note that this may also be referred to as "non-repudiation".`, - }, - "crl_sign": { - Type: resource_privateca_certificate_schema.TypeBool, - Computed: true, - Description: `The key may be used sign certificate revocation lists.`, - }, - "data_encipherment": { - Type: resource_privateca_certificate_schema.TypeBool, - Computed: true, - Description: `The key may be used to encipher data.`, - }, - "decipher_only": { - Type: resource_privateca_certificate_schema.TypeBool, - Computed: true, - Description: `The key may be used to decipher only.`, - }, - "digital_signature": { - Type: resource_privateca_certificate_schema.TypeBool, - Computed: true, - Description: `The key may be used for digital signatures.`, - }, - "encipher_only": { - Type: resource_privateca_certificate_schema.TypeBool, - Computed: true, - Description: `The key may be used to encipher only.`, - }, - "key_agreement": { - Type: resource_privateca_certificate_schema.TypeBool, - Computed: true, - Description: `The key may be used in a key agreement protocol.`, - }, - "key_encipherment": { - Type: resource_privateca_certificate_schema.TypeBool, - Computed: true, - Description: `The key may be used to encipher other keys.`, - }, - }, - }, - }, - }, - }, - }, - "extended_key_usage": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Describes high-level ways in which a key may be used.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "client_auth": { - Type: resource_privateca_certificate_schema.TypeBool, - Computed: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.2. 
Officially described as "TLS WWW client authentication", though regularly used for non-WWW TLS.`, - }, - "code_signing": { - Type: resource_privateca_certificate_schema.TypeBool, - Computed: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially described as "Signing of downloadable executable code client authentication".`, - }, - "email_protection": { - Type: resource_privateca_certificate_schema.TypeBool, - Computed: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially described as "Email protection".`, - }, - "ocsp_signing": { - Type: resource_privateca_certificate_schema.TypeBool, - Computed: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially described as "Signing OCSP responses".`, - }, - "server_auth": { - Type: resource_privateca_certificate_schema.TypeBool, - Computed: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially described as "TLS WWW server authentication", though regularly used for non-WWW TLS.`, - }, - "time_stamping": { - Type: resource_privateca_certificate_schema.TypeBool, - Computed: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially described as "Binding the hash of an object to a time".`, - }, - }, - }, - }, - "unknown_extended_key_usages": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "obect_id": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Required. 
Describes how some of the technical fields in a certificate should be populated.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "object_id_path": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages.`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeInt, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "crl_distribution_points": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Describes a list of locations to obtain CRL information, i.e. the DistributionPoint.fullName described by https://tools.ietf.org/html/rfc5280#section-4.2.1.13`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeString, - }, - }, - "public_key": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `A PublicKey describes a public key.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "format": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `The format of the public key. Currently, only PEM format is supported.`, - }, - "key": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `Required. A public key. When this is specified in a request, the padding and encoding can be any of the options described by the respective 'KeyType' value. When this is generated by the service, it will always be an RFC 5280 SubjectPublicKeyInfo structure containing an algorithm identifier and a key. 
A base64-encoded string.`, - }, - }, - }, - }, - "subject_description": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Describes some of the values in a certificate that are related to the subject and lifetime.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "hex_serial_number": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `The serial number encoded in lowercase hexadecimal.`, - }, - "lifetime": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `For convenience, the actual lifetime of an issued certificate. Corresponds to 'notAfterTime' - 'notBeforeTime'.`, - }, - "not_after_time": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `The time at which the certificate expires.`, - }, - "not_before_time": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `The time at which the certificate becomes valid.`, - }, - "subject": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Contains distinguished name fields such as the location and organization.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "common_name": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `The "common name" of the distinguished name.`, - }, - "country_code": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `The country code of the subject.`, - }, - "locality": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `The locality or city of the subject.`, - }, - "organization": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - 
Description: `The organization of the subject.`, - }, - "organizational_unit": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `The organizationalUnit of the subject.`, - }, - "postal_code": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `The postalCode or city of the subject.`, - }, - "province": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `The province of the subject.`, - }, - "street_address": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `The streetAddress or city of the subject.`, - }, - }, - }, - }, - "subject_alt_name": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `The subject alternative name fields.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "custom_sans": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Contains additional subject alternative name values.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "critical": { - Type: resource_privateca_certificate_schema.TypeBool, - Computed: true, - Description: `Required. 
Indicates whether or not this extension is critical (i.e., if the client does not know how to handle this extension, the client should consider this to be an error).`, - }, - "obect_id": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Describes how some of the technical fields in a certificate should be populated.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "object_id_path": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages.`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeInt, - }, - }, - }, - }, - }, - "value": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `The value of this X.509 extension.`, - }, - }, - }, - }, - "dns_names": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Contains only valid, fully-qualified host names.`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeString, - }, - }, - "email_addresses": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Contains only valid RFC 2822 E-mail addresses.`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeString, - }, - }, - "ip_addresses": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Contains only valid 32-bit IPv4 addresses or RFC 4291 IPv6 addresses.`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeString, - }, - }, - "uris": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Contains only 
valid RFC 3986 URIs.`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeString, - }, - }, - }, - }, - }, - }, - }, - }, - "subject_key_id": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Provides a means of identifiying certificates that contain a particular public key, per https://tools.ietf.org/html/rfc5280#section-4.2.1.2.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "key_id": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `Optional. The value of this KeyId encoded in lowercase hexadecimal. This is most likely the 160 bit SHA-1 hash of the public key.`, - }, - }, - }, - }, - }, - }, - }, - "create_time": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `The time that this resource was created on the server. -This is in RFC3339 text format.`, - }, - "pem_certificate": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `Output only. The pem-encoded, signed X.509 certificate.`, - }, - "pem_certificates": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Required. Expected to be in leaf-to-root order according to RFC 5246.`, - Elem: &resource_privateca_certificate_schema.Schema{ - Type: resource_privateca_certificate_schema.TypeString, - }, - }, - "revocation_details": { - Type: resource_privateca_certificate_schema.TypeList, - Computed: true, - Description: `Output only. Details regarding the revocation of this Certificate. 
This Certificate is -considered revoked if and only if this field is present.`, - Elem: &resource_privateca_certificate_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_schema.Schema{ - "revocation_state": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `Indicates why a Certificate was revoked.`, - }, - "revocation_time": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `The time at which this Certificate was revoked.`, - }, - }, - }, - }, - "update_time": { - Type: resource_privateca_certificate_schema.TypeString, - Computed: true, - Description: `Output only. The time at which this CertificateAuthority was updated. -This is in RFC3339 text format.`, - }, - "project": { - Type: resource_privateca_certificate_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourcePrivatecaCertificateCreate(d *resource_privateca_certificate_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - lifetimeProp, err := expandPrivatecaCertificateLifetime(d.Get("lifetime"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("lifetime"); !isEmptyValue(resource_privateca_certificate_reflect.ValueOf(lifetimeProp)) && (ok || !resource_privateca_certificate_reflect.DeepEqual(v, lifetimeProp)) { - obj["lifetime"] = lifetimeProp - } - certificateTemplateProp, err := expandPrivatecaCertificateCertificateTemplate(d.Get("certificate_template"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("certificate_template"); !isEmptyValue(resource_privateca_certificate_reflect.ValueOf(certificateTemplateProp)) && (ok || !resource_privateca_certificate_reflect.DeepEqual(v, certificateTemplateProp)) { - 
obj["certificateTemplate"] = certificateTemplateProp - } - labelsProp, err := expandPrivatecaCertificateLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_privateca_certificate_reflect.ValueOf(labelsProp)) && (ok || !resource_privateca_certificate_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - pemCsrProp, err := expandPrivatecaCertificatePemCsr(d.Get("pem_csr"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("pem_csr"); !isEmptyValue(resource_privateca_certificate_reflect.ValueOf(pemCsrProp)) && (ok || !resource_privateca_certificate_reflect.DeepEqual(v, pemCsrProp)) { - obj["pemCsr"] = pemCsrProp - } - configProp, err := expandPrivatecaCertificateConfig(d.Get("config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("config"); !isEmptyValue(resource_privateca_certificate_reflect.ValueOf(configProp)) && (ok || !resource_privateca_certificate_reflect.DeepEqual(v, configProp)) { - obj["config"] = configProp - } - - url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates?certificateId={{name}}") - if err != nil { - return err - } - - resource_privateca_certificate_log.Printf("[DEBUG] Creating new Certificate: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_privateca_certificate_fmt.Errorf("Error fetching project for Certificate: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - if p, ok := d.GetOk("certificate_authority"); ok { - url, err = addQueryParams(url, map[string]string{"issuingCertificateAuthorityId": p.(string)}) - if err != nil { - return err - } - } - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, 
d.Timeout(resource_privateca_certificate_schema.TimeoutCreate)) - if err != nil { - return resource_privateca_certificate_fmt.Errorf("Error creating Certificate: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates/{{name}}") - if err != nil { - return resource_privateca_certificate_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_privateca_certificate_log.Printf("[DEBUG] Finished creating Certificate %q: %#v", d.Id(), res) - - return resourcePrivatecaCertificateRead(d, meta) -} - -func resourcePrivatecaCertificateRead(d *resource_privateca_certificate_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_privateca_certificate_fmt.Errorf("Error fetching project for Certificate: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_privateca_certificate_fmt.Sprintf("PrivatecaCertificate %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_privateca_certificate_fmt.Errorf("Error reading Certificate: %s", err) - } - - if err := d.Set("lifetime", flattenPrivatecaCertificateLifetime(res["lifetime"], d, config)); err != nil { - return resource_privateca_certificate_fmt.Errorf("Error reading Certificate: %s", err) - } - if err := d.Set("revocation_details", flattenPrivatecaCertificateRevocationDetails(res["revocationDetails"], 
d, config)); err != nil { - return resource_privateca_certificate_fmt.Errorf("Error reading Certificate: %s", err) - } - if err := d.Set("pem_certificate", flattenPrivatecaCertificatePemCertificate(res["pemCertificate"], d, config)); err != nil { - return resource_privateca_certificate_fmt.Errorf("Error reading Certificate: %s", err) - } - if err := d.Set("certificate_description", flattenPrivatecaCertificateCertificateDescription(res["certificateDescription"], d, config)); err != nil { - return resource_privateca_certificate_fmt.Errorf("Error reading Certificate: %s", err) - } - if err := d.Set("pem_certificates", flattenPrivatecaCertificatePemCertificates(res["pemCertificates"], d, config)); err != nil { - return resource_privateca_certificate_fmt.Errorf("Error reading Certificate: %s", err) - } - if err := d.Set("create_time", flattenPrivatecaCertificateCreateTime(res["createTime"], d, config)); err != nil { - return resource_privateca_certificate_fmt.Errorf("Error reading Certificate: %s", err) - } - if err := d.Set("update_time", flattenPrivatecaCertificateUpdateTime(res["updateTime"], d, config)); err != nil { - return resource_privateca_certificate_fmt.Errorf("Error reading Certificate: %s", err) - } - if err := d.Set("certificate_template", flattenPrivatecaCertificateCertificateTemplate(res["certificateTemplate"], d, config)); err != nil { - return resource_privateca_certificate_fmt.Errorf("Error reading Certificate: %s", err) - } - if err := d.Set("labels", flattenPrivatecaCertificateLabels(res["labels"], d, config)); err != nil { - return resource_privateca_certificate_fmt.Errorf("Error reading Certificate: %s", err) - } - if err := d.Set("pem_csr", flattenPrivatecaCertificatePemCsr(res["pemCsr"], d, config)); err != nil { - return resource_privateca_certificate_fmt.Errorf("Error reading Certificate: %s", err) - } - if err := d.Set("config", flattenPrivatecaCertificateConfig(res["config"], d, config)); err != nil { - return 
resource_privateca_certificate_fmt.Errorf("Error reading Certificate: %s", err) - } - - return nil -} - -func resourcePrivatecaCertificateDelete(d *resource_privateca_certificate_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_privateca_certificate_fmt.Errorf("Error fetching project for Certificate: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates/{{name}}:revoke") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_privateca_certificate_log.Printf("[DEBUG] Deleting Certificate %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_privateca_certificate_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Certificate") - } - - resource_privateca_certificate_log.Printf("[DEBUG] Finished deleting Certificate %q: %#v", d.Id(), res) - return nil -} - -func resourcePrivatecaCertificateImport(d *resource_privateca_certificate_schema.ResourceData, meta interface{}) ([]*resource_privateca_certificate_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/caPools/(?P[^/]+)/certificates/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificates/{{name}}") - if err != nil { - return nil, resource_privateca_certificate_fmt.Errorf("Error 
constructing id: %s", err) - } - d.SetId(id) - - return []*resource_privateca_certificate_schema.ResourceData{d}, nil -} - -func flattenPrivatecaCertificateLifetime(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateRevocationDetails(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["revocation_state"] = - flattenPrivatecaCertificateRevocationDetailsRevocationState(original["revocationState"], d, config) - transformed["revocation_time"] = - flattenPrivatecaCertificateRevocationDetailsRevocationTime(original["revocationTime"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateRevocationDetailsRevocationState(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateRevocationDetailsRevocationTime(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificatePemCertificate(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescription(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["subject_description"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescription(original["subjectDescription"], d, config) - transformed["config_values"] = - 
flattenPrivatecaCertificateCertificateDescriptionConfigValues(original["configValues"], d, config) - transformed["public_key"] = - flattenPrivatecaCertificateCertificateDescriptionPublicKey(original["publicKey"], d, config) - transformed["subject_key_id"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectKeyId(original["subjectKeyId"], d, config) - transformed["authority_key_id"] = - flattenPrivatecaCertificateCertificateDescriptionAuthorityKeyId(original["authorityKeyId"], d, config) - transformed["crl_distribution_points"] = - flattenPrivatecaCertificateCertificateDescriptionCrlDistributionPoints(original["crlDistributionPoints"], d, config) - transformed["aia_issuing_certificate_urls"] = - flattenPrivatecaCertificateCertificateDescriptionAiaIssuingCertificateUrls(original["aiaIssuingCertificateUrls"], d, config) - transformed["cert_fingerprint"] = - flattenPrivatecaCertificateCertificateDescriptionCertFingerprint(original["certFingerprint"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescription(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["subject"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubject(original["subject"], d, config) - transformed["subject_alt_name"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltName(original["subjectAltName"], d, config) - transformed["hex_serial_number"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionHexSerialNumber(original["hexSerialNumber"], d, config) - transformed["lifetime"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionLifetime(original["lifetime"], d, config) - transformed["not_before_time"] = - 
flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionNotBeforeTime(original["notBeforeTime"], d, config) - transformed["not_after_time"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionNotAfterTime(original["notAfterTime"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubject(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["country_code"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectCountryCode(original["countryCode"], d, config) - transformed["organization"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectOrganization(original["organization"], d, config) - transformed["organizational_unit"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectOrganizationalUnit(original["organizationalUnit"], d, config) - transformed["locality"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectLocality(original["locality"], d, config) - transformed["province"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectProvince(original["province"], d, config) - transformed["street_address"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectStreetAddress(original["streetAddress"], d, config) - transformed["postal_code"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectPostalCode(original["postalCode"], d, config) - transformed["common_name"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectCommonName(original["commonName"], d, config) - return []interface{}{transformed} -} - -func 
flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectCountryCode(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectOrganization(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectOrganizationalUnit(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectLocality(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectProvince(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectStreetAddress(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectPostalCode(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectCommonName(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltName(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := 
make(map[string]interface{}) - transformed["dns_names"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameDnsNames(original["dnsNames"], d, config) - transformed["uris"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameUris(original["uris"], d, config) - transformed["email_addresses"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameEmailAddresses(original["emailAddresses"], d, config) - transformed["ip_addresses"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameIpAddresses(original["ipAddresses"], d, config) - transformed["custom_sans"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSans(original["customSans"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameDnsNames(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameUris(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameEmailAddresses(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameIpAddresses(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSans(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := 
make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "obect_id": flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansObectId(original["obectId"], d, config), - "critical": flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansCritical(original["critical"], d, config), - "value": flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansValue(original["value"], d, config), - }) - } - return transformed -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansObectId(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["object_id_path"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansObectIdObjectIdPath(original["objectIdPath"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansObectIdObjectIdPath(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansCritical(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionSubjectAltNameCustomSansValue(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionHexSerialNumber(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionLifetime(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionNotBeforeTime(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectDescriptionNotAfterTime(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValues(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key_usage"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsage(original["keyUsage"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsage(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["base_key_usage"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsage(original["baseKeyUsage"], d, config) - transformed["extended_key_usage"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsage(original["extendedKeyUsage"], d, config) - 
transformed["unknown_extended_key_usages"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageUnknownExtendedKeyUsages(original["unknownExtendedKeyUsages"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsage(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key_usage_options"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptions(original["keyUsageOptions"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptions(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["digital_signature"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsDigitalSignature(original["digitalSignature"], d, config) - transformed["content_commitment"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsContentCommitment(original["contentCommitment"], d, config) - transformed["key_encipherment"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsKeyEncipherment(original["keyEncipherment"], d, config) - transformed["data_encipherment"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsDataEncipherment(original["dataEncipherment"], d, config) - transformed["key_agreement"] = - 
flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsKeyAgreement(original["keyAgreement"], d, config) - transformed["cert_sign"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsCertSign(original["certSign"], d, config) - transformed["crl_sign"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsCrlSign(original["crlSign"], d, config) - transformed["encipher_only"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsEncipherOnly(original["encipherOnly"], d, config) - transformed["decipher_only"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsDecipherOnly(original["decipherOnly"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsDigitalSignature(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsContentCommitment(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsKeyEncipherment(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsDataEncipherment(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsKeyAgreement(v interface{}, d 
*resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsCertSign(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsCrlSign(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsEncipherOnly(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageBaseKeyUsageKeyUsageOptionsDecipherOnly(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsage(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["server_auth"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageServerAuth(original["serverAuth"], d, config) - transformed["client_auth"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageClientAuth(original["clientAuth"], d, config) - transformed["code_signing"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageCodeSigning(original["codeSigning"], d, config) - transformed["email_protection"] = - 
flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageEmailProtection(original["emailProtection"], d, config) - transformed["time_stamping"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageTimeStamping(original["timeStamping"], d, config) - transformed["ocsp_signing"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageOcspSigning(original["ocspSigning"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageServerAuth(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageClientAuth(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageCodeSigning(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageEmailProtection(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageTimeStamping(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageExtendedKeyUsageOcspSigning(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageUnknownExtendedKeyUsages(v interface{}, d 
*resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "obect_id": flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageUnknownExtendedKeyUsagesObectId(original["obectId"], d, config), - }) - } - return transformed -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageUnknownExtendedKeyUsagesObectId(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["object_id_path"] = - flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageUnknownExtendedKeyUsagesObectIdObjectIdPath(original["objectIdPath"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateCertificateDescriptionConfigValuesKeyUsageUnknownExtendedKeyUsagesObectIdObjectIdPath(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionPublicKey(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key"] = - flattenPrivatecaCertificateCertificateDescriptionPublicKeyKey(original["key"], d, config) - transformed["format"] = - flattenPrivatecaCertificateCertificateDescriptionPublicKeyFormat(original["format"], d, config) - return []interface{}{transformed} -} - -func 
flattenPrivatecaCertificateCertificateDescriptionPublicKeyKey(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionPublicKeyFormat(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectKeyId(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key_id"] = - flattenPrivatecaCertificateCertificateDescriptionSubjectKeyIdKeyId(original["keyId"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateCertificateDescriptionSubjectKeyIdKeyId(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionAuthorityKeyId(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key_id"] = - flattenPrivatecaCertificateCertificateDescriptionAuthorityKeyIdKeyId(original["keyId"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateCertificateDescriptionAuthorityKeyIdKeyId(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionCrlDistributionPoints(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenPrivatecaCertificateCertificateDescriptionAiaIssuingCertificateUrls(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateDescriptionCertFingerprint(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["sha256_hash"] = - flattenPrivatecaCertificateCertificateDescriptionCertFingerprintSha256Hash(original["sha256Hash"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateCertificateDescriptionCertFingerprintSha256Hash(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificatePemCertificates(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCreateTime(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateUpdateTime(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateCertificateTemplate(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateLabels(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificatePemCsr(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfig(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) 
interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["x509_config"] = - flattenPrivatecaCertificateConfigX509Config(original["x509Config"], d, config) - transformed["subject_config"] = - flattenPrivatecaCertificateConfigSubjectConfig(original["subjectConfig"], d, config) - transformed["public_key"] = - flattenPrivatecaCertificateConfigPublicKey(original["publicKey"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateConfigX509Config(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - v = make(map[string]interface{}) - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["additional_extensions"] = - flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensions(original["additionalExtensions"], d, config) - transformed["policy_ids"] = - flattenPrivatecaCertificateConfigX509ConfigPolicyIds(original["policyIds"], d, config) - transformed["aia_ocsp_servers"] = flattenPrivatecaCertificateConfigX509ConfigAiaOcspServers(original["aiaOcspServers"], d, config) - transformed["ca_options"] = - flattenPrivatecaCertificateConfigX509ConfigCaOptions(original["caOptions"], d, config) - transformed["key_usage"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsage(original["keyUsage"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateConfigSubjectConfig(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["subject"] = - flattenPrivatecaCertificateConfigSubjectConfigSubject(original["subject"], d, config) - transformed["subject_alt_name"] = 
- flattenPrivatecaCertificateConfigSubjectConfigSubjectAltName(original["subjectAltName"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateConfigSubjectConfigSubject(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["country_code"] = - flattenPrivatecaCertificateConfigSubjectConfigSubjectCountryCode(original["countryCode"], d, config) - transformed["organization"] = - flattenPrivatecaCertificateConfigSubjectConfigSubjectOrganization(original["organization"], d, config) - transformed["organizational_unit"] = - flattenPrivatecaCertificateConfigSubjectConfigSubjectOrganizationalUnit(original["organizationalUnit"], d, config) - transformed["locality"] = - flattenPrivatecaCertificateConfigSubjectConfigSubjectLocality(original["locality"], d, config) - transformed["province"] = - flattenPrivatecaCertificateConfigSubjectConfigSubjectProvince(original["province"], d, config) - transformed["street_address"] = - flattenPrivatecaCertificateConfigSubjectConfigSubjectStreetAddress(original["streetAddress"], d, config) - transformed["postal_code"] = - flattenPrivatecaCertificateConfigSubjectConfigSubjectPostalCode(original["postalCode"], d, config) - transformed["common_name"] = - flattenPrivatecaCertificateConfigSubjectConfigSubjectCommonName(original["commonName"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateConfigSubjectConfigSubjectCountryCode(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigSubjectConfigSubjectOrganization(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenPrivatecaCertificateConfigSubjectConfigSubjectOrganizationalUnit(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigSubjectConfigSubjectLocality(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigSubjectConfigSubjectProvince(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigSubjectConfigSubjectStreetAddress(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigSubjectConfigSubjectPostalCode(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigSubjectConfigSubjectCommonName(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigSubjectConfigSubjectAltName(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dns_names"] = - flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameDnsNames(original["dnsNames"], d, config) - transformed["uris"] = - flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameUris(original["uris"], d, config) - transformed["email_addresses"] = - flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameEmailAddresses(original["emailAddresses"], d, config) - transformed["ip_addresses"] = - flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameIpAddresses(original["ipAddresses"], d, config) - 
return []interface{}{transformed} -} - -func flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameDnsNames(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameUris(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameEmailAddresses(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigSubjectConfigSubjectAltNameIpAddresses(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigPublicKey(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["key"] = - flattenPrivatecaCertificateConfigPublicKeyKey(original["key"], d, config) - transformed["format"] = - flattenPrivatecaCertificateConfigPublicKeyFormat(original["format"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateConfigPublicKeyKey(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateConfigPublicKeyFormat(v interface{}, d *resource_privateca_certificate_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandPrivatecaCertificateLifetime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateCertificateTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - 
-func expandPrivatecaCertificateLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandPrivatecaCertificatePemCsr(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedX509Config, err := expandPrivatecaCertificateConfigX509Config(original["x509_config"], d, config) - if err != nil { - return nil, err - } else { - transformed["x509Config"] = transformedX509Config - } - - transformedSubjectConfig, err := expandPrivatecaCertificateConfigSubjectConfig(original["subject_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedSubjectConfig); val.IsValid() && !isEmptyValue(val) { - transformed["subjectConfig"] = transformedSubjectConfig - } - - transformedPublicKey, err := expandPrivatecaCertificateConfigPublicKey(original["public_key"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedPublicKey); val.IsValid() && !isEmptyValue(val) { - transformed["publicKey"] = transformedPublicKey - } - - return transformed, nil -} - -func expandPrivatecaCertificateConfigX509Config(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - 
transformedAdditionalExtensions, err := expandPrivatecaCertificateConfigX509ConfigAdditionalExtensions(original["additional_extensions"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedAdditionalExtensions); val.IsValid() && !isEmptyValue(val) { - transformed["additionalExtensions"] = transformedAdditionalExtensions - } - - transformedPolicyIds, err := expandPrivatecaCertificateConfigX509ConfigPolicyIds(original["policy_ids"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedPolicyIds); val.IsValid() && !isEmptyValue(val) { - transformed["policyIds"] = transformedPolicyIds - } - - transformedAiaOcspServers, err := expandPrivatecaCertificateConfigX509ConfigAiaOcspServers(original["aia_ocsp_servers"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedAiaOcspServers); val.IsValid() && !isEmptyValue(val) { - transformed["aiaOcspServers"] = transformedAiaOcspServers - } - - transformedCaOptions, err := expandPrivatecaCertificateConfigX509ConfigCaOptions(original["ca_options"], d, config) - if err != nil { - return nil, err - } else { - transformed["caOptions"] = transformedCaOptions - } - - transformedKeyUsage, err := expandPrivatecaCertificateConfigX509ConfigKeyUsage(original["key_usage"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedKeyUsage); val.IsValid() && !isEmptyValue(val) { - transformed["keyUsage"] = transformedKeyUsage - } - - return transformed, nil -} - -func expandPrivatecaCertificateConfigX509ConfigAdditionalExtensions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCritical, err := expandPrivatecaCertificateConfigX509ConfigAdditionalExtensionsCritical(original["critical"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedCritical); val.IsValid() && !isEmptyValue(val) { - transformed["critical"] = transformedCritical - } - - transformedValue, err := expandPrivatecaCertificateConfigX509ConfigAdditionalExtensionsValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { - transformed["value"] = transformedValue - } - - transformedObjectId, err := expandPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectId(original["object_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedObjectId); val.IsValid() && !isEmptyValue(val) { - transformed["objectId"] = transformedObjectId - } - - req = append(req, transformed) - } - return req, nil -} - -func expandPrivatecaCertificateConfigX509ConfigAdditionalExtensionsCritical(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigAdditionalExtensionsValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedObjectIdPath, err := 
expandPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectIdObjectIdPath(original["object_id_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedObjectIdPath); val.IsValid() && !isEmptyValue(val) { - transformed["objectIdPath"] = transformedObjectIdPath - } - - return transformed, nil -} - -func expandPrivatecaCertificateConfigX509ConfigAdditionalExtensionsObjectIdObjectIdPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigPolicyIds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedObjectIdPath, err := expandPrivatecaCertificateConfigX509ConfigPolicyIdsObjectIdPath(original["object_id_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedObjectIdPath); val.IsValid() && !isEmptyValue(val) { - transformed["objectIdPath"] = transformedObjectIdPath - } - - req = append(req, transformed) - } - return req, nil -} - -func expandPrivatecaCertificateConfigX509ConfigPolicyIdsObjectIdPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigAiaOcspServers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigCaOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedIsCa, err := expandPrivatecaCertificateConfigX509ConfigCaOptionsIsCa(original["is_ca"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedIsCa); val.IsValid() && !isEmptyValue(val) { - transformed["isCa"] = transformedIsCa - } - - transformedMaxIssuerPathLength, err := expandPrivatecaCertificateConfigX509ConfigCaOptionsMaxIssuerPathLength(original["max_issuer_path_length"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedMaxIssuerPathLength); val.IsValid() && !isEmptyValue(val) { - transformed["maxIssuerPathLength"] = transformedMaxIssuerPathLength - } - - return transformed, nil -} - -func expandPrivatecaCertificateConfigX509ConfigCaOptionsIsCa(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigCaOptionsMaxIssuerPathLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBaseKeyUsage, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsage(original["base_key_usage"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedBaseKeyUsage); val.IsValid() && !isEmptyValue(val) { - transformed["baseKeyUsage"] = transformedBaseKeyUsage - } - - transformedExtendedKeyUsage, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsage(original["extended_key_usage"], d, config) - if err != nil { - return nil, err - } 
else if val := resource_privateca_certificate_reflect.ValueOf(transformedExtendedKeyUsage); val.IsValid() && !isEmptyValue(val) { - transformed["extendedKeyUsage"] = transformedExtendedKeyUsage - } - - transformedUnknownExtendedKeyUsages, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsages(original["unknown_extended_key_usages"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedUnknownExtendedKeyUsages); val.IsValid() && !isEmptyValue(val) { - transformed["unknownExtendedKeyUsages"] = transformedUnknownExtendedKeyUsages - } - - return transformed, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDigitalSignature, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDigitalSignature(original["digital_signature"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedDigitalSignature); val.IsValid() && !isEmptyValue(val) { - transformed["digitalSignature"] = transformedDigitalSignature - } - - transformedContentCommitment, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageContentCommitment(original["content_commitment"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedContentCommitment); val.IsValid() && !isEmptyValue(val) { - transformed["contentCommitment"] = transformedContentCommitment - } - - transformedKeyEncipherment, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageKeyEncipherment(original["key_encipherment"], d, config) - if err != nil { - return nil, err - 
} else if val := resource_privateca_certificate_reflect.ValueOf(transformedKeyEncipherment); val.IsValid() && !isEmptyValue(val) { - transformed["keyEncipherment"] = transformedKeyEncipherment - } - - transformedDataEncipherment, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDataEncipherment(original["data_encipherment"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedDataEncipherment); val.IsValid() && !isEmptyValue(val) { - transformed["dataEncipherment"] = transformedDataEncipherment - } - - transformedKeyAgreement, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageKeyAgreement(original["key_agreement"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedKeyAgreement); val.IsValid() && !isEmptyValue(val) { - transformed["keyAgreement"] = transformedKeyAgreement - } - - transformedCertSign, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageCertSign(original["cert_sign"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedCertSign); val.IsValid() && !isEmptyValue(val) { - transformed["certSign"] = transformedCertSign - } - - transformedCrlSign, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageCrlSign(original["crl_sign"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedCrlSign); val.IsValid() && !isEmptyValue(val) { - transformed["crlSign"] = transformedCrlSign - } - - transformedEncipherOnly, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageEncipherOnly(original["encipher_only"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedEncipherOnly); val.IsValid() && !isEmptyValue(val) { - 
transformed["encipherOnly"] = transformedEncipherOnly - } - - transformedDecipherOnly, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDecipherOnly(original["decipher_only"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedDecipherOnly); val.IsValid() && !isEmptyValue(val) { - transformed["decipherOnly"] = transformedDecipherOnly - } - - return transformed, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDigitalSignature(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageContentCommitment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageKeyEncipherment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDataEncipherment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageKeyAgreement(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageCertSign(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageCrlSign(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageEncipherOnly(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandPrivatecaCertificateConfigX509ConfigKeyUsageBaseKeyUsageDecipherOnly(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServerAuth, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageServerAuth(original["server_auth"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedServerAuth); val.IsValid() && !isEmptyValue(val) { - transformed["serverAuth"] = transformedServerAuth - } - - transformedClientAuth, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageClientAuth(original["client_auth"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedClientAuth); val.IsValid() && !isEmptyValue(val) { - transformed["clientAuth"] = transformedClientAuth - } - - transformedCodeSigning, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageCodeSigning(original["code_signing"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedCodeSigning); val.IsValid() && !isEmptyValue(val) { - transformed["codeSigning"] = transformedCodeSigning - } - - transformedEmailProtection, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageEmailProtection(original["email_protection"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedEmailProtection); val.IsValid() && !isEmptyValue(val) { - transformed["emailProtection"] = 
transformedEmailProtection - } - - transformedTimeStamping, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageTimeStamping(original["time_stamping"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedTimeStamping); val.IsValid() && !isEmptyValue(val) { - transformed["timeStamping"] = transformedTimeStamping - } - - transformedOcspSigning, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageOcspSigning(original["ocsp_signing"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedOcspSigning); val.IsValid() && !isEmptyValue(val) { - transformed["ocspSigning"] = transformedOcspSigning - } - - return transformed, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageServerAuth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageClientAuth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageCodeSigning(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageEmailProtection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageTimeStamping(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageExtendedKeyUsageOcspSigning(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedObjectIdPath, err := expandPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsagesObjectIdPath(original["object_id_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedObjectIdPath); val.IsValid() && !isEmptyValue(val) { - transformed["objectIdPath"] = transformedObjectIdPath - } - - req = append(req, transformed) - } - return req, nil -} - -func expandPrivatecaCertificateConfigX509ConfigKeyUsageUnknownExtendedKeyUsagesObjectIdPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigSubjectConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSubject, err := expandPrivatecaCertificateConfigSubjectConfigSubject(original["subject"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedSubject); val.IsValid() && !isEmptyValue(val) { - transformed["subject"] = transformedSubject - } - - transformedSubjectAltName, err := expandPrivatecaCertificateConfigSubjectConfigSubjectAltName(original["subject_alt_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedSubjectAltName); val.IsValid() && !isEmptyValue(val) { - transformed["subjectAltName"] = 
transformedSubjectAltName - } - - return transformed, nil -} - -func expandPrivatecaCertificateConfigSubjectConfigSubject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCountryCode, err := expandPrivatecaCertificateConfigSubjectConfigSubjectCountryCode(original["country_code"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedCountryCode); val.IsValid() && !isEmptyValue(val) { - transformed["countryCode"] = transformedCountryCode - } - - transformedOrganization, err := expandPrivatecaCertificateConfigSubjectConfigSubjectOrganization(original["organization"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedOrganization); val.IsValid() && !isEmptyValue(val) { - transformed["organization"] = transformedOrganization - } - - transformedOrganizationalUnit, err := expandPrivatecaCertificateConfigSubjectConfigSubjectOrganizationalUnit(original["organizational_unit"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedOrganizationalUnit); val.IsValid() && !isEmptyValue(val) { - transformed["organizationalUnit"] = transformedOrganizationalUnit - } - - transformedLocality, err := expandPrivatecaCertificateConfigSubjectConfigSubjectLocality(original["locality"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedLocality); val.IsValid() && !isEmptyValue(val) { - transformed["locality"] = transformedLocality - } - - transformedProvince, err := expandPrivatecaCertificateConfigSubjectConfigSubjectProvince(original["province"], d, config) - if err != nil { - 
return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedProvince); val.IsValid() && !isEmptyValue(val) { - transformed["province"] = transformedProvince - } - - transformedStreetAddress, err := expandPrivatecaCertificateConfigSubjectConfigSubjectStreetAddress(original["street_address"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedStreetAddress); val.IsValid() && !isEmptyValue(val) { - transformed["streetAddress"] = transformedStreetAddress - } - - transformedPostalCode, err := expandPrivatecaCertificateConfigSubjectConfigSubjectPostalCode(original["postal_code"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedPostalCode); val.IsValid() && !isEmptyValue(val) { - transformed["postalCode"] = transformedPostalCode - } - - transformedCommonName, err := expandPrivatecaCertificateConfigSubjectConfigSubjectCommonName(original["common_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedCommonName); val.IsValid() && !isEmptyValue(val) { - transformed["commonName"] = transformedCommonName - } - - return transformed, nil -} - -func expandPrivatecaCertificateConfigSubjectConfigSubjectCountryCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigSubjectConfigSubjectOrganization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigSubjectConfigSubjectOrganizationalUnit(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigSubjectConfigSubjectLocality(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandPrivatecaCertificateConfigSubjectConfigSubjectProvince(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigSubjectConfigSubjectStreetAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigSubjectConfigSubjectPostalCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigSubjectConfigSubjectCommonName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigSubjectConfigSubjectAltName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDnsNames, err := expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameDnsNames(original["dns_names"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedDnsNames); val.IsValid() && !isEmptyValue(val) { - transformed["dnsNames"] = transformedDnsNames - } - - transformedUris, err := expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameUris(original["uris"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedUris); val.IsValid() && !isEmptyValue(val) { - transformed["uris"] = transformedUris - } - - transformedEmailAddresses, err := expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameEmailAddresses(original["email_addresses"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedEmailAddresses); val.IsValid() && !isEmptyValue(val) { - 
transformed["emailAddresses"] = transformedEmailAddresses - } - - transformedIpAddresses, err := expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameIpAddresses(original["ip_addresses"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedIpAddresses); val.IsValid() && !isEmptyValue(val) { - transformed["ipAddresses"] = transformedIpAddresses - } - - return transformed, nil -} - -func expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameDnsNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameUris(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameEmailAddresses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigSubjectConfigSubjectAltNameIpAddresses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigPublicKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKey, err := expandPrivatecaCertificateConfigPublicKeyKey(original["key"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_reflect.ValueOf(transformedKey); val.IsValid() && !isEmptyValue(val) { - transformed["key"] = transformedKey - } - - transformedFormat, err := expandPrivatecaCertificateConfigPublicKeyFormat(original["format"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_privateca_certificate_reflect.ValueOf(transformedFormat); val.IsValid() && !isEmptyValue(val) { - transformed["format"] = transformedFormat - } - - return transformed, nil -} - -func expandPrivatecaCertificateConfigPublicKeyKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateConfigPublicKeyFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourcePrivatecaCertificateAuthority() *resource_privateca_certificate_authority_schema.Resource { - return &resource_privateca_certificate_authority_schema.Resource{ - Create: resourcePrivatecaCertificateAuthorityCreate, - Read: resourcePrivatecaCertificateAuthorityRead, - Delete: resourcePrivatecaCertificateAuthorityDelete, - - Importer: &resource_privateca_certificate_authority_schema.ResourceImporter{ - State: resourcePrivatecaCertificateAuthorityImport, - }, - - Timeouts: &resource_privateca_certificate_authority_schema.ResourceTimeout{ - Create: resource_privateca_certificate_authority_schema.DefaultTimeout(4 * resource_privateca_certificate_authority_time.Minute), - Delete: resource_privateca_certificate_authority_schema.DefaultTimeout(4 * resource_privateca_certificate_authority_time.Minute), - }, - - Schema: map[string]*resource_privateca_certificate_authority_schema.Schema{ - "certificate_authority_id": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The user provided Resource ID for this Certificate Authority.`, - }, - "config": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Required: true, - ForceNew: true, - Description: `The config used to create a self-signed X.509 certificate or CSR.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_authority_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_authority_schema.Schema{ - "subject_config": { - 
Type: resource_privateca_certificate_authority_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Specifies some of the values in a certificate that are related to the subject.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_authority_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_authority_schema.Schema{ - "subject": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Contains distinguished name fields such as the location and organization.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_authority_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_authority_schema.Schema{ - "common_name": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The common name of the distinguished name.`, - }, - "organization": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The organization of the subject.`, - }, - "country_code": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The country code of the subject.`, - }, - "locality": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The locality or city of the subject.`, - }, - "organizational_unit": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The organizational unit of the subject.`, - }, - "postal_code": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The postal code of the subject.`, - }, - "province": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The province, territory, or regional 
state of the subject.`, - }, - "street_address": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The street address of the subject.`, - }, - }, - }, - }, - "subject_alt_name": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The subject alternative name fields.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_authority_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_authority_schema.Schema{ - "dns_names": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Contains only valid, fully-qualified host names.`, - Elem: &resource_privateca_certificate_authority_schema.Schema{ - Type: resource_privateca_certificate_authority_schema.TypeString, - }, - AtLeastOneOf: []string{"config.0.subject_config.0.subject_alt_name.0.dns_names", "config.0.subject_config.0.subject_alt_name.0.uris", "config.0.subject_config.0.subject_alt_name.0.email_addresses", "config.0.subject_config.0.subject_alt_name.0.ip_addresses"}, - }, - "email_addresses": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Contains only valid RFC 2822 E-mail addresses.`, - Elem: &resource_privateca_certificate_authority_schema.Schema{ - Type: resource_privateca_certificate_authority_schema.TypeString, - }, - AtLeastOneOf: []string{"config.0.subject_config.0.subject_alt_name.0.dns_names", "config.0.subject_config.0.subject_alt_name.0.uris", "config.0.subject_config.0.subject_alt_name.0.email_addresses", "config.0.subject_config.0.subject_alt_name.0.ip_addresses"}, - }, - "ip_addresses": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Contains only valid 32-bit IPv4 addresses or RFC 4291 IPv6 addresses.`, - Elem: 
&resource_privateca_certificate_authority_schema.Schema{ - Type: resource_privateca_certificate_authority_schema.TypeString, - }, - AtLeastOneOf: []string{"config.0.subject_config.0.subject_alt_name.0.dns_names", "config.0.subject_config.0.subject_alt_name.0.uris", "config.0.subject_config.0.subject_alt_name.0.email_addresses", "config.0.subject_config.0.subject_alt_name.0.ip_addresses"}, - }, - "uris": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Contains only valid RFC 3986 URIs.`, - Elem: &resource_privateca_certificate_authority_schema.Schema{ - Type: resource_privateca_certificate_authority_schema.TypeString, - }, - AtLeastOneOf: []string{"config.0.subject_config.0.subject_alt_name.0.dns_names", "config.0.subject_config.0.subject_alt_name.0.uris", "config.0.subject_config.0.subject_alt_name.0.email_addresses", "config.0.subject_config.0.subject_alt_name.0.ip_addresses"}, - }, - }, - }, - }, - }, - }, - }, - "x509_config": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Describes how some of the technical X.509 fields in a certificate should be populated.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_authority_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_authority_schema.Schema{ - "ca_options": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Describes values that are relevant in a CA certificate.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_authority_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_authority_schema.Schema{ - "is_ca": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Required: true, - ForceNew: true, - Description: `Refers to the "CA" X.509 extension, which is a boolean value. 
When this value is missing, -the extension will be omitted from the CA certificate.`, - }, - "max_issuer_path_length": { - Type: resource_privateca_certificate_authority_schema.TypeInt, - Optional: true, - ForceNew: true, - Description: `Refers to the path length restriction X.509 extension. For a CA certificate, this value describes the depth of -subordinate CA certificates that are allowed. If this value is less than 0, the request will fail. If this -value is missing, the max path length will be omitted from the CA certificate.`, - }, - }, - }, - }, - "key_usage": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Indicates the intended use for keys that correspond to a certificate.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_authority_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_authority_schema.Schema{ - "base_key_usage": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Describes high-level ways in which a key may be used.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_authority_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_authority_schema.Schema{ - "cert_sign": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used to sign certificates.`, - }, - "content_commitment": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used for cryptographic commitments. 
Note that this may also be referred to as "non-repudiation".`, - }, - "crl_sign": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used sign certificate revocation lists.`, - }, - "data_encipherment": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used to encipher data.`, - }, - "decipher_only": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used to decipher only.`, - }, - "digital_signature": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used for digital signatures.`, - }, - "encipher_only": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used to encipher only.`, - }, - "key_agreement": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used in a key agreement protocol.`, - }, - "key_encipherment": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The key may be used to encipher other keys.`, - }, - }, - }, - }, - "extended_key_usage": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Describes high-level ways in which a key may be used.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_authority_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_authority_schema.Schema{ - "client_auth": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.2. 
Officially described as "TLS WWW client authentication", though regularly used for non-WWW TLS.`, - }, - "code_signing": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially described as "Signing of downloadable executable code client authentication".`, - }, - "email_protection": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially described as "Email protection".`, - }, - "ocsp_signing": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially described as "Signing OCSP responses".`, - }, - "server_auth": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially described as "TLS WWW server authentication", though regularly used for non-WWW TLS.`, - }, - "time_stamping": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially described as "Binding the hash of an object to a time".`, - }, - }, - }, - }, - "unknown_extended_key_usages": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages.`, - Elem: &resource_privateca_certificate_authority_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_authority_schema.Schema{ - "object_id_path": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Required: true, - ForceNew: true, - Description: `An ObjectId specifies an object identifier (OID). 
These provide context and describe types in ASN.1 messages.`, - Elem: &resource_privateca_certificate_authority_schema.Schema{ - Type: resource_privateca_certificate_authority_schema.TypeInt, - }, - }, - }, - }, - }, - }, - }, - }, - "additional_extensions": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Specifies an X.509 extension, which may be used in different parts of X.509 objects like certificates, CSRs, and CRLs.`, - Elem: &resource_privateca_certificate_authority_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_authority_schema.Schema{ - "critical": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Required: true, - ForceNew: true, - Description: `Indicates whether or not this extension is critical (i.e., if the client does not know how to -handle this extension, the client should consider this to be an error).`, - }, - "object_id": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Describes values that are relevant in a CA certificate.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_authority_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_authority_schema.Schema{ - "object_id_path": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Required: true, - ForceNew: true, - Description: `An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages.`, - Elem: &resource_privateca_certificate_authority_schema.Schema{ - Type: resource_privateca_certificate_authority_schema.TypeInt, - }, - }, - }, - }, - }, - "value": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The value of this X.509 extension. 
A base64-encoded string.`, - }, - }, - }, - }, - "aia_ocsp_servers": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Describes Online Certificate Status Protocol (OCSP) endpoint addresses that appear in the -"Authority Information Access" extension in the certificate.`, - Elem: &resource_privateca_certificate_authority_schema.Schema{ - Type: resource_privateca_certificate_authority_schema.TypeString, - }, - }, - "policy_ids": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Describes the X.509 certificate policy object identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4.`, - Elem: &resource_privateca_certificate_authority_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_authority_schema.Schema{ - "object_id_path": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Required: true, - ForceNew: true, - Description: `An ObjectId specifies an object identifier (OID). These provide context and describe types in ASN.1 messages.`, - Elem: &resource_privateca_certificate_authority_schema.Schema{ - Type: resource_privateca_certificate_authority_schema.TypeInt, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "key_spec": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Required: true, - ForceNew: true, - Description: `Used when issuing certificates for this CertificateAuthority. If this CertificateAuthority -is a self-signed CertificateAuthority, this key is also used to sign the self-signed CA -certificate. 
Otherwise, it is used to sign a CSR.`, - MaxItems: 1, - Elem: &resource_privateca_certificate_authority_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_authority_schema.Schema{ - "algorithm": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_privateca_certificate_authority_validation.StringInSlice([]string{"SIGN_HASH_ALGORITHM_UNSPECIFIED", "RSA_PSS_2048_SHA256", "RSA_PSS_3072_SHA256", "RSA_PSS_4096_SHA256", "RSA_PKCS1_2048_SHA256", "RSA_PKCS1_3072_SHA256", "RSA_PKCS1_4096_SHA256", "EC_P256_SHA256", "EC_P384_SHA384", ""}, false), - Description: `The algorithm to use for creating a managed Cloud KMS key for a for a simplified -experience. All managed keys will be have their ProtectionLevel as HSM. Possible values: ["SIGN_HASH_ALGORITHM_UNSPECIFIED", "RSA_PSS_2048_SHA256", "RSA_PSS_3072_SHA256", "RSA_PSS_4096_SHA256", "RSA_PKCS1_2048_SHA256", "RSA_PKCS1_3072_SHA256", "RSA_PKCS1_4096_SHA256", "EC_P256_SHA256", "EC_P384_SHA384"]`, - ExactlyOneOf: []string{"key_spec.0.cloud_kms_key_version", "key_spec.0.algorithm"}, - }, - "cloud_kms_key_version": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The resource name for an existing Cloud KMS CryptoKeyVersion in the format -'projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*'.`, - ExactlyOneOf: []string{"key_spec.0.cloud_kms_key_version", "key_spec.0.algorithm"}, - }, - }, - }, - }, - "location": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Location of the CertificateAuthority. 
A full list of valid locations can be found by -running 'gcloud privateca locations list'.`, - }, - "pool": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the CaPool this Certificate Authority belongs to.`, - }, - "gcs_bucket": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The name of a Cloud Storage bucket where this CertificateAuthority will publish content, -such as the CA certificate and CRLs. This must be a bucket name, without any prefixes -(such as 'gs://') or suffixes (such as '.googleapis.com'). For example, to use a bucket named -my-bucket, you would simply specify 'my-bucket'. If not specified, a managed bucket will be -created.`, - }, - "ignore_active_certificates_on_deletion": { - Type: resource_privateca_certificate_authority_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `This field allows the CA to be deleted even if the CA has active certs. Active certs include both unrevoked and unexpired certs. -Use with care. Defaults to 'false'.`, - Default: false, - }, - "labels": { - Type: resource_privateca_certificate_authority_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Labels with user-defined metadata. - -An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": -"1.3kg", "count": "3" }.`, - Elem: &resource_privateca_certificate_authority_schema.Schema{Type: resource_privateca_certificate_authority_schema.TypeString}, - }, - "lifetime": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The desired lifetime of the CA certificate. Used to create the "notBeforeTime" and -"notAfterTime" fields inside an X.509 certificate. A duration in seconds with up to nine -fractional digits, terminated by 's'. 
Example: "3.5s".`, - Default: "315360000s", - }, - "type": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_privateca_certificate_authority_validation.StringInSlice([]string{"SELF_SIGNED", "SUBORDINATE", ""}, false), - Description: `The Type of this CertificateAuthority. - -~> **Note:** For 'SUBORDINATE' Certificate Authorities, they need to -be manually activated (via Cloud Console of 'gcloud') before they can -issue certificates. Default value: "SELF_SIGNED" Possible values: ["SELF_SIGNED", "SUBORDINATE"]`, - Default: "SELF_SIGNED", - }, - "access_urls": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Computed: true, - Description: `URLs for accessing content published by this CA, such as the CA certificate and CRLs.`, - Elem: &resource_privateca_certificate_authority_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_authority_schema.Schema{ - "ca_certificate_access_url": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Computed: true, - Description: `The URL where this CertificateAuthority's CA certificate is published. This will only be -set for CAs that have been activated.`, - }, - "crl_access_url": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Computed: true, - Description: `The URL where this CertificateAuthority's CRLs are published. This will only be set for -CAs that have been activated.`, - }, - }, - }, - }, - "create_time": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Computed: true, - Description: `The time at which this CertificateAuthority was created. - -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine -fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "name": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Computed: true, - Description: `The resource name for this CertificateAuthority in the format -projects/*/locations/*/certificateAuthorities/*.`, - }, - "pem_ca_certificates": { - Type: resource_privateca_certificate_authority_schema.TypeList, - Computed: true, - Description: `This CertificateAuthority's certificate chain, including the current -CertificateAuthority's certificate. Ordered such that the root issuer is the final -element (consistent with RFC 5246). For a self-signed CA, this will only list the current -CertificateAuthority's certificate.`, - Elem: &resource_privateca_certificate_authority_schema.Schema{ - Type: resource_privateca_certificate_authority_schema.TypeString, - }, - }, - "state": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Computed: true, - Description: `The State for this CertificateAuthority.`, - }, - "update_time": { - Type: resource_privateca_certificate_authority_schema.TypeString, - Computed: true, - Description: `The time at which this CertificateAuthority was updated. - -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine -fractional digits. 
// resourcePrivatecaCertificateAuthorityCreate creates a CertificateAuthority via the
// Private CA API, waits for the long-running create operation, and — unless the CA is
// SUBORDINATE — immediately enables it. On success it delegates to the Read function
// to populate state.
func resourcePrivatecaCertificateAuthorityCreate(d *resource_privateca_certificate_authority_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	// Build the API request body: each schema attribute is expanded into its
	// camelCase API field, and only non-empty / explicitly-set values are sent.
	obj := make(map[string]interface{})
	typeProp, err := expandPrivatecaCertificateAuthorityType(d.Get("type"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("type"); !isEmptyValue(resource_privateca_certificate_authority_reflect.ValueOf(typeProp)) && (ok || !resource_privateca_certificate_authority_reflect.DeepEqual(v, typeProp)) {
		obj["type"] = typeProp
	}
	configProp, err := expandPrivatecaCertificateAuthorityConfig(d.Get("config"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("config"); !isEmptyValue(resource_privateca_certificate_authority_reflect.ValueOf(configProp)) && (ok || !resource_privateca_certificate_authority_reflect.DeepEqual(v, configProp)) {
		obj["config"] = configProp
	}
	lifetimeProp, err := expandPrivatecaCertificateAuthorityLifetime(d.Get("lifetime"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("lifetime"); !isEmptyValue(resource_privateca_certificate_authority_reflect.ValueOf(lifetimeProp)) && (ok || !resource_privateca_certificate_authority_reflect.DeepEqual(v, lifetimeProp)) {
		obj["lifetime"] = lifetimeProp
	}
	keySpecProp, err := expandPrivatecaCertificateAuthorityKeySpec(d.Get("key_spec"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("key_spec"); !isEmptyValue(resource_privateca_certificate_authority_reflect.ValueOf(keySpecProp)) && (ok || !resource_privateca_certificate_authority_reflect.DeepEqual(v, keySpecProp)) {
		obj["keySpec"] = keySpecProp
	}
	gcsBucketProp, err := expandPrivatecaCertificateAuthorityGcsBucket(d.Get("gcs_bucket"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("gcs_bucket"); !isEmptyValue(resource_privateca_certificate_authority_reflect.ValueOf(gcsBucketProp)) && (ok || !resource_privateca_certificate_authority_reflect.DeepEqual(v, gcsBucketProp)) {
		obj["gcsBucket"] = gcsBucketProp
	}
	labelsProp, err := expandPrivatecaCertificateAuthorityLabels(d.Get("labels"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_privateca_certificate_authority_reflect.ValueOf(labelsProp)) && (ok || !resource_privateca_certificate_authority_reflect.DeepEqual(v, labelsProp)) {
		obj["labels"] = labelsProp
	}

	// The certificate_authority_id is passed as a query parameter on the
	// collection URL, per the Private CA create API.
	url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities?certificateAuthorityId={{certificate_authority_id}}")
	if err != nil {
		return err
	}

	resource_privateca_certificate_authority_log.Printf("[DEBUG] Creating new CertificateAuthority: %#v", obj)
	billingProject := ""

	project, err := getProject(d, config)
	if err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error fetching project for CertificateAuthority: %s", err)
	}
	billingProject = project

	// A user-configured billing project overrides the resource project.
	if bp, err := getBillingProject(d, config); err == nil {
		billingProject = bp
	}

	res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_privateca_certificate_authority_schema.TimeoutCreate))
	if err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error creating CertificateAuthority: %s", err)
	}

	// Store the canonical ID before waiting so a partially-created resource is tracked.
	id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}")
	if err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error constructing id: %s", err)
	}
	d.SetId(id)

	var opRes map[string]interface{}
	err = privatecaOperationWaitTimeWithResponse(
		config, res, &opRes, project, "Creating CertificateAuthority", userAgent,
		d.Timeout(resource_privateca_certificate_authority_schema.TimeoutCreate))
	if err != nil {
		// The operation failed; clear the ID so Terraform does not track a resource
		// that was never fully created.
		d.SetId("")
		return resource_privateca_certificate_authority_fmt.Errorf("Error waiting to create CertificateAuthority: %s", err)
	}

	opRes, err = resourcePrivatecaCertificateAuthorityDecoder(d, meta, opRes)
	if err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error decoding response from operation: %s", err)
	}
	if opRes == nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error decoding response from operation, could not find object")
	}

	if err := d.Set("name", flattenPrivatecaCertificateAuthorityName(opRes["name"], d, config)); err != nil {
		return err
	}

	// Re-derive the ID after the decoder may have updated interpolated fields.
	id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}")
	if err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error constructing id: %s", err)
	}
	d.SetId(id)

	// SUBORDINATE CAs must be activated manually (see the "type" schema docs);
	// all other CAs are enabled here so they can issue certificates.
	if d.Get("type").(string) != "SUBORDINATE" {
		url, err = replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:enable")
		if err != nil {
			return err
		}

		resource_privateca_certificate_authority_log.Printf("[DEBUG] Enabling CertificateAuthority: %#v", obj)

		res, err = sendRequest(config, "POST", billingProject, url, userAgent, nil)
		if err != nil {
			return resource_privateca_certificate_authority_fmt.Errorf("Error enabling CertificateAuthority: %s", err)
		}

		err = privatecaOperationWaitTimeWithResponse(
			config, res, &opRes, project, "Enabling CertificateAuthority", userAgent,
			d.Timeout(resource_privateca_certificate_authority_schema.TimeoutCreate))
		if err != nil {
			return resource_privateca_certificate_authority_fmt.Errorf("Error waiting to enable CertificateAuthority: %s", err)
		}
	}

	resource_privateca_certificate_authority_log.Printf("[DEBUG] Finished creating CertificateAuthority %q: %#v", d.Id(), res)

	return resourcePrivatecaCertificateAuthorityRead(d, meta)
}
// resourcePrivatecaCertificateAuthorityRead fetches the CertificateAuthority from the
// API and writes each field back into Terraform state. If the resource no longer
// exists, the ID is cleared so Terraform plans a re-create.
func resourcePrivatecaCertificateAuthorityRead(d *resource_privateca_certificate_authority_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}")
	if err != nil {
		return err
	}

	billingProject := ""

	project, err := getProject(d, config)
	if err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error fetching project for CertificateAuthority: %s", err)
	}
	billingProject = project

	// A user-configured billing project overrides the resource project.
	if bp, err := getBillingProject(d, config); err == nil {
		billingProject = bp
	}

	res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil)
	if err != nil {
		// A 404 removes the resource from state instead of failing the read.
		return handleNotFoundError(err, d, resource_privateca_certificate_authority_fmt.Sprintf("PrivatecaCertificateAuthority %q", d.Id()))
	}

	res, err = resourcePrivatecaCertificateAuthorityDecoder(d, meta, res)
	if err != nil {
		return err
	}

	if res == nil {
		// The decoder signalled the object is gone; drop it from state.
		resource_privateca_certificate_authority_log.Printf("[DEBUG] Removing PrivatecaCertificateAuthority because it no longer exists.")
		d.SetId("")
		return nil
	}

	// Flatten each camelCase API field into its snake_case schema attribute.
	if err := d.Set("project", project); err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error reading CertificateAuthority: %s", err)
	}

	if err := d.Set("name", flattenPrivatecaCertificateAuthorityName(res["name"], d, config)); err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error reading CertificateAuthority: %s", err)
	}
	if err := d.Set("type", flattenPrivatecaCertificateAuthorityType(res["type"], d, config)); err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error reading CertificateAuthority: %s", err)
	}
	if err := d.Set("config", flattenPrivatecaCertificateAuthorityConfig(res["config"], d, config)); err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error reading CertificateAuthority: %s", err)
	}
	if err := d.Set("lifetime", flattenPrivatecaCertificateAuthorityLifetime(res["lifetime"], d, config)); err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error reading CertificateAuthority: %s", err)
	}
	if err := d.Set("key_spec", flattenPrivatecaCertificateAuthorityKeySpec(res["keySpec"], d, config)); err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error reading CertificateAuthority: %s", err)
	}
	if err := d.Set("state", flattenPrivatecaCertificateAuthorityState(res["state"], d, config)); err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error reading CertificateAuthority: %s", err)
	}
	if err := d.Set("pem_ca_certificates", flattenPrivatecaCertificateAuthorityPemCaCertificates(res["pemCaCertificates"], d, config)); err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error reading CertificateAuthority: %s", err)
	}
	if err := d.Set("gcs_bucket", flattenPrivatecaCertificateAuthorityGcsBucket(res["gcsBucket"], d, config)); err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error reading CertificateAuthority: %s", err)
	}
	if err := d.Set("access_urls", flattenPrivatecaCertificateAuthorityAccessUrls(res["accessUrls"], d, config)); err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error reading CertificateAuthority: %s", err)
	}
	if err := d.Set("create_time", flattenPrivatecaCertificateAuthorityCreateTime(res["createTime"], d, config)); err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error reading CertificateAuthority: %s", err)
	}
	if err := d.Set("update_time", flattenPrivatecaCertificateAuthorityUpdateTime(res["updateTime"], d, config)); err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error reading CertificateAuthority: %s", err)
	}
	if err := d.Set("labels", flattenPrivatecaCertificateAuthorityLabels(res["labels"], d, config)); err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error reading CertificateAuthority: %s", err)
	}

	return nil
}
// resourcePrivatecaCertificateAuthorityDelete deletes the CertificateAuthority.
// An ENABLED CA is first disabled (the API requires it), then deleted; the
// ignore_active_certificates_on_deletion flag is forwarded as the
// ignoreActiveCertificates query parameter.
func resourcePrivatecaCertificateAuthorityDelete(d *resource_privateca_certificate_authority_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	billingProject := ""

	project, err := getProject(d, config)
	if err != nil {
		return resource_privateca_certificate_authority_fmt.Errorf("Error fetching project for CertificateAuthority: %s", err)
	}
	billingProject = project

	url, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}?ignoreActiveCertificates={{ignore_active_certificates_on_deletion}}")
	if err != nil {
		return err
	}

	// obj stays nil: DELETE sends no body; it is declared here so both the
	// debug log and the final request can reference it.
	var obj map[string]interface{}
	if d.Get("state").(string) == "ENABLED" {
		// Disable first — the API rejects deleting an ENABLED CA.
		disableUrl, err := replaceVars(d, config, "{{PrivatecaBasePath}}projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}:disable")
		if err != nil {
			return err
		}

		resource_privateca_certificate_authority_log.Printf("[DEBUG] Disabling CertificateAuthority: %#v", obj)

		dRes, err := sendRequest(config, "POST", billingProject, disableUrl, userAgent, nil)
		if err != nil {
			return resource_privateca_certificate_authority_fmt.Errorf("Error disabling CertificateAuthority: %s", err)
		}

		var opRes map[string]interface{}
		err = privatecaOperationWaitTimeWithResponse(
			config, dRes, &opRes, project, "Disabling CertificateAuthority", userAgent,
			d.Timeout(resource_privateca_certificate_authority_schema.TimeoutDelete))
		if err != nil {
			return resource_privateca_certificate_authority_fmt.Errorf("Error waiting to disable CertificateAuthority: %s", err)
		}
	}
	resource_privateca_certificate_authority_log.Printf("[DEBUG] Deleting CertificateAuthority %q", d.Id())

	// A user-configured billing project overrides the resource project.
	if bp, err := getBillingProject(d, config); err == nil {
		billingProject = bp
	}

	res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_privateca_certificate_authority_schema.TimeoutDelete))
	if err != nil {
		// A 404 during delete is treated as already-deleted.
		return handleNotFoundError(err, d, "CertificateAuthority")
	}

	err = privatecaOperationWaitTime(
		config, res, project, "Deleting CertificateAuthority", userAgent,
		d.Timeout(resource_privateca_certificate_authority_schema.TimeoutDelete))

	if err != nil {
		return err
	}

	resource_privateca_certificate_authority_log.Printf("[DEBUG] Finished deleting CertificateAuthority %q: %#v", d.Id(), res)
	return nil
}
"projects/{{project}}/locations/{{location}}/caPools/{{pool}}/certificateAuthorities/{{certificate_authority_id}}") - if err != nil { - return nil, resource_privateca_certificate_authority_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if err := d.Set("ignore_active_certificates_on_deletion", false); err != nil { - return nil, err - } - - return []*resource_privateca_certificate_authority_schema.ResourceData{d}, nil -} - -func flattenPrivatecaCertificateAuthorityName(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityType(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityConfig(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["x509_config"] = - flattenPrivatecaCertificateAuthorityConfigX509Config(original["x509Config"], d, config) - transformed["subject_config"] = - flattenPrivatecaCertificateAuthorityConfigSubjectConfig(original["subjectConfig"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateAuthorityConfigX509Config(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - if v == nil { - v = make(map[string]interface{}) - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["additional_extensions"] = - flattenPrivatecaCertificateConfigX509ConfigAdditionalExtensions(original["additionalExtensions"], d, config) - transformed["policy_ids"] = - flattenPrivatecaCertificateConfigX509ConfigPolicyIds(original["policyIds"], d, config) - 
transformed["aia_ocsp_servers"] = flattenPrivatecaCertificateConfigX509ConfigAiaOcspServers(original["aiaOcspServers"], d, config) - transformed["ca_options"] = - flattenPrivatecaCertificateConfigX509ConfigCaOptions(original["caOptions"], d, config) - transformed["key_usage"] = - flattenPrivatecaCertificateConfigX509ConfigKeyUsage(original["keyUsage"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateAuthorityConfigSubjectConfig(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["subject"] = - flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubject(original["subject"], d, config) - transformed["subject_alt_name"] = - flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltName(original["subjectAltName"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubject(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["country_code"] = - flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCountryCode(original["countryCode"], d, config) - transformed["organization"] = - flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganization(original["organization"], d, config) - transformed["organizational_unit"] = - flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganizationalUnit(original["organizationalUnit"], d, config) - transformed["locality"] = - flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectLocality(original["locality"], d, config) - 
transformed["province"] = - flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectProvince(original["province"], d, config) - transformed["street_address"] = - flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectStreetAddress(original["streetAddress"], d, config) - transformed["postal_code"] = - flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectPostalCode(original["postalCode"], d, config) - transformed["common_name"] = - flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCommonName(original["commonName"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCountryCode(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganization(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganizationalUnit(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectLocality(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectProvince(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectStreetAddress(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectPostalCode(v interface{}, d 
*resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCommonName(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltName(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dns_names"] = - flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameDnsNames(original["dnsNames"], d, config) - transformed["uris"] = - flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameUris(original["uris"], d, config) - transformed["email_addresses"] = - flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameEmailAddresses(original["emailAddresses"], d, config) - transformed["ip_addresses"] = - flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameIpAddresses(original["ipAddresses"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameDnsNames(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameUris(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameEmailAddresses(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameIpAddresses(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityLifetime(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityKeySpec(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["cloud_kms_key_version"] = - flattenPrivatecaCertificateAuthorityKeySpecCloudKmsKeyVersion(original["cloudKmsKeyVersion"], d, config) - transformed["algorithm"] = - flattenPrivatecaCertificateAuthorityKeySpecAlgorithm(original["algorithm"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateAuthorityKeySpecCloudKmsKeyVersion(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityKeySpecAlgorithm(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityState(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityPemCaCertificates(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityGcsBucket(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityAccessUrls(v interface{}, 
d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["ca_certificate_access_url"] = - flattenPrivatecaCertificateAuthorityAccessUrlsCaCertificateAccessUrl(original["caCertificateAccessUrl"], d, config) - transformed["crl_access_url"] = - flattenPrivatecaCertificateAuthorityAccessUrlsCrlAccessUrl(original["crlAccessUrl"], d, config) - return []interface{}{transformed} -} - -func flattenPrivatecaCertificateAuthorityAccessUrlsCaCertificateAccessUrl(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityAccessUrlsCrlAccessUrl(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityCreateTime(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityUpdateTime(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPrivatecaCertificateAuthorityLabels(v interface{}, d *resource_privateca_certificate_authority_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandPrivatecaCertificateAuthorityType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - 
transformedX509Config, err := expandPrivatecaCertificateAuthorityConfigX509Config(original["x509_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedX509Config); val.IsValid() && !isEmptyValue(val) { - transformed["x509Config"] = transformedX509Config - } - - transformedSubjectConfig, err := expandPrivatecaCertificateAuthorityConfigSubjectConfig(original["subject_config"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedSubjectConfig); val.IsValid() && !isEmptyValue(val) { - transformed["subjectConfig"] = transformedSubjectConfig - } - - return transformed, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509Config(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAdditionalExtensions, err := expandPrivatecaCertificateAuthorityConfigX509ConfigAdditionalExtensions(original["additional_extensions"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedAdditionalExtensions); val.IsValid() && !isEmptyValue(val) { - transformed["additionalExtensions"] = transformedAdditionalExtensions - } - - transformedPolicyIds, err := expandPrivatecaCertificateAuthorityConfigX509ConfigPolicyIds(original["policy_ids"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedPolicyIds); val.IsValid() && !isEmptyValue(val) { - transformed["policyIds"] = transformedPolicyIds - } - - transformedAiaOcspServers, err := expandPrivatecaCertificateAuthorityConfigX509ConfigAiaOcspServers(original["aia_ocsp_servers"], d, config) - if 
err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedAiaOcspServers); val.IsValid() && !isEmptyValue(val) { - transformed["aiaOcspServers"] = transformedAiaOcspServers - } - - transformedCaOptions, err := expandPrivatecaCertificateAuthorityConfigX509ConfigCaOptions(original["ca_options"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedCaOptions); val.IsValid() && !isEmptyValue(val) { - transformed["caOptions"] = transformedCaOptions - } - - transformedKeyUsage, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsage(original["key_usage"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedKeyUsage); val.IsValid() && !isEmptyValue(val) { - transformed["keyUsage"] = transformedKeyUsage - } - - return transformed, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigAdditionalExtensions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCritical, err := expandPrivatecaCertificateAuthorityConfigX509ConfigAdditionalExtensionsCritical(original["critical"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedCritical); val.IsValid() && !isEmptyValue(val) { - transformed["critical"] = transformedCritical - } - - transformedValue, err := expandPrivatecaCertificateAuthorityConfigX509ConfigAdditionalExtensionsValue(original["value"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedValue); val.IsValid() && 
!isEmptyValue(val) { - transformed["value"] = transformedValue - } - - transformedObjectId, err := expandPrivatecaCertificateAuthorityConfigX509ConfigAdditionalExtensionsObjectId(original["object_id"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedObjectId); val.IsValid() && !isEmptyValue(val) { - transformed["objectId"] = transformedObjectId - } - - req = append(req, transformed) - } - return req, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigAdditionalExtensionsCritical(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigAdditionalExtensionsValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigAdditionalExtensionsObjectId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedObjectIdPath, err := expandPrivatecaCertificateAuthorityConfigX509ConfigAdditionalExtensionsObjectIdObjectIdPath(original["object_id_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedObjectIdPath); val.IsValid() && !isEmptyValue(val) { - transformed["objectIdPath"] = transformedObjectIdPath - } - - return transformed, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigAdditionalExtensionsObjectIdObjectIdPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigPolicyIds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - 
l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedObjectIdPath, err := expandPrivatecaCertificateAuthorityConfigX509ConfigPolicyIdsObjectIdPath(original["object_id_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedObjectIdPath); val.IsValid() && !isEmptyValue(val) { - transformed["objectIdPath"] = transformedObjectIdPath - } - - req = append(req, transformed) - } - return req, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigPolicyIdsObjectIdPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigAiaOcspServers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigCaOptions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedIsCa, err := expandPrivatecaCertificateAuthorityConfigX509ConfigCaOptionsIsCa(original["is_ca"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedIsCa); val.IsValid() && !isEmptyValue(val) { - transformed["isCa"] = transformedIsCa - } - - transformedMaxIssuerPathLength, err := expandPrivatecaCertificateAuthorityConfigX509ConfigCaOptionsMaxIssuerPathLength(original["max_issuer_path_length"], d, config) - if err != nil { - return nil, err - } else { - transformed["maxIssuerPathLength"] = transformedMaxIssuerPathLength - } - - return transformed, nil -} - 
-func expandPrivatecaCertificateAuthorityConfigX509ConfigCaOptionsIsCa(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigCaOptionsMaxIssuerPathLength(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedBaseKeyUsage, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsage(original["base_key_usage"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedBaseKeyUsage); val.IsValid() && !isEmptyValue(val) { - transformed["baseKeyUsage"] = transformedBaseKeyUsage - } - - transformedExtendedKeyUsage, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageExtendedKeyUsage(original["extended_key_usage"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedExtendedKeyUsage); val.IsValid() && !isEmptyValue(val) { - transformed["extendedKeyUsage"] = transformedExtendedKeyUsage - } - - transformedUnknownExtendedKeyUsages, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageUnknownExtendedKeyUsages(original["unknown_extended_key_usages"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedUnknownExtendedKeyUsages); val.IsValid() && !isEmptyValue(val) { - transformed["unknownExtendedKeyUsages"] = transformedUnknownExtendedKeyUsages - } - - return transformed, nil -} - -func 
expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDigitalSignature, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageDigitalSignature(original["digital_signature"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedDigitalSignature); val.IsValid() && !isEmptyValue(val) { - transformed["digitalSignature"] = transformedDigitalSignature - } - - transformedContentCommitment, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageContentCommitment(original["content_commitment"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedContentCommitment); val.IsValid() && !isEmptyValue(val) { - transformed["contentCommitment"] = transformedContentCommitment - } - - transformedKeyEncipherment, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageKeyEncipherment(original["key_encipherment"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedKeyEncipherment); val.IsValid() && !isEmptyValue(val) { - transformed["keyEncipherment"] = transformedKeyEncipherment - } - - transformedDataEncipherment, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageDataEncipherment(original["data_encipherment"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedDataEncipherment); val.IsValid() && !isEmptyValue(val) { - transformed["dataEncipherment"] = 
transformedDataEncipherment - } - - transformedKeyAgreement, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageKeyAgreement(original["key_agreement"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedKeyAgreement); val.IsValid() && !isEmptyValue(val) { - transformed["keyAgreement"] = transformedKeyAgreement - } - - transformedCertSign, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageCertSign(original["cert_sign"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedCertSign); val.IsValid() && !isEmptyValue(val) { - transformed["certSign"] = transformedCertSign - } - - transformedCrlSign, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageCrlSign(original["crl_sign"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedCrlSign); val.IsValid() && !isEmptyValue(val) { - transformed["crlSign"] = transformedCrlSign - } - - transformedEncipherOnly, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageEncipherOnly(original["encipher_only"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedEncipherOnly); val.IsValid() && !isEmptyValue(val) { - transformed["encipherOnly"] = transformedEncipherOnly - } - - transformedDecipherOnly, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageDecipherOnly(original["decipher_only"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedDecipherOnly); val.IsValid() && !isEmptyValue(val) { - transformed["decipherOnly"] = transformedDecipherOnly - } - - return transformed, nil -} - -func 
expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageDigitalSignature(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageContentCommitment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageKeyEncipherment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageDataEncipherment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageKeyAgreement(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageCertSign(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageCrlSign(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageEncipherOnly(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageBaseKeyUsageDecipherOnly(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageExtendedKeyUsage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := 
raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServerAuth, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageExtendedKeyUsageServerAuth(original["server_auth"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedServerAuth); val.IsValid() && !isEmptyValue(val) { - transformed["serverAuth"] = transformedServerAuth - } - - transformedClientAuth, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageExtendedKeyUsageClientAuth(original["client_auth"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedClientAuth); val.IsValid() && !isEmptyValue(val) { - transformed["clientAuth"] = transformedClientAuth - } - - transformedCodeSigning, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageExtendedKeyUsageCodeSigning(original["code_signing"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedCodeSigning); val.IsValid() && !isEmptyValue(val) { - transformed["codeSigning"] = transformedCodeSigning - } - - transformedEmailProtection, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageExtendedKeyUsageEmailProtection(original["email_protection"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedEmailProtection); val.IsValid() && !isEmptyValue(val) { - transformed["emailProtection"] = transformedEmailProtection - } - - transformedTimeStamping, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageExtendedKeyUsageTimeStamping(original["time_stamping"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedTimeStamping); val.IsValid() && !isEmptyValue(val) { - 
transformed["timeStamping"] = transformedTimeStamping - } - - transformedOcspSigning, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageExtendedKeyUsageOcspSigning(original["ocsp_signing"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedOcspSigning); val.IsValid() && !isEmptyValue(val) { - transformed["ocspSigning"] = transformedOcspSigning - } - - return transformed, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageExtendedKeyUsageServerAuth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageExtendedKeyUsageClientAuth(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageExtendedKeyUsageCodeSigning(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageExtendedKeyUsageEmailProtection(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageExtendedKeyUsageTimeStamping(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageExtendedKeyUsageOcspSigning(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageUnknownExtendedKeyUsages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := 
make(map[string]interface{}) - - transformedObjectIdPath, err := expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageUnknownExtendedKeyUsagesObjectIdPath(original["object_id_path"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedObjectIdPath); val.IsValid() && !isEmptyValue(val) { - transformed["objectIdPath"] = transformedObjectIdPath - } - - req = append(req, transformed) - } - return req, nil -} - -func expandPrivatecaCertificateAuthorityConfigX509ConfigKeyUsageUnknownExtendedKeyUsagesObjectIdPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigSubjectConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSubject, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubject(original["subject"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedSubject); val.IsValid() && !isEmptyValue(val) { - transformed["subject"] = transformedSubject - } - - transformedSubjectAltName, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltName(original["subject_alt_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedSubjectAltName); val.IsValid() && !isEmptyValue(val) { - transformed["subjectAltName"] = transformedSubjectAltName - } - - return transformed, nil -} - -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { 
- return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCountryCode, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCountryCode(original["country_code"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedCountryCode); val.IsValid() && !isEmptyValue(val) { - transformed["countryCode"] = transformedCountryCode - } - - transformedOrganization, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganization(original["organization"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedOrganization); val.IsValid() && !isEmptyValue(val) { - transformed["organization"] = transformedOrganization - } - - transformedOrganizationalUnit, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganizationalUnit(original["organizational_unit"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedOrganizationalUnit); val.IsValid() && !isEmptyValue(val) { - transformed["organizationalUnit"] = transformedOrganizationalUnit - } - - transformedLocality, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectLocality(original["locality"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedLocality); val.IsValid() && !isEmptyValue(val) { - transformed["locality"] = transformedLocality - } - - transformedProvince, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectProvince(original["province"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedProvince); val.IsValid() && !isEmptyValue(val) { - transformed["province"] = 
transformedProvince - } - - transformedStreetAddress, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectStreetAddress(original["street_address"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedStreetAddress); val.IsValid() && !isEmptyValue(val) { - transformed["streetAddress"] = transformedStreetAddress - } - - transformedPostalCode, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectPostalCode(original["postal_code"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedPostalCode); val.IsValid() && !isEmptyValue(val) { - transformed["postalCode"] = transformedPostalCode - } - - transformedCommonName, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCommonName(original["common_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedCommonName); val.IsValid() && !isEmptyValue(val) { - transformed["commonName"] = transformedCommonName - } - - return transformed, nil -} - -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCountryCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectOrganizationalUnit(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectLocality(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectProvince(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectStreetAddress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectPostalCode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectCommonName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDnsNames, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameDnsNames(original["dns_names"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedDnsNames); val.IsValid() && !isEmptyValue(val) { - transformed["dnsNames"] = transformedDnsNames - } - - transformedUris, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameUris(original["uris"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedUris); val.IsValid() && !isEmptyValue(val) { - transformed["uris"] = transformedUris - } - - transformedEmailAddresses, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameEmailAddresses(original["email_addresses"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedEmailAddresses); val.IsValid() && 
!isEmptyValue(val) { - transformed["emailAddresses"] = transformedEmailAddresses - } - - transformedIpAddresses, err := expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameIpAddresses(original["ip_addresses"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedIpAddresses); val.IsValid() && !isEmptyValue(val) { - transformed["ipAddresses"] = transformedIpAddresses - } - - return transformed, nil -} - -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameDnsNames(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameUris(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameEmailAddresses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityConfigSubjectConfigSubjectAltNameIpAddresses(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityLifetime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityKeySpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCloudKmsKeyVersion, err := expandPrivatecaCertificateAuthorityKeySpecCloudKmsKeyVersion(original["cloud_kms_key_version"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedCloudKmsKeyVersion); 
val.IsValid() && !isEmptyValue(val) { - transformed["cloudKmsKeyVersion"] = transformedCloudKmsKeyVersion - } - - transformedAlgorithm, err := expandPrivatecaCertificateAuthorityKeySpecAlgorithm(original["algorithm"], d, config) - if err != nil { - return nil, err - } else if val := resource_privateca_certificate_authority_reflect.ValueOf(transformedAlgorithm); val.IsValid() && !isEmptyValue(val) { - transformed["algorithm"] = transformedAlgorithm - } - - return transformed, nil -} - -func expandPrivatecaCertificateAuthorityKeySpecCloudKmsKeyVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityKeySpecAlgorithm(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityGcsBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPrivatecaCertificateAuthorityLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourcePrivatecaCertificateAuthorityDecoder(d *resource_privateca_certificate_authority_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if v := res["state"]; v == "DELETED" { - return nil, nil - } - - return res, nil -} - -func resourcePrivatecaCertificateTemplate() *resource_privateca_certificate_template_schema.Resource { - return &resource_privateca_certificate_template_schema.Resource{ - Create: resourcePrivatecaCertificateTemplateCreate, - Read: resourcePrivatecaCertificateTemplateRead, - Update: resourcePrivatecaCertificateTemplateUpdate, - Delete: resourcePrivatecaCertificateTemplateDelete, - - Importer: 
&resource_privateca_certificate_template_schema.ResourceImporter{ - State: resourcePrivatecaCertificateTemplateImport, - }, - - Timeouts: &resource_privateca_certificate_template_schema.ResourceTimeout{ - Create: resource_privateca_certificate_template_schema.DefaultTimeout(10 * resource_privateca_certificate_template_time.Minute), - Update: resource_privateca_certificate_template_schema.DefaultTimeout(10 * resource_privateca_certificate_template_time.Minute), - Delete: resource_privateca_certificate_template_schema.DefaultTimeout(10 * resource_privateca_certificate_template_time.Minute), - }, - - Schema: map[string]*resource_privateca_certificate_template_schema.Schema{ - "location": { - Type: resource_privateca_certificate_template_schema.TypeString, - Required: true, - ForceNew: true, - Description: "The location for the resource", - }, - - "name": { - Type: resource_privateca_certificate_template_schema.TypeString, - Required: true, - ForceNew: true, - Description: "The resource name for this CertificateTemplate in the format `projects/*/locations/*/certificateTemplates/*`.", - }, - - "description": { - Type: resource_privateca_certificate_template_schema.TypeString, - Optional: true, - Description: "Optional. A human-readable description of scenarios this template is intended for.", - }, - - "identity_constraints": { - Type: resource_privateca_certificate_template_schema.TypeList, - Optional: true, - Description: "Optional. Describes constraints on identities that may be appear in Certificates issued using this template. If this is omitted, then this template will not add restrictions on a certificate's identity.", - MaxItems: 1, - Elem: PrivatecaCertificateTemplateIdentityConstraintsSchema(), - }, - - "labels": { - Type: resource_privateca_certificate_template_schema.TypeMap, - Optional: true, - Description: "Optional. 
Labels with user-defined metadata.", - Elem: &resource_privateca_certificate_template_schema.Schema{Type: resource_privateca_certificate_template_schema.TypeString}, - }, - - "passthrough_extensions": { - Type: resource_privateca_certificate_template_schema.TypeList, - Optional: true, - Description: "Optional. Describes the set of X.509 extensions that may appear in a Certificate issued using this CertificateTemplate. If a certificate request sets extensions that don't appear in the passthrough_extensions, those extensions will be dropped. If the issuing CaPool's IssuancePolicy defines baseline_values that don't appear here, the certificate issuance request will fail. If this is omitted, then this template will not add restrictions on a certificate's X.509 extensions. These constraints do not apply to X.509 extensions set in this CertificateTemplate's predefined_values.", - MaxItems: 1, - Elem: PrivatecaCertificateTemplatePassthroughExtensionsSchema(), - }, - - "predefined_values": { - Type: resource_privateca_certificate_template_schema.TypeList, - Optional: true, - Description: "Optional. A set of X.509 values that will be applied to all issued certificates that use this template. If the certificate request includes conflicting values for the same properties, they will be overwritten by the values defined here. If the issuing CaPool's IssuancePolicy defines conflicting baseline_values for the same properties, the certificate issuance request will fail.", - MaxItems: 1, - Elem: PrivatecaCertificateTemplatePredefinedValuesSchema(), - }, - - "project": { - Type: resource_privateca_certificate_template_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: "The project for the resource", - }, - - "create_time": { - Type: resource_privateca_certificate_template_schema.TypeString, - Computed: true, - Description: "Output only. 
The time at which this CertificateTemplate was created.", - }, - - "update_time": { - Type: resource_privateca_certificate_template_schema.TypeString, - Computed: true, - Description: "Output only. The time at which this CertificateTemplate was updated.", - }, - }, - } -} - -func PrivatecaCertificateTemplateIdentityConstraintsSchema() *resource_privateca_certificate_template_schema.Resource { - return &resource_privateca_certificate_template_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_template_schema.Schema{ - "allow_subject_alt_names_passthrough": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Required: true, - Description: "Required. If this is true, the SubjectAltNames extension may be copied from a certificate request into the signed certificate. Otherwise, the requested SubjectAltNames will be discarded.", - }, - - "allow_subject_passthrough": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Required: true, - Description: "Required. If this is true, the Subject field may be copied from a certificate request into the signed certificate. Otherwise, the requested Subject will be discarded.", - }, - - "cel_expression": { - Type: resource_privateca_certificate_template_schema.TypeList, - Optional: true, - Description: "Optional. A CEL expression that may be used to validate the resolved X.509 Subject and/or Subject Alternative Name before a certificate is signed. 
To see the full allowed syntax and some examples, see https://cloud.google.com/certificate-authority-service/docs/using-cel", - MaxItems: 1, - Elem: PrivatecaCertificateTemplateIdentityConstraintsCelExpressionSchema(), - }, - }, - } -} - -func PrivatecaCertificateTemplateIdentityConstraintsCelExpressionSchema() *resource_privateca_certificate_template_schema.Resource { - return &resource_privateca_certificate_template_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_template_schema.Schema{ - "description": { - Type: resource_privateca_certificate_template_schema.TypeString, - Optional: true, - Description: "Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", - }, - - "expression": { - Type: resource_privateca_certificate_template_schema.TypeString, - Optional: true, - Description: "Textual representation of an expression in Common Expression Language syntax.", - }, - - "location": { - Type: resource_privateca_certificate_template_schema.TypeString, - Optional: true, - Description: "Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", - }, - - "title": { - Type: resource_privateca_certificate_template_schema.TypeString, - Optional: true, - Description: "Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", - }, - }, - } -} - -func PrivatecaCertificateTemplatePassthroughExtensionsSchema() *resource_privateca_certificate_template_schema.Resource { - return &resource_privateca_certificate_template_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_template_schema.Schema{ - "additional_extensions": { - Type: resource_privateca_certificate_template_schema.TypeList, - Optional: true, - Description: "Optional. A set of ObjectIds identifying custom X.509 extensions. 
Will be combined with known_extensions to determine the full set of X.509 extensions.", - Elem: PrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensionsSchema(), - }, - - "known_extensions": { - Type: resource_privateca_certificate_template_schema.TypeList, - Optional: true, - Description: "Optional. A set of named X.509 extensions. Will be combined with additional_extensions to determine the full set of X.509 extensions.", - Elem: &resource_privateca_certificate_template_schema.Schema{Type: resource_privateca_certificate_template_schema.TypeString}, - }, - }, - } -} - -func PrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensionsSchema() *resource_privateca_certificate_template_schema.Resource { - return &resource_privateca_certificate_template_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_template_schema.Schema{ - "object_id_path": { - Type: resource_privateca_certificate_template_schema.TypeList, - Required: true, - Description: "Required. The parts of an OID path. The most significant parts of the path come first.", - Elem: &resource_privateca_certificate_template_schema.Schema{Type: resource_privateca_certificate_template_schema.TypeInt}, - }, - }, - } -} - -func PrivatecaCertificateTemplatePredefinedValuesSchema() *resource_privateca_certificate_template_schema.Resource { - return &resource_privateca_certificate_template_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_template_schema.Schema{ - "additional_extensions": { - Type: resource_privateca_certificate_template_schema.TypeList, - Optional: true, - Description: "Optional. Describes custom X.509 extensions.", - Elem: PrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsSchema(), - }, - - "aia_ocsp_servers": { - Type: resource_privateca_certificate_template_schema.TypeList, - Optional: true, - Description: "Optional. 
Describes Online Certificate Status Protocol (OCSP) endpoint addresses that appear in the \"Authority Information Access\" extension in the certificate.", - Elem: &resource_privateca_certificate_template_schema.Schema{Type: resource_privateca_certificate_template_schema.TypeString}, - }, - - "ca_options": { - Type: resource_privateca_certificate_template_schema.TypeList, - Optional: true, - Description: "Optional. Describes options in this X509Parameters that are relevant in a CA certificate.", - MaxItems: 1, - Elem: PrivatecaCertificateTemplatePredefinedValuesCaOptionsSchema(), - }, - - "key_usage": { - Type: resource_privateca_certificate_template_schema.TypeList, - Optional: true, - Description: "Optional. Indicates the intended use for keys that correspond to a certificate.", - MaxItems: 1, - Elem: PrivatecaCertificateTemplatePredefinedValuesKeyUsageSchema(), - }, - - "policy_ids": { - Type: resource_privateca_certificate_template_schema.TypeList, - Optional: true, - Description: "Optional. Describes the X.509 certificate policy object identifiers, per https://tools.ietf.org/html/rfc5280#section-4.2.1.4.", - Elem: PrivatecaCertificateTemplatePredefinedValuesPolicyIdsSchema(), - }, - }, - } -} - -func PrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsSchema() *resource_privateca_certificate_template_schema.Resource { - return &resource_privateca_certificate_template_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_template_schema.Schema{ - "object_id": { - Type: resource_privateca_certificate_template_schema.TypeList, - Required: true, - Description: "Required. The OID for this X.509 extension.", - MaxItems: 1, - Elem: PrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsObjectIdSchema(), - }, - - "value": { - Type: resource_privateca_certificate_template_schema.TypeString, - Required: true, - Description: "Required. 
The value of this X.509 extension.", - }, - - "critical": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "Optional. Indicates whether or not this extension is critical (i.e., if the client does not know how to handle this extension, the client should consider this to be an error).", - }, - }, - } -} - -func PrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsObjectIdSchema() *resource_privateca_certificate_template_schema.Resource { - return &resource_privateca_certificate_template_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_template_schema.Schema{ - "object_id_path": { - Type: resource_privateca_certificate_template_schema.TypeList, - Required: true, - Description: "Required. The parts of an OID path. The most significant parts of the path come first.", - Elem: &resource_privateca_certificate_template_schema.Schema{Type: resource_privateca_certificate_template_schema.TypeInt}, - }, - }, - } -} - -func PrivatecaCertificateTemplatePredefinedValuesCaOptionsSchema() *resource_privateca_certificate_template_schema.Resource { - return &resource_privateca_certificate_template_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_template_schema.Schema{ - "is_ca": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "Optional. Refers to the \"CA\" X.509 extension, which is a boolean value. When this value is missing, the extension will be omitted from the CA certificate.", - }, - - "max_issuer_path_length": { - Type: resource_privateca_certificate_template_schema.TypeInt, - Optional: true, - Description: "Optional. Refers to the path length restriction X.509 extension. For a CA certificate, this value describes the depth of subordinate CA certificates that are allowed. If this value is less than 0, the request will fail. 
If this value is missing, the max path length will be omitted from the CA certificate.", - }, - }, - } -} - -func PrivatecaCertificateTemplatePredefinedValuesKeyUsageSchema() *resource_privateca_certificate_template_schema.Resource { - return &resource_privateca_certificate_template_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_template_schema.Schema{ - "base_key_usage": { - Type: resource_privateca_certificate_template_schema.TypeList, - Optional: true, - Description: "Describes high-level ways in which a key may be used.", - MaxItems: 1, - Elem: PrivatecaCertificateTemplatePredefinedValuesKeyUsageBaseKeyUsageSchema(), - }, - - "extended_key_usage": { - Type: resource_privateca_certificate_template_schema.TypeList, - Optional: true, - Description: "Detailed scenarios in which a key may be used.", - MaxItems: 1, - Elem: PrivatecaCertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsageSchema(), - }, - - "unknown_extended_key_usages": { - Type: resource_privateca_certificate_template_schema.TypeList, - Optional: true, - Description: "Used to describe extended key usages that are not listed in the KeyUsage.ExtendedKeyUsageOptions message.", - Elem: PrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsagesSchema(), - }, - }, - } -} - -func PrivatecaCertificateTemplatePredefinedValuesKeyUsageBaseKeyUsageSchema() *resource_privateca_certificate_template_schema.Resource { - return &resource_privateca_certificate_template_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_template_schema.Schema{ - "cert_sign": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "The key may be used to sign certificates.", - }, - - "content_commitment": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "The key may be used for cryptographic commitments. 
Note that this may also be referred to as \"non-repudiation\".", - }, - - "crl_sign": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "The key may be used sign certificate revocation lists.", - }, - - "data_encipherment": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "The key may be used to encipher data.", - }, - - "decipher_only": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "The key may be used to decipher only.", - }, - - "digital_signature": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "The key may be used for digital signatures.", - }, - - "encipher_only": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "The key may be used to encipher only.", - }, - - "key_agreement": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "The key may be used in a key agreement protocol.", - }, - - "key_encipherment": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "The key may be used to encipher other keys.", - }, - }, - } -} - -func PrivatecaCertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsageSchema() *resource_privateca_certificate_template_schema.Resource { - return &resource_privateca_certificate_template_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_template_schema.Schema{ - "client_auth": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "Corresponds to OID 1.3.6.1.5.5.7.3.2. 
Officially described as \"TLS WWW client authentication\", though regularly used for non-WWW TLS.", - }, - - "code_signing": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "Corresponds to OID 1.3.6.1.5.5.7.3.3. Officially described as \"Signing of downloadable executable code client authentication\".", - }, - - "email_protection": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "Corresponds to OID 1.3.6.1.5.5.7.3.4. Officially described as \"Email protection\".", - }, - - "ocsp_signing": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "Corresponds to OID 1.3.6.1.5.5.7.3.9. Officially described as \"Signing OCSP responses\".", - }, - - "server_auth": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "Corresponds to OID 1.3.6.1.5.5.7.3.1. Officially described as \"TLS WWW server authentication\", though regularly used for non-WWW TLS.", - }, - - "time_stamping": { - Type: resource_privateca_certificate_template_schema.TypeBool, - Optional: true, - Description: "Corresponds to OID 1.3.6.1.5.5.7.3.8. Officially described as \"Binding the hash of an object to a time\".", - }, - }, - } -} - -func PrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsagesSchema() *resource_privateca_certificate_template_schema.Resource { - return &resource_privateca_certificate_template_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_template_schema.Schema{ - "object_id_path": { - Type: resource_privateca_certificate_template_schema.TypeList, - Required: true, - Description: "Required. The parts of an OID path. 
The most significant parts of the path come first.", - Elem: &resource_privateca_certificate_template_schema.Schema{Type: resource_privateca_certificate_template_schema.TypeInt}, - }, - }, - } -} - -func PrivatecaCertificateTemplatePredefinedValuesPolicyIdsSchema() *resource_privateca_certificate_template_schema.Resource { - return &resource_privateca_certificate_template_schema.Resource{ - Schema: map[string]*resource_privateca_certificate_template_schema.Schema{ - "object_id_path": { - Type: resource_privateca_certificate_template_schema.TypeList, - Required: true, - Description: "Required. The parts of an OID path. The most significant parts of the path come first.", - Elem: &resource_privateca_certificate_template_schema.Schema{Type: resource_privateca_certificate_template_schema.TypeInt}, - }, - }, - } -} - -func resourcePrivatecaCertificateTemplateCreate(d *resource_privateca_certificate_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &resource_privateca_certificate_template_privatecaprivateca.CertificateTemplate{ - Location: resource_privateca_certificate_template_dcldcl.String(d.Get("location").(string)), - Name: resource_privateca_certificate_template_dcldcl.String(d.Get("name").(string)), - Description: resource_privateca_certificate_template_dcldcl.String(d.Get("description").(string)), - IdentityConstraints: expandPrivatecaCertificateTemplateIdentityConstraints(d.Get("identity_constraints")), - Labels: checkStringMap(d.Get("labels")), - PassthroughExtensions: expandPrivatecaCertificateTemplatePassthroughExtensions(d.Get("passthrough_extensions")), - PredefinedValues: expandPrivatecaCertificateTemplatePredefinedValues(d.Get("predefined_values")), - Project: resource_privateca_certificate_template_dcldcl.String(project), - } - - id, err := replaceVarsForId(d, config, 
"projects/{{project}}/locations/{{location}}/certificateTemplates/{{name}}") - if err != nil { - return resource_privateca_certificate_template_fmt.Errorf("error constructing id: %s", err) - } - d.SetId(id) - createDirective := CreateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLPrivatecaClient(config, userAgent, billingProject, d.Timeout(resource_privateca_certificate_template_schema.TimeoutCreate)) - res, err := client.ApplyCertificateTemplate(resource_privateca_certificate_template_context.Background(), obj, createDirective...) - - if _, ok := err.(resource_privateca_certificate_template_dcldcl.DiffAfterApplyError); ok { - resource_privateca_certificate_template_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_privateca_certificate_template_fmt.Errorf("Error creating CertificateTemplate: %s", err) - } - - resource_privateca_certificate_template_log.Printf("[DEBUG] Finished creating CertificateTemplate %q: %#v", d.Id(), res) - - return resourcePrivatecaCertificateTemplateRead(d, meta) -} - -func resourcePrivatecaCertificateTemplateRead(d *resource_privateca_certificate_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &resource_privateca_certificate_template_privatecaprivateca.CertificateTemplate{ - Location: resource_privateca_certificate_template_dcldcl.String(d.Get("location").(string)), - Name: resource_privateca_certificate_template_dcldcl.String(d.Get("name").(string)), - Description: resource_privateca_certificate_template_dcldcl.String(d.Get("description").(string)), - IdentityConstraints: 
expandPrivatecaCertificateTemplateIdentityConstraints(d.Get("identity_constraints")), - Labels: checkStringMap(d.Get("labels")), - PassthroughExtensions: expandPrivatecaCertificateTemplatePassthroughExtensions(d.Get("passthrough_extensions")), - PredefinedValues: expandPrivatecaCertificateTemplatePredefinedValues(d.Get("predefined_values")), - Project: resource_privateca_certificate_template_dcldcl.String(project), - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLPrivatecaClient(config, userAgent, billingProject, d.Timeout(resource_privateca_certificate_template_schema.TimeoutRead)) - res, err := client.GetCertificateTemplate(resource_privateca_certificate_template_context.Background(), obj) - if err != nil { - resourceName := resource_privateca_certificate_template_fmt.Sprintf("PrivatecaCertificateTemplate %q", d.Id()) - return handleNotFoundDCLError(err, d, resourceName) - } - - if err = d.Set("location", res.Location); err != nil { - return resource_privateca_certificate_template_fmt.Errorf("error setting location in state: %s", err) - } - if err = d.Set("name", res.Name); err != nil { - return resource_privateca_certificate_template_fmt.Errorf("error setting name in state: %s", err) - } - if err = d.Set("description", res.Description); err != nil { - return resource_privateca_certificate_template_fmt.Errorf("error setting description in state: %s", err) - } - if err = d.Set("identity_constraints", flattenPrivatecaCertificateTemplateIdentityConstraints(res.IdentityConstraints)); err != nil { - return resource_privateca_certificate_template_fmt.Errorf("error setting identity_constraints in state: %s", err) - } - if err = d.Set("labels", res.Labels); err != nil { - return resource_privateca_certificate_template_fmt.Errorf("error setting labels in state: %s", err) - } - if err = 
d.Set("passthrough_extensions", flattenPrivatecaCertificateTemplatePassthroughExtensions(res.PassthroughExtensions)); err != nil { - return resource_privateca_certificate_template_fmt.Errorf("error setting passthrough_extensions in state: %s", err) - } - if err = d.Set("predefined_values", flattenPrivatecaCertificateTemplatePredefinedValues(res.PredefinedValues)); err != nil { - return resource_privateca_certificate_template_fmt.Errorf("error setting predefined_values in state: %s", err) - } - if err = d.Set("project", res.Project); err != nil { - return resource_privateca_certificate_template_fmt.Errorf("error setting project in state: %s", err) - } - if err = d.Set("create_time", res.CreateTime); err != nil { - return resource_privateca_certificate_template_fmt.Errorf("error setting create_time in state: %s", err) - } - if err = d.Set("update_time", res.UpdateTime); err != nil { - return resource_privateca_certificate_template_fmt.Errorf("error setting update_time in state: %s", err) - } - - return nil -} - -func resourcePrivatecaCertificateTemplateUpdate(d *resource_privateca_certificate_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &resource_privateca_certificate_template_privatecaprivateca.CertificateTemplate{ - Location: resource_privateca_certificate_template_dcldcl.String(d.Get("location").(string)), - Name: resource_privateca_certificate_template_dcldcl.String(d.Get("name").(string)), - Description: resource_privateca_certificate_template_dcldcl.String(d.Get("description").(string)), - IdentityConstraints: expandPrivatecaCertificateTemplateIdentityConstraints(d.Get("identity_constraints")), - Labels: checkStringMap(d.Get("labels")), - PassthroughExtensions: expandPrivatecaCertificateTemplatePassthroughExtensions(d.Get("passthrough_extensions")), - PredefinedValues: 
expandPrivatecaCertificateTemplatePredefinedValues(d.Get("predefined_values")), - Project: resource_privateca_certificate_template_dcldcl.String(project), - } - directive := UpdateDirective - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLPrivatecaClient(config, userAgent, billingProject, d.Timeout(resource_privateca_certificate_template_schema.TimeoutUpdate)) - res, err := client.ApplyCertificateTemplate(resource_privateca_certificate_template_context.Background(), obj, directive...) - - if _, ok := err.(resource_privateca_certificate_template_dcldcl.DiffAfterApplyError); ok { - resource_privateca_certificate_template_log.Printf("[DEBUG] Diff after apply returned from the DCL: %s", err) - } else if err != nil { - - d.SetId("") - return resource_privateca_certificate_template_fmt.Errorf("Error updating CertificateTemplate: %s", err) - } - - resource_privateca_certificate_template_log.Printf("[DEBUG] Finished creating CertificateTemplate %q: %#v", d.Id(), res) - - return resourcePrivatecaCertificateTemplateRead(d, meta) -} - -func resourcePrivatecaCertificateTemplateDelete(d *resource_privateca_certificate_template_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - - obj := &resource_privateca_certificate_template_privatecaprivateca.CertificateTemplate{ - Location: resource_privateca_certificate_template_dcldcl.String(d.Get("location").(string)), - Name: resource_privateca_certificate_template_dcldcl.String(d.Get("name").(string)), - Description: resource_privateca_certificate_template_dcldcl.String(d.Get("description").(string)), - IdentityConstraints: expandPrivatecaCertificateTemplateIdentityConstraints(d.Get("identity_constraints")), - Labels: checkStringMap(d.Get("labels")), - 
PassthroughExtensions: expandPrivatecaCertificateTemplatePassthroughExtensions(d.Get("passthrough_extensions")), - PredefinedValues: expandPrivatecaCertificateTemplatePredefinedValues(d.Get("predefined_values")), - Project: resource_privateca_certificate_template_dcldcl.String(project), - } - - resource_privateca_certificate_template_log.Printf("[DEBUG] Deleting CertificateTemplate %q", d.Id()) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - client := NewDCLPrivatecaClient(config, userAgent, billingProject, d.Timeout(resource_privateca_certificate_template_schema.TimeoutDelete)) - if err := client.DeleteCertificateTemplate(resource_privateca_certificate_template_context.Background(), obj); err != nil { - return resource_privateca_certificate_template_fmt.Errorf("Error deleting CertificateTemplate: %s", err) - } - - resource_privateca_certificate_template_log.Printf("[DEBUG] Finished deleting CertificateTemplate %q", d.Id()) - return nil -} - -func resourcePrivatecaCertificateTemplateImport(d *resource_privateca_certificate_template_schema.ResourceData, meta interface{}) ([]*resource_privateca_certificate_template_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/certificateTemplates/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVarsForId(d, config, "projects/{{project}}/locations/{{location}}/certificateTemplates/{{name}}") - if err != nil { - return nil, resource_privateca_certificate_template_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_privateca_certificate_template_schema.ResourceData{d}, nil -} - -func expandPrivatecaCertificateTemplateIdentityConstraints(o interface{}) 
*resource_privateca_certificate_template_privatecaprivateca.CertificateTemplateIdentityConstraints { - if o == nil { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplateIdentityConstraints - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplateIdentityConstraints - } - obj := objArr[0].(map[string]interface{}) - return &resource_privateca_certificate_template_privatecaprivateca.CertificateTemplateIdentityConstraints{ - AllowSubjectAltNamesPassthrough: resource_privateca_certificate_template_dcldcl.Bool(obj["allow_subject_alt_names_passthrough"].(bool)), - AllowSubjectPassthrough: resource_privateca_certificate_template_dcldcl.Bool(obj["allow_subject_passthrough"].(bool)), - CelExpression: expandPrivatecaCertificateTemplateIdentityConstraintsCelExpression(obj["cel_expression"]), - } -} - -func flattenPrivatecaCertificateTemplateIdentityConstraints(obj *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplateIdentityConstraints) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "allow_subject_alt_names_passthrough": obj.AllowSubjectAltNamesPassthrough, - "allow_subject_passthrough": obj.AllowSubjectPassthrough, - "cel_expression": flattenPrivatecaCertificateTemplateIdentityConstraintsCelExpression(obj.CelExpression), - } - - return []interface{}{transformed} - -} - -func expandPrivatecaCertificateTemplateIdentityConstraintsCelExpression(o interface{}) *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplateIdentityConstraintsCelExpression { - if o == nil { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplateIdentityConstraintsCelExpression - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return 
resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplateIdentityConstraintsCelExpression - } - obj := objArr[0].(map[string]interface{}) - return &resource_privateca_certificate_template_privatecaprivateca.CertificateTemplateIdentityConstraintsCelExpression{ - Description: resource_privateca_certificate_template_dcldcl.String(obj["description"].(string)), - Expression: resource_privateca_certificate_template_dcldcl.String(obj["expression"].(string)), - Location: resource_privateca_certificate_template_dcldcl.String(obj["location"].(string)), - Title: resource_privateca_certificate_template_dcldcl.String(obj["title"].(string)), - } -} - -func flattenPrivatecaCertificateTemplateIdentityConstraintsCelExpression(obj *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplateIdentityConstraintsCelExpression) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "description": obj.Description, - "expression": obj.Expression, - "location": obj.Location, - "title": obj.Title, - } - - return []interface{}{transformed} - -} - -func expandPrivatecaCertificateTemplatePassthroughExtensions(o interface{}) *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePassthroughExtensions { - if o == nil { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePassthroughExtensions - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePassthroughExtensions - } - obj := objArr[0].(map[string]interface{}) - return &resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePassthroughExtensions{ - AdditionalExtensions: expandPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensionsArray(obj["additional_extensions"]), - KnownExtensions: 
expandPrivatecaCertificateTemplatePassthroughExtensionsKnownExtensionsArray(obj["known_extensions"]), - } -} - -func flattenPrivatecaCertificateTemplatePassthroughExtensions(obj *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePassthroughExtensions) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "additional_extensions": flattenPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensionsArray(obj.AdditionalExtensions), - "known_extensions": flattenPrivatecaCertificateTemplatePassthroughExtensionsKnownExtensionsArray(obj.KnownExtensions), - } - - return []interface{}{transformed} - -} - -func expandPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensionsArray(o interface{}) []resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions { - if o == nil { - return make([]resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 { - return make([]resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions, 0) - } - - items := make([]resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions, 0, len(objs)) - for _, item := range objs { - i := expandPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensions(item) - items = append(items, *i) - } - - return items -} - -func expandPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensions(o interface{}) *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions { - if o == nil { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePassthroughExtensionsAdditionalExtensions - } - - obj := 
o.(map[string]interface{}) - return &resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions{ - ObjectIdPath: expandIntegerArray(obj["object_id_path"]), - } -} - -func flattenPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensionsArray(objs []resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensions(&item) - items = append(items, i) - } - - return items -} - -func flattenPrivatecaCertificateTemplatePassthroughExtensionsAdditionalExtensions(obj *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePassthroughExtensionsAdditionalExtensions) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "object_id_path": obj.ObjectIdPath, - } - - return transformed - -} - -func expandPrivatecaCertificateTemplatePredefinedValues(o interface{}) *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValues { - if o == nil { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePredefinedValues - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePredefinedValues - } - obj := objArr[0].(map[string]interface{}) - return &resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValues{ - AdditionalExtensions: expandPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsArray(obj["additional_extensions"]), - AiaOcspServers: expandStringArray(obj["aia_ocsp_servers"]), - CaOptions: expandPrivatecaCertificateTemplatePredefinedValuesCaOptions(obj["ca_options"]), - KeyUsage: 
expandPrivatecaCertificateTemplatePredefinedValuesKeyUsage(obj["key_usage"]), - PolicyIds: expandPrivatecaCertificateTemplatePredefinedValuesPolicyIdsArray(obj["policy_ids"]), - } -} - -func flattenPrivatecaCertificateTemplatePredefinedValues(obj *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValues) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "additional_extensions": flattenPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsArray(obj.AdditionalExtensions), - "aia_ocsp_servers": obj.AiaOcspServers, - "ca_options": flattenPrivatecaCertificateTemplatePredefinedValuesCaOptions(obj.CaOptions), - "key_usage": flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsage(obj.KeyUsage), - "policy_ids": flattenPrivatecaCertificateTemplatePredefinedValuesPolicyIdsArray(obj.PolicyIds), - } - - return []interface{}{transformed} - -} - -func expandPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsArray(o interface{}) []resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesAdditionalExtensions { - if o == nil { - return make([]resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesAdditionalExtensions, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 { - return make([]resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesAdditionalExtensions, 0) - } - - items := make([]resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesAdditionalExtensions, 0, len(objs)) - for _, item := range objs { - i := expandPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensions(item) - items = append(items, *i) - } - - return items -} - -func expandPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensions(o interface{}) 
*resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesAdditionalExtensions { - if o == nil { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePredefinedValuesAdditionalExtensions - } - - obj := o.(map[string]interface{}) - return &resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesAdditionalExtensions{ - ObjectId: expandPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsObjectId(obj["object_id"]), - Value: resource_privateca_certificate_template_dcldcl.String(obj["value"].(string)), - Critical: resource_privateca_certificate_template_dcldcl.Bool(obj["critical"].(bool)), - } -} - -func flattenPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsArray(objs []resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesAdditionalExtensions) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensions(&item) - items = append(items, i) - } - - return items -} - -func flattenPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensions(obj *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesAdditionalExtensions) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "object_id": flattenPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsObjectId(obj.ObjectId), - "value": obj.Value, - "critical": obj.Critical, - } - - return transformed - -} - -func expandPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsObjectId(o interface{}) *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesAdditionalExtensionsObjectId { - if o == nil { - return 
resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePredefinedValuesAdditionalExtensionsObjectId - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePredefinedValuesAdditionalExtensionsObjectId - } - obj := objArr[0].(map[string]interface{}) - return &resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesAdditionalExtensionsObjectId{ - ObjectIdPath: expandIntegerArray(obj["object_id_path"]), - } -} - -func flattenPrivatecaCertificateTemplatePredefinedValuesAdditionalExtensionsObjectId(obj *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesAdditionalExtensionsObjectId) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "object_id_path": obj.ObjectIdPath, - } - - return []interface{}{transformed} - -} - -func expandPrivatecaCertificateTemplatePredefinedValuesCaOptions(o interface{}) *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesCaOptions { - if o == nil { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePredefinedValuesCaOptions - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePredefinedValuesCaOptions - } - obj := objArr[0].(map[string]interface{}) - return &resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesCaOptions{ - IsCa: resource_privateca_certificate_template_dcldcl.Bool(obj["is_ca"].(bool)), - MaxIssuerPathLength: resource_privateca_certificate_template_dcldcl.Int64(int64(obj["max_issuer_path_length"].(int))), - } -} - -func flattenPrivatecaCertificateTemplatePredefinedValuesCaOptions(obj 
*resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesCaOptions) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "is_ca": obj.IsCa, - "max_issuer_path_length": obj.MaxIssuerPathLength, - } - - return []interface{}{transformed} - -} - -func expandPrivatecaCertificateTemplatePredefinedValuesKeyUsage(o interface{}) *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsage { - if o == nil { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePredefinedValuesKeyUsage - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePredefinedValuesKeyUsage - } - obj := objArr[0].(map[string]interface{}) - return &resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsage{ - BaseKeyUsage: expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage(obj["base_key_usage"]), - ExtendedKeyUsage: expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage(obj["extended_key_usage"]), - UnknownExtendedKeyUsages: expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsagesArray(obj["unknown_extended_key_usages"]), - } -} - -func flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsage(obj *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsage) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "base_key_usage": flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage(obj.BaseKeyUsage), - "extended_key_usage": flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage(obj.ExtendedKeyUsage), - "unknown_extended_key_usages": 
flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsagesArray(obj.UnknownExtendedKeyUsages), - } - - return []interface{}{transformed} - -} - -func expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage(o interface{}) *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage { - if o == nil { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage - } - obj := objArr[0].(map[string]interface{}) - return &resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage{ - CertSign: resource_privateca_certificate_template_dcldcl.Bool(obj["cert_sign"].(bool)), - ContentCommitment: resource_privateca_certificate_template_dcldcl.Bool(obj["content_commitment"].(bool)), - CrlSign: resource_privateca_certificate_template_dcldcl.Bool(obj["crl_sign"].(bool)), - DataEncipherment: resource_privateca_certificate_template_dcldcl.Bool(obj["data_encipherment"].(bool)), - DecipherOnly: resource_privateca_certificate_template_dcldcl.Bool(obj["decipher_only"].(bool)), - DigitalSignature: resource_privateca_certificate_template_dcldcl.Bool(obj["digital_signature"].(bool)), - EncipherOnly: resource_privateca_certificate_template_dcldcl.Bool(obj["encipher_only"].(bool)), - KeyAgreement: resource_privateca_certificate_template_dcldcl.Bool(obj["key_agreement"].(bool)), - KeyEncipherment: resource_privateca_certificate_template_dcldcl.Bool(obj["key_encipherment"].(bool)), - } -} - -func flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage(obj *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsageBaseKeyUsage) 
interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "cert_sign": obj.CertSign, - "content_commitment": obj.ContentCommitment, - "crl_sign": obj.CrlSign, - "data_encipherment": obj.DataEncipherment, - "decipher_only": obj.DecipherOnly, - "digital_signature": obj.DigitalSignature, - "encipher_only": obj.EncipherOnly, - "key_agreement": obj.KeyAgreement, - "key_encipherment": obj.KeyEncipherment, - } - - return []interface{}{transformed} - -} - -func expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage(o interface{}) *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage { - if o == nil { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage - } - objArr := o.([]interface{}) - if len(objArr) == 0 { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage - } - obj := objArr[0].(map[string]interface{}) - return &resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage{ - ClientAuth: resource_privateca_certificate_template_dcldcl.Bool(obj["client_auth"].(bool)), - CodeSigning: resource_privateca_certificate_template_dcldcl.Bool(obj["code_signing"].(bool)), - EmailProtection: resource_privateca_certificate_template_dcldcl.Bool(obj["email_protection"].(bool)), - OcspSigning: resource_privateca_certificate_template_dcldcl.Bool(obj["ocsp_signing"].(bool)), - ServerAuth: resource_privateca_certificate_template_dcldcl.Bool(obj["server_auth"].(bool)), - TimeStamping: resource_privateca_certificate_template_dcldcl.Bool(obj["time_stamping"].(bool)), - } -} - -func flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage(obj 
*resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsageExtendedKeyUsage) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "client_auth": obj.ClientAuth, - "code_signing": obj.CodeSigning, - "email_protection": obj.EmailProtection, - "ocsp_signing": obj.OcspSigning, - "server_auth": obj.ServerAuth, - "time_stamping": obj.TimeStamping, - } - - return []interface{}{transformed} - -} - -func expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsagesArray(o interface{}) []resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages { - if o == nil { - return make([]resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 { - return make([]resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages, 0) - } - - items := make([]resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages, 0, len(objs)) - for _, item := range objs { - i := expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages(item) - items = append(items, *i) - } - - return items -} - -func expandPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages(o interface{}) *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages { - if o == nil { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages - } - - obj := o.(map[string]interface{}) - return 
&resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages{ - ObjectIdPath: expandIntegerArray(obj["object_id_path"]), - } -} - -func flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsagesArray(objs []resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages(&item) - items = append(items, i) - } - - return items -} - -func flattenPrivatecaCertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages(obj *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesKeyUsageUnknownExtendedKeyUsages) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "object_id_path": obj.ObjectIdPath, - } - - return transformed - -} - -func expandPrivatecaCertificateTemplatePredefinedValuesPolicyIdsArray(o interface{}) []resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesPolicyIds { - if o == nil { - return make([]resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesPolicyIds, 0) - } - - objs := o.([]interface{}) - if len(objs) == 0 { - return make([]resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesPolicyIds, 0) - } - - items := make([]resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesPolicyIds, 0, len(objs)) - for _, item := range objs { - i := expandPrivatecaCertificateTemplatePredefinedValuesPolicyIds(item) - items = append(items, *i) - } - - return items -} - -func expandPrivatecaCertificateTemplatePredefinedValuesPolicyIds(o interface{}) 
*resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesPolicyIds { - if o == nil { - return resource_privateca_certificate_template_privatecaprivateca.EmptyCertificateTemplatePredefinedValuesPolicyIds - } - - obj := o.(map[string]interface{}) - return &resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesPolicyIds{ - ObjectIdPath: expandIntegerArray(obj["object_id_path"]), - } -} - -func flattenPrivatecaCertificateTemplatePredefinedValuesPolicyIdsArray(objs []resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesPolicyIds) []interface{} { - if objs == nil { - return nil - } - - items := []interface{}{} - for _, item := range objs { - i := flattenPrivatecaCertificateTemplatePredefinedValuesPolicyIds(&item) - items = append(items, i) - } - - return items -} - -func flattenPrivatecaCertificateTemplatePredefinedValuesPolicyIds(obj *resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePredefinedValuesPolicyIds) interface{} { - if obj == nil || obj.Empty() { - return nil - } - transformed := map[string]interface{}{ - "object_id_path": obj.ObjectIdPath, - } - - return transformed - -} - -func flattenPrivatecaCertificateTemplatePassthroughExtensionsKnownExtensionsArray(obj []resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePassthroughExtensionsKnownExtensionsEnum) interface{} { - if obj == nil { - return nil - } - items := []string{} - for _, item := range obj { - items = append(items, string(item)) - } - return items -} - -func expandPrivatecaCertificateTemplatePassthroughExtensionsKnownExtensionsArray(o interface{}) []resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePassthroughExtensionsKnownExtensionsEnum { - objs := o.([]interface{}) - items := make([]resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePassthroughExtensionsKnownExtensionsEnum, 
0, len(objs)) - for _, item := range objs { - i := resource_privateca_certificate_template_privatecaprivateca.CertificateTemplatePassthroughExtensionsKnownExtensionsEnumRef(item.(string)) - items = append(items, *i) - } - return items -} - -func resourceAccessApprovalProjectSettings() *resource_project_access_approval_settings_schema.Resource { - return &resource_project_access_approval_settings_schema.Resource{ - Create: resourceAccessApprovalProjectSettingsCreate, - Read: resourceAccessApprovalProjectSettingsRead, - Update: resourceAccessApprovalProjectSettingsUpdate, - Delete: resourceAccessApprovalProjectSettingsDelete, - - Importer: &resource_project_access_approval_settings_schema.ResourceImporter{ - State: resourceAccessApprovalProjectSettingsImport, - }, - - Timeouts: &resource_project_access_approval_settings_schema.ResourceTimeout{ - Create: resource_project_access_approval_settings_schema.DefaultTimeout(4 * resource_project_access_approval_settings_time.Minute), - Update: resource_project_access_approval_settings_schema.DefaultTimeout(4 * resource_project_access_approval_settings_time.Minute), - Delete: resource_project_access_approval_settings_schema.DefaultTimeout(4 * resource_project_access_approval_settings_time.Minute), - }, - - Schema: map[string]*resource_project_access_approval_settings_schema.Schema{ - "enrolled_services": { - Type: resource_project_access_approval_settings_schema.TypeSet, - Required: true, - Description: `A list of Google Cloud Services for which the given resource has Access Approval enrolled. -Access requests for the resource given by name against any of these services contained here will be required -to have explicit approval. Enrollment can only be done on an all or nothing basis. 
- -A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded.`, - Elem: accessapprovalProjectSettingsEnrolledServicesSchema(), - Set: accessApprovalEnrolledServicesHash, - }, - "project_id": { - Type: resource_project_access_approval_settings_schema.TypeString, - Required: true, - ForceNew: true, - Description: `ID of the project of the access approval settings.`, - }, - "notification_emails": { - Type: resource_project_access_approval_settings_schema.TypeSet, - Computed: true, - Optional: true, - Description: `A list of email addresses to which notifications relating to approval requests should be sent. -Notifications relating to a resource will be sent to all emails in the settings of ancestor -resources of that resource. A maximum of 50 email addresses are allowed.`, - MaxItems: 50, - Elem: &resource_project_access_approval_settings_schema.Schema{ - Type: resource_project_access_approval_settings_schema.TypeString, - }, - Set: resource_project_access_approval_settings_schema.HashString, - }, - "project": { - Type: resource_project_access_approval_settings_schema.TypeString, - Optional: true, - Deprecated: "Deprecated in favor of `project_id`", - Description: `Deprecated in favor of 'project_id'`, - }, - "enrolled_ancestor": { - Type: resource_project_access_approval_settings_schema.TypeBool, - Computed: true, - Description: `If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Project.`, - }, - "name": { - Type: resource_project_access_approval_settings_schema.TypeString, - Computed: true, - Description: `The resource name of the settings. 
Format is "projects/{project_id}/accessApprovalSettings"`, - }, - }, - UseJSONNumber: true, - } -} - -func accessapprovalProjectSettingsEnrolledServicesSchema() *resource_project_access_approval_settings_schema.Resource { - return &resource_project_access_approval_settings_schema.Resource{ - Schema: map[string]*resource_project_access_approval_settings_schema.Schema{ - "cloud_product": { - Type: resource_project_access_approval_settings_schema.TypeString, - Required: true, - Description: `The product for which Access Approval will be enrolled. Allowed values are listed (case-sensitive): - all - appengine.googleapis.com - bigquery.googleapis.com - bigtable.googleapis.com - cloudkms.googleapis.com - compute.googleapis.com - dataflow.googleapis.com - iam.googleapis.com - pubsub.googleapis.com - storage.googleapis.com`, - }, - "enrollment_level": { - Type: resource_project_access_approval_settings_schema.TypeString, - Optional: true, - ValidateFunc: resource_project_access_approval_settings_validation.StringInSlice([]string{"BLOCK_ALL", ""}, false), - Description: `The enrollment level of the service. 
Default value: "BLOCK_ALL" Possible values: ["BLOCK_ALL"]`, - Default: "BLOCK_ALL", - }, - }, - } -} - -func resourceAccessApprovalProjectSettingsCreate(d *resource_project_access_approval_settings_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - notificationEmailsProp, err := expandAccessApprovalProjectSettingsNotificationEmails(d.Get("notification_emails"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("notification_emails"); !isEmptyValue(resource_project_access_approval_settings_reflect.ValueOf(notificationEmailsProp)) && (ok || !resource_project_access_approval_settings_reflect.DeepEqual(v, notificationEmailsProp)) { - obj["notificationEmails"] = notificationEmailsProp - } - enrolledServicesProp, err := expandAccessApprovalProjectSettingsEnrolledServices(d.Get("enrolled_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enrolled_services"); !isEmptyValue(resource_project_access_approval_settings_reflect.ValueOf(enrolledServicesProp)) && (ok || !resource_project_access_approval_settings_reflect.DeepEqual(v, enrolledServicesProp)) { - obj["enrolledServices"] = enrolledServicesProp - } - projectProp, err := expandAccessApprovalProjectSettingsProject(d.Get("project"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("project"); !isEmptyValue(resource_project_access_approval_settings_reflect.ValueOf(projectProp)) && (ok || !resource_project_access_approval_settings_reflect.DeepEqual(v, projectProp)) { - obj["project"] = projectProp - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}projects/{{project_id}}/accessApprovalSettings") - if err != nil { - return err - } - - resource_project_access_approval_settings_log.Printf("[DEBUG] Creating new ProjectSettings: %#v", obj) - billingProject 
:= "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - updateMask := []string{} - - if d.HasChange("notification_emails") { - updateMask = append(updateMask, "notificationEmails") - } - - if d.HasChange("enrolled_services") { - updateMask = append(updateMask, "enrolledServices") - } - - if d.HasChange("project") { - updateMask = append(updateMask, "project") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_project_access_approval_settings_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_project_access_approval_settings_schema.TimeoutCreate)) - if err != nil { - return resource_project_access_approval_settings_fmt.Errorf("Error creating ProjectSettings: %s", err) - } - if err := d.Set("name", flattenAccessApprovalProjectSettingsName(res["name"], d, config)); err != nil { - return resource_project_access_approval_settings_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "projects/{{project_id}}/accessApprovalSettings") - if err != nil { - return resource_project_access_approval_settings_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_project_access_approval_settings_log.Printf("[DEBUG] Finished creating ProjectSettings %q: %#v", d.Id(), res) - - return resourceAccessApprovalProjectSettingsRead(d, meta) -} - -func resourceAccessApprovalProjectSettingsRead(d *resource_project_access_approval_settings_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}projects/{{project_id}}/accessApprovalSettings") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, 
config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_project_access_approval_settings_fmt.Sprintf("AccessApprovalProjectSettings %q", d.Id())) - } - - if err := d.Set("name", flattenAccessApprovalProjectSettingsName(res["name"], d, config)); err != nil { - return resource_project_access_approval_settings_fmt.Errorf("Error reading ProjectSettings: %s", err) - } - if err := d.Set("notification_emails", flattenAccessApprovalProjectSettingsNotificationEmails(res["notificationEmails"], d, config)); err != nil { - return resource_project_access_approval_settings_fmt.Errorf("Error reading ProjectSettings: %s", err) - } - if err := d.Set("enrolled_services", flattenAccessApprovalProjectSettingsEnrolledServices(res["enrolledServices"], d, config)); err != nil { - return resource_project_access_approval_settings_fmt.Errorf("Error reading ProjectSettings: %s", err) - } - if err := d.Set("enrolled_ancestor", flattenAccessApprovalProjectSettingsEnrolledAncestor(res["enrolledAncestor"], d, config)); err != nil { - return resource_project_access_approval_settings_fmt.Errorf("Error reading ProjectSettings: %s", err) - } - if err := d.Set("project", flattenAccessApprovalProjectSettingsProject(res["project"], d, config)); err != nil { - return resource_project_access_approval_settings_fmt.Errorf("Error reading ProjectSettings: %s", err) - } - - return nil -} - -func resourceAccessApprovalProjectSettingsUpdate(d *resource_project_access_approval_settings_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - notificationEmailsProp, err := expandAccessApprovalProjectSettingsNotificationEmails(d.Get("notification_emails"), d, config) - if err != nil { - return err - 
} else if v, ok := d.GetOkExists("notification_emails"); !isEmptyValue(resource_project_access_approval_settings_reflect.ValueOf(v)) && (ok || !resource_project_access_approval_settings_reflect.DeepEqual(v, notificationEmailsProp)) { - obj["notificationEmails"] = notificationEmailsProp - } - enrolledServicesProp, err := expandAccessApprovalProjectSettingsEnrolledServices(d.Get("enrolled_services"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enrolled_services"); !isEmptyValue(resource_project_access_approval_settings_reflect.ValueOf(v)) && (ok || !resource_project_access_approval_settings_reflect.DeepEqual(v, enrolledServicesProp)) { - obj["enrolledServices"] = enrolledServicesProp - } - projectProp, err := expandAccessApprovalProjectSettingsProject(d.Get("project"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("project"); !isEmptyValue(resource_project_access_approval_settings_reflect.ValueOf(v)) && (ok || !resource_project_access_approval_settings_reflect.DeepEqual(v, projectProp)) { - obj["project"] = projectProp - } - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}projects/{{project_id}}/accessApprovalSettings") - if err != nil { - return err - } - - resource_project_access_approval_settings_log.Printf("[DEBUG] Updating ProjectSettings %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("notification_emails") { - updateMask = append(updateMask, "notificationEmails") - } - - if d.HasChange("enrolled_services") { - updateMask = append(updateMask, "enrolledServices") - } - - if d.HasChange("project") { - updateMask = append(updateMask, "project") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_project_access_approval_settings_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", 
billingProject, url, userAgent, obj, d.Timeout(resource_project_access_approval_settings_schema.TimeoutUpdate)) - - if err != nil { - return resource_project_access_approval_settings_fmt.Errorf("Error updating ProjectSettings %q: %s", d.Id(), err) - } else { - resource_project_access_approval_settings_log.Printf("[DEBUG] Finished updating ProjectSettings %q: %#v", d.Id(), res) - } - - return resourceAccessApprovalProjectSettingsRead(d, meta) -} - -func resourceAccessApprovalProjectSettingsDelete(d *resource_project_access_approval_settings_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - obj["notificationEmails"] = []string{} - obj["enrolledServices"] = []string{} - - url, err := replaceVars(d, config, "{{AccessApprovalBasePath}}projects/{{project_id}}/accessApprovalSettings") - if err != nil { - return err - } - - resource_project_access_approval_settings_log.Printf("[DEBUG] Emptying ProjectSettings %q: %#v", d.Id(), obj) - updateMask := []string{} - - updateMask = append(updateMask, "notificationEmails") - updateMask = append(updateMask, "enrolledServices") - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_project_access_approval_settings_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - res, err := sendRequestWithTimeout(config, "PATCH", "", url, userAgent, obj, d.Timeout(resource_project_access_approval_settings_schema.TimeoutUpdate)) - - if err != nil { - return resource_project_access_approval_settings_fmt.Errorf("Error emptying ProjectSettings %q: %s", d.Id(), err) - } else { - resource_project_access_approval_settings_log.Printf("[DEBUG] Finished emptying ProjectSettings %q: %#v", d.Id(), res) - } - - return nil -} - -func resourceAccessApprovalProjectSettingsImport(d *resource_project_access_approval_settings_schema.ResourceData, meta 
interface{}) ([]*resource_project_access_approval_settings_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/accessApprovalSettings", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project_id}}/accessApprovalSettings") - if err != nil { - return nil, resource_project_access_approval_settings_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_project_access_approval_settings_schema.ResourceData{d}, nil -} - -func flattenAccessApprovalProjectSettingsName(v interface{}, d *resource_project_access_approval_settings_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalProjectSettingsNotificationEmails(v interface{}, d *resource_project_access_approval_settings_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return resource_project_access_approval_settings_schema.NewSet(resource_project_access_approval_settings_schema.HashString, v.([]interface{})) -} - -func flattenAccessApprovalProjectSettingsEnrolledServices(v interface{}, d *resource_project_access_approval_settings_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := resource_project_access_approval_settings_schema.NewSet(accessApprovalEnrolledServicesHash, []interface{}{}) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed.Add(map[string]interface{}{ - "cloud_product": flattenAccessApprovalProjectSettingsEnrolledServicesCloudProduct(original["cloudProduct"], d, config), - "enrollment_level": flattenAccessApprovalProjectSettingsEnrolledServicesEnrollmentLevel(original["enrollmentLevel"], d, config), - }) - } - return transformed -} - -func flattenAccessApprovalProjectSettingsEnrolledServicesCloudProduct(v interface{}, d 
*resource_project_access_approval_settings_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalProjectSettingsEnrolledServicesEnrollmentLevel(v interface{}, d *resource_project_access_approval_settings_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalProjectSettingsEnrolledAncestor(v interface{}, d *resource_project_access_approval_settings_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenAccessApprovalProjectSettingsProject(v interface{}, d *resource_project_access_approval_settings_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandAccessApprovalProjectSettingsNotificationEmails(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_project_access_approval_settings_schema.Set).List() - return v, nil -} - -func expandAccessApprovalProjectSettingsEnrolledServices(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - v = v.(*resource_project_access_approval_settings_schema.Set).List() - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCloudProduct, err := expandAccessApprovalProjectSettingsEnrolledServicesCloudProduct(original["cloud_product"], d, config) - if err != nil { - return nil, err - } else if val := resource_project_access_approval_settings_reflect.ValueOf(transformedCloudProduct); val.IsValid() && !isEmptyValue(val) { - transformed["cloudProduct"] = transformedCloudProduct - } - - transformedEnrollmentLevel, err := expandAccessApprovalProjectSettingsEnrolledServicesEnrollmentLevel(original["enrollment_level"], d, config) - if err != nil { - return nil, err - } else if val := resource_project_access_approval_settings_reflect.ValueOf(transformedEnrollmentLevel); 
val.IsValid() && !isEmptyValue(val) { - transformed["enrollmentLevel"] = transformedEnrollmentLevel - } - - req = append(req, transformed) - } - return req, nil -} - -func expandAccessApprovalProjectSettingsEnrolledServicesCloudProduct(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessApprovalProjectSettingsEnrolledServicesEnrollmentLevel(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandAccessApprovalProjectSettingsProject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourcePubsubLiteReservation() *resource_pubsub_lite_reservation_schema.Resource { - return &resource_pubsub_lite_reservation_schema.Resource{ - Create: resourcePubsubLiteReservationCreate, - Read: resourcePubsubLiteReservationRead, - Update: resourcePubsubLiteReservationUpdate, - Delete: resourcePubsubLiteReservationDelete, - - Importer: &resource_pubsub_lite_reservation_schema.ResourceImporter{ - State: resourcePubsubLiteReservationImport, - }, - - Timeouts: &resource_pubsub_lite_reservation_schema.ResourceTimeout{ - Create: resource_pubsub_lite_reservation_schema.DefaultTimeout(4 * resource_pubsub_lite_reservation_time.Minute), - Update: resource_pubsub_lite_reservation_schema.DefaultTimeout(4 * resource_pubsub_lite_reservation_time.Minute), - Delete: resource_pubsub_lite_reservation_schema.DefaultTimeout(4 * resource_pubsub_lite_reservation_time.Minute), - }, - - Schema: map[string]*resource_pubsub_lite_reservation_schema.Schema{ - "name": { - Type: resource_pubsub_lite_reservation_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Name of the reservation.`, - }, - "throughput_capacity": { - Type: resource_pubsub_lite_reservation_schema.TypeInt, - Required: true, - Description: `The reserved throughput capacity. 
Every unit of throughput capacity is -equivalent to 1 MiB/s of published messages or 2 MiB/s of subscribed -messages.`, - }, - "region": { - Type: resource_pubsub_lite_reservation_schema.TypeString, - Optional: true, - Description: `The region of the pubsub lite reservation.`, - }, - "project": { - Type: resource_pubsub_lite_reservation_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourcePubsubLiteReservationCreate(d *resource_pubsub_lite_reservation_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - throughputCapacityProp, err := expandPubsubLiteReservationThroughputCapacity(d.Get("throughput_capacity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("throughput_capacity"); !isEmptyValue(resource_pubsub_lite_reservation_reflect.ValueOf(throughputCapacityProp)) && (ok || !resource_pubsub_lite_reservation_reflect.DeepEqual(v, throughputCapacityProp)) { - obj["throughputCapacity"] = throughputCapacityProp - } - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{region}}/reservations?reservationId={{name}}") - if err != nil { - return err - } - - resource_pubsub_lite_reservation_log.Printf("[DEBUG] Creating new Reservation: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_lite_reservation_fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_lite_reservation_schema.TimeoutCreate)) - if err != nil { - return 
resource_pubsub_lite_reservation_fmt.Errorf("Error creating Reservation: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/reservations/{{name}}") - if err != nil { - return resource_pubsub_lite_reservation_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_pubsub_lite_reservation_log.Printf("[DEBUG] Finished creating Reservation %q: %#v", d.Id(), res) - - return resourcePubsubLiteReservationRead(d, meta) -} - -func resourcePubsubLiteReservationRead(d *resource_pubsub_lite_reservation_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{region}}/reservations/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_lite_reservation_fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_pubsub_lite_reservation_fmt.Sprintf("PubsubLiteReservation %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_pubsub_lite_reservation_fmt.Errorf("Error reading Reservation: %s", err) - } - - if err := d.Set("throughput_capacity", flattenPubsubLiteReservationThroughputCapacity(res["throughputCapacity"], d, config)); err != nil { - return resource_pubsub_lite_reservation_fmt.Errorf("Error reading Reservation: %s", err) - } - - return nil -} - -func resourcePubsubLiteReservationUpdate(d *resource_pubsub_lite_reservation_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, 
err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_lite_reservation_fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - throughputCapacityProp, err := expandPubsubLiteReservationThroughputCapacity(d.Get("throughput_capacity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("throughput_capacity"); !isEmptyValue(resource_pubsub_lite_reservation_reflect.ValueOf(v)) && (ok || !resource_pubsub_lite_reservation_reflect.DeepEqual(v, throughputCapacityProp)) { - obj["throughputCapacity"] = throughputCapacityProp - } - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{region}}/reservations/{{name}}") - if err != nil { - return err - } - - resource_pubsub_lite_reservation_log.Printf("[DEBUG] Updating Reservation %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("throughput_capacity") { - updateMask = append(updateMask, "throughputCapacity") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_pubsub_lite_reservation_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_lite_reservation_schema.TimeoutUpdate)) - - if err != nil { - return resource_pubsub_lite_reservation_fmt.Errorf("Error updating Reservation %q: %s", d.Id(), err) - } else { - resource_pubsub_lite_reservation_log.Printf("[DEBUG] Finished updating Reservation %q: %#v", d.Id(), res) - } - - return resourcePubsubLiteReservationRead(d, meta) -} - -func resourcePubsubLiteReservationDelete(d *resource_pubsub_lite_reservation_schema.ResourceData, meta 
interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_lite_reservation_fmt.Errorf("Error fetching project for Reservation: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{region}}/reservations/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_pubsub_lite_reservation_log.Printf("[DEBUG] Deleting Reservation %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_lite_reservation_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Reservation") - } - - resource_pubsub_lite_reservation_log.Printf("[DEBUG] Finished deleting Reservation %q: %#v", d.Id(), res) - return nil -} - -func resourcePubsubLiteReservationImport(d *resource_pubsub_lite_reservation_schema.ResourceData, meta interface{}) ([]*resource_pubsub_lite_reservation_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/reservations/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/reservations/{{name}}") - if err != nil { - return nil, resource_pubsub_lite_reservation_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_pubsub_lite_reservation_schema.ResourceData{d}, nil -} - -func flattenPubsubLiteReservationThroughputCapacity(v interface{}, d *resource_pubsub_lite_reservation_schema.ResourceData, config *Config) 
interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_pubsub_lite_reservation_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func expandPubsubLiteReservationThroughputCapacity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourcePubsubLiteSubscription() *resource_pubsub_lite_subscription_schema.Resource { - return &resource_pubsub_lite_subscription_schema.Resource{ - Create: resourcePubsubLiteSubscriptionCreate, - Read: resourcePubsubLiteSubscriptionRead, - Update: resourcePubsubLiteSubscriptionUpdate, - Delete: resourcePubsubLiteSubscriptionDelete, - - Importer: &resource_pubsub_lite_subscription_schema.ResourceImporter{ - State: resourcePubsubLiteSubscriptionImport, - }, - - Timeouts: &resource_pubsub_lite_subscription_schema.ResourceTimeout{ - Create: resource_pubsub_lite_subscription_schema.DefaultTimeout(4 * resource_pubsub_lite_subscription_time.Minute), - Update: resource_pubsub_lite_subscription_schema.DefaultTimeout(4 * resource_pubsub_lite_subscription_time.Minute), - Delete: resource_pubsub_lite_subscription_schema.DefaultTimeout(4 * resource_pubsub_lite_subscription_time.Minute), - }, - - Schema: map[string]*resource_pubsub_lite_subscription_schema.Schema{ - "name": { - Type: resource_pubsub_lite_subscription_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Name of the subscription.`, - }, - "topic": { - Type: resource_pubsub_lite_subscription_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `A reference to a Topic resource.`, - }, - "delivery_config": { - Type: resource_pubsub_lite_subscription_schema.TypeList, - Optional: true, - Description: `The settings for this subscription's message 
delivery.`, - MaxItems: 1, - Elem: &resource_pubsub_lite_subscription_schema.Resource{ - Schema: map[string]*resource_pubsub_lite_subscription_schema.Schema{ - "delivery_requirement": { - Type: resource_pubsub_lite_subscription_schema.TypeString, - Required: true, - ValidateFunc: resource_pubsub_lite_subscription_validation.StringInSlice([]string{"DELIVER_IMMEDIATELY", "DELIVER_AFTER_STORED", "DELIVERY_REQUIREMENT_UNSPECIFIED"}, false), - Description: `When this subscription should send messages to subscribers relative to messages persistence in storage. Possible values: ["DELIVER_IMMEDIATELY", "DELIVER_AFTER_STORED", "DELIVERY_REQUIREMENT_UNSPECIFIED"]`, - }, - }, - }, - }, - "region": { - Type: resource_pubsub_lite_subscription_schema.TypeString, - Optional: true, - Description: `The region of the pubsub lite topic.`, - }, - "zone": { - Type: resource_pubsub_lite_subscription_schema.TypeString, - Optional: true, - Description: `The zone of the pubsub lite topic.`, - }, - "project": { - Type: resource_pubsub_lite_subscription_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourcePubsubLiteSubscriptionCreate(d *resource_pubsub_lite_subscription_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - topicProp, err := expandPubsubLiteSubscriptionTopic(d.Get("topic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("topic"); !isEmptyValue(resource_pubsub_lite_subscription_reflect.ValueOf(topicProp)) && (ok || !resource_pubsub_lite_subscription_reflect.DeepEqual(v, topicProp)) { - obj["topic"] = topicProp - } - deliveryConfigProp, err := expandPubsubLiteSubscriptionDeliveryConfig(d.Get("delivery_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("delivery_config"); 
!isEmptyValue(resource_pubsub_lite_subscription_reflect.ValueOf(deliveryConfigProp)) && (ok || !resource_pubsub_lite_subscription_reflect.DeepEqual(v, deliveryConfigProp)) { - obj["deliveryConfig"] = deliveryConfigProp - } - - obj, err = resourcePubsubLiteSubscriptionEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/subscriptions?subscriptionId={{name}}") - if err != nil { - return err - } - - resource_pubsub_lite_subscription_log.Printf("[DEBUG] Creating new Subscription: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_lite_subscription_fmt.Errorf("Error fetching project for Subscription: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_lite_subscription_schema.TimeoutCreate)) - if err != nil { - return resource_pubsub_lite_subscription_fmt.Errorf("Error creating Subscription: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}") - if err != nil { - return resource_pubsub_lite_subscription_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_pubsub_lite_subscription_log.Printf("[DEBUG] Finished creating Subscription %q: %#v", d.Id(), res) - - return resourcePubsubLiteSubscriptionRead(d, meta) -} - -func resourcePubsubLiteSubscriptionRead(d *resource_pubsub_lite_subscription_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}") - if err != nil { - return err - } - - 
billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_lite_subscription_fmt.Errorf("Error fetching project for Subscription: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_pubsub_lite_subscription_fmt.Sprintf("PubsubLiteSubscription %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_pubsub_lite_subscription_fmt.Errorf("Error reading Subscription: %s", err) - } - - if err := d.Set("topic", flattenPubsubLiteSubscriptionTopic(res["topic"], d, config)); err != nil { - return resource_pubsub_lite_subscription_fmt.Errorf("Error reading Subscription: %s", err) - } - if err := d.Set("delivery_config", flattenPubsubLiteSubscriptionDeliveryConfig(res["deliveryConfig"], d, config)); err != nil { - return resource_pubsub_lite_subscription_fmt.Errorf("Error reading Subscription: %s", err) - } - - return nil -} - -func resourcePubsubLiteSubscriptionUpdate(d *resource_pubsub_lite_subscription_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_lite_subscription_fmt.Errorf("Error fetching project for Subscription: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - deliveryConfigProp, err := expandPubsubLiteSubscriptionDeliveryConfig(d.Get("delivery_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("delivery_config"); !isEmptyValue(resource_pubsub_lite_subscription_reflect.ValueOf(v)) && (ok || !resource_pubsub_lite_subscription_reflect.DeepEqual(v, deliveryConfigProp)) { - 
obj["deliveryConfig"] = deliveryConfigProp - } - - obj, err = resourcePubsubLiteSubscriptionEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}") - if err != nil { - return err - } - - resource_pubsub_lite_subscription_log.Printf("[DEBUG] Updating Subscription %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("delivery_config") { - updateMask = append(updateMask, "deliveryConfig") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_pubsub_lite_subscription_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_lite_subscription_schema.TimeoutUpdate)) - - if err != nil { - return resource_pubsub_lite_subscription_fmt.Errorf("Error updating Subscription %q: %s", d.Id(), err) - } else { - resource_pubsub_lite_subscription_log.Printf("[DEBUG] Finished updating Subscription %q: %#v", d.Id(), res) - } - - return resourcePubsubLiteSubscriptionRead(d, meta) -} - -func resourcePubsubLiteSubscriptionDelete(d *resource_pubsub_lite_subscription_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_lite_subscription_fmt.Errorf("Error fetching project for Subscription: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_pubsub_lite_subscription_log.Printf("[DEBUG] Deleting 
Subscription %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_lite_subscription_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Subscription") - } - - resource_pubsub_lite_subscription_log.Printf("[DEBUG] Finished deleting Subscription %q: %#v", d.Id(), res) - return nil -} - -func resourcePubsubLiteSubscriptionImport(d *resource_pubsub_lite_subscription_schema.ResourceData, meta interface{}) ([]*resource_pubsub_lite_subscription_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/subscriptions/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/subscriptions/{{name}}") - if err != nil { - return nil, resource_pubsub_lite_subscription_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_pubsub_lite_subscription_schema.ResourceData{d}, nil -} - -func flattenPubsubLiteSubscriptionTopic(v interface{}, d *resource_pubsub_lite_subscription_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenPubsubLiteSubscriptionDeliveryConfig(v interface{}, d *resource_pubsub_lite_subscription_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["delivery_requirement"] = - flattenPubsubLiteSubscriptionDeliveryConfigDeliveryRequirement(original["deliveryRequirement"], d, config) - return []interface{}{transformed} -} - -func 
flattenPubsubLiteSubscriptionDeliveryConfigDeliveryRequirement(v interface{}, d *resource_pubsub_lite_subscription_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandPubsubLiteSubscriptionTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - project, err := getProject(d, config) - if err != nil { - return "", err - } - - zone, err := getZone(d, config) - if err != nil { - return nil, err - } - - if zone == "" { - return nil, resource_pubsub_lite_subscription_fmt.Errorf("zone must be non-empty - set in resource or at provider-level") - } - - topic := d.Get("topic").(string) - - re := resource_pubsub_lite_subscription_regexp.MustCompile(`projects\/(.*)\/locations\/(.*)\/topics\/(.*)`) - match := re.FindStringSubmatch(topic) - if len(match) == 4 { - return topic, nil - } else { - - fullTopic := resource_pubsub_lite_subscription_fmt.Sprintf("projects/%s/locations/%s/topics/%s", project, zone, topic) - if err := d.Set("topic", fullTopic); err != nil { - return nil, resource_pubsub_lite_subscription_fmt.Errorf("Error setting topic: %s", err) - } - return fullTopic, nil - } -} - -func expandPubsubLiteSubscriptionDeliveryConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDeliveryRequirement, err := expandPubsubLiteSubscriptionDeliveryConfigDeliveryRequirement(original["delivery_requirement"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_lite_subscription_reflect.ValueOf(transformedDeliveryRequirement); val.IsValid() && !isEmptyValue(val) { - transformed["deliveryRequirement"] = transformedDeliveryRequirement - } - - return transformed, nil -} - -func expandPubsubLiteSubscriptionDeliveryConfigDeliveryRequirement(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourcePubsubLiteSubscriptionEncoder(d *resource_pubsub_lite_subscription_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - - zone, err := getZone(d, config) - if err != nil { - return nil, err - } - - if zone == "" { - return nil, resource_pubsub_lite_subscription_fmt.Errorf("zone must be non-empty - set in resource or at provider-level") - } - - region := getRegionFromZone(zone) - - if region == "" { - return nil, resource_pubsub_lite_subscription_fmt.Errorf("invalid zone %q, unable to infer region from zone", zone) - } - - return obj, nil -} - -func resourcePubsubLiteTopic() *resource_pubsub_lite_topic_schema.Resource { - return &resource_pubsub_lite_topic_schema.Resource{ - Create: resourcePubsubLiteTopicCreate, - Read: resourcePubsubLiteTopicRead, - Update: resourcePubsubLiteTopicUpdate, - Delete: resourcePubsubLiteTopicDelete, - - Importer: &resource_pubsub_lite_topic_schema.ResourceImporter{ - State: resourcePubsubLiteTopicImport, - }, - - Timeouts: &resource_pubsub_lite_topic_schema.ResourceTimeout{ - Create: resource_pubsub_lite_topic_schema.DefaultTimeout(4 * resource_pubsub_lite_topic_time.Minute), - Update: resource_pubsub_lite_topic_schema.DefaultTimeout(4 * resource_pubsub_lite_topic_time.Minute), - Delete: resource_pubsub_lite_topic_schema.DefaultTimeout(4 * resource_pubsub_lite_topic_time.Minute), - }, - - Schema: map[string]*resource_pubsub_lite_topic_schema.Schema{ - "name": { - Type: resource_pubsub_lite_topic_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Name of the topic.`, - }, - "partition_config": { - Type: resource_pubsub_lite_topic_schema.TypeList, - Optional: true, - Description: `The settings for this topic's partitions.`, - MaxItems: 1, - Elem: &resource_pubsub_lite_topic_schema.Resource{ - 
Schema: map[string]*resource_pubsub_lite_topic_schema.Schema{ - "count": { - Type: resource_pubsub_lite_topic_schema.TypeInt, - Required: true, - Description: `The number of partitions in the topic. Must be at least 1.`, - }, - "capacity": { - Type: resource_pubsub_lite_topic_schema.TypeList, - Optional: true, - Description: `The capacity configuration.`, - MaxItems: 1, - Elem: &resource_pubsub_lite_topic_schema.Resource{ - Schema: map[string]*resource_pubsub_lite_topic_schema.Schema{ - "publish_mib_per_sec": { - Type: resource_pubsub_lite_topic_schema.TypeInt, - Required: true, - Description: `Subscribe throughput capacity per partition in MiB/s. Must be >= 4 and <= 16.`, - }, - "subscribe_mib_per_sec": { - Type: resource_pubsub_lite_topic_schema.TypeInt, - Required: true, - Description: `Publish throughput capacity per partition in MiB/s. Must be >= 4 and <= 16.`, - }, - }, - }, - }, - }, - }, - }, - "region": { - Type: resource_pubsub_lite_topic_schema.TypeString, - Optional: true, - Description: `The region of the pubsub lite topic.`, - }, - "reservation_config": { - Type: resource_pubsub_lite_topic_schema.TypeList, - Optional: true, - Description: `The settings for this topic's Reservation usage.`, - MaxItems: 1, - Elem: &resource_pubsub_lite_topic_schema.Resource{ - Schema: map[string]*resource_pubsub_lite_topic_schema.Schema{ - "throughput_reservation": { - Type: resource_pubsub_lite_topic_schema.TypeString, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Reservation to use for this topic's throughput capacity.`, - }, - }, - }, - }, - "retention_config": { - Type: resource_pubsub_lite_topic_schema.TypeList, - Optional: true, - Description: `The settings for a topic's message retention.`, - MaxItems: 1, - Elem: &resource_pubsub_lite_topic_schema.Resource{ - Schema: map[string]*resource_pubsub_lite_topic_schema.Schema{ - "per_partition_bytes": { - Type: resource_pubsub_lite_topic_schema.TypeString, - Required: true, - 
Description: `The provisioned storage, in bytes, per partition. If the number of bytes stored -in any of the topic's partitions grows beyond this value, older messages will be -dropped to make room for newer ones, regardless of the value of period.`, - }, - "period": { - Type: resource_pubsub_lite_topic_schema.TypeString, - Optional: true, - Description: `How long a published message is retained. If unset, messages will be retained as -long as the bytes retained for each partition is below perPartitionBytes. A -duration in seconds with up to nine fractional digits, terminated by 's'. -Example: "3.5s".`, - }, - }, - }, - }, - "zone": { - Type: resource_pubsub_lite_topic_schema.TypeString, - Optional: true, - Description: `The zone of the pubsub lite topic.`, - }, - "project": { - Type: resource_pubsub_lite_topic_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourcePubsubLiteTopicCreate(d *resource_pubsub_lite_topic_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - partitionConfigProp, err := expandPubsubLiteTopicPartitionConfig(d.Get("partition_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("partition_config"); !isEmptyValue(resource_pubsub_lite_topic_reflect.ValueOf(partitionConfigProp)) && (ok || !resource_pubsub_lite_topic_reflect.DeepEqual(v, partitionConfigProp)) { - obj["partitionConfig"] = partitionConfigProp - } - retentionConfigProp, err := expandPubsubLiteTopicRetentionConfig(d.Get("retention_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("retention_config"); !isEmptyValue(resource_pubsub_lite_topic_reflect.ValueOf(retentionConfigProp)) && (ok || !resource_pubsub_lite_topic_reflect.DeepEqual(v, retentionConfigProp)) { - 
obj["retentionConfig"] = retentionConfigProp - } - reservationConfigProp, err := expandPubsubLiteTopicReservationConfig(d.Get("reservation_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("reservation_config"); !isEmptyValue(resource_pubsub_lite_topic_reflect.ValueOf(reservationConfigProp)) && (ok || !resource_pubsub_lite_topic_reflect.DeepEqual(v, reservationConfigProp)) { - obj["reservationConfig"] = reservationConfigProp - } - - obj, err = resourcePubsubLiteTopicEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/topics?topicId={{name}}") - if err != nil { - return err - } - - resource_pubsub_lite_topic_log.Printf("[DEBUG] Creating new Topic: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_lite_topic_fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_lite_topic_schema.TimeoutCreate)) - if err != nil { - return resource_pubsub_lite_topic_fmt.Errorf("Error creating Topic: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/topics/{{name}}") - if err != nil { - return resource_pubsub_lite_topic_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_pubsub_lite_topic_log.Printf("[DEBUG] Finished creating Topic %q: %#v", d.Id(), res) - - return resourcePubsubLiteTopicRead(d, meta) -} - -func resourcePubsubLiteTopicRead(d *resource_pubsub_lite_topic_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/topics/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_lite_topic_fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_pubsub_lite_topic_fmt.Sprintf("PubsubLiteTopic %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_pubsub_lite_topic_fmt.Errorf("Error reading Topic: %s", err) - } - - if err := d.Set("partition_config", flattenPubsubLiteTopicPartitionConfig(res["partitionConfig"], d, config)); err != nil { - return resource_pubsub_lite_topic_fmt.Errorf("Error reading Topic: %s", err) - } - if err := d.Set("retention_config", flattenPubsubLiteTopicRetentionConfig(res["retentionConfig"], d, config)); err != nil { - return resource_pubsub_lite_topic_fmt.Errorf("Error reading Topic: %s", err) - } - if err := d.Set("reservation_config", flattenPubsubLiteTopicReservationConfig(res["reservationConfig"], d, config)); err != nil { - return resource_pubsub_lite_topic_fmt.Errorf("Error reading Topic: %s", err) - } - - return nil -} - -func resourcePubsubLiteTopicUpdate(d *resource_pubsub_lite_topic_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_lite_topic_fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - partitionConfigProp, err := expandPubsubLiteTopicPartitionConfig(d.Get("partition_config"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("partition_config"); !isEmptyValue(resource_pubsub_lite_topic_reflect.ValueOf(v)) && (ok || !resource_pubsub_lite_topic_reflect.DeepEqual(v, partitionConfigProp)) { - obj["partitionConfig"] = partitionConfigProp - } - retentionConfigProp, err := expandPubsubLiteTopicRetentionConfig(d.Get("retention_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("retention_config"); !isEmptyValue(resource_pubsub_lite_topic_reflect.ValueOf(v)) && (ok || !resource_pubsub_lite_topic_reflect.DeepEqual(v, retentionConfigProp)) { - obj["retentionConfig"] = retentionConfigProp - } - reservationConfigProp, err := expandPubsubLiteTopicReservationConfig(d.Get("reservation_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("reservation_config"); !isEmptyValue(resource_pubsub_lite_topic_reflect.ValueOf(v)) && (ok || !resource_pubsub_lite_topic_reflect.DeepEqual(v, reservationConfigProp)) { - obj["reservationConfig"] = reservationConfigProp - } - - obj, err = resourcePubsubLiteTopicEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/topics/{{name}}") - if err != nil { - return err - } - - resource_pubsub_lite_topic_log.Printf("[DEBUG] Updating Topic %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("partition_config") { - updateMask = append(updateMask, "partitionConfig") - } - - if d.HasChange("retention_config") { - updateMask = append(updateMask, "retentionConfig") - } - - if d.HasChange("reservation_config") { - updateMask = append(updateMask, "reservationConfig") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_pubsub_lite_topic_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, 
err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_lite_topic_schema.TimeoutUpdate)) - - if err != nil { - return resource_pubsub_lite_topic_fmt.Errorf("Error updating Topic %q: %s", d.Id(), err) - } else { - resource_pubsub_lite_topic_log.Printf("[DEBUG] Finished updating Topic %q: %#v", d.Id(), res) - } - - return resourcePubsubLiteTopicRead(d, meta) -} - -func resourcePubsubLiteTopicDelete(d *resource_pubsub_lite_topic_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_lite_topic_fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{PubsubLiteBasePath}}projects/{{project}}/locations/{{zone}}/topics/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_pubsub_lite_topic_log.Printf("[DEBUG] Deleting Topic %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_lite_topic_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Topic") - } - - resource_pubsub_lite_topic_log.Printf("[DEBUG] Finished deleting Topic %q: %#v", d.Id(), res) - return nil -} - -func resourcePubsubLiteTopicImport(d *resource_pubsub_lite_topic_schema.ResourceData, meta interface{}) ([]*resource_pubsub_lite_topic_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/topics/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := 
replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/topics/{{name}}") - if err != nil { - return nil, resource_pubsub_lite_topic_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_pubsub_lite_topic_schema.ResourceData{d}, nil -} - -func flattenPubsubLiteTopicPartitionConfig(v interface{}, d *resource_pubsub_lite_topic_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["count"] = - flattenPubsubLiteTopicPartitionConfigCount(original["count"], d, config) - transformed["capacity"] = - flattenPubsubLiteTopicPartitionConfigCapacity(original["capacity"], d, config) - return []interface{}{transformed} -} - -func flattenPubsubLiteTopicPartitionConfigCount(v interface{}, d *resource_pubsub_lite_topic_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_pubsub_lite_topic_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenPubsubLiteTopicPartitionConfigCapacity(v interface{}, d *resource_pubsub_lite_topic_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["publish_mib_per_sec"] = - flattenPubsubLiteTopicPartitionConfigCapacityPublishMibPerSec(original["publishMibPerSec"], d, config) - transformed["subscribe_mib_per_sec"] = - flattenPubsubLiteTopicPartitionConfigCapacitySubscribeMibPerSec(original["subscribeMibPerSec"], d, config) - return []interface{}{transformed} -} - -func flattenPubsubLiteTopicPartitionConfigCapacityPublishMibPerSec(v interface{}, d 
*resource_pubsub_lite_topic_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_pubsub_lite_topic_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenPubsubLiteTopicPartitionConfigCapacitySubscribeMibPerSec(v interface{}, d *resource_pubsub_lite_topic_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_pubsub_lite_topic_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenPubsubLiteTopicRetentionConfig(v interface{}, d *resource_pubsub_lite_topic_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["per_partition_bytes"] = - flattenPubsubLiteTopicRetentionConfigPerPartitionBytes(original["perPartitionBytes"], d, config) - transformed["period"] = - flattenPubsubLiteTopicRetentionConfigPeriod(original["period"], d, config) - return []interface{}{transformed} -} - -func flattenPubsubLiteTopicRetentionConfigPerPartitionBytes(v interface{}, d *resource_pubsub_lite_topic_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubLiteTopicRetentionConfigPeriod(v interface{}, d *resource_pubsub_lite_topic_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubLiteTopicReservationConfig(v interface{}, d *resource_pubsub_lite_topic_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - 
transformed["throughput_reservation"] = - flattenPubsubLiteTopicReservationConfigThroughputReservation(original["throughputReservation"], d, config) - return []interface{}{transformed} -} - -func flattenPubsubLiteTopicReservationConfigThroughputReservation(v interface{}, d *resource_pubsub_lite_topic_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandPubsubLiteTopicPartitionConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedCount, err := expandPubsubLiteTopicPartitionConfigCount(original["count"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_lite_topic_reflect.ValueOf(transformedCount); val.IsValid() && !isEmptyValue(val) { - transformed["count"] = transformedCount - } - - transformedCapacity, err := expandPubsubLiteTopicPartitionConfigCapacity(original["capacity"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_lite_topic_reflect.ValueOf(transformedCapacity); val.IsValid() && !isEmptyValue(val) { - transformed["capacity"] = transformedCapacity - } - - return transformed, nil -} - -func expandPubsubLiteTopicPartitionConfigCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubLiteTopicPartitionConfigCapacity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPublishMibPerSec, err := expandPubsubLiteTopicPartitionConfigCapacityPublishMibPerSec(original["publish_mib_per_sec"], d, config) - 
if err != nil { - return nil, err - } else if val := resource_pubsub_lite_topic_reflect.ValueOf(transformedPublishMibPerSec); val.IsValid() && !isEmptyValue(val) { - transformed["publishMibPerSec"] = transformedPublishMibPerSec - } - - transformedSubscribeMibPerSec, err := expandPubsubLiteTopicPartitionConfigCapacitySubscribeMibPerSec(original["subscribe_mib_per_sec"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_lite_topic_reflect.ValueOf(transformedSubscribeMibPerSec); val.IsValid() && !isEmptyValue(val) { - transformed["subscribeMibPerSec"] = transformedSubscribeMibPerSec - } - - return transformed, nil -} - -func expandPubsubLiteTopicPartitionConfigCapacityPublishMibPerSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubLiteTopicPartitionConfigCapacitySubscribeMibPerSec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubLiteTopicRetentionConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPerPartitionBytes, err := expandPubsubLiteTopicRetentionConfigPerPartitionBytes(original["per_partition_bytes"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_lite_topic_reflect.ValueOf(transformedPerPartitionBytes); val.IsValid() && !isEmptyValue(val) { - transformed["perPartitionBytes"] = transformedPerPartitionBytes - } - - transformedPeriod, err := expandPubsubLiteTopicRetentionConfigPeriod(original["period"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_lite_topic_reflect.ValueOf(transformedPeriod); val.IsValid() && !isEmptyValue(val) { - transformed["period"] = transformedPeriod - } - - return 
transformed, nil -} - -func expandPubsubLiteTopicRetentionConfigPerPartitionBytes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubLiteTopicRetentionConfigPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubLiteTopicReservationConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedThroughputReservation, err := expandPubsubLiteTopicReservationConfigThroughputReservation(original["throughput_reservation"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_lite_topic_reflect.ValueOf(transformedThroughputReservation); val.IsValid() && !isEmptyValue(val) { - transformed["throughputReservation"] = transformedThroughputReservation - } - - return transformed, nil -} - -func expandPubsubLiteTopicReservationConfigThroughputReservation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseRegionalFieldValue("reservations", v.(string), "project", "region", "zone", d, config, true) - if err != nil { - return nil, resource_pubsub_lite_topic_fmt.Errorf("Invalid value for throughput_reservation: %s", err) - } - - return resource_pubsub_lite_topic_fmt.Sprintf("projects/%s/locations/%s/reservations/%s", f.Project, f.Region, f.Name), nil -} - -func resourcePubsubLiteTopicEncoder(d *resource_pubsub_lite_topic_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - - zone, err := getZone(d, config) - if err != nil { - return nil, err - } - - if zone == "" { - return nil, resource_pubsub_lite_topic_fmt.Errorf("zone must be non-empty - set in resource or at provider-level") - } 
- - region := getRegionFromZone(zone) - - if region == "" { - return nil, resource_pubsub_lite_topic_fmt.Errorf("invalid zone %q, unable to infer region from zone", zone) - } - - return obj, nil -} - -func resourcePubsubSchema() *resource_pubsub_schema_schema.Resource { - return &resource_pubsub_schema_schema.Resource{ - Create: resourcePubsubSchemaCreate, - Read: resourcePubsubSchemaRead, - Update: resourcePubsubSchemaUpdate, - Delete: resourcePubsubSchemaDelete, - - Importer: &resource_pubsub_schema_schema.ResourceImporter{ - State: resourcePubsubSchemaImport, - }, - - Timeouts: &resource_pubsub_schema_schema.ResourceTimeout{ - Create: resource_pubsub_schema_schema.DefaultTimeout(4 * resource_pubsub_schema_time.Minute), - Update: resource_pubsub_schema_schema.DefaultTimeout(4 * resource_pubsub_schema_time.Minute), - Delete: resource_pubsub_schema_schema.DefaultTimeout(6 * resource_pubsub_schema_time.Minute), - }, - - Schema: map[string]*resource_pubsub_schema_schema.Schema{ - "name": { - Type: resource_pubsub_schema_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The ID to use for the schema, which will become the final component of the schema's resource name.`, - }, - "definition": { - Type: resource_pubsub_schema_schema.TypeString, - Optional: true, - Description: `The definition of the schema. 
-This should contain a string representing the full definition of the schema -that is a valid schema definition of the type specified in type.`, - }, - "type": { - Type: resource_pubsub_schema_schema.TypeString, - Optional: true, - ValidateFunc: resource_pubsub_schema_validation.StringInSlice([]string{"TYPE_UNSPECIFIED", "PROTOCOL_BUFFER", "AVRO", ""}, false), - Description: `The type of the schema definition Default value: "TYPE_UNSPECIFIED" Possible values: ["TYPE_UNSPECIFIED", "PROTOCOL_BUFFER", "AVRO"]`, - Default: "TYPE_UNSPECIFIED", - }, - "project": { - Type: resource_pubsub_schema_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourcePubsubSchemaCreate(d *resource_pubsub_schema_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - typeProp, err := expandPubsubSchemaType(d.Get("type"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("type"); !isEmptyValue(resource_pubsub_schema_reflect.ValueOf(typeProp)) && (ok || !resource_pubsub_schema_reflect.DeepEqual(v, typeProp)) { - obj["type"] = typeProp - } - definitionProp, err := expandPubsubSchemaDefinition(d.Get("definition"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("definition"); !isEmptyValue(resource_pubsub_schema_reflect.ValueOf(definitionProp)) && (ok || !resource_pubsub_schema_reflect.DeepEqual(v, definitionProp)) { - obj["definition"] = definitionProp - } - nameProp, err := expandPubsubSchemaName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_pubsub_schema_reflect.ValueOf(nameProp)) && (ok || !resource_pubsub_schema_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - - url, err := replaceVars(d, config, 
"{{PubsubBasePath}}projects/{{project}}/schemas?schemaId={{name}}") - if err != nil { - return err - } - - resource_pubsub_schema_log.Printf("[DEBUG] Creating new Schema: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_schema_fmt.Errorf("Error fetching project for Schema: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_schema_schema.TimeoutCreate)) - if err != nil { - return resource_pubsub_schema_fmt.Errorf("Error creating Schema: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/schemas/{{name}}") - if err != nil { - return resource_pubsub_schema_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_pubsub_schema_log.Printf("[DEBUG] Finished creating Schema %q: %#v", d.Id(), res) - - return resourcePubsubSchemaRead(d, meta) -} - -func resourcePubsubSchemaPollRead(d *resource_pubsub_schema_schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/schemas/{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, resource_pubsub_schema_fmt.Errorf("Error fetching project for Schema: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return res, err - } - return res, nil - } -} - -func resourcePubsubSchemaRead(d 
*resource_pubsub_schema_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/schemas/{{name}}")
	if err != nil {
		return err
	}

	billingProject := ""

	project, err := getProject(d, config)
	if err != nil {
		return resource_pubsub_schema_fmt.Errorf("Error fetching project for Schema: %s", err)
	}
	billingProject = project

	// An explicitly configured billing project overrides the resource project.
	if bp, err := getBillingProject(d, config); err == nil {
		billingProject = bp
	}

	res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil)
	if err != nil {
		// A 404 clears the resource from state instead of failing the plan.
		return handleNotFoundError(err, d, resource_pubsub_schema_fmt.Sprintf("PubsubSchema %q", d.Id()))
	}

	if err := d.Set("project", project); err != nil {
		return resource_pubsub_schema_fmt.Errorf("Error reading Schema: %s", err)
	}

	if err := d.Set("type", flattenPubsubSchemaType(res["type"], d, config)); err != nil {
		return resource_pubsub_schema_fmt.Errorf("Error reading Schema: %s", err)
	}
	if err := d.Set("name", flattenPubsubSchemaName(res["name"], d, config)); err != nil {
		return resource_pubsub_schema_fmt.Errorf("Error reading Schema: %s", err)
	}

	return nil
}

// resourcePubsubSchemaUpdate builds the full schema object from state
// (type, definition, name), PUTs it to the Pub/Sub API, then re-reads
// remote state to refresh computed fields.
func resourcePubsubSchemaUpdate(d *resource_pubsub_schema_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	billingProject := ""

	project, err := getProject(d, config)
	if err != nil {
		return resource_pubsub_schema_fmt.Errorf("Error fetching project for Schema: %s", err)
	}
	billingProject = project

	obj := make(map[string]interface{})
	typeProp, err := expandPubsubSchemaType(d.Get("type"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("type"); !isEmptyValue(resource_pubsub_schema_reflect.ValueOf(v)) && (ok || !resource_pubsub_schema_reflect.DeepEqual(v, typeProp)) {
		obj["type"] = typeProp
	}
	definitionProp, err := expandPubsubSchemaDefinition(d.Get("definition"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("definition"); !isEmptyValue(resource_pubsub_schema_reflect.ValueOf(v)) && (ok || !resource_pubsub_schema_reflect.DeepEqual(v, definitionProp)) {
		obj["definition"] = definitionProp
	}
	nameProp, err := expandPubsubSchemaName(d.Get("name"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_pubsub_schema_reflect.ValueOf(v)) && (ok || !resource_pubsub_schema_reflect.DeepEqual(v, nameProp)) {
		obj["name"] = nameProp
	}

	url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/schemas/{{name}}")
	if err != nil {
		return err
	}

	resource_pubsub_schema_log.Printf("[DEBUG] Updating Schema %q: %#v", d.Id(), obj)

	if bp, err := getBillingProject(d, config); err == nil {
		billingProject = bp
	}

	res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_schema_schema.TimeoutUpdate))

	if err != nil {
		return resource_pubsub_schema_fmt.Errorf("Error updating Schema %q: %s", d.Id(), err)
	} else {
		resource_pubsub_schema_log.Printf("[DEBUG] Finished updating Schema %q: %#v", d.Id(), res)
	}

	return resourcePubsubSchemaRead(d, meta)
}

// resourcePubsubSchemaDelete issues a DELETE for the schema and then polls
// the resource until the API reports it absent (deletion is eventually
// consistent on the Pub/Sub side).
func resourcePubsubSchemaDelete(d *resource_pubsub_schema_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	billingProject := ""

	project, err := getProject(d, config)
	if err != nil {
		return resource_pubsub_schema_fmt.Errorf("Error fetching project for Schema: %s", err)
	}
	billingProject = project

	url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/schemas/{{name}}")
	if err != nil {
		return err
	}

	var obj map[string]interface{}
	resource_pubsub_schema_log.Printf("[DEBUG] Deleting Schema %q", d.Id())

	if bp, err := getBillingProject(d, config); err == nil {
		billingProject = bp
	}

	res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_schema_schema.TimeoutDelete))
	if err != nil {
		return handleNotFoundError(err, d, "Schema")
	}

	// Fixed: bound the absence poll by the delete timeout. The generated
	// code used TimeoutCreate here, so the delete poll was limited by the
	// create timeout rather than the configured delete timeout.
	err = PollingWaitTime(resourcePubsubSchemaPollRead(d, meta), PollCheckForAbsence, "Deleting Schema", d.Timeout(resource_pubsub_schema_schema.TimeoutDelete), 10)
	if err != nil {
		return resource_pubsub_schema_fmt.Errorf("Error waiting to delete Schema: %s", err)
	}

	resource_pubsub_schema_log.Printf("[DEBUG] Finished deleting Schema %q: %#v", d.Id(), res)
	return nil
}

// resourcePubsubSchemaImport accepts the three documented import ID formats
// and normalizes the state ID to projects/{{project}}/schemas/{{name}}.
// The named capture groups (?P<project>/?P<name>) are required by
// parseImportId and were lost in transit; restored here.
func resourcePubsubSchemaImport(d *resource_pubsub_schema_schema.ResourceData, meta interface{}) ([]*resource_pubsub_schema_schema.ResourceData, error) {
	config := meta.(*Config)
	if err := parseImportId([]string{
		"projects/(?P<project>[^/]+)/schemas/(?P<name>[^/]+)",
		"(?P<project>[^/]+)/(?P<name>[^/]+)",
		"(?P<name>[^/]+)",
	}, d, config); err != nil {
		return nil, err
	}

	id, err := replaceVars(d, config, "projects/{{project}}/schemas/{{name}}")
	if err != nil {
		return nil, resource_pubsub_schema_fmt.Errorf("Error constructing id: %s", err)
	}
	d.SetId(id)

	return []*resource_pubsub_schema_schema.ResourceData{d}, nil
}

// flattenPubsubSchemaType passes the API value through unchanged.
func flattenPubsubSchemaType(v interface{}, d *resource_pubsub_schema_schema.ResourceData, config *Config) interface{} {
	return v
}

// flattenPubsubSchemaName reduces the API's self link to the short name.
func flattenPubsubSchemaName(v interface{}, d *resource_pubsub_schema_schema.ResourceData, config *Config) interface{} {
	if v == nil {
		return v
	}
	return NameFromSelfLinkStateFunc(v)
}

func expandPubsubSchemaType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
	return v, nil
}

func expandPubsubSchemaDefinition(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
	return v, nil
}

func expandPubsubSchemaName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {
	return GetResourceNameFromSelfLink(v.(string)), nil
}

// comparePubsubSubscriptionExpirationPolicy suppresses diffs between
// duration strings that differ only in leading zeros or trailing
// fractional zeros (e.g. "300s" vs "300.000s").
func comparePubsubSubscriptionExpirationPolicy(_, old, new string, _ *resource_pubsub_subscription_schema.ResourceData) bool {
	trimmedNew := resource_pubsub_subscription_strings.TrimLeft(new, "0")
	trimmedOld := resource_pubsub_subscription_strings.TrimLeft(old, "0")
	if resource_pubsub_subscription_strings.Contains(trimmedNew, ".") {
		trimmedNew = resource_pubsub_subscription_strings.TrimRight(resource_pubsub_subscription_strings.TrimSuffix(trimmedNew, "s"), "0") + "s"
	}
	if resource_pubsub_subscription_strings.Contains(trimmedOld, ".") {
		trimmedOld = resource_pubsub_subscription_strings.TrimRight(resource_pubsub_subscription_strings.TrimSuffix(trimmedOld, "s"), "0") + "s"
	}
	return trimmedNew == trimmedOld
}

// resourcePubsubSubscription returns the Terraform resource definition for
// google_pubsub_subscription: CRUD entry points, import support, per-action
// timeouts, and the attribute schema.
func resourcePubsubSubscription() *resource_pubsub_subscription_schema.Resource {
	return &resource_pubsub_subscription_schema.Resource{
		Create: resourcePubsubSubscriptionCreate,
		Read:   resourcePubsubSubscriptionRead,
		Update: resourcePubsubSubscriptionUpdate,
		Delete: resourcePubsubSubscriptionDelete,

		Importer: &resource_pubsub_subscription_schema.ResourceImporter{
			State: resourcePubsubSubscriptionImport,
		},

		Timeouts: &resource_pubsub_subscription_schema.ResourceTimeout{
			Create: resource_pubsub_subscription_schema.DefaultTimeout(6 * resource_pubsub_subscription_time.Minute),
			Update: resource_pubsub_subscription_schema.DefaultTimeout(6 * resource_pubsub_subscription_time.Minute),
			Delete: resource_pubsub_subscription_schema.DefaultTimeout(4 * resource_pubsub_subscription_time.Minute),
		},

		Schema: map[string]*resource_pubsub_subscription_schema.Schema{
			"name": {
				Type:        resource_pubsub_subscription_schema.TypeString,
				Required:    true,
				ForceNew:    true,
				Description: `Name of the subscription.`,
			},
"topic": {
				Type:             resource_pubsub_subscription_schema.TypeString,
				Required:         true,
				ForceNew:         true,
				DiffSuppressFunc: compareSelfLinkOrResourceName,
				Description:      `A reference to a Topic resource.`,
			},
			"ack_deadline_seconds": {
				Type:     resource_pubsub_subscription_schema.TypeInt,
				Computed: true,
				Optional: true,
				Description: `This value is the maximum time after a subscriber receives a message
before the subscriber should acknowledge the message. After message
delivery but before the ack deadline expires and before the message is
acknowledged, it is an outstanding message and will not be delivered
again during that time (on a best-effort basis).

For pull subscriptions, this value is used as the initial value for
the ack deadline. To override this value for a given message, call
subscriptions.modifyAckDeadline with the corresponding ackId if using
pull. The minimum custom deadline you can specify is 10 seconds. The
maximum custom deadline you can specify is 600 seconds (10 minutes).
If this parameter is 0, a default value of 10 seconds is used.

For push delivery, this value is also used to set the request timeout
for the call to the push endpoint.

If the subscriber never acknowledges the message, the Pub/Sub system
will eventually redeliver the message.`,
			},
			"dead_letter_policy": {
				Type:     resource_pubsub_subscription_schema.TypeList,
				Optional: true,
				Description: `A policy that specifies the conditions for dead lettering messages in
this subscription. If dead_letter_policy is not set, dead lettering
is disabled.

The Cloud Pub/Sub service account associated with this subscription's
parent project (i.e.,
service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have
permission to Acknowledge() messages on this subscription.`,
				MaxItems: 1,
				Elem: &resource_pubsub_subscription_schema.Resource{
					Schema: map[string]*resource_pubsub_subscription_schema.Schema{
						"dead_letter_topic": {
							Type:     resource_pubsub_subscription_schema.TypeString,
							Optional: true,
							Description: `The name of the topic to which dead letter messages should be published.
Format is 'projects/{project}/topics/{topic}'.

The Cloud Pub/Sub service account associated with the enclosing subscription's
parent project (i.e.,
service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have
permission to Publish() to this topic.

The operation will fail if the topic does not exist.
Users should ensure that there is a subscription attached to this topic
since messages published to a topic with no subscriptions are lost.`,
						},
						"max_delivery_attempts": {
							Type:     resource_pubsub_subscription_schema.TypeInt,
							Optional: true,
							Description: `The maximum number of delivery attempts for any message. The value must be
between 5 and 100.

The number of delivery attempts is defined as 1 + (the sum of number of
NACKs and number of times the acknowledgement deadline has been exceeded for the message).

A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that
client libraries may automatically extend ack_deadlines.

This field will be honored on a best effort basis.

If this parameter is 0, a default value of 5 is used.`,
						},
					},
				},
			},
			"enable_message_ordering": {
				Type:     resource_pubsub_subscription_schema.TypeBool,
				Optional: true,
				ForceNew: true,
				Description: `If 'true', messages published with the same orderingKey in PubsubMessage will be delivered to
the subscribers in the order in which they are received by the Pub/Sub system. Otherwise, they
may be delivered in any order.`,
			},
			"expiration_policy": {
				Type:     resource_pubsub_subscription_schema.TypeList,
				Computed: true,
				Optional: true,
				Description: `A policy that specifies the conditions for this subscription's expiration.
A subscription is considered active as long as any connected subscriber
is successfully consuming messages from the subscription or is issuing
operations on the subscription. If expirationPolicy is not set, a default
policy with ttl of 31 days will be used. If it is set but ttl is "", the
resource never expires. The minimum allowed value for expirationPolicy.ttl
is 1 day.`,
				MaxItems: 1,
				Elem: &resource_pubsub_subscription_schema.Resource{
					Schema: map[string]*resource_pubsub_subscription_schema.Schema{
						"ttl": {
							Type:     resource_pubsub_subscription_schema.TypeString,
							Required: true,
							// Normalizes zero-padding differences in duration strings.
							DiffSuppressFunc: comparePubsubSubscriptionExpirationPolicy,
							Description: `Specifies the "time-to-live" duration for an associated resource. The
resource expires if it is not active for a period of ttl.
If ttl is not set, the associated resource never expires.
A duration in seconds with up to nine fractional digits, terminated by 's'.
Example - "3.5s".`,
						},
					},
				},
			},
			"filter": {
				Type:     resource_pubsub_subscription_schema.TypeString,
				Optional: true,
				ForceNew: true,
				Description: `The subscription only delivers the messages that match the filter.
Pub/Sub automatically acknowledges the messages that don't match the filter. You can filter messages
by their attributes. The maximum length of a filter is 256 bytes. After creating the subscription,
you can't modify the filter.`,
			},
			"labels": {
				Type:        resource_pubsub_subscription_schema.TypeMap,
				Optional:    true,
				Description: `A set of key/value label pairs to assign to this Subscription.`,
				Elem:        &resource_pubsub_subscription_schema.Schema{Type: resource_pubsub_subscription_schema.TypeString},
			},
			"message_retention_duration": {
				Type:     resource_pubsub_subscription_schema.TypeString,
				Optional: true,
				Description: `How long to retain unacknowledged messages in the subscription's
backlog, from the moment a message is published. If
retainAckedMessages is true, then this also configures the retention
of acknowledged messages, and thus configures how far back in time a
subscriptions.seek can be done. Defaults to 7 days. Cannot be more
than 7 days ('"604800s"') or less than 10 minutes ('"600s"').

A duration in seconds with up to nine fractional digits, terminated
by 's'. Example: '"600.5s"'.`,
				Default: "604800s",
			},
			"push_config": {
				Type:     resource_pubsub_subscription_schema.TypeList,
				Optional: true,
				Description: `If push delivery is used with this subscription, this field is used to
configure it. An empty pushConfig signifies that the subscriber will
pull and ack messages using API methods.`,
				MaxItems: 1,
				Elem: &resource_pubsub_subscription_schema.Resource{
					Schema: map[string]*resource_pubsub_subscription_schema.Schema{
						"push_endpoint": {
							Type:     resource_pubsub_subscription_schema.TypeString,
							Required: true,
							Description: `A URL locating the endpoint to which messages should be pushed.
For example, a Webhook endpoint might use
"https://example.com/push".`,
						},
						"attributes": {
							Type:     resource_pubsub_subscription_schema.TypeMap,
							Optional: true,
							// The API fills in x-goog-version when unset; don't diff on it.
							DiffSuppressFunc: ignoreMissingKeyInMap("x-goog-version"),
							Description: `Endpoint configuration attributes.

Every endpoint has a set of API supported attributes that can
be used to control different aspects of the message delivery.

The currently supported attribute is x-goog-version, which you
can use to change the format of the pushed message. This
attribute indicates the version of the data expected by
the endpoint. This controls the shape of the pushed message
(i.e., its fields and metadata). The endpoint version is
based on the version of the Pub/Sub API.

If not present during the subscriptions.create call,
it will default to the version of the API used to make
such call. If not present during a subscriptions.modifyPushConfig
call, its value will not be changed. subscriptions.get
calls will always return a valid version, even if the
subscription was created without this attribute.

The possible values for this attribute are:

- v1beta1: uses the push format defined in the v1beta1 Pub/Sub API.
- v1 or v1beta2: uses the push format defined in the v1 Pub/Sub API.`,
							Elem: &resource_pubsub_subscription_schema.Schema{Type: resource_pubsub_subscription_schema.TypeString},
						},
						"oidc_token": {
							Type:     resource_pubsub_subscription_schema.TypeList,
							Optional: true,
							Description: `If specified, Pub/Sub will generate and attach an OIDC JWT token as
an Authorization header in the HTTP request for every pushed message.`,
							MaxItems: 1,
							Elem: &resource_pubsub_subscription_schema.Resource{
								Schema: map[string]*resource_pubsub_subscription_schema.Schema{
									"service_account_email": {
										Type:     resource_pubsub_subscription_schema.TypeString,
										Required: true,
										Description: `Service account email to be used for generating the OIDC token.
The caller (for subscriptions.create, subscriptions.patch, and
subscriptions.modifyPushConfig RPCs) must have the
iam.serviceAccounts.actAs permission for the service account.`,
									},
									"audience": {
										Type:     resource_pubsub_subscription_schema.TypeString,
										Optional: true,
										Description: `Audience to be used when generating OIDC token. The audience claim
identifies the recipients that the JWT is intended for. The audience
value is a single case-sensitive string. Having multiple values (array)
for the audience field is not supported. More info about the OIDC JWT
token audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3
Note: if not specified, the Push endpoint URL will be used.`,
									},
								},
							},
						},
					},
				},
			},
			"retain_acked_messages": {
				Type:     resource_pubsub_subscription_schema.TypeBool,
				Optional: true,
				Description: `Indicates whether to retain acknowledged messages. If 'true', then
messages are not expunged from the subscription's backlog, even if
they are acknowledged, until they fall out of the
messageRetentionDuration window.`,
			},
			"retry_policy": {
				Type:     resource_pubsub_subscription_schema.TypeList,
				Optional: true,
				Description: `A policy that specifies how Pub/Sub retries message delivery for this subscription.

If not set, the default retry policy is applied. This generally implies that messages will be retried as soon as possible for healthy subscribers.
RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded events for a given message`,
				MaxItems: 1,
				Elem: &resource_pubsub_subscription_schema.Resource{
					Schema: map[string]*resource_pubsub_subscription_schema.Schema{
						"maximum_backoff": {
							Type:             resource_pubsub_subscription_schema.TypeString,
							Computed:         true,
							Optional:         true,
							DiffSuppressFunc: durationDiffSuppress,
							Description: `The maximum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 600 seconds.
-A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, - }, - "minimum_backoff": { - Type: resource_pubsub_subscription_schema.TypeString, - Computed: true, - Optional: true, - DiffSuppressFunc: durationDiffSuppress, - Description: `The minimum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 10 seconds. -A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, - }, - }, - }, - }, - "project": { - Type: resource_pubsub_subscription_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourcePubsubSubscriptionCreate(d *resource_pubsub_subscription_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandPubsubSubscriptionName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_pubsub_subscription_reflect.ValueOf(nameProp)) && (ok || !resource_pubsub_subscription_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - topicProp, err := expandPubsubSubscriptionTopic(d.Get("topic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("topic"); !isEmptyValue(resource_pubsub_subscription_reflect.ValueOf(topicProp)) && (ok || !resource_pubsub_subscription_reflect.DeepEqual(v, topicProp)) { - obj["topic"] = topicProp - } - labelsProp, err := expandPubsubSubscriptionLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_pubsub_subscription_reflect.ValueOf(labelsProp)) && (ok || !resource_pubsub_subscription_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - pushConfigProp, err := 
expandPubsubSubscriptionPushConfig(d.Get("push_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("push_config"); !isEmptyValue(resource_pubsub_subscription_reflect.ValueOf(pushConfigProp)) && (ok || !resource_pubsub_subscription_reflect.DeepEqual(v, pushConfigProp)) { - obj["pushConfig"] = pushConfigProp - } - ackDeadlineSecondsProp, err := expandPubsubSubscriptionAckDeadlineSeconds(d.Get("ack_deadline_seconds"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ack_deadline_seconds"); !isEmptyValue(resource_pubsub_subscription_reflect.ValueOf(ackDeadlineSecondsProp)) && (ok || !resource_pubsub_subscription_reflect.DeepEqual(v, ackDeadlineSecondsProp)) { - obj["ackDeadlineSeconds"] = ackDeadlineSecondsProp - } - messageRetentionDurationProp, err := expandPubsubSubscriptionMessageRetentionDuration(d.Get("message_retention_duration"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("message_retention_duration"); !isEmptyValue(resource_pubsub_subscription_reflect.ValueOf(messageRetentionDurationProp)) && (ok || !resource_pubsub_subscription_reflect.DeepEqual(v, messageRetentionDurationProp)) { - obj["messageRetentionDuration"] = messageRetentionDurationProp - } - retainAckedMessagesProp, err := expandPubsubSubscriptionRetainAckedMessages(d.Get("retain_acked_messages"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("retain_acked_messages"); !isEmptyValue(resource_pubsub_subscription_reflect.ValueOf(retainAckedMessagesProp)) && (ok || !resource_pubsub_subscription_reflect.DeepEqual(v, retainAckedMessagesProp)) { - obj["retainAckedMessages"] = retainAckedMessagesProp - } - expirationPolicyProp, err := expandPubsubSubscriptionExpirationPolicy(d.Get("expiration_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("expiration_policy"); ok || !resource_pubsub_subscription_reflect.DeepEqual(v, expirationPolicyProp) { - 
obj["expirationPolicy"] = expirationPolicyProp - } - filterProp, err := expandPubsubSubscriptionFilter(d.Get("filter"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("filter"); !isEmptyValue(resource_pubsub_subscription_reflect.ValueOf(filterProp)) && (ok || !resource_pubsub_subscription_reflect.DeepEqual(v, filterProp)) { - obj["filter"] = filterProp - } - deadLetterPolicyProp, err := expandPubsubSubscriptionDeadLetterPolicy(d.Get("dead_letter_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("dead_letter_policy"); ok || !resource_pubsub_subscription_reflect.DeepEqual(v, deadLetterPolicyProp) { - obj["deadLetterPolicy"] = deadLetterPolicyProp - } - retryPolicyProp, err := expandPubsubSubscriptionRetryPolicy(d.Get("retry_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("retry_policy"); !isEmptyValue(resource_pubsub_subscription_reflect.ValueOf(retryPolicyProp)) && (ok || !resource_pubsub_subscription_reflect.DeepEqual(v, retryPolicyProp)) { - obj["retryPolicy"] = retryPolicyProp - } - enableMessageOrderingProp, err := expandPubsubSubscriptionEnableMessageOrdering(d.Get("enable_message_ordering"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enable_message_ordering"); !isEmptyValue(resource_pubsub_subscription_reflect.ValueOf(enableMessageOrderingProp)) && (ok || !resource_pubsub_subscription_reflect.DeepEqual(v, enableMessageOrderingProp)) { - obj["enableMessageOrdering"] = enableMessageOrderingProp - } - - obj, err = resourcePubsubSubscriptionEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/subscriptions/{{name}}") - if err != nil { - return err - } - - resource_pubsub_subscription_log.Printf("[DEBUG] Creating new Subscription: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return 
resource_pubsub_subscription_fmt.Errorf("Error fetching project for Subscription: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_subscription_schema.TimeoutCreate)) - if err != nil { - return resource_pubsub_subscription_fmt.Errorf("Error creating Subscription: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/subscriptions/{{name}}") - if err != nil { - return resource_pubsub_subscription_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = PollingWaitTime(resourcePubsubSubscriptionPollRead(d, meta), PollCheckForExistence, "Creating Subscription", d.Timeout(resource_pubsub_subscription_schema.TimeoutCreate), 1) - if err != nil { - resource_pubsub_subscription_log.Printf("[ERROR] Unable to confirm eventually consistent Subscription %q finished updating: %q", d.Id(), err) - } - - resource_pubsub_subscription_log.Printf("[DEBUG] Finished creating Subscription %q: %#v", d.Id(), res) - - return resourcePubsubSubscriptionRead(d, meta) -} - -func resourcePubsubSubscriptionPollRead(d *resource_pubsub_subscription_schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/subscriptions/{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, resource_pubsub_subscription_fmt.Errorf("Error fetching project for Subscription: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", 
billingProject, url, userAgent, nil)
		if err != nil {
			return res, err
		}
		return res, nil
	}
}

// resourcePubsubSubscriptionRead GETs the subscription and copies every
// API field back into Terraform state via the flatten helpers.
func resourcePubsubSubscriptionRead(d *resource_pubsub_subscription_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/subscriptions/{{name}}")
	if err != nil {
		return err
	}

	billingProject := ""

	project, err := getProject(d, config)
	if err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error fetching project for Subscription: %s", err)
	}
	billingProject = project

	// An explicitly configured billing project overrides the resource project.
	if bp, err := getBillingProject(d, config); err == nil {
		billingProject = bp
	}

	res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil)
	if err != nil {
		// A 404 clears the resource from state instead of failing.
		return handleNotFoundError(err, d, resource_pubsub_subscription_fmt.Sprintf("PubsubSubscription %q", d.Id()))
	}

	if err := d.Set("project", project); err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error reading Subscription: %s", err)
	}

	if err := d.Set("name", flattenPubsubSubscriptionName(res["name"], d, config)); err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error reading Subscription: %s", err)
	}
	if err := d.Set("topic", flattenPubsubSubscriptionTopic(res["topic"], d, config)); err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error reading Subscription: %s", err)
	}
	if err := d.Set("labels", flattenPubsubSubscriptionLabels(res["labels"], d, config)); err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error reading Subscription: %s", err)
	}
	if err := d.Set("push_config", flattenPubsubSubscriptionPushConfig(res["pushConfig"], d, config)); err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error reading Subscription: %s", err)
	}
	if err := d.Set("ack_deadline_seconds", flattenPubsubSubscriptionAckDeadlineSeconds(res["ackDeadlineSeconds"], d, config)); err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error reading Subscription: %s", err)
	}
	if err := d.Set("message_retention_duration", flattenPubsubSubscriptionMessageRetentionDuration(res["messageRetentionDuration"], d, config)); err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error reading Subscription: %s", err)
	}
	if err := d.Set("retain_acked_messages", flattenPubsubSubscriptionRetainAckedMessages(res["retainAckedMessages"], d, config)); err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error reading Subscription: %s", err)
	}
	if err := d.Set("expiration_policy", flattenPubsubSubscriptionExpirationPolicy(res["expirationPolicy"], d, config)); err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error reading Subscription: %s", err)
	}
	if err := d.Set("filter", flattenPubsubSubscriptionFilter(res["filter"], d, config)); err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error reading Subscription: %s", err)
	}
	if err := d.Set("dead_letter_policy", flattenPubsubSubscriptionDeadLetterPolicy(res["deadLetterPolicy"], d, config)); err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error reading Subscription: %s", err)
	}
	if err := d.Set("retry_policy", flattenPubsubSubscriptionRetryPolicy(res["retryPolicy"], d, config)); err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error reading Subscription: %s", err)
	}
	if err := d.Set("enable_message_ordering", flattenPubsubSubscriptionEnableMessageOrdering(res["enableMessageOrdering"], d, config)); err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error reading Subscription: %s", err)
	}

	return nil
}

// resourcePubsubSubscriptionUpdate PATCHes only the updatable fields,
// sending an updateMask listing exactly the attributes that changed.
// ForceNew fields (name, topic, filter, enable_message_ordering) are
// intentionally absent here.
func resourcePubsubSubscriptionUpdate(d *resource_pubsub_subscription_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	billingProject := ""

	project, err := getProject(d, config)
	if err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error fetching project for Subscription: %s", err)
	}
	billingProject = project

	obj := make(map[string]interface{})
	labelsProp, err := expandPubsubSubscriptionLabels(d.Get("labels"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_pubsub_subscription_reflect.ValueOf(v)) && (ok || !resource_pubsub_subscription_reflect.DeepEqual(v, labelsProp)) {
		obj["labels"] = labelsProp
	}
	pushConfigProp, err := expandPubsubSubscriptionPushConfig(d.Get("push_config"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("push_config"); !isEmptyValue(resource_pubsub_subscription_reflect.ValueOf(v)) && (ok || !resource_pubsub_subscription_reflect.DeepEqual(v, pushConfigProp)) {
		obj["pushConfig"] = pushConfigProp
	}
	ackDeadlineSecondsProp, err := expandPubsubSubscriptionAckDeadlineSeconds(d.Get("ack_deadline_seconds"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("ack_deadline_seconds"); !isEmptyValue(resource_pubsub_subscription_reflect.ValueOf(v)) && (ok || !resource_pubsub_subscription_reflect.DeepEqual(v, ackDeadlineSecondsProp)) {
		obj["ackDeadlineSeconds"] = ackDeadlineSecondsProp
	}
	messageRetentionDurationProp, err := expandPubsubSubscriptionMessageRetentionDuration(d.Get("message_retention_duration"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("message_retention_duration"); !isEmptyValue(resource_pubsub_subscription_reflect.ValueOf(v)) && (ok || !resource_pubsub_subscription_reflect.DeepEqual(v, messageRetentionDurationProp)) {
		obj["messageRetentionDuration"] = messageRetentionDurationProp
	}
	retainAckedMessagesProp, err := expandPubsubSubscriptionRetainAckedMessages(d.Get("retain_acked_messages"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("retain_acked_messages"); !isEmptyValue(resource_pubsub_subscription_reflect.ValueOf(v)) && (ok || !resource_pubsub_subscription_reflect.DeepEqual(v, retainAckedMessagesProp)) {
		obj["retainAckedMessages"] = retainAckedMessagesProp
	}
	// expiration_policy and dead_letter_policy skip the isEmptyValue check:
	// an empty block is meaningful to the API.
	expirationPolicyProp, err := expandPubsubSubscriptionExpirationPolicy(d.Get("expiration_policy"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("expiration_policy"); ok || !resource_pubsub_subscription_reflect.DeepEqual(v, expirationPolicyProp) {
		obj["expirationPolicy"] = expirationPolicyProp
	}
	deadLetterPolicyProp, err := expandPubsubSubscriptionDeadLetterPolicy(d.Get("dead_letter_policy"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("dead_letter_policy"); ok || !resource_pubsub_subscription_reflect.DeepEqual(v, deadLetterPolicyProp) {
		obj["deadLetterPolicy"] = deadLetterPolicyProp
	}
	retryPolicyProp, err := expandPubsubSubscriptionRetryPolicy(d.Get("retry_policy"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("retry_policy"); !isEmptyValue(resource_pubsub_subscription_reflect.ValueOf(v)) && (ok || !resource_pubsub_subscription_reflect.DeepEqual(v, retryPolicyProp)) {
		obj["retryPolicy"] = retryPolicyProp
	}

	obj, err = resourcePubsubSubscriptionUpdateEncoder(d, meta, obj)
	if err != nil {
		return err
	}

	url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/subscriptions/{{name}}")
	if err != nil {
		return err
	}

	resource_pubsub_subscription_log.Printf("[DEBUG] Updating Subscription %q: %#v", d.Id(), obj)
	updateMask := []string{}

	if d.HasChange("labels") {
		updateMask = append(updateMask, "labels")
	}

	if d.HasChange("push_config") {
		updateMask = append(updateMask, "pushConfig")
	}

	if d.HasChange("ack_deadline_seconds") {
		updateMask = append(updateMask, "ackDeadlineSeconds")
	}

	if d.HasChange("message_retention_duration") {
		updateMask = append(updateMask, "messageRetentionDuration")
	}

	if d.HasChange("retain_acked_messages") {
		updateMask = append(updateMask, "retainAckedMessages")
	}

	if d.HasChange("expiration_policy") {
		updateMask = append(updateMask, "expirationPolicy")
	}

	if d.HasChange("dead_letter_policy") {
		updateMask = append(updateMask, "deadLetterPolicy")
	}

	if d.HasChange("retry_policy") {
		updateMask = append(updateMask, "retryPolicy")
	}

	// The updateMask query parameter tells the API which fields to patch.
	url, err = addQueryParams(url, map[string]string{"updateMask": resource_pubsub_subscription_strings.Join(updateMask, ",")})
	if err != nil {
		return err
	}

	if bp, err := getBillingProject(d, config); err == nil {
		billingProject = bp
	}

	res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_subscription_schema.TimeoutUpdate))

	if err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error updating Subscription %q: %s", d.Id(), err)
	} else {
		resource_pubsub_subscription_log.Printf("[DEBUG] Finished updating Subscription %q: %#v", d.Id(), res)
	}

	return resourcePubsubSubscriptionRead(d, meta)
}

// resourcePubsubSubscriptionDelete issues a DELETE for the subscription.
func resourcePubsubSubscriptionDelete(d *resource_pubsub_subscription_schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	userAgent, err := generateUserAgentString(d, config.userAgent)
	if err != nil {
		return err
	}

	billingProject := ""

	project, err := getProject(d, config)
	if err != nil {
		return resource_pubsub_subscription_fmt.Errorf("Error fetching project for Subscription: %s", err)
	}
	billingProject = project

	url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/subscriptions/{{name}}")
	if err != nil {
		return err
	}

	var obj map[string]interface{}
	resource_pubsub_subscription_log.Printf("[DEBUG] Deleting Subscription %q", d.Id())

	if bp, err := getBillingProject(d, config); err == nil {
		billingProject = bp
	}

	res, err :=
sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_subscription_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Subscription") - } - - resource_pubsub_subscription_log.Printf("[DEBUG] Finished deleting Subscription %q: %#v", d.Id(), res) - return nil -} - -func resourcePubsubSubscriptionImport(d *resource_pubsub_subscription_schema.ResourceData, meta interface{}) ([]*resource_pubsub_subscription_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/subscriptions/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/subscriptions/{{name}}") - if err != nil { - return nil, resource_pubsub_subscription_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_pubsub_subscription_schema.ResourceData{d}, nil -} - -func flattenPubsubSubscriptionName(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenPubsubSubscriptionTopic(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenPubsubSubscriptionLabels(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubSubscriptionPushConfig(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["oidc_token"] = - flattenPubsubSubscriptionPushConfigOidcToken(original["oidcToken"], d, config) - 
transformed["push_endpoint"] = - flattenPubsubSubscriptionPushConfigPushEndpoint(original["pushEndpoint"], d, config) - transformed["attributes"] = - flattenPubsubSubscriptionPushConfigAttributes(original["attributes"], d, config) - return []interface{}{transformed} -} - -func flattenPubsubSubscriptionPushConfigOidcToken(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["service_account_email"] = - flattenPubsubSubscriptionPushConfigOidcTokenServiceAccountEmail(original["serviceAccountEmail"], d, config) - transformed["audience"] = - flattenPubsubSubscriptionPushConfigOidcTokenAudience(original["audience"], d, config) - return []interface{}{transformed} -} - -func flattenPubsubSubscriptionPushConfigOidcTokenServiceAccountEmail(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubSubscriptionPushConfigOidcTokenAudience(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubSubscriptionPushConfigPushEndpoint(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubSubscriptionPushConfigAttributes(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubSubscriptionAckDeadlineSeconds(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_pubsub_subscription_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func 
flattenPubsubSubscriptionMessageRetentionDuration(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubSubscriptionRetainAckedMessages(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubSubscriptionExpirationPolicy(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - transformed := make(map[string]interface{}) - transformed["ttl"] = - flattenPubsubSubscriptionExpirationPolicyTtl(original["ttl"], d, config) - return []interface{}{transformed} -} - -func flattenPubsubSubscriptionExpirationPolicyTtl(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubSubscriptionFilter(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubSubscriptionDeadLetterPolicy(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["dead_letter_topic"] = - flattenPubsubSubscriptionDeadLetterPolicyDeadLetterTopic(original["deadLetterTopic"], d, config) - transformed["max_delivery_attempts"] = - flattenPubsubSubscriptionDeadLetterPolicyMaxDeliveryAttempts(original["maxDeliveryAttempts"], d, config) - return []interface{}{transformed} -} - -func flattenPubsubSubscriptionDeadLetterPolicyDeadLetterTopic(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubSubscriptionDeadLetterPolicyMaxDeliveryAttempts(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, 
config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_pubsub_subscription_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenPubsubSubscriptionRetryPolicy(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["minimum_backoff"] = - flattenPubsubSubscriptionRetryPolicyMinimumBackoff(original["minimumBackoff"], d, config) - transformed["maximum_backoff"] = - flattenPubsubSubscriptionRetryPolicyMaximumBackoff(original["maximumBackoff"], d, config) - return []interface{}{transformed} -} - -func flattenPubsubSubscriptionRetryPolicyMinimumBackoff(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubSubscriptionRetryPolicyMaximumBackoff(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubSubscriptionEnableMessageOrdering(v interface{}, d *resource_pubsub_subscription_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandPubsubSubscriptionName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/subscriptions/{{name}}") -} - -func expandPubsubSubscriptionTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - project, err := getProject(d, config) - if err != nil { - return "", err - } - - topic := d.Get("topic").(string) - - re := resource_pubsub_subscription_regexp.MustCompile(`projects\/(.*)\/topics\/(.*)`) - match := re.FindStringSubmatch(topic) - if len(match) == 3 { - return topic, 
nil - } else { - - fullTopic := resource_pubsub_subscription_fmt.Sprintf("projects/%s/topics/%s", project, topic) - if err := d.Set("topic", fullTopic); err != nil { - return nil, resource_pubsub_subscription_fmt.Errorf("Error setting topic: %s", err) - } - return fullTopic, nil - } -} - -func expandPubsubSubscriptionLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandPubsubSubscriptionPushConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedOidcToken, err := expandPubsubSubscriptionPushConfigOidcToken(original["oidc_token"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_subscription_reflect.ValueOf(transformedOidcToken); val.IsValid() && !isEmptyValue(val) { - transformed["oidcToken"] = transformedOidcToken - } - - transformedPushEndpoint, err := expandPubsubSubscriptionPushConfigPushEndpoint(original["push_endpoint"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_subscription_reflect.ValueOf(transformedPushEndpoint); val.IsValid() && !isEmptyValue(val) { - transformed["pushEndpoint"] = transformedPushEndpoint - } - - transformedAttributes, err := expandPubsubSubscriptionPushConfigAttributes(original["attributes"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_subscription_reflect.ValueOf(transformedAttributes); val.IsValid() && !isEmptyValue(val) { - transformed["attributes"] = transformedAttributes - } - - return transformed, nil -} - -func expandPubsubSubscriptionPushConfigOidcToken(v interface{}, 
d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedServiceAccountEmail, err := expandPubsubSubscriptionPushConfigOidcTokenServiceAccountEmail(original["service_account_email"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_subscription_reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !isEmptyValue(val) { - transformed["serviceAccountEmail"] = transformedServiceAccountEmail - } - - transformedAudience, err := expandPubsubSubscriptionPushConfigOidcTokenAudience(original["audience"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_subscription_reflect.ValueOf(transformedAudience); val.IsValid() && !isEmptyValue(val) { - transformed["audience"] = transformedAudience - } - - return transformed, nil -} - -func expandPubsubSubscriptionPushConfigOidcTokenServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubSubscriptionPushConfigOidcTokenAudience(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubSubscriptionPushConfigPushEndpoint(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubSubscriptionPushConfigAttributes(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandPubsubSubscriptionAckDeadlineSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandPubsubSubscriptionMessageRetentionDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubSubscriptionRetainAckedMessages(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubSubscriptionExpirationPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 { - return nil, nil - } - - if l[0] == nil { - transformed := make(map[string]interface{}) - return transformed, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedTtl, err := expandPubsubSubscriptionExpirationPolicyTtl(original["ttl"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_subscription_reflect.ValueOf(transformedTtl); val.IsValid() && !isEmptyValue(val) { - transformed["ttl"] = transformedTtl - } - - return transformed, nil -} - -func expandPubsubSubscriptionExpirationPolicyTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubSubscriptionFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubSubscriptionDeadLetterPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedDeadLetterTopic, err := expandPubsubSubscriptionDeadLetterPolicyDeadLetterTopic(original["dead_letter_topic"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_subscription_reflect.ValueOf(transformedDeadLetterTopic); val.IsValid() && !isEmptyValue(val) { - transformed["deadLetterTopic"] = transformedDeadLetterTopic - } - - 
transformedMaxDeliveryAttempts, err := expandPubsubSubscriptionDeadLetterPolicyMaxDeliveryAttempts(original["max_delivery_attempts"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_subscription_reflect.ValueOf(transformedMaxDeliveryAttempts); val.IsValid() && !isEmptyValue(val) { - transformed["maxDeliveryAttempts"] = transformedMaxDeliveryAttempts - } - - return transformed, nil -} - -func expandPubsubSubscriptionDeadLetterPolicyDeadLetterTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubSubscriptionDeadLetterPolicyMaxDeliveryAttempts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubSubscriptionRetryPolicy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMinimumBackoff, err := expandPubsubSubscriptionRetryPolicyMinimumBackoff(original["minimum_backoff"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_subscription_reflect.ValueOf(transformedMinimumBackoff); val.IsValid() && !isEmptyValue(val) { - transformed["minimumBackoff"] = transformedMinimumBackoff - } - - transformedMaximumBackoff, err := expandPubsubSubscriptionRetryPolicyMaximumBackoff(original["maximum_backoff"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_subscription_reflect.ValueOf(transformedMaximumBackoff); val.IsValid() && !isEmptyValue(val) { - transformed["maximumBackoff"] = transformedMaximumBackoff - } - - return transformed, nil -} - -func expandPubsubSubscriptionRetryPolicyMinimumBackoff(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandPubsubSubscriptionRetryPolicyMaximumBackoff(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubSubscriptionEnableMessageOrdering(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourcePubsubSubscriptionEncoder(d *resource_pubsub_subscription_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - delete(obj, "name") - return obj, nil -} - -func resourcePubsubSubscriptionUpdateEncoder(d *resource_pubsub_subscription_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - newObj := make(map[string]interface{}) - newObj["subscription"] = obj - return newObj, nil -} - -func resourcePubsubTopic() *resource_pubsub_topic_schema.Resource { - return &resource_pubsub_topic_schema.Resource{ - Create: resourcePubsubTopicCreate, - Read: resourcePubsubTopicRead, - Update: resourcePubsubTopicUpdate, - Delete: resourcePubsubTopicDelete, - - Importer: &resource_pubsub_topic_schema.ResourceImporter{ - State: resourcePubsubTopicImport, - }, - - Timeouts: &resource_pubsub_topic_schema.ResourceTimeout{ - Create: resource_pubsub_topic_schema.DefaultTimeout(6 * resource_pubsub_topic_time.Minute), - Update: resource_pubsub_topic_schema.DefaultTimeout(6 * resource_pubsub_topic_time.Minute), - Delete: resource_pubsub_topic_schema.DefaultTimeout(4 * resource_pubsub_topic_time.Minute), - }, - - Schema: map[string]*resource_pubsub_topic_schema.Schema{ - "name": { - Type: resource_pubsub_topic_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Name of the topic.`, - }, - "kms_key_name": { - Type: resource_pubsub_topic_schema.TypeString, - Optional: true, - Description: `The resource name of the Cloud KMS CryptoKey to be used to protect access -to messages published on this topic. 
Your project's PubSub service account -('service-{{PROJECT_NUMBER}}@gcp-sa-pubsub.iam.gserviceaccount.com') must have -'roles/cloudkms.cryptoKeyEncrypterDecrypter' to use this feature. -The expected format is 'projects/*/locations/*/keyRings/*/cryptoKeys/*'`, - }, - "labels": { - Type: resource_pubsub_topic_schema.TypeMap, - Optional: true, - Description: `A set of key/value label pairs to assign to this Topic.`, - Elem: &resource_pubsub_topic_schema.Schema{Type: resource_pubsub_topic_schema.TypeString}, - }, - "message_retention_duration": { - Type: resource_pubsub_topic_schema.TypeString, - Optional: true, - Description: `Indicates the minimum duration to retain a message after it is published -to the topic. If this field is set, messages published to the topic in -the last messageRetentionDuration are always available to subscribers. -For instance, it allows any attached subscription to seek to a timestamp -that is up to messageRetentionDuration in the past. If this field is not -set, message retention is controlled by settings on individual subscriptions. -Cannot be more than 7 days or less than 10 minutes.`, - }, - "message_storage_policy": { - Type: resource_pubsub_topic_schema.TypeList, - Computed: true, - Optional: true, - Description: `Policy constraining the set of Google Cloud Platform regions where -messages published to the topic may be stored. If not present, then no -constraints are in effect.`, - MaxItems: 1, - Elem: &resource_pubsub_topic_schema.Resource{ - Schema: map[string]*resource_pubsub_topic_schema.Schema{ - "allowed_persistence_regions": { - Type: resource_pubsub_topic_schema.TypeList, - Required: true, - Description: `A list of IDs of GCP regions where messages that are published to -the topic may be persisted in storage. Messages published by -publishers running in non-allowed GCP regions (or running outside -of GCP altogether) will be routed for storage in one of the -allowed regions. 
An empty list means that no regions are allowed, -and is not a valid configuration.`, - Elem: &resource_pubsub_topic_schema.Schema{ - Type: resource_pubsub_topic_schema.TypeString, - }, - }, - }, - }, - }, - "schema_settings": { - Type: resource_pubsub_topic_schema.TypeList, - Computed: true, - Optional: true, - Description: `Settings for validating messages published against a schema.`, - MaxItems: 1, - Elem: &resource_pubsub_topic_schema.Resource{ - Schema: map[string]*resource_pubsub_topic_schema.Schema{ - "schema": { - Type: resource_pubsub_topic_schema.TypeString, - Required: true, - Description: `The name of the schema that messages published should be -validated against. Format is projects/{project}/schemas/{schema}. -The value of this field will be _deleted-schema_ -if the schema has been deleted.`, - }, - "encoding": { - Type: resource_pubsub_topic_schema.TypeString, - Optional: true, - ValidateFunc: resource_pubsub_topic_validation.StringInSlice([]string{"ENCODING_UNSPECIFIED", "JSON", "BINARY", ""}, false), - Description: `The encoding of messages validated against schema. 
Default value: "ENCODING_UNSPECIFIED" Possible values: ["ENCODING_UNSPECIFIED", "JSON", "BINARY"]`, - Default: "ENCODING_UNSPECIFIED", - }, - }, - }, - }, - "project": { - Type: resource_pubsub_topic_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourcePubsubTopicCreate(d *resource_pubsub_topic_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandPubsubTopicName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_pubsub_topic_reflect.ValueOf(nameProp)) && (ok || !resource_pubsub_topic_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - kmsKeyNameProp, err := expandPubsubTopicKmsKeyName(d.Get("kms_key_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kms_key_name"); !isEmptyValue(resource_pubsub_topic_reflect.ValueOf(kmsKeyNameProp)) && (ok || !resource_pubsub_topic_reflect.DeepEqual(v, kmsKeyNameProp)) { - obj["kmsKeyName"] = kmsKeyNameProp - } - labelsProp, err := expandPubsubTopicLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_pubsub_topic_reflect.ValueOf(labelsProp)) && (ok || !resource_pubsub_topic_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - messageStoragePolicyProp, err := expandPubsubTopicMessageStoragePolicy(d.Get("message_storage_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("message_storage_policy"); !isEmptyValue(resource_pubsub_topic_reflect.ValueOf(messageStoragePolicyProp)) && (ok || !resource_pubsub_topic_reflect.DeepEqual(v, messageStoragePolicyProp)) { - obj["messageStoragePolicy"] = messageStoragePolicyProp - } - 
schemaSettingsProp, err := expandPubsubTopicSchemaSettings(d.Get("schema_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("schema_settings"); !isEmptyValue(resource_pubsub_topic_reflect.ValueOf(schemaSettingsProp)) && (ok || !resource_pubsub_topic_reflect.DeepEqual(v, schemaSettingsProp)) { - obj["schemaSettings"] = schemaSettingsProp - } - messageRetentionDurationProp, err := expandPubsubTopicMessageRetentionDuration(d.Get("message_retention_duration"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("message_retention_duration"); !isEmptyValue(resource_pubsub_topic_reflect.ValueOf(messageRetentionDurationProp)) && (ok || !resource_pubsub_topic_reflect.DeepEqual(v, messageRetentionDurationProp)) { - obj["messageRetentionDuration"] = messageRetentionDurationProp - } - - obj, err = resourcePubsubTopicEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/topics/{{name}}") - if err != nil { - return err - } - - resource_pubsub_topic_log.Printf("[DEBUG] Creating new Topic: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_topic_fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_topic_schema.TimeoutCreate), pubsubTopicProjectNotReady) - if err != nil { - return resource_pubsub_topic_fmt.Errorf("Error creating Topic: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/topics/{{name}}") - if err != nil { - return resource_pubsub_topic_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = PollingWaitTime(resourcePubsubTopicPollRead(d, meta), PollCheckForExistence, "Creating Topic", 
d.Timeout(resource_pubsub_topic_schema.TimeoutCreate), 1) - if err != nil { - resource_pubsub_topic_log.Printf("[ERROR] Unable to confirm eventually consistent Topic %q finished updating: %q", d.Id(), err) - } - - resource_pubsub_topic_log.Printf("[DEBUG] Finished creating Topic %q: %#v", d.Id(), res) - - return resourcePubsubTopicRead(d, meta) -} - -func resourcePubsubTopicPollRead(d *resource_pubsub_topic_schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/topics/{{name}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, resource_pubsub_topic_fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, pubsubTopicProjectNotReady) - if err != nil { - return res, err - } - return res, nil - } -} - -func resourcePubsubTopicRead(d *resource_pubsub_topic_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/topics/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_topic_fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil, 
pubsubTopicProjectNotReady) - if err != nil { - return handleNotFoundError(err, d, resource_pubsub_topic_fmt.Sprintf("PubsubTopic %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_pubsub_topic_fmt.Errorf("Error reading Topic: %s", err) - } - - if err := d.Set("name", flattenPubsubTopicName(res["name"], d, config)); err != nil { - return resource_pubsub_topic_fmt.Errorf("Error reading Topic: %s", err) - } - if err := d.Set("kms_key_name", flattenPubsubTopicKmsKeyName(res["kmsKeyName"], d, config)); err != nil { - return resource_pubsub_topic_fmt.Errorf("Error reading Topic: %s", err) - } - if err := d.Set("labels", flattenPubsubTopicLabels(res["labels"], d, config)); err != nil { - return resource_pubsub_topic_fmt.Errorf("Error reading Topic: %s", err) - } - if err := d.Set("message_storage_policy", flattenPubsubTopicMessageStoragePolicy(res["messageStoragePolicy"], d, config)); err != nil { - return resource_pubsub_topic_fmt.Errorf("Error reading Topic: %s", err) - } - if err := d.Set("schema_settings", flattenPubsubTopicSchemaSettings(res["schemaSettings"], d, config)); err != nil { - return resource_pubsub_topic_fmt.Errorf("Error reading Topic: %s", err) - } - if err := d.Set("message_retention_duration", flattenPubsubTopicMessageRetentionDuration(res["messageRetentionDuration"], d, config)); err != nil { - return resource_pubsub_topic_fmt.Errorf("Error reading Topic: %s", err) - } - - return nil -} - -func resourcePubsubTopicUpdate(d *resource_pubsub_topic_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_topic_fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - kmsKeyNameProp, err := 
expandPubsubTopicKmsKeyName(d.Get("kms_key_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("kms_key_name"); !isEmptyValue(resource_pubsub_topic_reflect.ValueOf(v)) && (ok || !resource_pubsub_topic_reflect.DeepEqual(v, kmsKeyNameProp)) { - obj["kmsKeyName"] = kmsKeyNameProp - } - labelsProp, err := expandPubsubTopicLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_pubsub_topic_reflect.ValueOf(v)) && (ok || !resource_pubsub_topic_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - messageStoragePolicyProp, err := expandPubsubTopicMessageStoragePolicy(d.Get("message_storage_policy"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("message_storage_policy"); !isEmptyValue(resource_pubsub_topic_reflect.ValueOf(v)) && (ok || !resource_pubsub_topic_reflect.DeepEqual(v, messageStoragePolicyProp)) { - obj["messageStoragePolicy"] = messageStoragePolicyProp - } - schemaSettingsProp, err := expandPubsubTopicSchemaSettings(d.Get("schema_settings"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("schema_settings"); !isEmptyValue(resource_pubsub_topic_reflect.ValueOf(v)) && (ok || !resource_pubsub_topic_reflect.DeepEqual(v, schemaSettingsProp)) { - obj["schemaSettings"] = schemaSettingsProp - } - messageRetentionDurationProp, err := expandPubsubTopicMessageRetentionDuration(d.Get("message_retention_duration"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("message_retention_duration"); !isEmptyValue(resource_pubsub_topic_reflect.ValueOf(v)) && (ok || !resource_pubsub_topic_reflect.DeepEqual(v, messageRetentionDurationProp)) { - obj["messageRetentionDuration"] = messageRetentionDurationProp - } - - obj, err = resourcePubsubTopicUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{PubsubBasePath}}projects/{{project}}/topics/{{name}}") - if err != nil { - return err - } - - resource_pubsub_topic_log.Printf("[DEBUG] Updating Topic %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("kms_key_name") { - updateMask = append(updateMask, "kmsKeyName") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("message_storage_policy") { - updateMask = append(updateMask, "messageStoragePolicy") - } - - if d.HasChange("schema_settings") { - updateMask = append(updateMask, "schemaSettings") - } - - if d.HasChange("message_retention_duration") { - updateMask = append(updateMask, "messageRetentionDuration") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_pubsub_topic_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_topic_schema.TimeoutUpdate), pubsubTopicProjectNotReady) - - if err != nil { - return resource_pubsub_topic_fmt.Errorf("Error updating Topic %q: %s", d.Id(), err) - } else { - resource_pubsub_topic_log.Printf("[DEBUG] Finished updating Topic %q: %#v", d.Id(), res) - } - - return resourcePubsubTopicRead(d, meta) -} - -func resourcePubsubTopicDelete(d *resource_pubsub_topic_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_pubsub_topic_fmt.Errorf("Error fetching project for Topic: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{PubsubBasePath}}projects/{{project}}/topics/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - 
resource_pubsub_topic_log.Printf("[DEBUG] Deleting Topic %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_pubsub_topic_schema.TimeoutDelete), pubsubTopicProjectNotReady) - if err != nil { - return handleNotFoundError(err, d, "Topic") - } - - resource_pubsub_topic_log.Printf("[DEBUG] Finished deleting Topic %q: %#v", d.Id(), res) - return nil -} - -func resourcePubsubTopicImport(d *resource_pubsub_topic_schema.ResourceData, meta interface{}) ([]*resource_pubsub_topic_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/topics/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/topics/{{name}}") - if err != nil { - return nil, resource_pubsub_topic_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_pubsub_topic_schema.ResourceData{d}, nil -} - -func flattenPubsubTopicName(v interface{}, d *resource_pubsub_topic_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenPubsubTopicKmsKeyName(v interface{}, d *resource_pubsub_topic_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubTopicLabels(v interface{}, d *resource_pubsub_topic_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubTopicMessageStoragePolicy(v interface{}, d *resource_pubsub_topic_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["allowed_persistence_regions"] = - 
flattenPubsubTopicMessageStoragePolicyAllowedPersistenceRegions(original["allowedPersistenceRegions"], d, config) - return []interface{}{transformed} -} - -func flattenPubsubTopicMessageStoragePolicyAllowedPersistenceRegions(v interface{}, d *resource_pubsub_topic_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubTopicSchemaSettings(v interface{}, d *resource_pubsub_topic_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["schema"] = - flattenPubsubTopicSchemaSettingsSchema(original["schema"], d, config) - transformed["encoding"] = - flattenPubsubTopicSchemaSettingsEncoding(original["encoding"], d, config) - return []interface{}{transformed} -} - -func flattenPubsubTopicSchemaSettingsSchema(v interface{}, d *resource_pubsub_topic_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubTopicSchemaSettingsEncoding(v interface{}, d *resource_pubsub_topic_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenPubsubTopicMessageRetentionDuration(v interface{}, d *resource_pubsub_topic_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandPubsubTopicName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return GetResourceNameFromSelfLink(v.(string)), nil -} - -func expandPubsubTopicKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubTopicLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandPubsubTopicMessageStoragePolicy(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAllowedPersistenceRegions, err := expandPubsubTopicMessageStoragePolicyAllowedPersistenceRegions(original["allowed_persistence_regions"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_topic_reflect.ValueOf(transformedAllowedPersistenceRegions); val.IsValid() && !isEmptyValue(val) { - transformed["allowedPersistenceRegions"] = transformedAllowedPersistenceRegions - } - - return transformed, nil -} - -func expandPubsubTopicMessageStoragePolicyAllowedPersistenceRegions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubTopicSchemaSettings(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedSchema, err := expandPubsubTopicSchemaSettingsSchema(original["schema"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_topic_reflect.ValueOf(transformedSchema); val.IsValid() && !isEmptyValue(val) { - transformed["schema"] = transformedSchema - } - - transformedEncoding, err := expandPubsubTopicSchemaSettingsEncoding(original["encoding"], d, config) - if err != nil { - return nil, err - } else if val := resource_pubsub_topic_reflect.ValueOf(transformedEncoding); val.IsValid() && !isEmptyValue(val) { - transformed["encoding"] = transformedEncoding - } - - return transformed, nil -} - -func expandPubsubTopicSchemaSettingsSchema(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandPubsubTopicSchemaSettingsEncoding(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandPubsubTopicMessageRetentionDuration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourcePubsubTopicEncoder(d *resource_pubsub_topic_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - delete(obj, "name") - return obj, nil -} - -func resourcePubsubTopicUpdateEncoder(d *resource_pubsub_topic_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - newObj := make(map[string]interface{}) - newObj["topic"] = obj - return newObj, nil -} - -func isRedisVersionDecreasing(_ resource_redis_instance_context.Context, old, new, _ interface{}) bool { - return isRedisVersionDecreasingFunc(old, new) -} - -func isRedisVersionDecreasingFunc(old, new interface{}) bool { - if old == nil || new == nil { - return false - } - re := resource_redis_instance_regexp.MustCompile(`REDIS_(\d+)_(\d+)`) - oldParsed := re.FindSubmatch([]byte(old.(string))) - newParsed := re.FindSubmatch([]byte(new.(string))) - - if oldParsed == nil || newParsed == nil { - return false - } - - oldVersion, err := resource_redis_instance_strconv.ParseFloat(resource_redis_instance_fmt.Sprintf("%s.%s", oldParsed[1], oldParsed[2]), 32) - if err != nil { - return false - } - newVersion, err := resource_redis_instance_strconv.ParseFloat(resource_redis_instance_fmt.Sprintf("%s.%s", newParsed[1], newParsed[2]), 32) - if err != nil { - return false - } - - return newVersion < oldVersion -} - -func resourceRedisInstance() *resource_redis_instance_schema.Resource { - return &resource_redis_instance_schema.Resource{ - Create: resourceRedisInstanceCreate, - Read: resourceRedisInstanceRead, - Update: resourceRedisInstanceUpdate, - Delete: resourceRedisInstanceDelete, - - Importer: 
&resource_redis_instance_schema.ResourceImporter{ - State: resourceRedisInstanceImport, - }, - - Timeouts: &resource_redis_instance_schema.ResourceTimeout{ - Create: resource_redis_instance_schema.DefaultTimeout(20 * resource_redis_instance_time.Minute), - Update: resource_redis_instance_schema.DefaultTimeout(20 * resource_redis_instance_time.Minute), - Delete: resource_redis_instance_schema.DefaultTimeout(20 * resource_redis_instance_time.Minute), - }, - - CustomizeDiff: resource_redis_instance_customdiff.All( - resource_redis_instance_customdiff.ForceNewIfChange("redis_version", isRedisVersionDecreasing)), - - Schema: map[string]*resource_redis_instance_schema.Schema{ - "memory_size_gb": { - Type: resource_redis_instance_schema.TypeInt, - Required: true, - Description: `Redis memory size in GiB.`, - }, - "name": { - Type: resource_redis_instance_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[a-z][a-z0-9-]{0,39}[a-z0-9]$`), - Description: `The ID of the instance or a fully qualified identifier for the instance.`, - }, - "alternative_location_id": { - Type: resource_redis_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Only applicable to STANDARD_HA tier which protects the instance -against zonal failures by provisioning it across two zones. -If provided, it must be a different zone from the one provided in -[locationId].`, - }, - "auth_enabled": { - Type: resource_redis_instance_schema.TypeBool, - Optional: true, - Description: `Optional. Indicates whether OSS Redis AUTH is enabled for the -instance. If set to "true" AUTH is enabled on the instance. 
-Default value is "false" meaning AUTH is disabled.`, - Default: false, - }, - "authorized_network": { - Type: resource_redis_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The full name of the Google Compute Engine network to which the -instance is connected. If left unspecified, the default network -will be used.`, - }, - "connect_mode": { - Type: resource_redis_instance_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_redis_instance_validation.StringInSlice([]string{"DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS", ""}, false), - Description: `The connection mode of the Redis instance. Default value: "DIRECT_PEERING" Possible values: ["DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS"]`, - Default: "DIRECT_PEERING", - }, - "display_name": { - Type: resource_redis_instance_schema.TypeString, - Optional: true, - Description: `An arbitrary and optional user-provided name for the instance.`, - }, - "labels": { - Type: resource_redis_instance_schema.TypeMap, - Optional: true, - Description: `Resource labels to represent user provided metadata.`, - Elem: &resource_redis_instance_schema.Schema{Type: resource_redis_instance_schema.TypeString}, - }, - "location_id": { - Type: resource_redis_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The zone where the instance will be provisioned. If not provided, -the service will choose a zone for the instance. For STANDARD_HA tier, -instances will be created across two zones for protection against -zonal failures. If [alternativeLocationId] is also provided, it must -be different from [locationId].`, - }, - "redis_configs": { - Type: resource_redis_instance_schema.TypeMap, - Optional: true, - Description: `Redis configuration parameters, according to http://redis.io/topics/config. 
-Please check Memorystore documentation for the list of supported parameters: -https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs`, - Elem: &resource_redis_instance_schema.Schema{Type: resource_redis_instance_schema.TypeString}, - }, - "redis_version": { - Type: resource_redis_instance_schema.TypeString, - Computed: true, - Optional: true, - Description: `The version of Redis software. If not provided, latest supported -version will be used. Please check the API documentation linked -at the top for the latest valid values.`, - }, - "region": { - Type: resource_redis_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The name of the Redis region of the instance.`, - }, - "reserved_ip_range": { - Type: resource_redis_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The CIDR range of internal addresses that are reserved for this -instance. If not provided, the service will choose an unused /29 -block, for example, 10.0.0.0/29 or 192.168.0.0/29. Ranges must be -unique and non-overlapping with existing subnets in an authorized -network.`, - }, - "tier": { - Type: resource_redis_instance_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_redis_instance_validation.StringInSlice([]string{"BASIC", "STANDARD_HA", ""}, false), - Description: `The service tier of the instance. 
Must be one of these values: - -- BASIC: standalone instance -- STANDARD_HA: highly available primary/replica instances Default value: "BASIC" Possible values: ["BASIC", "STANDARD_HA"]`, - Default: "BASIC", - }, - "transit_encryption_mode": { - Type: resource_redis_instance_schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: resource_redis_instance_validation.StringInSlice([]string{"SERVER_AUTHENTICATION", "DISABLED", ""}, false), - Description: `The TLS mode of the Redis instance, If not provided, TLS is disabled for the instance. - -- SERVER_AUTHENTICATION: Client to Server traffic encryption enabled with server authentcation Default value: "DISABLED" Possible values: ["SERVER_AUTHENTICATION", "DISABLED"]`, - Default: "DISABLED", - }, - "create_time": { - Type: resource_redis_instance_schema.TypeString, - Computed: true, - Description: `The time the instance was created in RFC3339 UTC "Zulu" format, -accurate to nanoseconds.`, - }, - "current_location_id": { - Type: resource_redis_instance_schema.TypeString, - Computed: true, - Description: `The current zone where the Redis endpoint is placed. -For Basic Tier instances, this will always be the same as the -[locationId] provided by the user at creation time. For Standard Tier -instances, this can be either [locationId] or [alternativeLocationId] -and can change after a failover event.`, - }, - "host": { - Type: resource_redis_instance_schema.TypeString, - Computed: true, - Description: `Hostname or IP address of the exposed Redis endpoint used by clients -to connect to the service.`, - }, - "persistence_iam_identity": { - Type: resource_redis_instance_schema.TypeString, - Computed: true, - Description: `Output only. Cloud IAM identity used by import / export operations -to transfer data to/from Cloud Storage. Format is "serviceAccount:". 
-The value may change over time for a given instance so should be -checked before each import/export operation.`, - }, - "port": { - Type: resource_redis_instance_schema.TypeInt, - Computed: true, - Description: `The port number of the exposed Redis endpoint.`, - }, - "server_ca_certs": { - Type: resource_redis_instance_schema.TypeList, - Computed: true, - Description: `List of server CA certificates for the instance.`, - Elem: &resource_redis_instance_schema.Resource{ - Schema: map[string]*resource_redis_instance_schema.Schema{ - "cert": { - Type: resource_redis_instance_schema.TypeString, - Computed: true, - Description: `Serial number, as extracted from the certificate.`, - }, - "create_time": { - Type: resource_redis_instance_schema.TypeString, - Computed: true, - Description: `The time when the certificate was created.`, - }, - "expire_time": { - Type: resource_redis_instance_schema.TypeString, - Computed: true, - Description: `The time when the certificate expires.`, - }, - "serial_number": { - Type: resource_redis_instance_schema.TypeString, - Computed: true, - Description: `Serial number, as extracted from the certificate.`, - }, - "sha1_fingerprint": { - Type: resource_redis_instance_schema.TypeString, - Computed: true, - Description: `Sha1 Fingerprint of the certificate.`, - }, - }, - }, - }, - "auth_string": { - Type: resource_redis_instance_schema.TypeString, - Description: "AUTH String set on the instance. 
This field will only be populated if auth_enabled is true.", - Computed: true, - Sensitive: true, - }, - "project": { - Type: resource_redis_instance_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceRedisInstanceCreate(d *resource_redis_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - alternativeLocationIdProp, err := expandRedisInstanceAlternativeLocationId(d.Get("alternative_location_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("alternative_location_id"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(alternativeLocationIdProp)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, alternativeLocationIdProp)) { - obj["alternativeLocationId"] = alternativeLocationIdProp - } - authEnabledProp, err := expandRedisInstanceAuthEnabled(d.Get("auth_enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("auth_enabled"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(authEnabledProp)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, authEnabledProp)) { - obj["authEnabled"] = authEnabledProp - } - authorizedNetworkProp, err := expandRedisInstanceAuthorizedNetwork(d.Get("authorized_network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("authorized_network"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(authorizedNetworkProp)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, authorizedNetworkProp)) { - obj["authorizedNetwork"] = authorizedNetworkProp - } - connectModeProp, err := expandRedisInstanceConnectMode(d.Get("connect_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("connect_mode"); 
!isEmptyValue(resource_redis_instance_reflect.ValueOf(connectModeProp)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, connectModeProp)) { - obj["connectMode"] = connectModeProp - } - displayNameProp, err := expandRedisInstanceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(displayNameProp)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - labelsProp, err := expandRedisInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(labelsProp)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - redisConfigsProp, err := expandRedisInstanceRedisConfigs(d.Get("redis_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("redis_configs"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(redisConfigsProp)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, redisConfigsProp)) { - obj["redisConfigs"] = redisConfigsProp - } - locationIdProp, err := expandRedisInstanceLocationId(d.Get("location_id"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("location_id"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(locationIdProp)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, locationIdProp)) { - obj["locationId"] = locationIdProp - } - nameProp, err := expandRedisInstanceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(nameProp)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - memorySizeGbProp, err := expandRedisInstanceMemorySizeGb(d.Get("memory_size_gb"), 
d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("memory_size_gb"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(memorySizeGbProp)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, memorySizeGbProp)) { - obj["memorySizeGb"] = memorySizeGbProp - } - redisVersionProp, err := expandRedisInstanceRedisVersion(d.Get("redis_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("redis_version"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(redisVersionProp)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, redisVersionProp)) { - obj["redisVersion"] = redisVersionProp - } - reservedIpRangeProp, err := expandRedisInstanceReservedIpRange(d.Get("reserved_ip_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("reserved_ip_range"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(reservedIpRangeProp)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, reservedIpRangeProp)) { - obj["reservedIpRange"] = reservedIpRangeProp - } - tierProp, err := expandRedisInstanceTier(d.Get("tier"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tier"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(tierProp)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, tierProp)) { - obj["tier"] = tierProp - } - transitEncryptionModeProp, err := expandRedisInstanceTransitEncryptionMode(d.Get("transit_encryption_mode"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("transit_encryption_mode"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(transitEncryptionModeProp)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, transitEncryptionModeProp)) { - obj["transitEncryptionMode"] = transitEncryptionModeProp - } - - obj, err = resourceRedisInstanceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances?instanceId={{name}}") - if err != nil { - return err - } - - resource_redis_instance_log.Printf("[DEBUG] Creating new Instance: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_redis_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_redis_instance_schema.TimeoutCreate)) - if err != nil { - return resource_redis_instance_fmt.Errorf("Error creating Instance: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return resource_redis_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = redisOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Instance", userAgent, - d.Timeout(resource_redis_instance_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_redis_instance_fmt.Errorf("Error waiting to create Instance: %s", err) - } - - opRes, err = resourceRedisInstanceDecoder(d, meta, opRes) - if err != nil { - return resource_redis_instance_fmt.Errorf("Error decoding response from operation: %s", err) - } - if opRes == nil { - return resource_redis_instance_fmt.Errorf("Error decoding response from operation, could not find object") - } - - if err := d.Set("name", flattenRedisInstanceName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return resource_redis_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_redis_instance_log.Printf("[DEBUG] Finished 
creating Instance %q: %#v", d.Id(), res) - - return resourceRedisInstanceRead(d, meta) -} - -func resourceRedisInstanceRead(d *resource_redis_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_redis_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_redis_instance_fmt.Sprintf("RedisInstance %q", d.Id())) - } - - res, err = resourceRedisInstanceDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_redis_instance_log.Printf("[DEBUG] Removing RedisInstance because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - if err := d.Set("region", region); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - - if err := d.Set("alternative_location_id", flattenRedisInstanceAlternativeLocationId(res["alternativeLocationId"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("auth_enabled", flattenRedisInstanceAuthEnabled(res["authEnabled"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if 
err := d.Set("authorized_network", flattenRedisInstanceAuthorizedNetwork(res["authorizedNetwork"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("connect_mode", flattenRedisInstanceConnectMode(res["connectMode"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("create_time", flattenRedisInstanceCreateTime(res["createTime"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("current_location_id", flattenRedisInstanceCurrentLocationId(res["currentLocationId"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("display_name", flattenRedisInstanceDisplayName(res["displayName"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("host", flattenRedisInstanceHost(res["host"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("labels", flattenRedisInstanceLabels(res["labels"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("redis_configs", flattenRedisInstanceRedisConfigs(res["redisConfigs"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("location_id", flattenRedisInstanceLocationId(res["locationId"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("name", flattenRedisInstanceName(res["name"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("memory_size_gb", 
flattenRedisInstanceMemorySizeGb(res["memorySizeGb"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("port", flattenRedisInstancePort(res["port"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("persistence_iam_identity", flattenRedisInstancePersistenceIamIdentity(res["persistenceIamIdentity"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("redis_version", flattenRedisInstanceRedisVersion(res["redisVersion"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("reserved_ip_range", flattenRedisInstanceReservedIpRange(res["reservedIpRange"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("tier", flattenRedisInstanceTier(res["tier"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("transit_encryption_mode", flattenRedisInstanceTransitEncryptionMode(res["transitEncryptionMode"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("server_ca_certs", flattenRedisInstanceServerCaCerts(res["serverCaCerts"], d, config)); err != nil { - return resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - - return nil -} - -func resourceRedisInstanceUpdate(d *resource_redis_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_redis_instance_fmt.Errorf("Error fetching project for Instance: %s", 
err) - } - billingProject = project - - obj := make(map[string]interface{}) - authEnabledProp, err := expandRedisInstanceAuthEnabled(d.Get("auth_enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("auth_enabled"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(v)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, authEnabledProp)) { - obj["authEnabled"] = authEnabledProp - } - displayNameProp, err := expandRedisInstanceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(v)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - labelsProp, err := expandRedisInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(v)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - redisConfigsProp, err := expandRedisInstanceRedisConfigs(d.Get("redis_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("redis_configs"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(v)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, redisConfigsProp)) { - obj["redisConfigs"] = redisConfigsProp - } - memorySizeGbProp, err := expandRedisInstanceMemorySizeGb(d.Get("memory_size_gb"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("memory_size_gb"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(v)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, memorySizeGbProp)) { - obj["memorySizeGb"] = memorySizeGbProp - } - - obj, err = resourceRedisInstanceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, 
"{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return err - } - - resource_redis_instance_log.Printf("[DEBUG] Updating Instance %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("auth_enabled") { - updateMask = append(updateMask, "authEnabled") - } - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("redis_configs") { - updateMask = append(updateMask, "redisConfigs") - } - - if d.HasChange("memory_size_gb") { - updateMask = append(updateMask, "memorySizeGb") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_redis_instance_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - if len(updateMask) > 0 { - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_redis_instance_schema.TimeoutUpdate)) - - if err != nil { - return resource_redis_instance_fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) - } else { - resource_redis_instance_log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) - } - - err = redisOperationWaitTime( - config, res, project, "Updating Instance", userAgent, - d.Timeout(resource_redis_instance_schema.TimeoutUpdate)) - - if err != nil { - return err - } - } - d.Partial(true) - - if d.HasChange("redis_version") { - obj := make(map[string]interface{}) - - redisVersionProp, err := expandRedisInstanceRedisVersion(d.Get("redis_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("redis_version"); !isEmptyValue(resource_redis_instance_reflect.ValueOf(v)) && (ok || !resource_redis_instance_reflect.DeepEqual(v, redisVersionProp)) { - obj["redisVersion"] = redisVersionProp - } - - url, err := replaceVars(d, 
config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}:upgrade") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_redis_instance_schema.TimeoutUpdate)) - if err != nil { - return resource_redis_instance_fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) - } else { - resource_redis_instance_log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) - } - - err = redisOperationWaitTime( - config, res, project, "Updating Instance", userAgent, - d.Timeout(resource_redis_instance_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceRedisInstanceRead(d, meta) -} - -func resourceRedisInstanceDelete(d *resource_redis_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_redis_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_redis_instance_log.Printf("[DEBUG] Deleting Instance %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_redis_instance_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Instance") - } - - err = redisOperationWaitTime( - config, res, project, "Deleting Instance", userAgent, - 
d.Timeout(resource_redis_instance_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_redis_instance_log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) - return nil -} - -func resourceRedisInstanceImport(d *resource_redis_instance_schema.ResourceData, meta interface{}) ([]*resource_redis_instance_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/instances/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") - if err != nil { - return nil, resource_redis_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_redis_instance_schema.ResourceData{d}, nil -} - -func flattenRedisInstanceAlternativeLocationId(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceAuthEnabled(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceAuthorizedNetwork(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceConnectMode(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceCreateTime(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceCurrentLocationId(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceDisplayName(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceHost(v 
interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceLabels(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceRedisConfigs(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceLocationId(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceName(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenRedisInstanceMemorySizeGb(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_redis_instance_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenRedisInstancePort(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_redis_instance_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenRedisInstancePersistenceIamIdentity(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceRedisVersion(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceReservedIpRange(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenRedisInstanceTier(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceTransitEncryptionMode(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceServerCaCerts(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "serial_number": flattenRedisInstanceServerCaCertsSerialNumber(original["serialNumber"], d, config), - "cert": flattenRedisInstanceServerCaCertsCert(original["cert"], d, config), - "create_time": flattenRedisInstanceServerCaCertsCreateTime(original["createTime"], d, config), - "expire_time": flattenRedisInstanceServerCaCertsExpireTime(original["expireTime"], d, config), - "sha1_fingerprint": flattenRedisInstanceServerCaCertsSha1Fingerprint(original["sha1Fingerprint"], d, config), - }) - } - return transformed -} - -func flattenRedisInstanceServerCaCertsSerialNumber(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceServerCaCertsCert(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceServerCaCertsCreateTime(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceServerCaCertsExpireTime(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenRedisInstanceServerCaCertsSha1Fingerprint(v interface{}, d *resource_redis_instance_schema.ResourceData, config *Config) interface{} 
{ - return v -} - -func expandRedisInstanceAlternativeLocationId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceAuthEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceAuthorizedNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - fv, err := ParseNetworkFieldValue(v.(string), d, config) - if err != nil { - return nil, err - } - return fv.RelativeLink(), nil -} - -func expandRedisInstanceConnectMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandRedisInstanceRedisConfigs(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandRedisInstanceLocationId(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/locations/{{region}}/instances/{{name}}") -} - -func expandRedisInstanceMemorySizeGb(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceRedisVersion(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceReservedIpRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceTier(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandRedisInstanceTransitEncryptionMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceRedisInstanceEncoder(d *resource_redis_instance_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - region, err := getRegionFromSchema("region", "location_id", d, config) - if err != nil { - return nil, err - } - if err := d.Set("region", region); err != nil { - return nil, resource_redis_instance_fmt.Errorf("Error setting region: %s", err) - } - return obj, nil -} - -func resourceRedisInstanceDecoder(d *resource_redis_instance_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - if v, ok := res["authEnabled"].(bool); ok { - if v { - url, err := replaceVars(d, config, "{{RedisBasePath}}projects/{{project}}/locations/{{region}}/instances/{{name}}/authString") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, resource_redis_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return nil, resource_redis_instance_fmt.Errorf("Error reading AuthString: %s", err) - } - - if err := 
d.Set("auth_string", res["authString"]); err != nil { - return nil, resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - } - } else { - if err := d.Set("auth_string", ""); err != nil { - return nil, resource_redis_instance_fmt.Errorf("Error reading Instance: %s", err) - } - } - - return res, nil -} - -func resourceResourceManagerLien() *resource_resource_manager_lien_schema.Resource { - return &resource_resource_manager_lien_schema.Resource{ - Create: resourceResourceManagerLienCreate, - Read: resourceResourceManagerLienRead, - Delete: resourceResourceManagerLienDelete, - - Importer: &resource_resource_manager_lien_schema.ResourceImporter{ - State: resourceResourceManagerLienImport, - }, - - Timeouts: &resource_resource_manager_lien_schema.ResourceTimeout{ - Create: resource_resource_manager_lien_schema.DefaultTimeout(4 * resource_resource_manager_lien_time.Minute), - Delete: resource_resource_manager_lien_schema.DefaultTimeout(4 * resource_resource_manager_lien_time.Minute), - }, - - Schema: map[string]*resource_resource_manager_lien_schema.Schema{ - "origin": { - Type: resource_resource_manager_lien_schema.TypeString, - Required: true, - ForceNew: true, - Description: `A stable, user-visible/meaningful string identifying the origin -of the Lien, intended to be inspected programmatically. Maximum length of -200 characters.`, - }, - "parent": { - Type: resource_resource_manager_lien_schema.TypeString, - Required: true, - ForceNew: true, - Description: `A reference to the resource this Lien is attached to. -The server will validate the parent against those for which Liens are supported. -Since a variety of objects can have Liens against them, you must provide the type -prefix (e.g. "projects/my-project-name").`, - }, - "reason": { - Type: resource_resource_manager_lien_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Concise user-visible strings indicating why an action cannot be performed -on a resource. 
Maximum length of 200 characters.`, - }, - "restrictions": { - Type: resource_resource_manager_lien_schema.TypeList, - Required: true, - ForceNew: true, - Description: `The types of operations which should be blocked as a result of this Lien. -Each value should correspond to an IAM permission. The server will validate -the permissions against those for which Liens are supported. An empty -list is meaningless and will be rejected. -e.g. ['resourcemanager.projects.delete']`, - Elem: &resource_resource_manager_lien_schema.Schema{ - Type: resource_resource_manager_lien_schema.TypeString, - }, - }, - "create_time": { - Type: resource_resource_manager_lien_schema.TypeString, - Computed: true, - Description: `Time of creation`, - }, - "name": { - Type: resource_resource_manager_lien_schema.TypeString, - Computed: true, - Description: `A system-generated unique identifier for this Lien.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceResourceManagerLienCreate(d *resource_resource_manager_lien_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - reasonProp, err := expandNestedResourceManagerLienReason(d.Get("reason"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("reason"); !isEmptyValue(resource_resource_manager_lien_reflect.ValueOf(reasonProp)) && (ok || !resource_resource_manager_lien_reflect.DeepEqual(v, reasonProp)) { - obj["reason"] = reasonProp - } - originProp, err := expandNestedResourceManagerLienOrigin(d.Get("origin"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("origin"); !isEmptyValue(resource_resource_manager_lien_reflect.ValueOf(originProp)) && (ok || !resource_resource_manager_lien_reflect.DeepEqual(v, originProp)) { - obj["origin"] = originProp - } - parentProp, err := expandNestedResourceManagerLienParent(d.Get("parent"), 
d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(resource_resource_manager_lien_reflect.ValueOf(parentProp)) && (ok || !resource_resource_manager_lien_reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - restrictionsProp, err := expandNestedResourceManagerLienRestrictions(d.Get("restrictions"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("restrictions"); !isEmptyValue(resource_resource_manager_lien_reflect.ValueOf(restrictionsProp)) && (ok || !resource_resource_manager_lien_reflect.DeepEqual(v, restrictionsProp)) { - obj["restrictions"] = restrictionsProp - } - - url, err := replaceVars(d, config, "{{ResourceManagerBasePath}}liens") - if err != nil { - return err - } - - resource_resource_manager_lien_log.Printf("[DEBUG] Creating new Lien: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_resource_manager_lien_schema.TimeoutCreate)) - if err != nil { - return resource_resource_manager_lien_fmt.Errorf("Error creating Lien: %s", err) - } - if err := d.Set("name", flattenNestedResourceManagerLienName(res["name"], d, config)); err != nil { - return resource_resource_manager_lien_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_resource_manager_lien_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - d.SetId(flattenNestedResourceManagerLienName(res["name"], d, config).(string)) - if err := d.Set("name", flattenNestedResourceManagerLienName(res["name"], d, config)); err != nil { - return resource_resource_manager_lien_fmt.Errorf("Error setting name: %s", err) - } - - resource_resource_manager_lien_log.Printf("[DEBUG] Finished creating Lien %q: %#v", d.Id(), res) - - 
return resourceResourceManagerLienRead(d, meta) -} - -func resourceResourceManagerLienRead(d *resource_resource_manager_lien_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{ResourceManagerBasePath}}liens?parent={{parent}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_resource_manager_lien_fmt.Sprintf("ResourceManagerLien %q", d.Id())) - } - - res, err = flattenNestedResourceManagerLien(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_resource_manager_lien_log.Printf("[DEBUG] Removing ResourceManagerLien because it couldn't be matched.") - d.SetId("") - return nil - } - - res, err = resourceResourceManagerLienDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_resource_manager_lien_log.Printf("[DEBUG] Removing ResourceManagerLien because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("name", flattenNestedResourceManagerLienName(res["name"], d, config)); err != nil { - return resource_resource_manager_lien_fmt.Errorf("Error reading Lien: %s", err) - } - if err := d.Set("reason", flattenNestedResourceManagerLienReason(res["reason"], d, config)); err != nil { - return resource_resource_manager_lien_fmt.Errorf("Error reading Lien: %s", err) - } - if err := d.Set("origin", flattenNestedResourceManagerLienOrigin(res["origin"], d, config)); err != nil { - return resource_resource_manager_lien_fmt.Errorf("Error reading Lien: %s", err) - } - if err := d.Set("create_time", flattenNestedResourceManagerLienCreateTime(res["createTime"], d, config)); err != nil { - return 
resource_resource_manager_lien_fmt.Errorf("Error reading Lien: %s", err) - } - if err := d.Set("parent", flattenNestedResourceManagerLienParent(res["parent"], d, config)); err != nil { - return resource_resource_manager_lien_fmt.Errorf("Error reading Lien: %s", err) - } - if err := d.Set("restrictions", flattenNestedResourceManagerLienRestrictions(res["restrictions"], d, config)); err != nil { - return resource_resource_manager_lien_fmt.Errorf("Error reading Lien: %s", err) - } - - return nil -} - -func resourceResourceManagerLienDelete(d *resource_resource_manager_lien_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{ResourceManagerBasePath}}liens?parent={{parent}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - resource_resource_manager_lien_log.Printf("[DEBUG] replacing URL %q with a custom delete URL", url) - url, err = replaceVars(d, config, "{{ResourceManagerBasePath}}liens/{{name}}") - if err != nil { - return err - } - resource_resource_manager_lien_log.Printf("[DEBUG] Deleting Lien %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_resource_manager_lien_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Lien") - } - - resource_resource_manager_lien_log.Printf("[DEBUG] Finished deleting Lien %q: %#v", d.Id(), res) - return nil -} - -func resourceResourceManagerLienImport(d *resource_resource_manager_lien_schema.ResourceData, meta interface{}) ([]*resource_resource_manager_lien_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err 
:= replaceVars(d, config, "{{name}}") - if err != nil { - return nil, resource_resource_manager_lien_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - parent, err := replaceVars(d, config, "projects/{{parent}}") - if err != nil { - return nil, err - } - if err := d.Set("parent", parent); err != nil { - return nil, resource_resource_manager_lien_fmt.Errorf("Error setting parent: %s", err) - } - - return []*resource_resource_manager_lien_schema.ResourceData{d}, nil -} - -func flattenNestedResourceManagerLienName(v interface{}, d *resource_resource_manager_lien_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenNestedResourceManagerLienReason(v interface{}, d *resource_resource_manager_lien_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedResourceManagerLienOrigin(v interface{}, d *resource_resource_manager_lien_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedResourceManagerLienCreateTime(v interface{}, d *resource_resource_manager_lien_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedResourceManagerLienParent(v interface{}, d *resource_resource_manager_lien_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedResourceManagerLienRestrictions(v interface{}, d *resource_resource_manager_lien_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedResourceManagerLienReason(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedResourceManagerLienOrigin(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedResourceManagerLienParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func 
expandNestedResourceManagerLienRestrictions(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func flattenNestedResourceManagerLien(d *resource_resource_manager_lien_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["liens"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_resource_manager_lien_fmt.Errorf("expected list or map for value liens. Actual value: %v", v) - } - - _, item, err := resourceResourceManagerLienFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceResourceManagerLienFindNestedObjectInList(d *resource_resource_manager_lien_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName := d.Get("name") - expectedFlattenedName := flattenNestedResourceManagerLienName(expectedName, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - item, err := resourceResourceManagerLienDecoder(d, meta, item) - if err != nil { - return -1, nil, err - } - - itemName := flattenNestedResourceManagerLienName(item["name"], d, meta.(*Config)) - - if !(isEmptyValue(resource_resource_manager_lien_reflect.ValueOf(itemName)) && isEmptyValue(resource_resource_manager_lien_reflect.ValueOf(expectedFlattenedName))) && !resource_resource_manager_lien_reflect.DeepEqual(itemName, expectedFlattenedName) { - resource_resource_manager_lien_log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - resource_resource_manager_lien_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, 
nil - } - return -1, nil, nil -} - -func resourceResourceManagerLienDecoder(d *resource_resource_manager_lien_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - - config := meta.(*Config) - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - new := res["parent"].(string) - old := d.Get("parent").(string) - if resource_resource_manager_lien_strings.HasPrefix(new, "projects/") { - new = resource_resource_manager_lien_strings.Split(new, "/")[1] - } - if resource_resource_manager_lien_strings.HasPrefix(old, "projects/") { - old = resource_resource_manager_lien_strings.Split(old, "/")[1] - } - resource_resource_manager_lien_log.Printf("[DEBUG] Trying to figure out whether to use %s or %s", old, new) - - if resource_resource_manager_lien_strings.Contains(old, "/") || resource_resource_manager_lien_strings.Contains(new, "/") { - return res, nil - } - - var oldProjId int64 - var newProjId int64 - if oldVal, err := resource_resource_manager_lien_strconv.ParseInt(old, 10, 64); err == nil { - resource_resource_manager_lien_log.Printf("[DEBUG] The old value was a real number: %d", oldVal) - oldProjId = oldVal - } else { - pOld, err := config.NewResourceManagerClient(userAgent).Projects.Get(old).Do() - if err != nil { - return res, nil - } - oldProjId = pOld.ProjectNumber - } - if newVal, err := resource_resource_manager_lien_strconv.ParseInt(new, 10, 64); err == nil { - resource_resource_manager_lien_log.Printf("[DEBUG] The new value was a real number: %d", newVal) - newProjId = newVal - } else { - pNew, err := config.NewResourceManagerClient(userAgent).Projects.Get(new).Do() - if err != nil { - return res, nil - } - newProjId = pNew.ProjectNumber - } - if newProjId == oldProjId { - res["parent"] = d.Get("parent") - } - return res, nil -} - -func resourceSecurityCenterNotificationConfig() *resource_scc_notification_config_schema.Resource { - return 
&resource_scc_notification_config_schema.Resource{ - Create: resourceSecurityCenterNotificationConfigCreate, - Read: resourceSecurityCenterNotificationConfigRead, - Update: resourceSecurityCenterNotificationConfigUpdate, - Delete: resourceSecurityCenterNotificationConfigDelete, - - Importer: &resource_scc_notification_config_schema.ResourceImporter{ - State: resourceSecurityCenterNotificationConfigImport, - }, - - Timeouts: &resource_scc_notification_config_schema.ResourceTimeout{ - Create: resource_scc_notification_config_schema.DefaultTimeout(4 * resource_scc_notification_config_time.Minute), - Update: resource_scc_notification_config_schema.DefaultTimeout(4 * resource_scc_notification_config_time.Minute), - Delete: resource_scc_notification_config_schema.DefaultTimeout(4 * resource_scc_notification_config_time.Minute), - }, - - Schema: map[string]*resource_scc_notification_config_schema.Schema{ - "config_id": { - Type: resource_scc_notification_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `This must be unique within the organization.`, - }, - "organization": { - Type: resource_scc_notification_config_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The organization whose Cloud Security Command Center the Notification -Config lives in.`, - }, - "pubsub_topic": { - Type: resource_scc_notification_config_schema.TypeString, - Required: true, - Description: `The Pub/Sub topic to send notifications to. 
Its format is -"projects/[project_id]/topics/[topic]".`, - }, - "streaming_config": { - Type: resource_scc_notification_config_schema.TypeList, - Required: true, - Description: `The config for triggering streaming-based notifications.`, - MaxItems: 1, - Elem: &resource_scc_notification_config_schema.Resource{ - Schema: map[string]*resource_scc_notification_config_schema.Schema{ - "filter": { - Type: resource_scc_notification_config_schema.TypeString, - Required: true, - Description: `Expression that defines the filter to apply across create/update -events of assets or findings as specified by the event type. The -expression is a list of zero or more restrictions combined via -logical operators AND and OR. Parentheses are supported, and OR -has higher precedence than AND. - -Restrictions have the form and may have -a - character in front of them to indicate negation. The fields -map to those defined in the corresponding resource. - -The supported operators are: - -* = for all value types. -* >, <, >=, <= for integer values. -* :, meaning substring matching, for strings. - -The supported value types are: - -* string literals in quotes. -* integer literals without quotes. -* boolean literals true and false without quotes. 
- -See -[Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) -for information on how to write a filter.`, - }, - }, - }, - }, - "description": { - Type: resource_scc_notification_config_schema.TypeString, - Optional: true, - ValidateFunc: resource_scc_notification_config_validation.StringLenBetween(0, 1024), - Description: `The description of the notification config (max of 1024 characters).`, - }, - "name": { - Type: resource_scc_notification_config_schema.TypeString, - Computed: true, - Description: `The resource name of this notification config, in the format -'organizations/{{organization}}/notificationConfigs/{{config_id}}'.`, - }, - "service_account": { - Type: resource_scc_notification_config_schema.TypeString, - Computed: true, - Description: `The service account that needs "pubsub.topics.publish" permission to -publish to the Pub/Sub topic.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSecurityCenterNotificationConfigCreate(d *resource_scc_notification_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandSecurityCenterNotificationConfigDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_scc_notification_config_reflect.ValueOf(descriptionProp)) && (ok || !resource_scc_notification_config_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - pubsubTopicProp, err := expandSecurityCenterNotificationConfigPubsubTopic(d.Get("pubsub_topic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("pubsub_topic"); !isEmptyValue(resource_scc_notification_config_reflect.ValueOf(pubsubTopicProp)) && (ok || 
!resource_scc_notification_config_reflect.DeepEqual(v, pubsubTopicProp)) { - obj["pubsubTopic"] = pubsubTopicProp - } - streamingConfigProp, err := expandSecurityCenterNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("streaming_config"); !isEmptyValue(resource_scc_notification_config_reflect.ValueOf(streamingConfigProp)) && (ok || !resource_scc_notification_config_reflect.DeepEqual(v, streamingConfigProp)) { - obj["streamingConfig"] = streamingConfigProp - } - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}organizations/{{organization}}/notificationConfigs?configId={{config_id}}") - if err != nil { - return err - } - - resource_scc_notification_config_log.Printf("[DEBUG] Creating new NotificationConfig: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_scc_notification_config_schema.TimeoutCreate)) - if err != nil { - return resource_scc_notification_config_fmt.Errorf("Error creating NotificationConfig: %s", err) - } - if err := d.Set("name", flattenSecurityCenterNotificationConfigName(res["name"], d, config)); err != nil { - return resource_scc_notification_config_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_scc_notification_config_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return resource_scc_notification_config_fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return resource_scc_notification_config_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return resource_scc_notification_config_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - resource_scc_notification_config_log.Printf("[DEBUG] Finished creating NotificationConfig %q: %#v", d.Id(), res) - - return resourceSecurityCenterNotificationConfigRead(d, meta) -} - -func resourceSecurityCenterNotificationConfigRead(d *resource_scc_notification_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_scc_notification_config_fmt.Sprintf("SecurityCenterNotificationConfig %q", d.Id())) - } - - if err := d.Set("name", flattenSecurityCenterNotificationConfigName(res["name"], d, config)); err != nil { - return resource_scc_notification_config_fmt.Errorf("Error reading NotificationConfig: %s", err) - } - if err := d.Set("description", flattenSecurityCenterNotificationConfigDescription(res["description"], d, config)); err != nil { - return resource_scc_notification_config_fmt.Errorf("Error reading NotificationConfig: %s", err) - } - if err := d.Set("pubsub_topic", flattenSecurityCenterNotificationConfigPubsubTopic(res["pubsubTopic"], d, config)); err != nil { - return resource_scc_notification_config_fmt.Errorf("Error reading NotificationConfig: %s", err) - } - 
if err := d.Set("service_account", flattenSecurityCenterNotificationConfigServiceAccount(res["serviceAccount"], d, config)); err != nil { - return resource_scc_notification_config_fmt.Errorf("Error reading NotificationConfig: %s", err) - } - if err := d.Set("streaming_config", flattenSecurityCenterNotificationConfigStreamingConfig(res["streamingConfig"], d, config)); err != nil { - return resource_scc_notification_config_fmt.Errorf("Error reading NotificationConfig: %s", err) - } - - return nil -} - -func resourceSecurityCenterNotificationConfigUpdate(d *resource_scc_notification_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandSecurityCenterNotificationConfigDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_scc_notification_config_reflect.ValueOf(v)) && (ok || !resource_scc_notification_config_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - pubsubTopicProp, err := expandSecurityCenterNotificationConfigPubsubTopic(d.Get("pubsub_topic"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("pubsub_topic"); !isEmptyValue(resource_scc_notification_config_reflect.ValueOf(v)) && (ok || !resource_scc_notification_config_reflect.DeepEqual(v, pubsubTopicProp)) { - obj["pubsubTopic"] = pubsubTopicProp - } - streamingConfigProp, err := expandSecurityCenterNotificationConfigStreamingConfig(d.Get("streaming_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("streaming_config"); !isEmptyValue(resource_scc_notification_config_reflect.ValueOf(v)) && (ok || !resource_scc_notification_config_reflect.DeepEqual(v, streamingConfigProp)) { - obj["streamingConfig"] = 
streamingConfigProp - } - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") - if err != nil { - return err - } - - resource_scc_notification_config_log.Printf("[DEBUG] Updating NotificationConfig %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("pubsub_topic") { - updateMask = append(updateMask, "pubsubTopic") - } - - if d.HasChange("streaming_config") { - updateMask = append(updateMask, "streamingConfig.filter") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_scc_notification_config_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_scc_notification_config_schema.TimeoutUpdate)) - - if err != nil { - return resource_scc_notification_config_fmt.Errorf("Error updating NotificationConfig %q: %s", d.Id(), err) - } else { - resource_scc_notification_config_log.Printf("[DEBUG] Finished updating NotificationConfig %q: %#v", d.Id(), res) - } - - return resourceSecurityCenterNotificationConfigRead(d, meta) -} - -func resourceSecurityCenterNotificationConfigDelete(d *resource_scc_notification_config_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_scc_notification_config_log.Printf("[DEBUG] Deleting NotificationConfig %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, 
userAgent, obj, d.Timeout(resource_scc_notification_config_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "NotificationConfig") - } - - resource_scc_notification_config_log.Printf("[DEBUG] Finished deleting NotificationConfig %q: %#v", d.Id(), res) - return nil -} - -func resourceSecurityCenterNotificationConfigImport(d *resource_scc_notification_config_schema.ResourceData, meta interface{}) ([]*resource_scc_notification_config_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - stringParts := resource_scc_notification_config_strings.Split(d.Get("name").(string), "/") - if len(stringParts) != 4 { - return nil, resource_scc_notification_config_fmt.Errorf( - "Saw %s when the name is expected to have shape %s", - d.Get("name"), - "organizations/{{organization}}/sources/{{source}}", - ) - } - - if err := d.Set("organization", stringParts[1]); err != nil { - return nil, resource_scc_notification_config_fmt.Errorf("Error setting organization: %s", err) - } - return []*resource_scc_notification_config_schema.ResourceData{d}, nil -} - -func flattenSecurityCenterNotificationConfigName(v interface{}, d *resource_scc_notification_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterNotificationConfigDescription(v interface{}, d *resource_scc_notification_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterNotificationConfigPubsubTopic(v interface{}, d *resource_scc_notification_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterNotificationConfigServiceAccount(v interface{}, d *resource_scc_notification_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterNotificationConfigStreamingConfig(v interface{}, d 
*resource_scc_notification_config_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["filter"] = - flattenSecurityCenterNotificationConfigStreamingConfigFilter(original["filter"], d, config) - return []interface{}{transformed} -} - -func flattenSecurityCenterNotificationConfigStreamingConfigFilter(v interface{}, d *resource_scc_notification_config_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandSecurityCenterNotificationConfigDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecurityCenterNotificationConfigPubsubTopic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecurityCenterNotificationConfigStreamingConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedFilter, err := expandSecurityCenterNotificationConfigStreamingConfigFilter(original["filter"], d, config) - if err != nil { - return nil, err - } else if val := resource_scc_notification_config_reflect.ValueOf(transformedFilter); val.IsValid() && !isEmptyValue(val) { - transformed["filter"] = transformedFilter - } - - return transformed, nil -} - -func expandSecurityCenterNotificationConfigStreamingConfigFilter(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceSecurityCenterSource() *resource_scc_source_schema.Resource { - return &resource_scc_source_schema.Resource{ - Create: resourceSecurityCenterSourceCreate, - Read: resourceSecurityCenterSourceRead, - Update: 
resourceSecurityCenterSourceUpdate, - Delete: resourceSecurityCenterSourceDelete, - - Importer: &resource_scc_source_schema.ResourceImporter{ - State: resourceSecurityCenterSourceImport, - }, - - Timeouts: &resource_scc_source_schema.ResourceTimeout{ - Create: resource_scc_source_schema.DefaultTimeout(4 * resource_scc_source_time.Minute), - Update: resource_scc_source_schema.DefaultTimeout(4 * resource_scc_source_time.Minute), - Delete: resource_scc_source_schema.DefaultTimeout(4 * resource_scc_source_time.Minute), - }, - - Schema: map[string]*resource_scc_source_schema.Schema{ - "display_name": { - Type: resource_scc_source_schema.TypeString, - Required: true, - ValidateFunc: validateRegexp(`[\p{L}\p{N}]({\p{L}\p{N}_- ]{0,30}[\p{L}\p{N}])?`), - Description: `The source’s display name. A source’s display name must be unique -amongst its siblings, for example, two sources with the same parent -can't share the same display name. The display name must start and end -with a letter or digit, may contain letters, digits, spaces, hyphens, -and underscores, and can be no longer than 32 characters.`, - }, - "organization": { - Type: resource_scc_source_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The organization whose Cloud Security Command Center the Source -lives in.`, - }, - "description": { - Type: resource_scc_source_schema.TypeString, - Optional: true, - ValidateFunc: resource_scc_source_validation.StringLenBetween(0, 1024), - Description: `The description of the source (max of 1024 characters).`, - }, - "name": { - Type: resource_scc_source_schema.TypeString, - Computed: true, - Description: `The resource name of this source, in the format -'organizations/{{organization}}/sources/{{source}}'.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSecurityCenterSourceCreate(d *resource_scc_source_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if 
err != nil { - return err - } - - obj := make(map[string]interface{}) - descriptionProp, err := expandSecurityCenterSourceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_scc_source_reflect.ValueOf(descriptionProp)) && (ok || !resource_scc_source_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandSecurityCenterSourceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_scc_source_reflect.ValueOf(displayNameProp)) && (ok || !resource_scc_source_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}organizations/{{organization}}/sources") - if err != nil { - return err - } - - resource_scc_source_log.Printf("[DEBUG] Creating new Source: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_scc_source_schema.TimeoutCreate)) - if err != nil { - return resource_scc_source_fmt.Errorf("Error creating Source: %s", err) - } - if err := d.Set("name", flattenSecurityCenterSourceName(res["name"], d, config)); err != nil { - return resource_scc_source_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_scc_source_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - respBody, ok := res["response"] - if !ok { - return resource_scc_source_fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - - name, ok = respBody.(map[string]interface{})["name"] - if !ok { - return resource_scc_source_fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.") - } - } - if err := d.Set("name", name.(string)); err != nil { - return resource_scc_source_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - resource_scc_source_log.Printf("[DEBUG] Finished creating Source %q: %#v", d.Id(), res) - - return resourceSecurityCenterSourceRead(d, meta) -} - -func resourceSecurityCenterSourceRead(d *resource_scc_source_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_scc_source_fmt.Sprintf("SecurityCenterSource %q", d.Id())) - } - - if err := d.Set("name", flattenSecurityCenterSourceName(res["name"], d, config)); err != nil { - return resource_scc_source_fmt.Errorf("Error reading Source: %s", err) - } - if err := d.Set("description", flattenSecurityCenterSourceDescription(res["description"], d, config)); err != nil { - return resource_scc_source_fmt.Errorf("Error reading Source: %s", err) - } - if err := d.Set("display_name", flattenSecurityCenterSourceDisplayName(res["displayName"], d, config)); err != nil { - return resource_scc_source_fmt.Errorf("Error reading Source: %s", err) - } - - return nil -} - -func resourceSecurityCenterSourceUpdate(d *resource_scc_source_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - 
if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandSecurityCenterSourceDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_scc_source_reflect.ValueOf(v)) && (ok || !resource_scc_source_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - displayNameProp, err := expandSecurityCenterSourceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_scc_source_reflect.ValueOf(v)) && (ok || !resource_scc_source_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - - url, err := replaceVars(d, config, "{{SecurityCenterBasePath}}{{name}}") - if err != nil { - return err - } - - resource_scc_source_log.Printf("[DEBUG] Updating Source %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_scc_source_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_scc_source_schema.TimeoutUpdate)) - - if err != nil { - return resource_scc_source_fmt.Errorf("Error updating Source %q: %s", d.Id(), err) - } else { - resource_scc_source_log.Printf("[DEBUG] Finished updating Source %q: %#v", d.Id(), res) - } - - return resourceSecurityCenterSourceRead(d, meta) -} - -func resourceSecurityCenterSourceDelete(d *resource_scc_source_schema.ResourceData, meta interface{}) error { - 
resource_scc_source_log.Printf("[WARNING] SecurityCenter Source resources"+ - " cannot be deleted from Google Cloud. The resource %s will be removed from Terraform"+ - " state, but will still be present on Google Cloud.", d.Id()) - d.SetId("") - - return nil -} - -func resourceSecurityCenterSourceImport(d *resource_scc_source_schema.ResourceData, meta interface{}) ([]*resource_scc_source_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err - } - - stringParts := resource_scc_source_strings.Split(d.Get("name").(string), "/") - if len(stringParts) != 4 { - return nil, resource_scc_source_fmt.Errorf( - "Saw %s when the name is expected to have shape %s", - d.Get("name"), - "organizations/{{organization}}/sources/{{source}}", - ) - } - - if err := d.Set("organization", stringParts[1]); err != nil { - return nil, resource_scc_source_fmt.Errorf("Error setting organization: %s", err) - } - return []*resource_scc_source_schema.ResourceData{d}, nil -} - -func flattenSecurityCenterSourceName(v interface{}, d *resource_scc_source_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterSourceDescription(v interface{}, d *resource_scc_source_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecurityCenterSourceDisplayName(v interface{}, d *resource_scc_source_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandSecurityCenterSourceDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecurityCenterSourceDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceSecretManagerSecret() *resource_secret_manager_secret_schema.Resource { - return &resource_secret_manager_secret_schema.Resource{ - Create: resourceSecretManagerSecretCreate, - Read: 
resourceSecretManagerSecretRead, - Update: resourceSecretManagerSecretUpdate, - Delete: resourceSecretManagerSecretDelete, - - Importer: &resource_secret_manager_secret_schema.ResourceImporter{ - State: resourceSecretManagerSecretImport, - }, - - Timeouts: &resource_secret_manager_secret_schema.ResourceTimeout{ - Create: resource_secret_manager_secret_schema.DefaultTimeout(4 * resource_secret_manager_secret_time.Minute), - Update: resource_secret_manager_secret_schema.DefaultTimeout(4 * resource_secret_manager_secret_time.Minute), - Delete: resource_secret_manager_secret_schema.DefaultTimeout(4 * resource_secret_manager_secret_time.Minute), - }, - - Schema: map[string]*resource_secret_manager_secret_schema.Schema{ - "replication": { - Type: resource_secret_manager_secret_schema.TypeList, - Required: true, - ForceNew: true, - Description: `The replication policy of the secret data attached to the Secret. It cannot be changed -after the Secret has been created.`, - MaxItems: 1, - Elem: &resource_secret_manager_secret_schema.Resource{ - Schema: map[string]*resource_secret_manager_secret_schema.Schema{ - "automatic": { - Type: resource_secret_manager_secret_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `The Secret will automatically be replicated without any restrictions.`, - ExactlyOneOf: []string{"replication.0.automatic", "replication.0.user_managed"}, - }, - "user_managed": { - Type: resource_secret_manager_secret_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `The Secret will automatically be replicated without any restrictions.`, - MaxItems: 1, - Elem: &resource_secret_manager_secret_schema.Resource{ - Schema: map[string]*resource_secret_manager_secret_schema.Schema{ - "replicas": { - Type: resource_secret_manager_secret_schema.TypeList, - Required: true, - ForceNew: true, - Description: `The list of Replicas for this Secret. 
Cannot be empty.`, - MinItems: 1, - Elem: &resource_secret_manager_secret_schema.Resource{ - Schema: map[string]*resource_secret_manager_secret_schema.Schema{ - "location": { - Type: resource_secret_manager_secret_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The canonical IDs of the location to replicate data. For example: "us-east1".`, - }, - "customer_managed_encryption": { - Type: resource_secret_manager_secret_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Customer Managed Encryption for the secret.`, - MaxItems: 1, - Elem: &resource_secret_manager_secret_schema.Resource{ - Schema: map[string]*resource_secret_manager_secret_schema.Schema{ - "kms_key_name": { - Type: resource_secret_manager_secret_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Describes the Cloud KMS encryption key that will be used to protect destination secret.`, - }, - }, - }, - }, - }, - }, - }, - }, - }, - ExactlyOneOf: []string{"replication.0.automatic", "replication.0.user_managed"}, - }, - }, - }, - }, - "secret_id": { - Type: resource_secret_manager_secret_schema.TypeString, - Required: true, - ForceNew: true, - Description: `This must be unique within the project.`, - }, - "expire_time": { - Type: resource_secret_manager_secret_schema.TypeString, - Computed: true, - Optional: true, - Description: `Timestamp in UTC when the Secret is scheduled to expire. This is always provided on output, regardless of what was sent on input. -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "labels": { - Type: resource_secret_manager_secret_schema.TypeMap, - Optional: true, - Description: `The labels assigned to this Secret. 
- -Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, -and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} - -Label values must be between 0 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, -and must conform to the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} - -No more than 64 labels can be assigned to a given resource. - -An object containing a list of "key": value pairs. Example: -{ "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &resource_secret_manager_secret_schema.Schema{Type: resource_secret_manager_secret_schema.TypeString}, - }, - "rotation": { - Type: resource_secret_manager_secret_schema.TypeList, - Optional: true, - Description: `The rotation time and period for a Secret. At 'next_rotation_time', Secret Manager will send a Pub/Sub notification to the topics configured on the Secret. 'topics' must be set to configure rotation.`, - MaxItems: 1, - Elem: &resource_secret_manager_secret_schema.Resource{ - Schema: map[string]*resource_secret_manager_secret_schema.Schema{ - "next_rotation_time": { - Type: resource_secret_manager_secret_schema.TypeString, - Optional: true, - Description: `Timestamp in UTC at which the Secret is scheduled to rotate. -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - RequiredWith: []string{"rotation.0.rotation_period"}, - }, - "rotation_period": { - Type: resource_secret_manager_secret_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The Duration between rotation notifications. Must be in seconds and at least 3600s (1h) and at most 3153600000s (100 years). -If rotationPeriod is set, 'next_rotation_time' must be set. 
'next_rotation_time' will be advanced by this period when the service automatically sends rotation notifications.`, - }, - }, - }, - RequiredWith: []string{"topics"}, - }, - "topics": { - Type: resource_secret_manager_secret_schema.TypeList, - Optional: true, - Description: `A list of up to 10 Pub/Sub topics to which messages are published when control plane operations are called on the secret or its versions.`, - Elem: &resource_secret_manager_secret_schema.Resource{ - Schema: map[string]*resource_secret_manager_secret_schema.Schema{ - "name": { - Type: resource_secret_manager_secret_schema.TypeString, - Required: true, - Description: `The resource name of the Pub/Sub topic that will be published to, in the following format: projects/*/topics/*. -For publication to succeed, the Secret Manager Service Agent service account must have pubsub.publisher permissions on the topic.`, - }, - }, - }, - RequiredWith: []string{"rotation"}, - }, - "ttl": { - Type: resource_secret_manager_secret_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The TTL for the Secret. -A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, - }, - "create_time": { - Type: resource_secret_manager_secret_schema.TypeString, - Computed: true, - Description: `The time at which the Secret was created.`, - }, - "name": { - Type: resource_secret_manager_secret_schema.TypeString, - Computed: true, - Description: `The resource name of the Secret. 
Format: -'projects/{{project}}/secrets/{{secret_id}}'`, - }, - "project": { - Type: resource_secret_manager_secret_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSecretManagerSecretCreate(d *resource_secret_manager_secret_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - labelsProp, err := expandSecretManagerSecretLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_secret_manager_secret_reflect.ValueOf(labelsProp)) && (ok || !resource_secret_manager_secret_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - replicationProp, err := expandSecretManagerSecretReplication(d.Get("replication"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("replication"); !isEmptyValue(resource_secret_manager_secret_reflect.ValueOf(replicationProp)) && (ok || !resource_secret_manager_secret_reflect.DeepEqual(v, replicationProp)) { - obj["replication"] = replicationProp - } - topicsProp, err := expandSecretManagerSecretTopics(d.Get("topics"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("topics"); !isEmptyValue(resource_secret_manager_secret_reflect.ValueOf(topicsProp)) && (ok || !resource_secret_manager_secret_reflect.DeepEqual(v, topicsProp)) { - obj["topics"] = topicsProp - } - expireTimeProp, err := expandSecretManagerSecretExpireTime(d.Get("expire_time"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("expire_time"); !isEmptyValue(resource_secret_manager_secret_reflect.ValueOf(expireTimeProp)) && (ok || !resource_secret_manager_secret_reflect.DeepEqual(v, expireTimeProp)) { - obj["expireTime"] = expireTimeProp - } - ttlProp, err := 
expandSecretManagerSecretTtl(d.Get("ttl"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ttl"); !isEmptyValue(resource_secret_manager_secret_reflect.ValueOf(ttlProp)) && (ok || !resource_secret_manager_secret_reflect.DeepEqual(v, ttlProp)) { - obj["ttl"] = ttlProp - } - rotationProp, err := expandSecretManagerSecretRotation(d.Get("rotation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("rotation"); !isEmptyValue(resource_secret_manager_secret_reflect.ValueOf(rotationProp)) && (ok || !resource_secret_manager_secret_reflect.DeepEqual(v, rotationProp)) { - obj["rotation"] = rotationProp - } - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets?secretId={{secret_id}}") - if err != nil { - return err - } - - resource_secret_manager_secret_log.Printf("[DEBUG] Creating new Secret: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_secret_manager_secret_fmt.Errorf("Error fetching project for Secret: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_secret_manager_secret_schema.TimeoutCreate)) - if err != nil { - return resource_secret_manager_secret_fmt.Errorf("Error creating Secret: %s", err) - } - if err := d.Set("name", flattenSecretManagerSecretName(res["name"], d, config)); err != nil { - return resource_secret_manager_secret_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/secrets/{{secret_id}}") - if err != nil { - return resource_secret_manager_secret_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_secret_manager_secret_log.Printf("[DEBUG] Finished creating Secret %q: %#v", d.Id(), res) - - return 
resourceSecretManagerSecretRead(d, meta) -} - -func resourceSecretManagerSecretRead(d *resource_secret_manager_secret_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_secret_manager_secret_fmt.Errorf("Error fetching project for Secret: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_secret_manager_secret_fmt.Sprintf("SecretManagerSecret %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_secret_manager_secret_fmt.Errorf("Error reading Secret: %s", err) - } - - if err := d.Set("name", flattenSecretManagerSecretName(res["name"], d, config)); err != nil { - return resource_secret_manager_secret_fmt.Errorf("Error reading Secret: %s", err) - } - if err := d.Set("create_time", flattenSecretManagerSecretCreateTime(res["createTime"], d, config)); err != nil { - return resource_secret_manager_secret_fmt.Errorf("Error reading Secret: %s", err) - } - if err := d.Set("labels", flattenSecretManagerSecretLabels(res["labels"], d, config)); err != nil { - return resource_secret_manager_secret_fmt.Errorf("Error reading Secret: %s", err) - } - if err := d.Set("replication", flattenSecretManagerSecretReplication(res["replication"], d, config)); err != nil { - return resource_secret_manager_secret_fmt.Errorf("Error reading Secret: %s", err) - } - if err := d.Set("topics", flattenSecretManagerSecretTopics(res["topics"], d, config)); err != nil { - return 
resource_secret_manager_secret_fmt.Errorf("Error reading Secret: %s", err) - } - if err := d.Set("expire_time", flattenSecretManagerSecretExpireTime(res["expireTime"], d, config)); err != nil { - return resource_secret_manager_secret_fmt.Errorf("Error reading Secret: %s", err) - } - if err := d.Set("rotation", flattenSecretManagerSecretRotation(res["rotation"], d, config)); err != nil { - return resource_secret_manager_secret_fmt.Errorf("Error reading Secret: %s", err) - } - - return nil -} - -func resourceSecretManagerSecretUpdate(d *resource_secret_manager_secret_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_secret_manager_secret_fmt.Errorf("Error fetching project for Secret: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - labelsProp, err := expandSecretManagerSecretLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_secret_manager_secret_reflect.ValueOf(v)) && (ok || !resource_secret_manager_secret_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - topicsProp, err := expandSecretManagerSecretTopics(d.Get("topics"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("topics"); !isEmptyValue(resource_secret_manager_secret_reflect.ValueOf(v)) && (ok || !resource_secret_manager_secret_reflect.DeepEqual(v, topicsProp)) { - obj["topics"] = topicsProp - } - expireTimeProp, err := expandSecretManagerSecretExpireTime(d.Get("expire_time"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("expire_time"); !isEmptyValue(resource_secret_manager_secret_reflect.ValueOf(v)) && (ok || !resource_secret_manager_secret_reflect.DeepEqual(v, expireTimeProp)) { - 
obj["expireTime"] = expireTimeProp - } - rotationProp, err := expandSecretManagerSecretRotation(d.Get("rotation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("rotation"); !isEmptyValue(resource_secret_manager_secret_reflect.ValueOf(v)) && (ok || !resource_secret_manager_secret_reflect.DeepEqual(v, rotationProp)) { - obj["rotation"] = rotationProp - } - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret_id}}") - if err != nil { - return err - } - - resource_secret_manager_secret_log.Printf("[DEBUG] Updating Secret %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("topics") { - updateMask = append(updateMask, "topics") - } - - if d.HasChange("expire_time") { - updateMask = append(updateMask, "expireTime") - } - - if d.HasChange("rotation") { - updateMask = append(updateMask, "rotation") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_secret_manager_secret_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_secret_manager_secret_schema.TimeoutUpdate)) - - if err != nil { - return resource_secret_manager_secret_fmt.Errorf("Error updating Secret %q: %s", d.Id(), err) - } else { - resource_secret_manager_secret_log.Printf("[DEBUG] Finished updating Secret %q: %#v", d.Id(), res) - } - - return resourceSecretManagerSecretRead(d, meta) -} - -func resourceSecretManagerSecretDelete(d *resource_secret_manager_secret_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if 
err != nil { - return resource_secret_manager_secret_fmt.Errorf("Error fetching project for Secret: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}projects/{{project}}/secrets/{{secret_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_secret_manager_secret_log.Printf("[DEBUG] Deleting Secret %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_secret_manager_secret_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Secret") - } - - resource_secret_manager_secret_log.Printf("[DEBUG] Finished deleting Secret %q: %#v", d.Id(), res) - return nil -} - -func resourceSecretManagerSecretImport(d *resource_secret_manager_secret_schema.ResourceData, meta interface{}) ([]*resource_secret_manager_secret_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/secrets/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/secrets/{{secret_id}}") - if err != nil { - return nil, resource_secret_manager_secret_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_secret_manager_secret_schema.ResourceData{d}, nil -} - -func flattenSecretManagerSecretName(v interface{}, d *resource_secret_manager_secret_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretCreateTime(v interface{}, d *resource_secret_manager_secret_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretLabels(v interface{}, d *resource_secret_manager_secret_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenSecretManagerSecretReplication(v interface{}, d *resource_secret_manager_secret_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["automatic"] = - flattenSecretManagerSecretReplicationAutomatic(original["automatic"], d, config) - transformed["user_managed"] = - flattenSecretManagerSecretReplicationUserManaged(original["userManaged"], d, config) - return []interface{}{transformed} -} - -func flattenSecretManagerSecretReplicationAutomatic(v interface{}, d *resource_secret_manager_secret_schema.ResourceData, config *Config) interface{} { - return v != nil -} - -func flattenSecretManagerSecretReplicationUserManaged(v interface{}, d *resource_secret_manager_secret_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["replicas"] = - flattenSecretManagerSecretReplicationUserManagedReplicas(original["replicas"], d, config) - return []interface{}{transformed} -} - -func flattenSecretManagerSecretReplicationUserManagedReplicas(v interface{}, d *resource_secret_manager_secret_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "location": flattenSecretManagerSecretReplicationUserManagedReplicasLocation(original["location"], d, config), - "customer_managed_encryption": flattenSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryption(original["customerManagedEncryption"], d, config), - }) - } - return transformed -} - -func 
flattenSecretManagerSecretReplicationUserManagedReplicasLocation(v interface{}, d *resource_secret_manager_secret_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryption(v interface{}, d *resource_secret_manager_secret_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["kms_key_name"] = - flattenSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryptionKmsKeyName(original["kmsKeyName"], d, config) - return []interface{}{transformed} -} - -func flattenSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryptionKmsKeyName(v interface{}, d *resource_secret_manager_secret_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretTopics(v interface{}, d *resource_secret_manager_secret_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "name": flattenSecretManagerSecretTopicsName(original["name"], d, config), - }) - } - return transformed -} - -func flattenSecretManagerSecretTopicsName(v interface{}, d *resource_secret_manager_secret_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretExpireTime(v interface{}, d *resource_secret_manager_secret_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretRotation(v interface{}, d *resource_secret_manager_secret_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := 
v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["next_rotation_time"] = - flattenSecretManagerSecretRotationNextRotationTime(original["nextRotationTime"], d, config) - transformed["rotation_period"] = - flattenSecretManagerSecretRotationRotationPeriod(original["rotationPeriod"], d, config) - return []interface{}{transformed} -} - -func flattenSecretManagerSecretRotationNextRotationTime(v interface{}, d *resource_secret_manager_secret_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretRotationRotationPeriod(v interface{}, d *resource_secret_manager_secret_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandSecretManagerSecretLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandSecretManagerSecretReplication(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedAutomatic, err := expandSecretManagerSecretReplicationAutomatic(original["automatic"], d, config) - if err != nil { - return nil, err - } else if val := resource_secret_manager_secret_reflect.ValueOf(transformedAutomatic); val.IsValid() && !isEmptyValue(val) { - transformed["automatic"] = transformedAutomatic - } - - transformedUserManaged, err := expandSecretManagerSecretReplicationUserManaged(original["user_managed"], d, config) - if err != nil { - return nil, err - } else if val := resource_secret_manager_secret_reflect.ValueOf(transformedUserManaged); val.IsValid() && !isEmptyValue(val) { - 
transformed["userManaged"] = transformedUserManaged - } - - return transformed, nil -} - -func expandSecretManagerSecretReplicationAutomatic(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil || !v.(bool) { - return nil, nil - } - - return struct{}{}, nil -} - -func expandSecretManagerSecretReplicationUserManaged(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedReplicas, err := expandSecretManagerSecretReplicationUserManagedReplicas(original["replicas"], d, config) - if err != nil { - return nil, err - } else if val := resource_secret_manager_secret_reflect.ValueOf(transformedReplicas); val.IsValid() && !isEmptyValue(val) { - transformed["replicas"] = transformedReplicas - } - - return transformed, nil -} - -func expandSecretManagerSecretReplicationUserManagedReplicas(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedLocation, err := expandSecretManagerSecretReplicationUserManagedReplicasLocation(original["location"], d, config) - if err != nil { - return nil, err - } else if val := resource_secret_manager_secret_reflect.ValueOf(transformedLocation); val.IsValid() && !isEmptyValue(val) { - transformed["location"] = transformedLocation - } - - transformedCustomerManagedEncryption, err := expandSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryption(original["customer_managed_encryption"], d, config) - if err != nil { - return nil, err - } else if val := 
resource_secret_manager_secret_reflect.ValueOf(transformedCustomerManagedEncryption); val.IsValid() && !isEmptyValue(val) { - transformed["customerManagedEncryption"] = transformedCustomerManagedEncryption - } - - req = append(req, transformed) - } - return req, nil -} - -func expandSecretManagerSecretReplicationUserManagedReplicasLocation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryption(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKeyName, err := expandSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryptionKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_secret_manager_secret_reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName - } - - return transformed, nil -} - -func expandSecretManagerSecretReplicationUserManagedReplicasCustomerManagedEncryptionKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecretManagerSecretTopics(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - req := make([]interface{}, 0, len(l)) - for _, raw := range l { - if raw == nil { - continue - } - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedName, err := expandSecretManagerSecretTopicsName(original["name"], d, config) - if err != nil { - return nil, err - } else if val := resource_secret_manager_secret_reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { - 
transformed["name"] = transformedName - } - - req = append(req, transformed) - } - return req, nil -} - -func expandSecretManagerSecretTopicsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecretManagerSecretExpireTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecretManagerSecretTtl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecretManagerSecretRotation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedNextRotationTime, err := expandSecretManagerSecretRotationNextRotationTime(original["next_rotation_time"], d, config) - if err != nil { - return nil, err - } else if val := resource_secret_manager_secret_reflect.ValueOf(transformedNextRotationTime); val.IsValid() && !isEmptyValue(val) { - transformed["nextRotationTime"] = transformedNextRotationTime - } - - transformedRotationPeriod, err := expandSecretManagerSecretRotationRotationPeriod(original["rotation_period"], d, config) - if err != nil { - return nil, err - } else if val := resource_secret_manager_secret_reflect.ValueOf(transformedRotationPeriod); val.IsValid() && !isEmptyValue(val) { - transformed["rotationPeriod"] = transformedRotationPeriod - } - - return transformed, nil -} - -func expandSecretManagerSecretRotationNextRotationTime(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSecretManagerSecretRotationRotationPeriod(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceSecretManagerSecretVersionUpdate(d 
*resource_secret_manager_secret_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - _, err := expandSecretManagerSecretVersionEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } - - return resourceSecretManagerSecretVersionRead(d, meta) -} - -func resourceSecretManagerSecretVersion() *resource_secret_manager_secret_version_schema.Resource { - return &resource_secret_manager_secret_version_schema.Resource{ - Create: resourceSecretManagerSecretVersionCreate, - Read: resourceSecretManagerSecretVersionRead, - Delete: resourceSecretManagerSecretVersionDelete, - - Importer: &resource_secret_manager_secret_version_schema.ResourceImporter{ - State: resourceSecretManagerSecretVersionImport, - }, - - Timeouts: &resource_secret_manager_secret_version_schema.ResourceTimeout{ - Create: resource_secret_manager_secret_version_schema.DefaultTimeout(4 * resource_secret_manager_secret_version_time.Minute), - Delete: resource_secret_manager_secret_version_schema.DefaultTimeout(4 * resource_secret_manager_secret_version_time.Minute), - }, - - Update: resourceSecretManagerSecretVersionUpdate, - - Schema: map[string]*resource_secret_manager_secret_version_schema.Schema{ - "secret_data": { - Type: resource_secret_manager_secret_version_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The secret data. 
Must be no larger than 64KiB.`, - Sensitive: true, - }, - - "secret": { - Type: resource_secret_manager_secret_version_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Secret Manager secret resource`, - }, - "enabled": { - Type: resource_secret_manager_secret_version_schema.TypeBool, - Optional: true, - Description: `The current state of the SecretVersion.`, - Default: true, - }, - "create_time": { - Type: resource_secret_manager_secret_version_schema.TypeString, - Computed: true, - Description: `The time at which the Secret was created.`, - }, - "destroy_time": { - Type: resource_secret_manager_secret_version_schema.TypeString, - Computed: true, - Description: `The time at which the Secret was destroyed. Only present if state is DESTROYED.`, - }, - "name": { - Type: resource_secret_manager_secret_version_schema.TypeString, - Computed: true, - Description: `The resource name of the SecretVersion. Format: -'projects/{{project}}/secrets/{{secret_id}}/versions/{{version}}'`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSecretManagerSecretVersionCreate(d *resource_secret_manager_secret_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - stateProp, err := expandSecretManagerSecretVersionEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(resource_secret_manager_secret_version_reflect.ValueOf(stateProp)) && (ok || !resource_secret_manager_secret_version_reflect.DeepEqual(v, stateProp)) { - obj["state"] = stateProp - } - payloadProp, err := expandSecretManagerSecretVersionPayload(nil, d, config) - if err != nil { - return err - } else if !isEmptyValue(resource_secret_manager_secret_version_reflect.ValueOf(payloadProp)) { - obj["payload"] = 
payloadProp - } - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}{{secret}}:addVersion") - if err != nil { - return err - } - - resource_secret_manager_secret_version_log.Printf("[DEBUG] Creating new SecretVersion: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_secret_manager_secret_version_schema.TimeoutCreate)) - if err != nil { - return resource_secret_manager_secret_version_fmt.Errorf("Error creating SecretVersion: %s", err) - } - if err := d.Set("name", flattenSecretManagerSecretVersionName(res["name"], d, config)); err != nil { - return resource_secret_manager_secret_version_fmt.Errorf(`Error setting computed identity field "name": %s`, err) - } - - id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_secret_manager_secret_version_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - name, ok := res["name"] - if !ok { - return resource_secret_manager_secret_version_fmt.Errorf("Create response didn't contain critical fields. 
Create may not have succeeded.") - } - if err := d.Set("name", name.(string)); err != nil { - return resource_secret_manager_secret_version_fmt.Errorf("Error setting name: %s", err) - } - d.SetId(name.(string)) - - _, err = expandSecretManagerSecretVersionEnabled(d.Get("enabled"), d, config) - if err != nil { - return err - } - - resource_secret_manager_secret_version_log.Printf("[DEBUG] Finished creating SecretVersion %q: %#v", d.Id(), res) - - return resourceSecretManagerSecretVersionRead(d, meta) -} - -func resourceSecretManagerSecretVersionRead(d *resource_secret_manager_secret_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_secret_manager_secret_version_fmt.Sprintf("SecretManagerSecretVersion %q", d.Id())) - } - - if err := d.Set("enabled", flattenSecretManagerSecretVersionEnabled(res["state"], d, config)); err != nil { - return resource_secret_manager_secret_version_fmt.Errorf("Error reading SecretVersion: %s", err) - } - if err := d.Set("name", flattenSecretManagerSecretVersionName(res["name"], d, config)); err != nil { - return resource_secret_manager_secret_version_fmt.Errorf("Error reading SecretVersion: %s", err) - } - if err := d.Set("create_time", flattenSecretManagerSecretVersionCreateTime(res["createTime"], d, config)); err != nil { - return resource_secret_manager_secret_version_fmt.Errorf("Error reading SecretVersion: %s", err) - } - if err := d.Set("destroy_time", flattenSecretManagerSecretVersionDestroyTime(res["destroyTime"], d, config)); err != nil { - 
return resource_secret_manager_secret_version_fmt.Errorf("Error reading SecretVersion: %s", err) - } - - if flattenedProp := flattenSecretManagerSecretVersionPayload(res["payload"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*resource_secret_manager_secret_version_googleapi.Error); ok { - return resource_secret_manager_secret_version_fmt.Errorf("Error reading SecretVersion: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return resource_secret_manager_secret_version_fmt.Errorf("Error setting %s: %s", k, err) - } - } - } - } - - return nil -} - -func resourceSecretManagerSecretVersionDelete(d *resource_secret_manager_secret_version_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}{{name}}:destroy") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_secret_manager_secret_version_log.Printf("[DEBUG] Deleting SecretVersion %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_secret_manager_secret_version_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "SecretVersion") - } - - resource_secret_manager_secret_version_log.Printf("[DEBUG] Finished deleting SecretVersion %q: %#v", d.Id(), res) - return nil -} - -func resourceSecretManagerSecretVersionImport(d *resource_secret_manager_secret_version_schema.ResourceData, meta interface{}) ([]*resource_secret_manager_secret_version_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{"(?P.+)"}, d, config); err != 
nil { - return nil, err - } - - name := d.Get("name").(string) - secretRegex := resource_secret_manager_secret_version_regexp.MustCompile("(projects/.+/secrets/.+)/versions/.+$") - - parts := secretRegex.FindStringSubmatch(name) - if len(parts) != 2 { - panic(resource_secret_manager_secret_version_fmt.Sprintf("Version name doesn not fit the format `projects/{{project}}/secrets/{{secret}}/versions{{version}}`")) - } - if err := d.Set("secret", parts[1]); err != nil { - return nil, resource_secret_manager_secret_version_fmt.Errorf("Error setting secret: %s", err) - } - - return []*resource_secret_manager_secret_version_schema.ResourceData{d}, nil -} - -func flattenSecretManagerSecretVersionEnabled(v interface{}, d *resource_secret_manager_secret_version_schema.ResourceData, config *Config) interface{} { - if v.(string) == "ENABLED" { - return true - } - - return false -} - -func flattenSecretManagerSecretVersionName(v interface{}, d *resource_secret_manager_secret_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretVersionCreateTime(v interface{}, d *resource_secret_manager_secret_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretVersionDestroyTime(v interface{}, d *resource_secret_manager_secret_version_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSecretManagerSecretVersionPayload(v interface{}, d *resource_secret_manager_secret_version_schema.ResourceData, config *Config) interface{} { - transformed := make(map[string]interface{}) - - if d.Get("enabled").(bool) == false { - transformed["secret_data"] = d.Get("secret_data") - return []interface{}{transformed} - } - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}{{name}}:access") - if err != nil { - return err - } - - parts := resource_secret_manager_secret_version_strings.Split(d.Get("name").(string), "/") - project := parts[1] - - userAgent, err := 
generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - accessRes, err := sendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return err - } - - data, err := resource_secret_manager_secret_version_base64.StdEncoding.DecodeString(accessRes["payload"].(map[string]interface{})["data"].(string)) - if err != nil { - return err - } - transformed["secret_data"] = string(data) - return []interface{}{transformed} -} - -func expandSecretManagerSecretVersionEnabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - name := d.Get("name").(string) - if name == "" { - return "", nil - } - - url, err := replaceVars(d, config, "{{SecretManagerBasePath}}{{name}}") - if err != nil { - return nil, err - } - - if v == true { - url = resource_secret_manager_secret_version_fmt.Sprintf("%s:enable", url) - } else { - url = resource_secret_manager_secret_version_fmt.Sprintf("%s:disable", url) - } - - parts := resource_secret_manager_secret_version_strings.Split(name, "/") - project := parts[1] - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - _, err = sendRequest(config, "POST", project, url, userAgent, nil) - if err != nil { - return nil, err - } - - return nil, nil -} - -func expandSecretManagerSecretVersionPayload(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - transformed := make(map[string]interface{}) - transformedSecretData, err := expandSecretManagerSecretVersionPayloadSecretData(d.Get("secret_data"), d, config) - if err != nil { - return nil, err - } else if val := resource_secret_manager_secret_version_reflect.ValueOf(transformedSecretData); val.IsValid() && !isEmptyValue(val) { - transformed["data"] = transformedSecretData - } - - return transformed, nil -} - -func expandSecretManagerSecretVersionPayloadSecretData(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - if v == nil 
{ - return nil, nil - } - - return resource_secret_manager_secret_version_base64.StdEncoding.EncodeToString([]byte(v.(string))), nil -} - -func resourceServiceNetworkingConnection() *resource_service_networking_connection_schema.Resource { - return &resource_service_networking_connection_schema.Resource{ - Create: resourceServiceNetworkingConnectionCreate, - Read: resourceServiceNetworkingConnectionRead, - Update: resourceServiceNetworkingConnectionUpdate, - Delete: resourceServiceNetworkingConnectionDelete, - Importer: &resource_service_networking_connection_schema.ResourceImporter{ - State: resourceServiceNetworkingConnectionImportState, - }, - - Timeouts: &resource_service_networking_connection_schema.ResourceTimeout{ - Create: resource_service_networking_connection_schema.DefaultTimeout(10 * resource_service_networking_connection_time.Minute), - Update: resource_service_networking_connection_schema.DefaultTimeout(10 * resource_service_networking_connection_time.Minute), - Delete: resource_service_networking_connection_schema.DefaultTimeout(10 * resource_service_networking_connection_time.Minute), - }, - - Schema: map[string]*resource_service_networking_connection_schema.Schema{ - "network": { - Type: resource_service_networking_connection_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Name of VPC network connected with service producers using VPC peering.`, - }, - - "service": { - Type: resource_service_networking_connection_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Provider peering service that is managing peering connectivity for a service provider organization. 
For Google services that support this functionality it is 'servicenetworking.googleapis.com'.`, - }, - "reserved_peering_ranges": { - Type: resource_service_networking_connection_schema.TypeList, - Required: true, - Elem: &resource_service_networking_connection_schema.Schema{Type: resource_service_networking_connection_schema.TypeString}, - Description: `Named IP address range(s) of PEERING type reserved for this service provider. Note that invoking this method with a different range when connection is already established will not reallocate already provisioned service producer subnetworks.`, - }, - "peering": { - Type: resource_service_networking_connection_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceServiceNetworkingConnectionCreate(d *resource_service_networking_connection_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - network := d.Get("network").(string) - serviceNetworkingNetworkName, err := retrieveServiceNetworkingNetworkName(d, config, network, userAgent) - if err != nil { - return resource_service_networking_connection_errwrap.Wrapf("Failed to find Service Networking Connection, err: {{err}}", err) - } - - connection := &resource_service_networking_connection_servicenetworking.Connection{ - Network: serviceNetworkingNetworkName, - ReservedPeeringRanges: convertStringArr(d.Get("reserved_peering_ranges").([]interface{})), - } - - networkFieldValue, err := ParseNetworkFieldValue(network, d, config) - if err != nil { - return resource_service_networking_connection_errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) - } - project := networkFieldValue.Project - - parentService := formatParentService(d.Get("service").(string)) - - if bp, err := getBillingProject(d, config); err == nil { - project = bp - } - - createCall := 
config.NewServiceNetworkingClient(userAgent).Services.Connections.Patch(parentService+"/connections/-", connection).UpdateMask("reservedPeeringRanges").Force(true) - if config.UserProjectOverride { - createCall.Header().Add("X-Goog-User-Project", project) - } - op, err := createCall.Do() - if err != nil { - return err - } - - if err := serviceNetworkingOperationWaitTime(config, op, "Create Service Networking Connection", userAgent, project, d.Timeout(resource_service_networking_connection_schema.TimeoutCreate)); err != nil { - return err - } - - connectionId := &connectionId{ - Network: network, - Service: d.Get("service").(string), - } - - d.SetId(connectionId.Id()) - return resourceServiceNetworkingConnectionRead(d, meta) -} - -func resourceServiceNetworkingConnectionRead(d *resource_service_networking_connection_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - connectionId, err := parseConnectionId(d.Id()) - if err != nil { - return resource_service_networking_connection_errwrap.Wrapf("Unable to parse Service Networking Connection id, err: {{err}}", err) - } - - serviceNetworkingNetworkName, err := retrieveServiceNetworkingNetworkName(d, config, connectionId.Network, userAgent) - if err != nil { - return resource_service_networking_connection_errwrap.Wrapf("Failed to find Service Networking Connection, err: {{err}}", err) - } - - network := d.Get("network").(string) - networkFieldValue, err := ParseNetworkFieldValue(network, d, config) - if err != nil { - return resource_service_networking_connection_errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) - } - project := networkFieldValue.Project - - if bp, err := getBillingProject(d, config); err == nil { - project = bp - } - - parentService := formatParentService(connectionId.Service) - readCall := 
config.NewServiceNetworkingClient(userAgent).Services.Connections.List(parentService).Network(serviceNetworkingNetworkName) - if config.UserProjectOverride { - readCall.Header().Add("X-Goog-User-Project", project) - } - response, err := readCall.Do() - if err != nil { - return err - } - - var connection *resource_service_networking_connection_servicenetworking.Connection - for _, c := range response.Connections { - if c.Network == serviceNetworkingNetworkName { - connection = c - break - } - } - - if connection == nil { - d.SetId("") - resource_service_networking_connection_log.Printf("[WARNING] Failed to find Service Networking Connection, network: %s service: %s", connectionId.Network, connectionId.Service) - return nil - } - - if err := d.Set("network", connectionId.Network); err != nil { - return resource_service_networking_connection_fmt.Errorf("Error setting network: %s", err) - } - if err := d.Set("service", connectionId.Service); err != nil { - return resource_service_networking_connection_fmt.Errorf("Error setting service: %s", err) - } - if err := d.Set("peering", connection.Peering); err != nil { - return resource_service_networking_connection_fmt.Errorf("Error setting peering: %s", err) - } - if err := d.Set("reserved_peering_ranges", connection.ReservedPeeringRanges); err != nil { - return resource_service_networking_connection_fmt.Errorf("Error setting reserved_peering_ranges: %s", err) - } - return nil -} - -func resourceServiceNetworkingConnectionUpdate(d *resource_service_networking_connection_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - connectionId, err := parseConnectionId(d.Id()) - if err != nil { - return resource_service_networking_connection_errwrap.Wrapf("Unable to parse Service Networking Connection id, err: {{err}}", err) - } - - parentService := formatParentService(connectionId.Service) - - if 
d.HasChange("reserved_peering_ranges") { - network := d.Get("network").(string) - serviceNetworkingNetworkName, err := retrieveServiceNetworkingNetworkName(d, config, network, userAgent) - if err != nil { - return resource_service_networking_connection_errwrap.Wrapf("Failed to find Service Networking Connection, err: {{err}}", err) - } - - connection := &resource_service_networking_connection_servicenetworking.Connection{ - Network: serviceNetworkingNetworkName, - ReservedPeeringRanges: convertStringArr(d.Get("reserved_peering_ranges").([]interface{})), - } - - networkFieldValue, err := ParseNetworkFieldValue(network, d, config) - if err != nil { - return resource_service_networking_connection_errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) - } - project := networkFieldValue.Project - - if bp, err := getBillingProject(d, config); err == nil { - project = bp - } - - patchCall := config.NewServiceNetworkingClient(userAgent).Services.Connections.Patch(parentService+"/connections/-", connection).UpdateMask("reservedPeeringRanges").Force(true) - if config.UserProjectOverride { - patchCall.Header().Add("X-Goog-User-Project", project) - } - op, err := patchCall.Do() - if err != nil { - return err - } - if err := serviceNetworkingOperationWaitTime(config, op, "Update Service Networking Connection", userAgent, project, d.Timeout(resource_service_networking_connection_schema.TimeoutUpdate)); err != nil { - return err - } - } - return resourceServiceNetworkingConnectionRead(d, meta) -} - -func resourceServiceNetworkingConnectionDelete(d *resource_service_networking_connection_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - network := d.Get("network").(string) - serviceNetworkingNetworkName, err := retrieveServiceNetworkingNetworkName(d, config, network, userAgent) - if err != nil { - return err - } - - obj := 
make(map[string]interface{}) - peering := d.Get("peering").(string) - obj["name"] = peering - resource_service_networking_connection_url := resource_service_networking_connection_fmt.Sprintf("%s%s/removePeering", config.ComputeBasePath, serviceNetworkingNetworkName) - - networkFieldValue, err := ParseNetworkFieldValue(network, d, config) - if err != nil { - return resource_service_networking_connection_errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) - } - - project := networkFieldValue.Project - res, err := sendRequestWithTimeout(config, "POST", project, resource_service_networking_connection_url, userAgent, obj, d.Timeout(resource_service_networking_connection_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, resource_service_networking_connection_fmt.Sprintf("ServiceNetworkingConnection %q", d.Id())) - } - - op := &resource_service_networking_connection_compute.Operation{} - err = Convert(res, op) - if err != nil { - return err - } - - err = computeOperationWaitTime( - config, op, project, "Updating Network", userAgent, d.Timeout(resource_service_networking_connection_schema.TimeoutDelete)) - if err != nil { - return err - } - - d.SetId("") - resource_service_networking_connection_log.Printf("[INFO] Service network connection removed.") - - return nil -} - -func resourceServiceNetworkingConnectionImportState(d *resource_service_networking_connection_schema.ResourceData, meta interface{}) ([]*resource_service_networking_connection_schema.ResourceData, error) { - connectionId, err := parseConnectionId(d.Id()) - if err != nil { - return nil, err - } - - if err := d.Set("network", connectionId.Network); err != nil { - return nil, resource_service_networking_connection_fmt.Errorf("Error setting network: %s", err) - } - if err := d.Set("service", connectionId.Service); err != nil { - return nil, resource_service_networking_connection_fmt.Errorf("Error setting service: %s", err) - } - return 
[]*resource_service_networking_connection_schema.ResourceData{d}, nil -} - -type connectionId struct { - Network string - Service string -} - -func (id *connectionId) Id() string { - return resource_service_networking_connection_fmt.Sprintf("%s:%s", resource_service_networking_connection_url.QueryEscape(id.Network), resource_service_networking_connection_url.QueryEscape(id.Service)) -} - -func parseConnectionId(id string) (*connectionId, error) { - res := resource_service_networking_connection_strings.Split(id, ":") - - if len(res) != 2 { - return nil, resource_service_networking_connection_fmt.Errorf("Failed to parse service networking connection id, value: %s", id) - } - - network, err := resource_service_networking_connection_url.QueryUnescape(res[0]) - if err != nil { - return nil, resource_service_networking_connection_errwrap.Wrapf("Failed to parse service networking connection id, invalid network, err: {{err}}", err) - } else if len(network) == 0 { - return nil, resource_service_networking_connection_fmt.Errorf("Failed to parse service networking connection id, empty network") - } - - service, err := resource_service_networking_connection_url.QueryUnescape(res[1]) - if err != nil { - return nil, resource_service_networking_connection_errwrap.Wrapf("Failed to parse service networking connection id, invalid service, err: {{err}}", err) - } else if len(service) == 0 { - return nil, resource_service_networking_connection_fmt.Errorf("Failed to parse service networking connection id, empty service") - } - - return &connectionId{ - Network: network, - Service: service, - }, nil -} - -func retrieveServiceNetworkingNetworkName(d *resource_service_networking_connection_schema.ResourceData, config *Config, network, userAgent string) (string, error) { - networkFieldValue, err := ParseNetworkFieldValue(network, d, config) - if err != nil { - return "", resource_service_networking_connection_errwrap.Wrapf("Failed to retrieve network field value, err: {{err}}", err) - } - 
- pid := networkFieldValue.Project - if pid == "" { - return "", resource_service_networking_connection_fmt.Errorf("Could not determine project") - } - resource_service_networking_connection_log.Printf("[DEBUG] Retrieving project number by doing a GET with the project id, as required by service networking") - - billingProject := pid - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - getProjectCall := config.NewResourceManagerClient(userAgent).Projects.Get(pid) - if config.UserProjectOverride { - getProjectCall.Header().Add("X-Goog-User-Project", billingProject) - } - project, err := getProjectCall.Do() - if err != nil { - - return "", resource_service_networking_connection_fmt.Errorf("Failed to retrieve project, pid: %s, err: %w", pid, err) - } - - networkName := networkFieldValue.Name - if networkName == "" { - return "", resource_service_networking_connection_fmt.Errorf("Failed to parse network") - } - - return resource_service_networking_connection_fmt.Sprintf("projects/%v/global/networks/%v", project.ProjectNumber, networkName), nil - -} - -const parentServicePattern = "^services/.+$" - -func formatParentService(service string) string { - r := resource_service_networking_connection_regexp.MustCompile(parentServicePattern) - if !r.MatchString(service) { - return resource_service_networking_connection_fmt.Sprintf("services/%s", service) - } else { - return service - } -} - -func resourceSourceRepoRepositoryPubSubConfigsHash(v interface{}) int { - if v == nil { - return 0 - } - - var buf resource_sourcerepo_repository_bytes.Buffer - m := v.(map[string]interface{}) - - buf.WriteString(resource_sourcerepo_repository_fmt.Sprintf("%s-", GetResourceNameFromSelfLink(m["topic"].(string)))) - buf.WriteString(resource_sourcerepo_repository_fmt.Sprintf("%s-", m["message_format"].(string))) - if v, ok := m["service_account_email"]; ok { - buf.WriteString(resource_sourcerepo_repository_fmt.Sprintf("%s-", v.(string))) - } - - return 
hashcode(buf.String()) -} - -func resourceSourceRepoRepository() *resource_sourcerepo_repository_schema.Resource { - return &resource_sourcerepo_repository_schema.Resource{ - Create: resourceSourceRepoRepositoryCreate, - Read: resourceSourceRepoRepositoryRead, - Update: resourceSourceRepoRepositoryUpdate, - Delete: resourceSourceRepoRepositoryDelete, - - Importer: &resource_sourcerepo_repository_schema.ResourceImporter{ - State: resourceSourceRepoRepositoryImport, - }, - - Timeouts: &resource_sourcerepo_repository_schema.ResourceTimeout{ - Create: resource_sourcerepo_repository_schema.DefaultTimeout(4 * resource_sourcerepo_repository_time.Minute), - Update: resource_sourcerepo_repository_schema.DefaultTimeout(4 * resource_sourcerepo_repository_time.Minute), - Delete: resource_sourcerepo_repository_schema.DefaultTimeout(4 * resource_sourcerepo_repository_time.Minute), - }, - - Schema: map[string]*resource_sourcerepo_repository_schema.Schema{ - "name": { - Type: resource_sourcerepo_repository_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Resource name of the repository, of the form '{{repo}}'. -The repo name may contain slashes. eg, 'name/with/slash'`, - }, - "pubsub_configs": { - Type: resource_sourcerepo_repository_schema.TypeSet, - Optional: true, - Description: `How this repository publishes a change in the repository through Cloud Pub/Sub. -Keyed by the topic names.`, - Elem: &resource_sourcerepo_repository_schema.Resource{ - Schema: map[string]*resource_sourcerepo_repository_schema.Schema{ - "topic": { - Type: resource_sourcerepo_repository_schema.TypeString, - Required: true, - }, - "message_format": { - Type: resource_sourcerepo_repository_schema.TypeString, - Required: true, - ValidateFunc: resource_sourcerepo_repository_validation.StringInSlice([]string{"PROTOBUF", "JSON"}, false), - Description: `The format of the Cloud Pub/Sub messages. -- PROTOBUF: The message payload is a serialized protocol buffer of SourceRepoEvent. 
-- JSON: The message payload is a JSON string of SourceRepoEvent. Possible values: ["PROTOBUF", "JSON"]`, - }, - "service_account_email": { - Type: resource_sourcerepo_repository_schema.TypeString, - Computed: true, - Optional: true, - Description: `Email address of the service account used for publishing Cloud Pub/Sub messages. -This service account needs to be in the same project as the PubsubConfig. When added, -the caller needs to have iam.serviceAccounts.actAs permission on this service account. -If unspecified, it defaults to the compute engine default service account.`, - }, - }, - }, - Set: resourceSourceRepoRepositoryPubSubConfigsHash, - }, - "size": { - Type: resource_sourcerepo_repository_schema.TypeInt, - Computed: true, - Description: `The disk usage of the repo, in bytes.`, - }, - "url": { - Type: resource_sourcerepo_repository_schema.TypeString, - Computed: true, - Description: `URL to clone the repository from Google Cloud Source Repositories.`, - }, - "project": { - Type: resource_sourcerepo_repository_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSourceRepoRepositoryCreate(d *resource_sourcerepo_repository_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandSourceRepoRepositoryName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_sourcerepo_repository_reflect.ValueOf(nameProp)) && (ok || !resource_sourcerepo_repository_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - pubsubConfigsProp, err := expandSourceRepoRepositoryPubsubConfigs(d.Get("pubsub_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("pubsub_configs"); 
!isEmptyValue(resource_sourcerepo_repository_reflect.ValueOf(pubsubConfigsProp)) && (ok || !resource_sourcerepo_repository_reflect.DeepEqual(v, pubsubConfigsProp)) { - obj["pubsubConfigs"] = pubsubConfigsProp - } - - url, err := replaceVars(d, config, "{{SourceRepoBasePath}}projects/{{project}}/repos") - if err != nil { - return err - } - - resource_sourcerepo_repository_log.Printf("[DEBUG] Creating new Repository: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_sourcerepo_repository_fmt.Errorf("Error fetching project for Repository: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_sourcerepo_repository_schema.TimeoutCreate)) - if err != nil { - return resource_sourcerepo_repository_fmt.Errorf("Error creating Repository: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/repos/{{name}}") - if err != nil { - return resource_sourcerepo_repository_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if v, ok := d.GetOkExists("pubsub_configs"); !isEmptyValue(resource_sourcerepo_repository_reflect.ValueOf(pubsubConfigsProp)) && (ok || !resource_sourcerepo_repository_reflect.DeepEqual(v, pubsubConfigsProp)) { - resource_sourcerepo_repository_log.Printf("[DEBUG] Calling update after create to patch in pubsub_configs") - - return resourceSourceRepoRepositoryUpdate(d, meta) - } - - resource_sourcerepo_repository_log.Printf("[DEBUG] Finished creating Repository %q: %#v", d.Id(), res) - - return resourceSourceRepoRepositoryRead(d, meta) -} - -func resourceSourceRepoRepositoryRead(d *resource_sourcerepo_repository_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - 
url, err := replaceVars(d, config, "{{SourceRepoBasePath}}projects/{{project}}/repos/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_sourcerepo_repository_fmt.Errorf("Error fetching project for Repository: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_sourcerepo_repository_fmt.Sprintf("SourceRepoRepository %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_sourcerepo_repository_fmt.Errorf("Error reading Repository: %s", err) - } - - if err := d.Set("name", flattenSourceRepoRepositoryName(res["name"], d, config)); err != nil { - return resource_sourcerepo_repository_fmt.Errorf("Error reading Repository: %s", err) - } - if err := d.Set("url", flattenSourceRepoRepositoryUrl(res["url"], d, config)); err != nil { - return resource_sourcerepo_repository_fmt.Errorf("Error reading Repository: %s", err) - } - if err := d.Set("size", flattenSourceRepoRepositorySize(res["size"], d, config)); err != nil { - return resource_sourcerepo_repository_fmt.Errorf("Error reading Repository: %s", err) - } - if err := d.Set("pubsub_configs", flattenSourceRepoRepositoryPubsubConfigs(res["pubsubConfigs"], d, config)); err != nil { - return resource_sourcerepo_repository_fmt.Errorf("Error reading Repository: %s", err) - } - - return nil -} - -func resourceSourceRepoRepositoryUpdate(d *resource_sourcerepo_repository_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_sourcerepo_repository_fmt.Errorf("Error 
fetching project for Repository: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - pubsubConfigsProp, err := expandSourceRepoRepositoryPubsubConfigs(d.Get("pubsub_configs"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("pubsub_configs"); !isEmptyValue(resource_sourcerepo_repository_reflect.ValueOf(v)) && (ok || !resource_sourcerepo_repository_reflect.DeepEqual(v, pubsubConfigsProp)) { - obj["pubsubConfigs"] = pubsubConfigsProp - } - - obj, err = resourceSourceRepoRepositoryUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SourceRepoBasePath}}projects/{{project}}/repos/{{name}}") - if err != nil { - return err - } - - resource_sourcerepo_repository_log.Printf("[DEBUG] Updating Repository %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("pubsub_configs") { - updateMask = append(updateMask, "pubsubConfigs") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_sourcerepo_repository_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_sourcerepo_repository_schema.TimeoutUpdate)) - - if err != nil { - return resource_sourcerepo_repository_fmt.Errorf("Error updating Repository %q: %s", d.Id(), err) - } else { - resource_sourcerepo_repository_log.Printf("[DEBUG] Finished updating Repository %q: %#v", d.Id(), res) - } - - return resourceSourceRepoRepositoryRead(d, meta) -} - -func resourceSourceRepoRepositoryDelete(d *resource_sourcerepo_repository_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil 
{ - return resource_sourcerepo_repository_fmt.Errorf("Error fetching project for Repository: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{SourceRepoBasePath}}projects/{{project}}/repos/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_sourcerepo_repository_log.Printf("[DEBUG] Deleting Repository %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_sourcerepo_repository_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Repository") - } - - resource_sourcerepo_repository_log.Printf("[DEBUG] Finished deleting Repository %q: %#v", d.Id(), res) - return nil -} - -func resourceSourceRepoRepositoryImport(d *resource_sourcerepo_repository_schema.ResourceData, meta interface{}) ([]*resource_sourcerepo_repository_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/repos/(?P.+)", - "(?P.+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/repos/{{name}}") - if err != nil { - return nil, resource_sourcerepo_repository_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_sourcerepo_repository_schema.ResourceData{d}, nil -} - -func flattenSourceRepoRepositoryName(v interface{}, d *resource_sourcerepo_repository_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - - parts := resource_sourcerepo_repository_strings.SplitAfterN(v.(string), "/", 4) - return parts[3] -} - -func flattenSourceRepoRepositoryUrl(v interface{}, d *resource_sourcerepo_repository_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSourceRepoRepositorySize(v interface{}, d *resource_sourcerepo_repository_schema.ResourceData, 
config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_sourcerepo_repository_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenSourceRepoRepositoryPubsubConfigs(v interface{}, d *resource_sourcerepo_repository_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.(map[string]interface{}) - transformed := make([]interface{}, 0, len(l)) - for k, raw := range l { - original := raw.(map[string]interface{}) - transformed = append(transformed, map[string]interface{}{ - "topic": k, - "message_format": flattenSourceRepoRepositoryPubsubConfigsMessageFormat(original["messageFormat"], d, config), - "service_account_email": flattenSourceRepoRepositoryPubsubConfigsServiceAccountEmail(original["serviceAccountEmail"], d, config), - }) - } - return transformed -} - -func flattenSourceRepoRepositoryPubsubConfigsMessageFormat(v interface{}, d *resource_sourcerepo_repository_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSourceRepoRepositoryPubsubConfigsServiceAccountEmail(v interface{}, d *resource_sourcerepo_repository_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandSourceRepoRepositoryName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return replaceVars(d, config, "projects/{{project}}/repos/{{name}}") -} - -func expandSourceRepoRepositoryPubsubConfigs(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) { - if v == nil { - return map[string]interface{}{}, nil - } - m := make(map[string]interface{}) - for _, raw := range v.(*resource_sourcerepo_repository_schema.Set).List() { - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedMessageFormat, err := 
expandSourceRepoRepositoryPubsubConfigsMessageFormat(original["message_format"], d, config) - if err != nil { - return nil, err - } else if val := resource_sourcerepo_repository_reflect.ValueOf(transformedMessageFormat); val.IsValid() && !isEmptyValue(val) { - transformed["messageFormat"] = transformedMessageFormat - } - - transformedServiceAccountEmail, err := expandSourceRepoRepositoryPubsubConfigsServiceAccountEmail(original["service_account_email"], d, config) - if err != nil { - return nil, err - } else if val := resource_sourcerepo_repository_reflect.ValueOf(transformedServiceAccountEmail); val.IsValid() && !isEmptyValue(val) { - transformed["serviceAccountEmail"] = transformedServiceAccountEmail - } - - transformedTopic, err := expandSourceRepoRepositoryPubsubConfigsTopic(original["topic"], d, config) - if err != nil { - return nil, err - } - m[transformedTopic] = transformed - } - return m, nil -} - -func expandSourceRepoRepositoryPubsubConfigsMessageFormat(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSourceRepoRepositoryPubsubConfigsServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceSourceRepoRepositoryUpdateEncoder(d *resource_sourcerepo_repository_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - pubsubConfigsVal := obj["pubsubConfigs"] - if pubsubConfigsVal != nil { - pubsubConfigs := pubsubConfigsVal.(map[string]interface{}) - for key := range pubsubConfigs { - config := pubsubConfigs[key].(map[string]interface{}) - config["topic"] = key - } - } - - newObj := make(map[string]interface{}) - newObj["repo"] = obj - return newObj, nil -} - -func resourceSpannerDBDdlCustomDiffFunc(diff TerraformResourceDiff) error { - old, new := diff.GetChange("ddl") - oldDdls := old.([]interface{}) - newDdls := new.([]interface{}) - var err error - - if len(newDdls) < 
len(oldDdls) { - err = diff.ForceNew("ddl") - if err != nil { - return resource_spanner_database_fmt.Errorf("ForceNew failed for ddl, old - %v and new - %v", oldDdls, newDdls) - } - return nil - } - - for i := range oldDdls { - if newDdls[i].(string) != oldDdls[i].(string) { - err = diff.ForceNew("ddl") - if err != nil { - return resource_spanner_database_fmt.Errorf("ForceNew failed for ddl, old - %v and new - %v", oldDdls, newDdls) - } - return nil - } - } - return nil -} - -func resourceSpannerDBDdlCustomDiff(_ resource_spanner_database_context.Context, diff *resource_spanner_database_schema.ResourceDiff, meta interface{}) error { - - return resourceSpannerDBDdlCustomDiffFunc(diff) -} - -func resourceSpannerDatabase() *resource_spanner_database_schema.Resource { - return &resource_spanner_database_schema.Resource{ - Create: resourceSpannerDatabaseCreate, - Read: resourceSpannerDatabaseRead, - Update: resourceSpannerDatabaseUpdate, - Delete: resourceSpannerDatabaseDelete, - - Importer: &resource_spanner_database_schema.ResourceImporter{ - State: resourceSpannerDatabaseImport, - }, - - Timeouts: &resource_spanner_database_schema.ResourceTimeout{ - Create: resource_spanner_database_schema.DefaultTimeout(4 * resource_spanner_database_time.Minute), - Update: resource_spanner_database_schema.DefaultTimeout(4 * resource_spanner_database_time.Minute), - Delete: resource_spanner_database_schema.DefaultTimeout(4 * resource_spanner_database_time.Minute), - }, - - CustomizeDiff: resourceSpannerDBDdlCustomDiff, - - Schema: map[string]*resource_spanner_database_schema.Schema{ - "instance": { - Type: resource_spanner_database_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The instance to create the database on.`, - }, - "name": { - Type: resource_spanner_database_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[a-z][a-z0-9_\-]*[a-z0-9]$`), - Description: `A unique 
identifier for the database, which cannot be changed after -the instance is created. Values are of the form [a-z][-a-z0-9]*[a-z0-9].`, - }, - "ddl": { - Type: resource_spanner_database_schema.TypeList, - Optional: true, - Description: `An optional list of DDL statements to run inside the newly created -database. Statements can create tables, indexes, etc. These statements -execute atomically with the creation of the database: if there is an -error in any statement, the database is not created.`, - Elem: &resource_spanner_database_schema.Schema{ - Type: resource_spanner_database_schema.TypeString, - }, - }, - "encryption_config": { - Type: resource_spanner_database_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Encryption configuration for the database`, - MaxItems: 1, - Elem: &resource_spanner_database_schema.Resource{ - Schema: map[string]*resource_spanner_database_schema.Schema{ - "kms_key_name": { - Type: resource_spanner_database_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Fully qualified name of the KMS key to use to encrypt this database. 
This key must exist -in the same location as the Spanner Database.`, - }, - }, - }, - }, - "state": { - Type: resource_spanner_database_schema.TypeString, - Computed: true, - Description: `An explanation of the status of the database.`, - }, - "deletion_protection": { - Type: resource_spanner_database_schema.TypeBool, - Optional: true, - Default: true, - }, - "project": { - Type: resource_spanner_database_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSpannerDatabaseCreate(d *resource_spanner_database_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandSpannerDatabaseName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_spanner_database_reflect.ValueOf(nameProp)) && (ok || !resource_spanner_database_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - extraStatementsProp, err := expandSpannerDatabaseDdl(d.Get("ddl"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ddl"); !isEmptyValue(resource_spanner_database_reflect.ValueOf(extraStatementsProp)) && (ok || !resource_spanner_database_reflect.DeepEqual(v, extraStatementsProp)) { - obj["extraStatements"] = extraStatementsProp - } - encryptionConfigProp, err := expandSpannerDatabaseEncryptionConfig(d.Get("encryption_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("encryption_config"); !isEmptyValue(resource_spanner_database_reflect.ValueOf(encryptionConfigProp)) && (ok || !resource_spanner_database_reflect.DeepEqual(v, encryptionConfigProp)) { - obj["encryptionConfig"] = encryptionConfigProp - } - instanceProp, err := expandSpannerDatabaseInstance(d.Get("instance"), d, config) - if err != nil { - 
return err - } else if v, ok := d.GetOkExists("instance"); !isEmptyValue(resource_spanner_database_reflect.ValueOf(instanceProp)) && (ok || !resource_spanner_database_reflect.DeepEqual(v, instanceProp)) { - obj["instance"] = instanceProp - } - - obj, err = resourceSpannerDatabaseEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases") - if err != nil { - return err - } - - resource_spanner_database_log.Printf("[DEBUG] Creating new Database: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_spanner_database_fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_spanner_database_schema.TimeoutCreate)) - if err != nil { - return resource_spanner_database_fmt.Errorf("Error creating Database: %s", err) - } - - id, err := replaceVars(d, config, "{{instance}}/{{name}}") - if err != nil { - return resource_spanner_database_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = spannerOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Database", userAgent, - d.Timeout(resource_spanner_database_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_spanner_database_fmt.Errorf("Error waiting to create Database: %s", err) - } - - opRes, err = resourceSpannerDatabaseDecoder(d, meta, opRes) - if err != nil { - return resource_spanner_database_fmt.Errorf("Error decoding response from operation: %s", err) - } - if opRes == nil { - return resource_spanner_database_fmt.Errorf("Error decoding response from operation, could not find object") - } - - if err := d.Set("name", 
flattenSpannerDatabaseName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{instance}}/{{name}}") - if err != nil { - return resource_spanner_database_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_spanner_database_log.Printf("[DEBUG] Finished creating Database %q: %#v", d.Id(), res) - - return resourceSpannerDatabaseRead(d, meta) -} - -func resourceSpannerDatabaseRead(d *resource_spanner_database_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_spanner_database_fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_spanner_database_fmt.Sprintf("SpannerDatabase %q", d.Id())) - } - - res, err = resourceSpannerDatabaseDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_spanner_database_log.Printf("[DEBUG] Removing SpannerDatabase because it no longer exists.") - d.SetId("") - return nil - } - - if _, ok := d.GetOkExists("deletion_protection"); !ok { - if err := d.Set("deletion_protection", true); err != nil { - return resource_spanner_database_fmt.Errorf("Error setting deletion_protection: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return resource_spanner_database_fmt.Errorf("Error reading Database: %s", err) - } - - if err := d.Set("name", flattenSpannerDatabaseName(res["name"], d, 
config)); err != nil { - return resource_spanner_database_fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("state", flattenSpannerDatabaseState(res["state"], d, config)); err != nil { - return resource_spanner_database_fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("encryption_config", flattenSpannerDatabaseEncryptionConfig(res["encryptionConfig"], d, config)); err != nil { - return resource_spanner_database_fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("instance", flattenSpannerDatabaseInstance(res["instance"], d, config)); err != nil { - return resource_spanner_database_fmt.Errorf("Error reading Database: %s", err) - } - - return nil -} - -func resourceSpannerDatabaseUpdate(d *resource_spanner_database_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_spanner_database_fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("ddl") { - obj := make(map[string]interface{}) - - extraStatementsProp, err := expandSpannerDatabaseDdl(d.Get("ddl"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ddl"); !isEmptyValue(resource_spanner_database_reflect.ValueOf(v)) && (ok || !resource_spanner_database_reflect.DeepEqual(v, extraStatementsProp)) { - obj["extraStatements"] = extraStatementsProp - } - - obj, err = resourceSpannerDatabaseUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}/ddl") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, 
"PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_spanner_database_schema.TimeoutUpdate)) - if err != nil { - return resource_spanner_database_fmt.Errorf("Error updating Database %q: %s", d.Id(), err) - } else { - resource_spanner_database_log.Printf("[DEBUG] Finished updating Database %q: %#v", d.Id(), res) - } - - err = spannerOperationWaitTime( - config, res, project, "Updating Database", userAgent, - d.Timeout(resource_spanner_database_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceSpannerDatabaseRead(d, meta) -} - -func resourceSpannerDatabaseDelete(d *resource_spanner_database_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_spanner_database_fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - if d.Get("deletion_protection").(bool) { - return resource_spanner_database_fmt.Errorf("cannot destroy instance without setting deletion_protection=false and running `terraform apply`") - } - resource_spanner_database_log.Printf("[DEBUG] Deleting Database %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_spanner_database_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Database") - } - - err = spannerOperationWaitTime( - config, res, project, "Deleting Database", userAgent, - d.Timeout(resource_spanner_database_schema.TimeoutDelete)) - - if err != nil { - 
return err - } - - resource_spanner_database_log.Printf("[DEBUG] Finished deleting Database %q: %#v", d.Id(), res) - return nil -} - -func resourceSpannerDatabaseImport(d *resource_spanner_database_schema.ResourceData, meta interface{}) ([]*resource_spanner_database_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/instances/(?P[^/]+)/databases/(?P[^/]+)", - "instances/(?P[^/]+)/databases/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{instance}}/{{name}}") - if err != nil { - return nil, resource_spanner_database_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if err := d.Set("deletion_protection", true); err != nil { - return nil, resource_spanner_database_fmt.Errorf("Error setting deletion_protection: %s", err) - } - - return []*resource_spanner_database_schema.ResourceData{d}, nil -} - -func flattenSpannerDatabaseName(v interface{}, d *resource_spanner_database_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenSpannerDatabaseState(v interface{}, d *resource_spanner_database_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSpannerDatabaseEncryptionConfig(v interface{}, d *resource_spanner_database_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["kms_key_name"] = - flattenSpannerDatabaseEncryptionConfigKmsKeyName(original["kmsKeyName"], d, config) - return []interface{}{transformed} -} - -func flattenSpannerDatabaseEncryptionConfigKmsKeyName(v interface{}, d *resource_spanner_database_schema.ResourceData, config *Config) interface{} { - return v -} - -func 
flattenSpannerDatabaseInstance(v interface{}, d *resource_spanner_database_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func expandSpannerDatabaseName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSpannerDatabaseDdl(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSpannerDatabaseEncryptionConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKeyName, err := expandSpannerDatabaseEncryptionConfigKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_spanner_database_reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName - } - - return transformed, nil -} - -func expandSpannerDatabaseEncryptionConfigKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSpannerDatabaseInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - f, err := parseGlobalFieldValue("instances", v.(string), "project", d, config, true) - if err != nil { - return nil, resource_spanner_database_fmt.Errorf("Invalid value for instance: %s", err) - } - return f.RelativeLink(), nil -} - -func resourceSpannerDatabaseEncoder(d *resource_spanner_database_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - obj["createStatement"] = resource_spanner_database_fmt.Sprintf("CREATE DATABASE `%s`", obj["name"]) - delete(obj, "name") - delete(obj, "instance") - return obj, nil -} - -func 
resourceSpannerDatabaseUpdateEncoder(d *resource_spanner_database_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - old, new := d.GetChange("ddl") - oldDdls := old.([]interface{}) - newDdls := new.([]interface{}) - updateDdls := []string{} - - for i := len(oldDdls); i < len(newDdls); i++ { - updateDdls = append(updateDdls, newDdls[i].(string)) - } - - obj["statements"] = updateDdls - delete(obj, "name") - delete(obj, "instance") - delete(obj, "extraStatements") - return obj, nil -} - -func resourceSpannerDatabaseDecoder(d *resource_spanner_database_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - d.SetId(res["name"].(string)) - if err := parseImportId([]string{"projects/(?P[^/]+)/instances/(?P[^/]+)/databases/(?P[^/]+)"}, d, config); err != nil { - return nil, err - } - res["project"] = d.Get("project").(string) - res["instance"] = d.Get("instance").(string) - res["name"] = d.Get("name").(string) - id, err := replaceVars(d, config, "{{instance}}/{{name}}") - if err != nil { - return nil, err - } - d.SetId(id) - return res, nil -} - -func deleteSpannerBackups(d *resource_spanner_instance_schema.ResourceData, config *Config, res map[string]interface{}, userAgent string, billingProject string) error { - var v interface{} - var ok bool - - v, ok = res["backups"] - if !ok || v == nil { - return nil - } - - for _, itemRaw := range v.([]interface{}) { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - backupName := item["name"].(string) - - resource_spanner_instance_log.Printf("[DEBUG] Found backups for resource %q: %#v)", d.Id(), item) - - path := "{{SpannerBasePath}}" + backupName - - url, err := replaceVars(d, config, path) - if err != nil { - return err - } - - _, err = sendRequest(config, "DELETE", billingProject, url, userAgent, nil) - if err != nil { - return err - } - } - return nil -} - -func 
resourceSpannerInstance() *resource_spanner_instance_schema.Resource { - return &resource_spanner_instance_schema.Resource{ - Create: resourceSpannerInstanceCreate, - Read: resourceSpannerInstanceRead, - Update: resourceSpannerInstanceUpdate, - Delete: resourceSpannerInstanceDelete, - - Importer: &resource_spanner_instance_schema.ResourceImporter{ - State: resourceSpannerInstanceImport, - }, - - Timeouts: &resource_spanner_instance_schema.ResourceTimeout{ - Create: resource_spanner_instance_schema.DefaultTimeout(20 * resource_spanner_instance_time.Minute), - Update: resource_spanner_instance_schema.DefaultTimeout(20 * resource_spanner_instance_time.Minute), - Delete: resource_spanner_instance_schema.DefaultTimeout(20 * resource_spanner_instance_time.Minute), - }, - - Schema: map[string]*resource_spanner_instance_schema.Schema{ - "config": { - Type: resource_spanner_instance_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the instance's configuration (similar but not -quite the same as a region) which defines the geographic placement and -replication of your databases in this instance. It determines where your data -is stored. Values are typically of the form 'regional-europe-west1' , 'us-central' etc. -In order to obtain a valid list please consult the -[Configuration section of the docs](https://cloud.google.com/spanner/docs/instances).`, - }, - "display_name": { - Type: resource_spanner_instance_schema.TypeString, - Required: true, - Description: `The descriptive name for this instance as it appears in UIs. Must be -unique per project and between 4 and 30 characters in length.`, - }, - "name": { - Type: resource_spanner_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - ValidateFunc: validateRegexp(`^[a-z][-a-z0-9]*[a-z0-9]$`), - Description: `A unique identifier for the instance, which cannot be changed after -the instance is created. 
The name must be between 6 and 30 characters -in length. - - -If not provided, a random string starting with 'tf-' will be selected.`, - }, - "labels": { - Type: resource_spanner_instance_schema.TypeMap, - Optional: true, - Description: `An object containing a list of "key": value pairs. -Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.`, - Elem: &resource_spanner_instance_schema.Schema{Type: resource_spanner_instance_schema.TypeString}, - }, - "num_nodes": { - Type: resource_spanner_instance_schema.TypeInt, - Computed: true, - Optional: true, - Description: `The number of nodes allocated to this instance. At most one of either node_count or processing_units -can be present in terraform.`, - ExactlyOneOf: []string{"num_nodes", "processing_units"}, - }, - "processing_units": { - Type: resource_spanner_instance_schema.TypeInt, - Computed: true, - Optional: true, - Description: `The number of processing units allocated to this instance. At most one of processing_units -or node_count can be present in terraform.`, - ExactlyOneOf: []string{"num_nodes", "processing_units"}, - }, - "state": { - Type: resource_spanner_instance_schema.TypeString, - Computed: true, - Description: `Instance status: 'CREATING' or 'READY'.`, - }, - "force_destroy": { - Type: resource_spanner_instance_schema.TypeBool, - Optional: true, - Default: false, - }, - "project": { - Type: resource_spanner_instance_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSpannerInstanceCreate(d *resource_spanner_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandSpannerInstanceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); 
!isEmptyValue(resource_spanner_instance_reflect.ValueOf(nameProp)) && (ok || !resource_spanner_instance_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - configProp, err := expandSpannerInstanceConfig(d.Get("config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("config"); !isEmptyValue(resource_spanner_instance_reflect.ValueOf(configProp)) && (ok || !resource_spanner_instance_reflect.DeepEqual(v, configProp)) { - obj["config"] = configProp - } - displayNameProp, err := expandSpannerInstanceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_spanner_instance_reflect.ValueOf(displayNameProp)) && (ok || !resource_spanner_instance_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - nodeCountProp, err := expandSpannerInstanceNumNodes(d.Get("num_nodes"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("num_nodes"); !isEmptyValue(resource_spanner_instance_reflect.ValueOf(nodeCountProp)) && (ok || !resource_spanner_instance_reflect.DeepEqual(v, nodeCountProp)) { - obj["nodeCount"] = nodeCountProp - } - processingUnitsProp, err := expandSpannerInstanceProcessingUnits(d.Get("processing_units"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("processing_units"); !isEmptyValue(resource_spanner_instance_reflect.ValueOf(processingUnitsProp)) && (ok || !resource_spanner_instance_reflect.DeepEqual(v, processingUnitsProp)) { - obj["processingUnits"] = processingUnitsProp - } - labelsProp, err := expandSpannerInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_spanner_instance_reflect.ValueOf(labelsProp)) && (ok || !resource_spanner_instance_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - obj, err = resourceSpannerInstanceEncoder(d, 
meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances") - if err != nil { - return err - } - - resource_spanner_instance_log.Printf("[DEBUG] Creating new Instance: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_spanner_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_spanner_instance_schema.TimeoutCreate)) - if err != nil { - return resource_spanner_instance_fmt.Errorf("Error creating Instance: %s", err) - } - - id, err := replaceVars(d, config, "{{project}}/{{name}}") - if err != nil { - return resource_spanner_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = spannerOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Instance", userAgent, - d.Timeout(resource_spanner_instance_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_spanner_instance_fmt.Errorf("Error waiting to create Instance: %s", err) - } - - opRes, err = resourceSpannerInstanceDecoder(d, meta, opRes) - if err != nil { - return resource_spanner_instance_fmt.Errorf("Error decoding response from operation: %s", err) - } - if opRes == nil { - return resource_spanner_instance_fmt.Errorf("Error decoding response from operation, could not find object") - } - - if err := d.Set("name", flattenSpannerInstanceName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{project}}/{{name}}") - if err != nil { - return resource_spanner_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_spanner_instance_time.Sleep(5 * 
resource_spanner_instance_time.Second) - - resource_spanner_instance_log.Printf("[DEBUG] Finished creating Instance %q: %#v", d.Id(), res) - - return resourceSpannerInstanceRead(d, meta) -} - -func resourceSpannerInstanceRead(d *resource_spanner_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_spanner_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_spanner_instance_fmt.Sprintf("SpannerInstance %q", d.Id())) - } - - res, err = resourceSpannerInstanceDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_spanner_instance_log.Printf("[DEBUG] Removing SpannerInstance because it no longer exists.") - d.SetId("") - return nil - } - - if _, ok := d.GetOkExists("force_destroy"); !ok { - if err := d.Set("force_destroy", false); err != nil { - return resource_spanner_instance_fmt.Errorf("Error setting force_destroy: %s", err) - } - } - if err := d.Set("project", project); err != nil { - return resource_spanner_instance_fmt.Errorf("Error reading Instance: %s", err) - } - - if err := d.Set("name", flattenSpannerInstanceName(res["name"], d, config)); err != nil { - return resource_spanner_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("config", flattenSpannerInstanceConfig(res["config"], d, config)); err != nil { - return resource_spanner_instance_fmt.Errorf("Error 
reading Instance: %s", err) - } - if err := d.Set("display_name", flattenSpannerInstanceDisplayName(res["displayName"], d, config)); err != nil { - return resource_spanner_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("num_nodes", flattenSpannerInstanceNumNodes(res["nodeCount"], d, config)); err != nil { - return resource_spanner_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("processing_units", flattenSpannerInstanceProcessingUnits(res["processingUnits"], d, config)); err != nil { - return resource_spanner_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("labels", flattenSpannerInstanceLabels(res["labels"], d, config)); err != nil { - return resource_spanner_instance_fmt.Errorf("Error reading Instance: %s", err) - } - if err := d.Set("state", flattenSpannerInstanceState(res["state"], d, config)); err != nil { - return resource_spanner_instance_fmt.Errorf("Error reading Instance: %s", err) - } - - return nil -} - -func resourceSpannerInstanceUpdate(d *resource_spanner_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_spanner_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandSpannerInstanceDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_spanner_instance_reflect.ValueOf(v)) && (ok || !resource_spanner_instance_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - nodeCountProp, err := expandSpannerInstanceNumNodes(d.Get("num_nodes"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("num_nodes"); !isEmptyValue(resource_spanner_instance_reflect.ValueOf(v)) && (ok || !resource_spanner_instance_reflect.DeepEqual(v, nodeCountProp)) { - obj["nodeCount"] = nodeCountProp - } - processingUnitsProp, err := expandSpannerInstanceProcessingUnits(d.Get("processing_units"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("processing_units"); !isEmptyValue(resource_spanner_instance_reflect.ValueOf(v)) && (ok || !resource_spanner_instance_reflect.DeepEqual(v, processingUnitsProp)) { - obj["processingUnits"] = processingUnitsProp - } - labelsProp, err := expandSpannerInstanceLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_spanner_instance_reflect.ValueOf(v)) && (ok || !resource_spanner_instance_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - obj, err = resourceSpannerInstanceUpdateEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{name}}") - if err != nil { - return err - } - - resource_spanner_instance_log.Printf("[DEBUG] Updating Instance %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_spanner_instance_schema.TimeoutUpdate)) - - if err != nil { - return resource_spanner_instance_fmt.Errorf("Error updating Instance %q: %s", d.Id(), err) - } else { - resource_spanner_instance_log.Printf("[DEBUG] Finished updating Instance %q: %#v", d.Id(), res) - } - - err = spannerOperationWaitTime( - config, res, project, "Updating Instance", userAgent, - d.Timeout(resource_spanner_instance_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceSpannerInstanceRead(d, meta) -} - -func resourceSpannerInstanceDelete(d 
*resource_spanner_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_spanner_instance_fmt.Errorf("Error fetching project for Instance: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - - if d.Get("force_destroy").(bool) { - backupsUrl, err := replaceVars(d, config, "{{SpannerBasePath}}projects/{{project}}/instances/{{name}}/backups") - if err != nil { - return err - } - - resp, err := sendRequest(config, "GET", billingProject, backupsUrl, userAgent, nil) - if err != nil { - - return handleNotFoundError(err, d, resource_spanner_instance_fmt.Sprintf("SpannerInstance %q", d.Id())) - } - - err = deleteSpannerBackups(d, config, resp, billingProject, userAgent) - if err != nil { - return err - } - } - resource_spanner_instance_log.Printf("[DEBUG] Deleting Instance %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_spanner_instance_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Instance") - } - - resource_spanner_instance_log.Printf("[DEBUG] Finished deleting Instance %q: %#v", d.Id(), res) - return nil -} - -func resourceSpannerInstanceImport(d *resource_spanner_instance_schema.ResourceData, meta interface{}) ([]*resource_spanner_instance_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/instances/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := 
replaceVars(d, config, "{{project}}/{{name}}") - if err != nil { - return nil, resource_spanner_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - if err := d.Set("force_destroy", false); err != nil { - return nil, resource_spanner_instance_fmt.Errorf("Error setting force_destroy: %s", err) - } - - return []*resource_spanner_instance_schema.ResourceData{d}, nil -} - -func flattenSpannerInstanceName(v interface{}, d *resource_spanner_instance_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenSpannerInstanceConfig(v interface{}, d *resource_spanner_instance_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenSpannerInstanceDisplayName(v interface{}, d *resource_spanner_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSpannerInstanceNumNodes(v interface{}, d *resource_spanner_instance_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_spanner_instance_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenSpannerInstanceProcessingUnits(v interface{}, d *resource_spanner_instance_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_spanner_instance_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenSpannerInstanceLabels(v interface{}, d *resource_spanner_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSpannerInstanceState(v interface{}, d *resource_spanner_instance_schema.ResourceData, config 
*Config) interface{} { - return v -} - -func expandSpannerInstanceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSpannerInstanceConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - r := resource_spanner_instance_regexp.MustCompile("projects/(.+)/instanceConfigs/(.+)") - if r.MatchString(v.(string)) { - return v.(string), nil - } - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - - return resource_spanner_instance_fmt.Sprintf("projects/%s/instanceConfigs/%s", project, v.(string)), nil -} - -func expandSpannerInstanceDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSpannerInstanceNumNodes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSpannerInstanceProcessingUnits(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSpannerInstanceLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceSpannerInstanceEncoder(d *resource_spanner_instance_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - - if obj["processingUnits"] == nil && obj["nodeCount"] == nil { - obj["nodeCount"] = 1 - } - newObj := make(map[string]interface{}) - newObj["instance"] = obj - if obj["name"] == nil { - if err := d.Set("name", resource_spanner_instance_resource.PrefixedUniqueId("tfgen-spanid-")[:30]); err != nil { - return nil, resource_spanner_instance_fmt.Errorf("Error setting name: %s", err) - } - newObj["instanceId"] = d.Get("name").(string) - } else { - newObj["instanceId"] = 
obj["name"] - } - delete(obj, "name") - return newObj, nil -} - -func resourceSpannerInstanceUpdateEncoder(d *resource_spanner_instance_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - project, err := getProject(d, meta.(*Config)) - if err != nil { - return nil, err - } - obj["name"] = resource_spanner_instance_fmt.Sprintf("projects/%s/instances/%s", project, obj["name"]) - newObj := make(map[string]interface{}) - newObj["instance"] = obj - updateMask := make([]string, 0) - if d.HasChange("num_nodes") { - updateMask = append(updateMask, "nodeCount") - } - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - if d.HasChange("processing_units") { - updateMask = append(updateMask, "processingUnits") - } - newObj["fieldMask"] = resource_spanner_instance_strings.Join(updateMask, ",") - return newObj, nil -} - -func resourceSpannerInstanceDecoder(d *resource_spanner_instance_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - config := meta.(*Config) - d.SetId(res["name"].(string)) - if err := parseImportId([]string{"projects/(?P[^/]+)/instances/(?P[^/]+)"}, d, config); err != nil { - return nil, err - } - res["project"] = d.Get("project").(string) - res["name"] = d.Get("name").(string) - id, err := replaceVars(d, config, "{{project}}/{{name}}") - if err != nil { - return nil, err - } - d.SetId(id) - return res, nil -} - -func resourceSQLDatabase() *resource_sql_database_schema.Resource { - return &resource_sql_database_schema.Resource{ - Create: resourceSQLDatabaseCreate, - Read: resourceSQLDatabaseRead, - Update: resourceSQLDatabaseUpdate, - Delete: resourceSQLDatabaseDelete, - - Importer: &resource_sql_database_schema.ResourceImporter{ - State: resourceSQLDatabaseImport, - }, - - Timeouts: &resource_sql_database_schema.ResourceTimeout{ - Create: 
resource_sql_database_schema.DefaultTimeout(15 * resource_sql_database_time.Minute), - Update: resource_sql_database_schema.DefaultTimeout(10 * resource_sql_database_time.Minute), - Delete: resource_sql_database_schema.DefaultTimeout(10 * resource_sql_database_time.Minute), - }, - - Schema: map[string]*resource_sql_database_schema.Schema{ - "instance": { - Type: resource_sql_database_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the Cloud SQL instance. This does not include the project -ID.`, - }, - "name": { - Type: resource_sql_database_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the database in the Cloud SQL instance. -This does not include the project ID or instance name.`, - }, - "charset": { - Type: resource_sql_database_schema.TypeString, - Computed: true, - Optional: true, - DiffSuppressFunc: caseDiffSuppress, - Description: `The charset value. See MySQL's -[Supported Character Sets and Collations](https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html) -and Postgres' [Character Set Support](https://www.postgresql.org/docs/9.6/static/multibyte.html) -for more details and supported values. Postgres databases only support -a value of 'UTF8' at creation time.`, - }, - "collation": { - Type: resource_sql_database_schema.TypeString, - Computed: true, - Optional: true, - Description: `The collation value. See MySQL's -[Supported Character Sets and Collations](https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html) -and Postgres' [Collation Support](https://www.postgresql.org/docs/9.6/static/collation.html) -for more details and supported values. 
Postgres databases only support -a value of 'en_US.UTF8' at creation time.`, - }, - "project": { - Type: resource_sql_database_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "self_link": { - Type: resource_sql_database_schema.TypeString, - Computed: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSQLDatabaseCreate(d *resource_sql_database_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - charsetProp, err := expandSQLDatabaseCharset(d.Get("charset"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("charset"); !isEmptyValue(resource_sql_database_reflect.ValueOf(charsetProp)) && (ok || !resource_sql_database_reflect.DeepEqual(v, charsetProp)) { - obj["charset"] = charsetProp - } - collationProp, err := expandSQLDatabaseCollation(d.Get("collation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("collation"); !isEmptyValue(resource_sql_database_reflect.ValueOf(collationProp)) && (ok || !resource_sql_database_reflect.DeepEqual(v, collationProp)) { - obj["collation"] = collationProp - } - nameProp, err := expandSQLDatabaseName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_sql_database_reflect.ValueOf(nameProp)) && (ok || !resource_sql_database_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - instanceProp, err := expandSQLDatabaseInstance(d.Get("instance"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance"); !isEmptyValue(resource_sql_database_reflect.ValueOf(instanceProp)) && (ok || !resource_sql_database_reflect.DeepEqual(v, instanceProp)) { - obj["instance"] = instanceProp - } - - lockName, err := replaceVars(d, config, 
"google-sql-database-instance-{{project}}-{{instance}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances/{{instance}}/databases") - if err != nil { - return err - } - - resource_sql_database_log.Printf("[DEBUG] Creating new Database: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_sql_database_fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_sql_database_schema.TimeoutCreate)) - if err != nil { - return resource_sql_database_fmt.Errorf("Error creating Database: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance}}/databases/{{name}}") - if err != nil { - return resource_sql_database_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = sqlAdminOperationWaitTime( - config, res, project, "Creating Database", userAgent, - d.Timeout(resource_sql_database_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_sql_database_fmt.Errorf("Error waiting to create Database: %s", err) - } - - resource_sql_database_log.Printf("[DEBUG] Finished creating Database %q: %#v", d.Id(), res) - - return resourceSQLDatabaseRead(d, meta) -} - -func resourceSQLDatabaseRead(d *resource_sql_database_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, 
config) - if err != nil { - return resource_sql_database_fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(transformSQLDatabaseReadError(err), d, resource_sql_database_fmt.Sprintf("SQLDatabase %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_sql_database_fmt.Errorf("Error reading Database: %s", err) - } - - if err := d.Set("charset", flattenSQLDatabaseCharset(res["charset"], d, config)); err != nil { - return resource_sql_database_fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("collation", flattenSQLDatabaseCollation(res["collation"], d, config)); err != nil { - return resource_sql_database_fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("name", flattenSQLDatabaseName(res["name"], d, config)); err != nil { - return resource_sql_database_fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("instance", flattenSQLDatabaseInstance(res["instance"], d, config)); err != nil { - return resource_sql_database_fmt.Errorf("Error reading Database: %s", err) - } - if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { - return resource_sql_database_fmt.Errorf("Error reading Database: %s", err) - } - - return nil -} - -func resourceSQLDatabaseUpdate(d *resource_sql_database_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_sql_database_fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - charsetProp, 
err := expandSQLDatabaseCharset(d.Get("charset"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("charset"); !isEmptyValue(resource_sql_database_reflect.ValueOf(v)) && (ok || !resource_sql_database_reflect.DeepEqual(v, charsetProp)) { - obj["charset"] = charsetProp - } - collationProp, err := expandSQLDatabaseCollation(d.Get("collation"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("collation"); !isEmptyValue(resource_sql_database_reflect.ValueOf(v)) && (ok || !resource_sql_database_reflect.DeepEqual(v, collationProp)) { - obj["collation"] = collationProp - } - nameProp, err := expandSQLDatabaseName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_sql_database_reflect.ValueOf(v)) && (ok || !resource_sql_database_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - instanceProp, err := expandSQLDatabaseInstance(d.Get("instance"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("instance"); !isEmptyValue(resource_sql_database_reflect.ValueOf(v)) && (ok || !resource_sql_database_reflect.DeepEqual(v, instanceProp)) { - obj["instance"] = instanceProp - } - - lockName, err := replaceVars(d, config, "google-sql-database-instance-{{project}}-{{instance}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}") - if err != nil { - return err - } - - resource_sql_database_log.Printf("[DEBUG] Updating Database %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_sql_database_schema.TimeoutUpdate)) - - if err != nil { - return resource_sql_database_fmt.Errorf("Error updating 
Database %q: %s", d.Id(), err) - } else { - resource_sql_database_log.Printf("[DEBUG] Finished updating Database %q: %#v", d.Id(), res) - } - - err = sqlAdminOperationWaitTime( - config, res, project, "Updating Database", userAgent, - d.Timeout(resource_sql_database_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceSQLDatabaseRead(d, meta) -} - -func resourceSQLDatabaseDelete(d *resource_sql_database_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_sql_database_fmt.Errorf("Error fetching project for Database: %s", err) - } - billingProject = project - - lockName, err := replaceVars(d, config, "google-sql-database-instance-{{project}}-{{instance}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances/{{instance}}/databases/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_sql_database_log.Printf("[DEBUG] Deleting Database %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_sql_database_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Database") - } - - err = sqlAdminOperationWaitTime( - config, res, project, "Deleting Database", userAgent, - d.Timeout(resource_sql_database_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_sql_database_log.Printf("[DEBUG] Finished deleting Database %q: %#v", d.Id(), res) - return nil -} - -func resourceSQLDatabaseImport(d *resource_sql_database_schema.ResourceData, meta interface{}) 
([]*resource_sql_database_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/instances/(?P[^/]+)/databases/(?P[^/]+)", - "instances/(?P[^/]+)/databases/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{instance}}/databases/{{name}}") - if err != nil { - return nil, resource_sql_database_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_sql_database_schema.ResourceData{d}, nil -} - -func flattenSQLDatabaseCharset(v interface{}, d *resource_sql_database_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLDatabaseCollation(v interface{}, d *resource_sql_database_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLDatabaseName(v interface{}, d *resource_sql_database_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLDatabaseInstance(v interface{}, d *resource_sql_database_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandSQLDatabaseCharset(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLDatabaseCollation(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLDatabaseName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLDatabaseInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -const privateNetworkLinkRegex = "projects/(" + ProjectRegex + ")/global/networks/((?:[a-z](?:[-a-z0-9]*[a-z0-9])?))$" - -var sqlDatabaseAuthorizedNetWorkSchemaElem *resource_sql_database_instance_schema.Resource = &resource_sql_database_instance_schema.Resource{ - Schema: 
map[string]*resource_sql_database_instance_schema.Schema{ - "expiration_time": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - }, - "name": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - }, - "value": { - Type: resource_sql_database_instance_schema.TypeString, - Required: true, - }, - }, -} - -var ( - backupConfigurationKeys = []string{ - "settings.0.backup_configuration.0.binary_log_enabled", - "settings.0.backup_configuration.0.enabled", - "settings.0.backup_configuration.0.start_time", - "settings.0.backup_configuration.0.location", - "settings.0.backup_configuration.0.point_in_time_recovery_enabled", - "settings.0.backup_configuration.0.backup_retention_settings", - "settings.0.backup_configuration.0.transaction_log_retention_days", - } - - ipConfigurationKeys = []string{ - "settings.0.ip_configuration.0.authorized_networks", - "settings.0.ip_configuration.0.ipv4_enabled", - "settings.0.ip_configuration.0.require_ssl", - "settings.0.ip_configuration.0.private_network", - } - - maintenanceWindowKeys = []string{ - "settings.0.maintenance_window.0.day", - "settings.0.maintenance_window.0.hour", - "settings.0.maintenance_window.0.update_track", - } - - replicaConfigurationKeys = []string{ - "replica_configuration.0.ca_certificate", - "replica_configuration.0.client_certificate", - "replica_configuration.0.client_key", - "replica_configuration.0.connect_retry_interval", - "replica_configuration.0.dump_file_path", - "replica_configuration.0.failover_target", - "replica_configuration.0.master_heartbeat_period", - "replica_configuration.0.password", - "replica_configuration.0.ssl_cipher", - "replica_configuration.0.username", - "replica_configuration.0.verify_server_certificate", - } - - insightsConfigKeys = []string{ - "settings.0.insights_config.0.query_insights_enabled", - "settings.0.insights_config.0.query_string_length", - "settings.0.insights_config.0.record_application_tags", - 
"settings.0.insights_config.0.record_client_address", - } -) - -func resourceSqlDatabaseInstance() *resource_sql_database_instance_schema.Resource { - return &resource_sql_database_instance_schema.Resource{ - Create: resourceSqlDatabaseInstanceCreate, - Read: resourceSqlDatabaseInstanceRead, - Update: resourceSqlDatabaseInstanceUpdate, - Delete: resourceSqlDatabaseInstanceDelete, - Importer: &resource_sql_database_instance_schema.ResourceImporter{ - State: resourceSqlDatabaseInstanceImport, - }, - - Timeouts: &resource_sql_database_instance_schema.ResourceTimeout{ - Create: resource_sql_database_instance_schema.DefaultTimeout(30 * resource_sql_database_instance_time.Minute), - Update: resource_sql_database_instance_schema.DefaultTimeout(30 * resource_sql_database_instance_time.Minute), - Delete: resource_sql_database_instance_schema.DefaultTimeout(30 * resource_sql_database_instance_time.Minute), - }, - - CustomizeDiff: resource_sql_database_instance_customdiff.All( - resource_sql_database_instance_customdiff.ForceNewIfChange("settings.0.disk_size", isDiskShrinkage), - privateNetworkCustomizeDiff, - pitrPostgresOnlyCustomizeDiff, - insightsPostgresOnlyCustomizeDiff, - ), - - Schema: map[string]*resource_sql_database_instance_schema.Schema{ - "region": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The region the instance will sit in. Note, Cloud SQL is not available in all regions. A valid region must be provided to use this resource. If a region is not provided in the resource definition, the provider region will be used instead, but this will be an apply-time error for instances if the provider region is not supported with Cloud SQL. 
If you choose not to provide the region argument for this resource, make sure you understand this.`, - }, - "deletion_protection": { - Type: resource_sql_database_instance_schema.TypeBool, - Default: true, - Optional: true, - Description: `Used to block Terraform from deleting a SQL Instance.`, - }, - "settings": { - Type: resource_sql_database_instance_schema.TypeList, - Optional: true, - Computed: true, - AtLeastOneOf: []string{"settings", "clone"}, - MaxItems: 1, - Elem: &resource_sql_database_instance_schema.Resource{ - Schema: map[string]*resource_sql_database_instance_schema.Schema{ - "version": { - Type: resource_sql_database_instance_schema.TypeInt, - Computed: true, - Description: `Used to make sure changes to the settings block are atomic.`, - }, - "tier": { - Type: resource_sql_database_instance_schema.TypeString, - Required: true, - Description: `The machine type to use. See tiers for more details and supported versions. Postgres supports only shared-core machine types, and custom machine types such as db-custom-2-13312. See the Custom Machine Type Documentation to learn about specifying custom machine types.`, - }, - "activation_policy": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - Default: "ALWAYS", - Description: `This specifies when the instance should be active. Can be either ALWAYS, NEVER or ON_DEMAND.`, - }, - "availability_type": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - Default: "ZONAL", - ValidateFunc: resource_sql_database_instance_validation.StringInSlice([]string{"REGIONAL", "ZONAL"}, false), - Description: `The availability type of the Cloud SQL instance, high availability -(REGIONAL) or single zone (ZONAL). 
For MySQL instances, ensure that -settings.backup_configuration.enabled and -settings.backup_configuration.binary_log_enabled are both set to true.`, - }, - "backup_configuration": { - Type: resource_sql_database_instance_schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &resource_sql_database_instance_schema.Resource{ - Schema: map[string]*resource_sql_database_instance_schema.Schema{ - "binary_log_enabled": { - Type: resource_sql_database_instance_schema.TypeBool, - Optional: true, - AtLeastOneOf: backupConfigurationKeys, - Description: `True if binary logging is enabled. If settings.backup_configuration.enabled is false, this must be as well. Cannot be used with Postgres.`, - }, - "enabled": { - Type: resource_sql_database_instance_schema.TypeBool, - Optional: true, - AtLeastOneOf: backupConfigurationKeys, - Description: `True if backup configuration is enabled.`, - }, - "start_time": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - - Computed: true, - AtLeastOneOf: backupConfigurationKeys, - Description: `HH:MM format time indicating when backup configuration starts.`, - }, - "location": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - AtLeastOneOf: backupConfigurationKeys, - Description: `Location of the backup configuration.`, - }, - "point_in_time_recovery_enabled": { - Type: resource_sql_database_instance_schema.TypeBool, - Optional: true, - AtLeastOneOf: backupConfigurationKeys, - Description: `True if Point-in-time recovery is enabled.`, - }, - "transaction_log_retention_days": { - Type: resource_sql_database_instance_schema.TypeInt, - Computed: true, - Optional: true, - AtLeastOneOf: backupConfigurationKeys, - Description: `The number of days of transaction logs we retain for point in time restore, from 1-7.`, - }, - "backup_retention_settings": { - Type: resource_sql_database_instance_schema.TypeList, - Optional: true, - AtLeastOneOf: backupConfigurationKeys, - 
Computed: true, - MaxItems: 1, - Elem: &resource_sql_database_instance_schema.Resource{ - Schema: map[string]*resource_sql_database_instance_schema.Schema{ - "retained_backups": { - Type: resource_sql_database_instance_schema.TypeInt, - Required: true, - Description: `Number of backups to retain.`, - }, - "retention_unit": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - Default: "COUNT", - Description: `The unit that 'retainedBackups' represents. Defaults to COUNT`, - }, - }, - }, - }, - }, - }, - }, - "collation": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - Description: `The name of server instance collation.`, - }, - "database_flags": { - Type: resource_sql_database_instance_schema.TypeList, - Optional: true, - Elem: &resource_sql_database_instance_schema.Resource{ - Schema: map[string]*resource_sql_database_instance_schema.Schema{ - "value": { - Type: resource_sql_database_instance_schema.TypeString, - Required: true, - Description: `Value of the flag.`, - }, - "name": { - Type: resource_sql_database_instance_schema.TypeString, - Required: true, - Description: `Name of the flag.`, - }, - }, - }, - }, - "disk_autoresize": { - Type: resource_sql_database_instance_schema.TypeBool, - Optional: true, - Default: true, - Description: `Configuration to increase storage size automatically. Note that future terraform apply calls will attempt to resize the disk to the value specified in disk_size - if this is set, do not set disk_size.`, - }, - "disk_autoresize_limit": { - Type: resource_sql_database_instance_schema.TypeInt, - Optional: true, - Default: 0, - Description: `The maximum size, in GB, to which storage capacity can be automatically increased. The default value is 0, which specifies that there is no limit.`, - }, - "disk_size": { - Type: resource_sql_database_instance_schema.TypeInt, - Optional: true, - - Computed: true, - Description: `The size of data disk, in GB. 
Size of a running instance cannot be reduced but can be increased.`, - }, - "disk_type": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - Default: "PD_SSD", - Description: `The type of data disk: PD_SSD or PD_HDD.`, - }, - "ip_configuration": { - Type: resource_sql_database_instance_schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &resource_sql_database_instance_schema.Resource{ - Schema: map[string]*resource_sql_database_instance_schema.Schema{ - "authorized_networks": { - Type: resource_sql_database_instance_schema.TypeSet, - Optional: true, - Set: resource_sql_database_instance_schema.HashResource(sqlDatabaseAuthorizedNetWorkSchemaElem), - Elem: sqlDatabaseAuthorizedNetWorkSchemaElem, - AtLeastOneOf: ipConfigurationKeys, - }, - "ipv4_enabled": { - Type: resource_sql_database_instance_schema.TypeBool, - Optional: true, - Default: true, - AtLeastOneOf: ipConfigurationKeys, - Description: `Whether this Cloud SQL instance should be assigned a public IPV4 address. At least ipv4_enabled must be enabled or a private_network must be configured.`, - }, - "require_ssl": { - Type: resource_sql_database_instance_schema.TypeBool, - Optional: true, - AtLeastOneOf: ipConfigurationKeys, - }, - "private_network": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - ValidateFunc: orEmpty(validateRegexp(privateNetworkLinkRegex)), - DiffSuppressFunc: compareSelfLinkRelativePaths, - AtLeastOneOf: ipConfigurationKeys, - Description: `The VPC network from which the Cloud SQL instance is accessible for private IP. For example, projects/myProject/global/networks/default. Specifying a network enables private IP. At least ipv4_enabled must be enabled or a private_network must be configured. 
This setting can be updated, but it cannot be removed after it is set.`, - }, - }, - }, - }, - "location_preference": { - Type: resource_sql_database_instance_schema.TypeList, - Optional: true, - MaxItems: 1, - Computed: true, - Elem: &resource_sql_database_instance_schema.Resource{ - Schema: map[string]*resource_sql_database_instance_schema.Schema{ - "follow_gae_application": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - AtLeastOneOf: []string{"settings.0.location_preference.0.follow_gae_application", "settings.0.location_preference.0.zone"}, - Description: `A Google App Engine application whose zone to remain in. Must be in the same region as this instance.`, - }, - "zone": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - AtLeastOneOf: []string{"settings.0.location_preference.0.follow_gae_application", "settings.0.location_preference.0.zone"}, - Description: `The preferred compute engine zone.`, - }, - }, - }, - }, - "maintenance_window": { - Type: resource_sql_database_instance_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &resource_sql_database_instance_schema.Resource{ - Schema: map[string]*resource_sql_database_instance_schema.Schema{ - "day": { - Type: resource_sql_database_instance_schema.TypeInt, - Optional: true, - ValidateFunc: resource_sql_database_instance_validation.IntBetween(1, 7), - AtLeastOneOf: maintenanceWindowKeys, - Description: `Day of week (1-7), starting on Monday`, - }, - "hour": { - Type: resource_sql_database_instance_schema.TypeInt, - Optional: true, - ValidateFunc: resource_sql_database_instance_validation.IntBetween(0, 23), - AtLeastOneOf: maintenanceWindowKeys, - Description: `Hour of day (0-23), ignored if day not set`, - }, - "update_track": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - AtLeastOneOf: maintenanceWindowKeys, - Description: `Receive updates earlier (canary) or later (stable)`, - }, - }, - }, - Description: 
`Declares a one-hour maintenance window when an Instance can automatically restart to apply updates. The maintenance window is specified in UTC time.`, - }, - "pricing_plan": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - Default: "PER_USE", - Description: `Pricing plan for this instance, can only be PER_USE.`, - }, - "user_labels": { - Type: resource_sql_database_instance_schema.TypeMap, - Optional: true, - Computed: true, - Elem: &resource_sql_database_instance_schema.Schema{Type: resource_sql_database_instance_schema.TypeString}, - Description: `A set of key/value user label pairs to assign to the instance.`, - }, - "insights_config": { - Type: resource_sql_database_instance_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &resource_sql_database_instance_schema.Resource{ - Schema: map[string]*resource_sql_database_instance_schema.Schema{ - "query_insights_enabled": { - Type: resource_sql_database_instance_schema.TypeBool, - Optional: true, - AtLeastOneOf: insightsConfigKeys, - Description: `True if Query Insights feature is enabled.`, - }, - "query_string_length": { - Type: resource_sql_database_instance_schema.TypeInt, - Optional: true, - Default: 1024, - ValidateFunc: resource_sql_database_instance_validation.IntBetween(256, 4500), - AtLeastOneOf: insightsConfigKeys, - Description: `Maximum query length stored in bytes. Between 256 and 4500. 
Default to 1024.`, - }, - "record_application_tags": { - Type: resource_sql_database_instance_schema.TypeBool, - Optional: true, - AtLeastOneOf: insightsConfigKeys, - Description: `True if Query Insights will record application tags from query when enabled.`, - }, - "record_client_address": { - Type: resource_sql_database_instance_schema.TypeBool, - Optional: true, - AtLeastOneOf: insightsConfigKeys, - Description: `True if Query Insights will record client address when enabled.`, - }, - }, - }, - Description: `Configuration of Query Insights.`, - }, - }, - }, - Description: `The settings to use for the database. The configuration is detailed below.`, - }, - - "connection_name": { - Type: resource_sql_database_instance_schema.TypeString, - Computed: true, - Description: `The connection name of the instance to be used in connection strings. For example, when connecting with Cloud SQL Proxy.`, - }, - - "database_version": { - Type: resource_sql_database_instance_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The MySQL, PostgreSQL or SQL Server (beta) version to use. Supported values include MYSQL_5_6, MYSQL_5_7, MYSQL_8_0, POSTGRES_9_6, POSTGRES_10, POSTGRES_11, POSTGRES_12, POSTGRES_13, SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. Database Version Policies includes an up-to-date reference of supported versions.`, - }, - - "root_password": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, - Description: `Initial root password. 
Required for MS SQL Server, ignored by MySQL and PostgreSQL.`, - }, - - "ip_address": { - Type: resource_sql_database_instance_schema.TypeList, - Computed: true, - Elem: &resource_sql_database_instance_schema.Resource{ - Schema: map[string]*resource_sql_database_instance_schema.Schema{ - "ip_address": { - Type: resource_sql_database_instance_schema.TypeString, - Computed: true, - }, - "type": { - Type: resource_sql_database_instance_schema.TypeString, - Computed: true, - }, - "time_to_retire": { - Type: resource_sql_database_instance_schema.TypeString, - Computed: true, - }, - }, - }, - }, - - "first_ip_address": { - Type: resource_sql_database_instance_schema.TypeString, - Computed: true, - Description: `The first IPv4 address of any type assigned. This is to support accessing the first address in the list in a terraform output when the resource is configured with a count.`, - }, - - "public_ip_address": { - Type: resource_sql_database_instance_schema.TypeString, - Computed: true, - Description: `IPv4 address assigned. This is a workaround for an issue fixed in Terraform 0.12 but also provides a convenient way to access an IP of a specific type without performing filtering in a Terraform config.`, - }, - - "private_ip_address": { - Type: resource_sql_database_instance_schema.TypeString, - Computed: true, - Description: `IPv4 address assigned. This is a workaround for an issue fixed in Terraform 0.12 but also provides a convenient way to access an IP of a specific type without performing filtering in a Terraform config.`, - }, - - "name": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The name of the instance. If the name is left blank, Terraform will randomly generate one when the instance is first created. 
This is done because after a name is used, it cannot be reused for up to one week.`, - }, - - "master_instance_name": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The name of the instance that will act as the master in the replication setup. Note, this requires the master to have binary_log_enabled set, as well as existing backups.`, - }, - - "project": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, - }, - - "replica_configuration": { - Type: resource_sql_database_instance_schema.TypeList, - Optional: true, - MaxItems: 1, - - Computed: true, - Elem: &resource_sql_database_instance_schema.Resource{ - Schema: map[string]*resource_sql_database_instance_schema.Schema{ - "ca_certificate": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - ForceNew: true, - AtLeastOneOf: replicaConfigurationKeys, - Description: `PEM representation of the trusted CA's x509 certificate.`, - }, - "client_certificate": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - ForceNew: true, - AtLeastOneOf: replicaConfigurationKeys, - Description: `PEM representation of the replica's x509 certificate.`, - }, - "client_key": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - ForceNew: true, - AtLeastOneOf: replicaConfigurationKeys, - Description: `PEM representation of the replica's private key. 
The corresponding public key in encoded in the client_certificate.`, - }, - "connect_retry_interval": { - Type: resource_sql_database_instance_schema.TypeInt, - Optional: true, - ForceNew: true, - AtLeastOneOf: replicaConfigurationKeys, - Description: `The number of seconds between connect retries.`, - }, - "dump_file_path": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - ForceNew: true, - AtLeastOneOf: replicaConfigurationKeys, - Description: `Path to a SQL file in Google Cloud Storage from which replica instances are created. Format is gs://bucket/filename.`, - }, - "failover_target": { - Type: resource_sql_database_instance_schema.TypeBool, - Optional: true, - ForceNew: true, - AtLeastOneOf: replicaConfigurationKeys, - Description: `Specifies if the replica is the failover target. If the field is set to true the replica will be designated as a failover replica. If the master instance fails, the replica instance will be promoted as the new master instance.`, - }, - "master_heartbeat_period": { - Type: resource_sql_database_instance_schema.TypeInt, - Optional: true, - ForceNew: true, - AtLeastOneOf: replicaConfigurationKeys, - Description: `Time in ms between replication heartbeats.`, - }, - "password": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, - AtLeastOneOf: replicaConfigurationKeys, - Description: `Password for the replication connection.`, - }, - "ssl_cipher": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - ForceNew: true, - AtLeastOneOf: replicaConfigurationKeys, - Description: `Permissible ciphers for use in SSL encryption.`, - }, - "username": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - ForceNew: true, - AtLeastOneOf: replicaConfigurationKeys, - Description: `Username for replication connection.`, - }, - "verify_server_certificate": { - Type: 
resource_sql_database_instance_schema.TypeBool, - Optional: true, - ForceNew: true, - AtLeastOneOf: replicaConfigurationKeys, - Description: `True if the master's common name value is checked during the SSL handshake.`, - }, - }, - }, - Description: `The configuration for replication.`, - }, - "server_ca_cert": { - Type: resource_sql_database_instance_schema.TypeList, - Computed: true, - Elem: &resource_sql_database_instance_schema.Resource{ - Schema: map[string]*resource_sql_database_instance_schema.Schema{ - "cert": { - Type: resource_sql_database_instance_schema.TypeString, - Computed: true, - Description: `The CA Certificate used to connect to the SQL Instance via SSL.`, - }, - "common_name": { - Type: resource_sql_database_instance_schema.TypeString, - Computed: true, - Description: `The CN valid for the CA Cert.`, - }, - "create_time": { - Type: resource_sql_database_instance_schema.TypeString, - Computed: true, - Description: `Creation time of the CA Cert.`, - }, - "expiration_time": { - Type: resource_sql_database_instance_schema.TypeString, - Computed: true, - Description: `Expiration time of the CA Cert.`, - }, - "sha1_fingerprint": { - Type: resource_sql_database_instance_schema.TypeString, - Computed: true, - Description: `SHA Fingerprint of the CA Cert.`, - }, - }, - }, - }, - "service_account_email_address": { - Type: resource_sql_database_instance_schema.TypeString, - Computed: true, - Description: `The service account email address assigned to the instance.`, - }, - "self_link": { - Type: resource_sql_database_instance_schema.TypeString, - Computed: true, - Description: `The URI of the created resource.`, - }, - "restore_backup_context": { - Type: resource_sql_database_instance_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &resource_sql_database_instance_schema.Resource{ - Schema: map[string]*resource_sql_database_instance_schema.Schema{ - "backup_run_id": { - Type: resource_sql_database_instance_schema.TypeInt, - Required: true, - 
Description: `The ID of the backup run to restore from.`, - }, - "instance_id": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - Description: `The ID of the instance that the backup was taken from.`, - }, - "project": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - Description: `The full project ID of the source instance.`, - }, - }, - }, - }, - "clone": { - Type: resource_sql_database_instance_schema.TypeList, - Optional: true, - Computed: false, - AtLeastOneOf: []string{"settings", "clone"}, - Description: `Configuration for creating a new instance as a clone of another instance.`, - MaxItems: 1, - Elem: &resource_sql_database_instance_schema.Resource{ - Schema: map[string]*resource_sql_database_instance_schema.Schema{ - "source_instance_name": { - Type: resource_sql_database_instance_schema.TypeString, - Required: true, - Description: `The name of the instance from which the point in time should be restored.`, - }, - "point_in_time": { - Type: resource_sql_database_instance_schema.TypeString, - Optional: true, - DiffSuppressFunc: timestampDiffSuppress(resource_sql_database_instance_time.RFC3339Nano), - Description: `The timestamp of the point in time that should be restored.`, - }, - }, - }, - }, - }, - UseJSONNumber: true, - } -} - -func privateNetworkCustomizeDiff(_ resource_sql_database_instance_context.Context, d *resource_sql_database_instance_schema.ResourceDiff, meta interface{}) error { - old, new := d.GetChange("settings.0.ip_configuration.0.private_network") - - if old != "" && new == "" { - if err := d.ForceNew("settings.0.ip_configuration.0.private_network"); err != nil { - return err - } - } - - return nil -} - -func pitrPostgresOnlyCustomizeDiff(_ resource_sql_database_instance_context.Context, diff *resource_sql_database_instance_schema.ResourceDiff, v interface{}) error { - pitr := diff.Get("settings.0.backup_configuration.0.point_in_time_recovery_enabled").(bool) - dbVersion := 
diff.Get("database_version").(string) - if pitr && !resource_sql_database_instance_strings.Contains(dbVersion, "POSTGRES") { - return resource_sql_database_instance_fmt.Errorf("point_in_time_recovery_enabled is only available for Postgres. You may want to consider using binary_log_enabled instead.") - } - return nil -} - -func insightsPostgresOnlyCustomizeDiff(_ resource_sql_database_instance_context.Context, diff *resource_sql_database_instance_schema.ResourceDiff, v interface{}) error { - insights := diff.Get("settings.0.insights_config.0.query_insights_enabled").(bool) - dbVersion := diff.Get("database_version").(string) - if insights && !resource_sql_database_instance_strings.Contains(dbVersion, "POSTGRES") { - return resource_sql_database_instance_fmt.Errorf("query_insights_enabled is only available for Postgres now.") - } - return nil -} - -func resourceSqlDatabaseInstanceCreate(d *resource_sql_database_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - region, err := getRegion(d, config) - if err != nil { - return err - } - - var name string - if v, ok := d.GetOk("name"); ok { - name = v.(string) - } else { - name = resource_sql_database_instance_resource.UniqueId() - } - - if err := d.Set("name", name); err != nil { - return resource_sql_database_instance_fmt.Errorf("Error setting name: %s", err) - } - - network := d.Get("settings.0.ip_configuration.0.private_network").(string) - if network != "" { - err = sqlDatabaseInstanceServiceNetworkPrecheck(d, config, userAgent, network) - if err != nil { - return err - } - } - - instance := &resource_sql_database_instance_sqladminsqladmin.DatabaseInstance{ - Name: name, - Region: region, - DatabaseVersion: d.Get("database_version").(string), - MasterInstanceName: d.Get("master_instance_name").(string), - 
ReplicaConfiguration: expandReplicaConfiguration(d.Get("replica_configuration").([]interface{})), - } - - cloneContext, cloneSource := expandCloneContext(d.Get("clone").([]interface{})) - - s, ok := d.GetOk("settings") - desiredSettings := expandSqlDatabaseInstanceSettings(s.([]interface{})) - if ok { - instance.Settings = desiredSettings - } - - if resource_sql_database_instance_strings.Contains(instance.DatabaseVersion, "SQLSERVER") { - instance.RootPassword = d.Get("root_password").(string) - } - - if !sqlDatabaseIsMaster(d) { - mutexKV.Lock(instanceMutexKey(project, instance.MasterInstanceName)) - defer mutexKV.Unlock(instanceMutexKey(project, instance.MasterInstanceName)) - } - - var patchData *resource_sql_database_instance_sqladminsqladmin.DatabaseInstance - - if instance.MasterInstanceName != "" && instance.Settings != nil && instance.Settings.BackupConfiguration != nil { - bc := instance.Settings.BackupConfiguration - instance.Settings.BackupConfiguration = nil - if bc.BinaryLogEnabled { - patchData = &resource_sql_database_instance_sqladminsqladmin.DatabaseInstance{Settings: &resource_sql_database_instance_sqladminsqladmin.Settings{BackupConfiguration: bc}} - } - } - - var op *resource_sql_database_instance_sqladminsqladmin.Operation - err = retryTimeDuration(func() (operr error) { - if cloneContext != nil { - cloneContext.DestinationInstanceName = name - clodeReq := resource_sql_database_instance_sqladminsqladmin.InstancesCloneRequest{CloneContext: cloneContext} - op, operr = config.NewSqlAdminClient(userAgent).Instances.Clone(project, cloneSource, &clodeReq).Do() - } else { - op, operr = config.NewSqlAdminClient(userAgent).Instances.Insert(project, instance).Do() - } - return operr - }, d.Timeout(resource_sql_database_instance_schema.TimeoutCreate), isSqlOperationInProgressError) - if err != nil { - return resource_sql_database_instance_fmt.Errorf("Error, failed to create instance %s: %s", instance.Name, err) - } - - id, err := replaceVars(d, config, 
"projects/{{project}}/instances/{{name}}") - if err != nil { - return resource_sql_database_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = sqlAdminOperationWaitTime(config, op, project, "Create Instance", userAgent, d.Timeout(resource_sql_database_instance_schema.TimeoutCreate)) - if err != nil { - d.SetId("") - return err - } - - if patchData != nil { - err = retryTimeDuration(func() (rerr error) { - op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, instance.Name, patchData).Do() - return rerr - }, d.Timeout(resource_sql_database_instance_schema.TimeoutUpdate), isSqlOperationInProgressError) - if err != nil { - return resource_sql_database_instance_fmt.Errorf("Error, failed to update instance settings for %s: %s", instance.Name, err) - } - err = sqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, d.Timeout(resource_sql_database_instance_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - err = resourceSqlDatabaseInstanceRead(d, meta) - if err != nil { - return err - } - - s = d.Get("settings") - - if len(s.([]interface{})) != 0 && cloneContext != nil && desiredSettings != nil { - instanceUpdate := &resource_sql_database_instance_sqladminsqladmin.DatabaseInstance{ - Settings: desiredSettings, - } - _settings := s.([]interface{})[0].(map[string]interface{}) - instanceUpdate.Settings.SettingsVersion = int64(_settings["version"].(int)) - var op *resource_sql_database_instance_sqladminsqladmin.Operation - err = retryTimeDuration(func() (rerr error) { - op, rerr = config.NewSqlAdminClient(userAgent).Instances.Update(project, name, instanceUpdate).Do() - return rerr - }, d.Timeout(resource_sql_database_instance_schema.TimeoutUpdate), isSqlOperationInProgressError) - if err != nil { - return resource_sql_database_instance_fmt.Errorf("Error, failed to update instance settings for %s: %s", instance.Name, err) - } - - err = sqlAdminOperationWaitTime(config, op, project, "Update 
Instance", userAgent, d.Timeout(resource_sql_database_instance_schema.TimeoutUpdate)) - if err != nil { - return err - } - - err = resourceSqlDatabaseInstanceRead(d, meta) - if err != nil { - return err - } - } - - if sqlDatabaseIsMaster(d) { - var users *resource_sql_database_instance_sqladminsqladmin.UsersListResponse - err = retryTimeDuration(func() error { - users, err = config.NewSqlAdminClient(userAgent).Users.List(project, instance.Name).Do() - return err - }, d.Timeout(resource_sql_database_instance_schema.TimeoutRead), isSqlOperationInProgressError) - if err != nil { - return resource_sql_database_instance_fmt.Errorf("Error, attempting to list users associated with instance %s: %s", instance.Name, err) - } - for _, u := range users.Items { - if u.Name == "root" && u.Host == "%" { - err = retry(func() error { - op, err = config.NewSqlAdminClient(userAgent).Users.Delete(project, instance.Name).Host(u.Host).Name(u.Name).Do() - if err == nil { - err = sqlAdminOperationWaitTime(config, op, project, "Delete default root User", userAgent, d.Timeout(resource_sql_database_instance_schema.TimeoutCreate)) - } - return err - }) - if err != nil { - return resource_sql_database_instance_fmt.Errorf("Error, failed to delete default 'root'@'*' user, but the database was created successfully: %s", err) - } - } - } - } - - if r, ok := d.GetOk("restore_backup_context"); ok { - err = sqlDatabaseInstanceRestoreFromBackup(d, config, userAgent, project, name, r) - if err != nil { - return err - } - } - - return nil -} - -func expandSqlDatabaseInstanceSettings(configured []interface{}) *resource_sql_database_instance_sqladminsqladmin.Settings { - if len(configured) == 0 || configured[0] == nil { - return nil - } - - _settings := configured[0].(map[string]interface{}) - settings := &resource_sql_database_instance_sqladminsqladmin.Settings{ - - SettingsVersion: int64(_settings["version"].(int)), - Tier: _settings["tier"].(string), - ForceSendFields: []string{"StorageAutoResize"}, - 
ActivationPolicy: _settings["activation_policy"].(string), - AvailabilityType: _settings["availability_type"].(string), - Collation: _settings["collation"].(string), - DataDiskSizeGb: int64(_settings["disk_size"].(int)), - DataDiskType: _settings["disk_type"].(string), - PricingPlan: _settings["pricing_plan"].(string), - UserLabels: convertStringMap(_settings["user_labels"].(map[string]interface{})), - BackupConfiguration: expandBackupConfiguration(_settings["backup_configuration"].([]interface{})), - DatabaseFlags: expandDatabaseFlags(_settings["database_flags"].([]interface{})), - IpConfiguration: expandIpConfiguration(_settings["ip_configuration"].([]interface{})), - LocationPreference: expandLocationPreference(_settings["location_preference"].([]interface{})), - MaintenanceWindow: expandMaintenanceWindow(_settings["maintenance_window"].([]interface{})), - InsightsConfig: expandInsightsConfig(_settings["insights_config"].([]interface{})), - } - - resize := _settings["disk_autoresize"].(bool) - settings.StorageAutoResize = &resize - settings.StorageAutoResizeLimit = int64(_settings["disk_autoresize_limit"].(int)) - - return settings -} - -func expandReplicaConfiguration(configured []interface{}) *resource_sql_database_instance_sqladminsqladmin.ReplicaConfiguration { - if len(configured) == 0 || configured[0] == nil { - return nil - } - - _replicaConfiguration := configured[0].(map[string]interface{}) - return &resource_sql_database_instance_sqladminsqladmin.ReplicaConfiguration{ - FailoverTarget: _replicaConfiguration["failover_target"].(bool), - - MysqlReplicaConfiguration: &resource_sql_database_instance_sqladminsqladmin.MySqlReplicaConfiguration{ - CaCertificate: _replicaConfiguration["ca_certificate"].(string), - ClientCertificate: _replicaConfiguration["client_certificate"].(string), - ClientKey: _replicaConfiguration["client_key"].(string), - ConnectRetryInterval: int64(_replicaConfiguration["connect_retry_interval"].(int)), - DumpFilePath: 
_replicaConfiguration["dump_file_path"].(string), - MasterHeartbeatPeriod: int64(_replicaConfiguration["master_heartbeat_period"].(int)), - Password: _replicaConfiguration["password"].(string), - SslCipher: _replicaConfiguration["ssl_cipher"].(string), - Username: _replicaConfiguration["username"].(string), - VerifyServerCertificate: _replicaConfiguration["verify_server_certificate"].(bool), - }, - } -} - -func expandCloneContext(configured []interface{}) (*resource_sql_database_instance_sqladminsqladmin.CloneContext, string) { - if len(configured) == 0 || configured[0] == nil { - return nil, "" - } - - _cloneConfiguration := configured[0].(map[string]interface{}) - - return &resource_sql_database_instance_sqladminsqladmin.CloneContext{ - PointInTime: _cloneConfiguration["point_in_time"].(string), - }, _cloneConfiguration["source_instance_name"].(string) -} - -func expandMaintenanceWindow(configured []interface{}) *resource_sql_database_instance_sqladminsqladmin.MaintenanceWindow { - if len(configured) == 0 || configured[0] == nil { - return nil - } - - window := configured[0].(map[string]interface{}) - return &resource_sql_database_instance_sqladminsqladmin.MaintenanceWindow{ - Day: int64(window["day"].(int)), - Hour: int64(window["hour"].(int)), - UpdateTrack: window["update_track"].(string), - ForceSendFields: []string{"Hour"}, - } -} - -func expandLocationPreference(configured []interface{}) *resource_sql_database_instance_sqladminsqladmin.LocationPreference { - if len(configured) == 0 || configured[0] == nil { - return nil - } - - _locationPreference := configured[0].(map[string]interface{}) - return &resource_sql_database_instance_sqladminsqladmin.LocationPreference{ - FollowGaeApplication: _locationPreference["follow_gae_application"].(string), - Zone: _locationPreference["zone"].(string), - } -} - -func expandIpConfiguration(configured []interface{}) *resource_sql_database_instance_sqladminsqladmin.IpConfiguration { - if len(configured) == 0 || 
configured[0] == nil { - return nil - } - - _ipConfiguration := configured[0].(map[string]interface{}) - - return &resource_sql_database_instance_sqladminsqladmin.IpConfiguration{ - Ipv4Enabled: _ipConfiguration["ipv4_enabled"].(bool), - RequireSsl: _ipConfiguration["require_ssl"].(bool), - PrivateNetwork: _ipConfiguration["private_network"].(string), - AuthorizedNetworks: expandAuthorizedNetworks(_ipConfiguration["authorized_networks"].(*resource_sql_database_instance_schema.Set).List()), - ForceSendFields: []string{"Ipv4Enabled", "RequireSsl"}, - } -} - -func expandAuthorizedNetworks(configured []interface{}) []*resource_sql_database_instance_sqladminsqladmin.AclEntry { - an := make([]*resource_sql_database_instance_sqladminsqladmin.AclEntry, 0, len(configured)) - for _, _acl := range configured { - _entry := _acl.(map[string]interface{}) - an = append(an, &resource_sql_database_instance_sqladminsqladmin.AclEntry{ - ExpirationTime: _entry["expiration_time"].(string), - Name: _entry["name"].(string), - Value: _entry["value"].(string), - }) - } - - return an -} - -func expandDatabaseFlags(configured []interface{}) []*resource_sql_database_instance_sqladminsqladmin.DatabaseFlags { - databaseFlags := make([]*resource_sql_database_instance_sqladminsqladmin.DatabaseFlags, 0, len(configured)) - for _, _flag := range configured { - _entry := _flag.(map[string]interface{}) - - databaseFlags = append(databaseFlags, &resource_sql_database_instance_sqladminsqladmin.DatabaseFlags{ - Name: _entry["name"].(string), - Value: _entry["value"].(string), - }) - } - return databaseFlags -} - -func expandBackupConfiguration(configured []interface{}) *resource_sql_database_instance_sqladminsqladmin.BackupConfiguration { - if len(configured) == 0 || configured[0] == nil { - return nil - } - - _backupConfiguration := configured[0].(map[string]interface{}) - return &resource_sql_database_instance_sqladminsqladmin.BackupConfiguration{ - BinaryLogEnabled: 
_backupConfiguration["binary_log_enabled"].(bool), - BackupRetentionSettings: expandBackupRetentionSettings(_backupConfiguration["backup_retention_settings"]), - Enabled: _backupConfiguration["enabled"].(bool), - StartTime: _backupConfiguration["start_time"].(string), - Location: _backupConfiguration["location"].(string), - TransactionLogRetentionDays: int64(_backupConfiguration["transaction_log_retention_days"].(int)), - PointInTimeRecoveryEnabled: _backupConfiguration["point_in_time_recovery_enabled"].(bool), - ForceSendFields: []string{"BinaryLogEnabled", "Enabled", "PointInTimeRecoveryEnabled"}, - } -} - -func expandBackupRetentionSettings(configured interface{}) *resource_sql_database_instance_sqladminsqladmin.BackupRetentionSettings { - l := configured.([]interface{}) - if len(l) == 0 { - return nil - } - config := l[0].(map[string]interface{}) - return &resource_sql_database_instance_sqladminsqladmin.BackupRetentionSettings{ - RetainedBackups: int64(config["retained_backups"].(int)), - RetentionUnit: config["retention_unit"].(string), - } -} - -func expandInsightsConfig(configured []interface{}) *resource_sql_database_instance_sqladminsqladmin.InsightsConfig { - if len(configured) == 0 || configured[0] == nil { - return nil - } - - _insightsConfig := configured[0].(map[string]interface{}) - return &resource_sql_database_instance_sqladminsqladmin.InsightsConfig{ - QueryInsightsEnabled: _insightsConfig["query_insights_enabled"].(bool), - QueryStringLength: int64(_insightsConfig["query_string_length"].(int)), - RecordApplicationTags: _insightsConfig["record_application_tags"].(bool), - RecordClientAddress: _insightsConfig["record_client_address"].(bool), - } -} - -func resourceSqlDatabaseInstanceRead(d *resource_sql_database_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil 
{ - return err - } - - var instance *resource_sql_database_instance_sqladminsqladmin.DatabaseInstance - err = retryTimeDuration(func() (rerr error) { - instance, rerr = config.NewSqlAdminClient(userAgent).Instances.Get(project, d.Get("name").(string)).Do() - return rerr - }, d.Timeout(resource_sql_database_instance_schema.TimeoutRead), isSqlOperationInProgressError) - if err != nil { - return handleNotFoundError(err, d, resource_sql_database_instance_fmt.Sprintf("SQL Database Instance %q", d.Get("name").(string))) - } - - if err := d.Set("name", instance.Name); err != nil { - return resource_sql_database_instance_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("region", instance.Region); err != nil { - return resource_sql_database_instance_fmt.Errorf("Error setting region: %s", err) - } - if err := d.Set("database_version", instance.DatabaseVersion); err != nil { - return resource_sql_database_instance_fmt.Errorf("Error setting database_version: %s", err) - } - if err := d.Set("connection_name", instance.ConnectionName); err != nil { - return resource_sql_database_instance_fmt.Errorf("Error setting connection_name: %s", err) - } - if err := d.Set("service_account_email_address", instance.ServiceAccountEmailAddress); err != nil { - return resource_sql_database_instance_fmt.Errorf("Error setting service_account_email_address: %s", err) - } - - if err := d.Set("settings", flattenSettings(instance.Settings)); err != nil { - resource_sql_database_instance_log.Printf("[WARN] Failed to set SQL Database Instance Settings") - } - - if err := d.Set("replica_configuration", flattenReplicaConfiguration(instance.ReplicaConfiguration, d)); err != nil { - resource_sql_database_instance_log.Printf("[WARN] Failed to set SQL Database Instance Replica Configuration") - } - ipAddresses := flattenIpAddresses(instance.IpAddresses) - if err := d.Set("ip_address", ipAddresses); err != nil { - resource_sql_database_instance_log.Printf("[WARN] Failed to set SQL Database 
Instance IP Addresses") - } - - if len(ipAddresses) > 0 { - if err := d.Set("first_ip_address", ipAddresses[0]["ip_address"]); err != nil { - return resource_sql_database_instance_fmt.Errorf("Error setting first_ip_address: %s", err) - } - } - - publicIpAddress := "" - privateIpAddress := "" - for _, ip := range instance.IpAddresses { - if publicIpAddress == "" && ip.Type == "PRIMARY" { - publicIpAddress = ip.IpAddress - } - - if privateIpAddress == "" && ip.Type == "PRIVATE" { - privateIpAddress = ip.IpAddress - } - } - - if err := d.Set("public_ip_address", publicIpAddress); err != nil { - return resource_sql_database_instance_fmt.Errorf("Error setting public_ip_address: %s", err) - } - if err := d.Set("private_ip_address", privateIpAddress); err != nil { - return resource_sql_database_instance_fmt.Errorf("Error setting private_ip_address: %s", err) - } - - if err := d.Set("server_ca_cert", flattenServerCaCerts([]*resource_sql_database_instance_sqladminsqladmin.SslCert{instance.ServerCaCert})); err != nil { - resource_sql_database_instance_log.Printf("[WARN] Failed to set SQL Database CA Certificate") - } - - if err := d.Set("master_instance_name", resource_sql_database_instance_strings.TrimPrefix(instance.MasterInstanceName, project+":")); err != nil { - return resource_sql_database_instance_fmt.Errorf("Error setting master_instance_name: %s", err) - } - if err := d.Set("project", project); err != nil { - return resource_sql_database_instance_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("self_link", instance.SelfLink); err != nil { - return resource_sql_database_instance_fmt.Errorf("Error setting self_link: %s", err) - } - d.SetId(instance.Name) - - return nil -} - -func resourceSqlDatabaseInstanceUpdate(d *resource_sql_database_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := 
getProject(d, config) - if err != nil { - return err - } - - instance := &resource_sql_database_instance_sqladminsqladmin.DatabaseInstance{ - Settings: expandSqlDatabaseInstanceSettings(d.Get("settings").([]interface{})), - } - - if v, ok := d.GetOk("master_instance_name"); ok { - mutexKV.Lock(instanceMutexKey(project, v.(string))) - defer mutexKV.Unlock(instanceMutexKey(project, v.(string))) - } - - var op *resource_sql_database_instance_sqladminsqladmin.Operation - err = retryTimeDuration(func() (rerr error) { - op, rerr = config.NewSqlAdminClient(userAgent).Instances.Update(project, d.Get("name").(string), instance).Do() - return rerr - }, d.Timeout(resource_sql_database_instance_schema.TimeoutUpdate), isSqlOperationInProgressError) - if err != nil { - return resource_sql_database_instance_fmt.Errorf("Error, failed to update instance settings for %s: %s", instance.Name, err) - } - - err = sqlAdminOperationWaitTime(config, op, project, "Update Instance", userAgent, d.Timeout(resource_sql_database_instance_schema.TimeoutUpdate)) - if err != nil { - return err - } - - if r, ok := d.GetOk("restore_backup_context"); ok { - if d.HasChange("restore_backup_context") { - err = sqlDatabaseInstanceRestoreFromBackup(d, config, userAgent, project, d.Get("name").(string), r) - if err != nil { - return err - } - } - } - - return resourceSqlDatabaseInstanceRead(d, meta) -} - -func resourceSqlDatabaseInstanceDelete(d *resource_sql_database_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - if d.Get("deletion_protection").(bool) { - return resource_sql_database_instance_fmt.Errorf("Error, failed to delete instance because deletion_protection is set to true. 
Set it to false to proceed with instance deletion") - } - - if v, ok := d.GetOk("master_instance_name"); ok { - mutexKV.Lock(instanceMutexKey(project, v.(string))) - defer mutexKV.Unlock(instanceMutexKey(project, v.(string))) - } - - var op *resource_sql_database_instance_sqladminsqladmin.Operation - err = retryTimeDuration(func() (rerr error) { - op, rerr = config.NewSqlAdminClient(userAgent).Instances.Delete(project, d.Get("name").(string)).Do() - if rerr != nil { - return rerr - } - err = sqlAdminOperationWaitTime(config, op, project, "Delete Instance", userAgent, d.Timeout(resource_sql_database_instance_schema.TimeoutDelete)) - if err != nil { - return err - } - return nil - }, d.Timeout(resource_sql_database_instance_schema.TimeoutDelete), isSqlOperationInProgressError, isSqlInternalError) - if err != nil { - return resource_sql_database_instance_fmt.Errorf("Error, failed to delete instance %s: %s", d.Get("name").(string), err) - } - return nil -} - -func resourceSqlDatabaseInstanceImport(d *resource_sql_database_instance_schema.ResourceData, meta interface{}) ([]*resource_sql_database_instance_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/instances/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)"}, d, config); err != nil { - return nil, err - } - - if err := d.Set("deletion_protection", true); err != nil { - return nil, resource_sql_database_instance_fmt.Errorf("Error setting deletion_protection: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") - if err != nil { - return nil, resource_sql_database_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_sql_database_instance_schema.ResourceData{d}, nil -} - -func flattenSettings(settings *resource_sql_database_instance_sqladminsqladmin.Settings) []map[string]interface{} { - data := map[string]interface{}{ - "version": settings.SettingsVersion, - "tier": 
settings.Tier, - "activation_policy": settings.ActivationPolicy, - "availability_type": settings.AvailabilityType, - "collation": settings.Collation, - "disk_type": settings.DataDiskType, - "disk_size": settings.DataDiskSizeGb, - "pricing_plan": settings.PricingPlan, - "user_labels": settings.UserLabels, - } - - if settings.BackupConfiguration != nil { - data["backup_configuration"] = flattenBackupConfiguration(settings.BackupConfiguration) - } - - if settings.DatabaseFlags != nil { - data["database_flags"] = flattenDatabaseFlags(settings.DatabaseFlags) - } - - if settings.IpConfiguration != nil { - data["ip_configuration"] = flattenIpConfiguration(settings.IpConfiguration) - } - - if settings.LocationPreference != nil { - data["location_preference"] = flattenLocationPreference(settings.LocationPreference) - } - - if settings.MaintenanceWindow != nil { - data["maintenance_window"] = flattenMaintenanceWindow(settings.MaintenanceWindow) - } - - if settings.InsightsConfig != nil { - data["insights_config"] = flattenInsightsConfig(settings.InsightsConfig) - } - - data["disk_autoresize"] = settings.StorageAutoResize - data["disk_autoresize_limit"] = settings.StorageAutoResizeLimit - - if settings.UserLabels != nil { - data["user_labels"] = settings.UserLabels - } - - return []map[string]interface{}{data} -} - -func flattenBackupConfiguration(backupConfiguration *resource_sql_database_instance_sqladminsqladmin.BackupConfiguration) []map[string]interface{} { - data := map[string]interface{}{ - "binary_log_enabled": backupConfiguration.BinaryLogEnabled, - "enabled": backupConfiguration.Enabled, - "start_time": backupConfiguration.StartTime, - "location": backupConfiguration.Location, - "point_in_time_recovery_enabled": backupConfiguration.PointInTimeRecoveryEnabled, - "backup_retention_settings": flattenBackupRetentionSettings(backupConfiguration.BackupRetentionSettings), - "transaction_log_retention_days": backupConfiguration.TransactionLogRetentionDays, - } - - return 
[]map[string]interface{}{data} -} - -func flattenBackupRetentionSettings(b *resource_sql_database_instance_sqladminsqladmin.BackupRetentionSettings) []map[string]interface{} { - if b == nil { - return nil - } - return []map[string]interface{}{ - { - "retained_backups": b.RetainedBackups, - "retention_unit": b.RetentionUnit, - }, - } -} - -func flattenDatabaseFlags(databaseFlags []*resource_sql_database_instance_sqladminsqladmin.DatabaseFlags) []map[string]interface{} { - flags := make([]map[string]interface{}, 0, len(databaseFlags)) - - for _, flag := range databaseFlags { - data := map[string]interface{}{ - "name": flag.Name, - "value": flag.Value, - } - - flags = append(flags, data) - } - - return flags -} - -func flattenIpConfiguration(ipConfiguration *resource_sql_database_instance_sqladminsqladmin.IpConfiguration) interface{} { - data := map[string]interface{}{ - "ipv4_enabled": ipConfiguration.Ipv4Enabled, - "private_network": ipConfiguration.PrivateNetwork, - "require_ssl": ipConfiguration.RequireSsl, - } - - if ipConfiguration.AuthorizedNetworks != nil { - data["authorized_networks"] = flattenAuthorizedNetworks(ipConfiguration.AuthorizedNetworks) - } - - return []map[string]interface{}{data} -} - -func flattenAuthorizedNetworks(entries []*resource_sql_database_instance_sqladminsqladmin.AclEntry) interface{} { - networks := resource_sql_database_instance_schema.NewSet(resource_sql_database_instance_schema.HashResource(sqlDatabaseAuthorizedNetWorkSchemaElem), []interface{}{}) - - for _, entry := range entries { - data := map[string]interface{}{ - "expiration_time": entry.ExpirationTime, - "name": entry.Name, - "value": entry.Value, - } - - networks.Add(data) - } - - return networks -} - -func flattenLocationPreference(locationPreference *resource_sql_database_instance_sqladminsqladmin.LocationPreference) interface{} { - data := map[string]interface{}{ - "follow_gae_application": locationPreference.FollowGaeApplication, - "zone": locationPreference.Zone, - } - 
- return []map[string]interface{}{data} -} - -func flattenMaintenanceWindow(maintenanceWindow *resource_sql_database_instance_sqladminsqladmin.MaintenanceWindow) interface{} { - data := map[string]interface{}{ - "day": maintenanceWindow.Day, - "hour": maintenanceWindow.Hour, - "update_track": maintenanceWindow.UpdateTrack, - } - - return []map[string]interface{}{data} -} - -func flattenReplicaConfiguration(replicaConfiguration *resource_sql_database_instance_sqladminsqladmin.ReplicaConfiguration, d *resource_sql_database_instance_schema.ResourceData) []map[string]interface{} { - rc := []map[string]interface{}{} - - if replicaConfiguration != nil { - data := map[string]interface{}{ - "failover_target": replicaConfiguration.FailoverTarget, - - "ca_certificate": d.Get("replica_configuration.0.ca_certificate"), - "client_certificate": d.Get("replica_configuration.0.client_certificate"), - "client_key": d.Get("replica_configuration.0.client_key"), - "connect_retry_interval": d.Get("replica_configuration.0.connect_retry_interval"), - "dump_file_path": d.Get("replica_configuration.0.dump_file_path"), - "master_heartbeat_period": d.Get("replica_configuration.0.master_heartbeat_period"), - "password": d.Get("replica_configuration.0.password"), - "ssl_cipher": d.Get("replica_configuration.0.ssl_cipher"), - "username": d.Get("replica_configuration.0.username"), - "verify_server_certificate": d.Get("replica_configuration.0.verify_server_certificate"), - } - rc = append(rc, data) - } - - return rc -} - -func flattenIpAddresses(ipAddresses []*resource_sql_database_instance_sqladminsqladmin.IpMapping) []map[string]interface{} { - var ips []map[string]interface{} - - for _, ip := range ipAddresses { - data := map[string]interface{}{ - "ip_address": ip.IpAddress, - "type": ip.Type, - "time_to_retire": ip.TimeToRetire, - } - - ips = append(ips, data) - } - - return ips -} - -func flattenServerCaCerts(caCerts []*resource_sql_database_instance_sqladminsqladmin.SslCert) 
[]map[string]interface{} { - var certs []map[string]interface{} - - for _, caCert := range caCerts { - if caCert != nil { - data := map[string]interface{}{ - "cert": caCert.Cert, - "common_name": caCert.CommonName, - "create_time": caCert.CreateTime, - "expiration_time": caCert.ExpirationTime, - "sha1_fingerprint": caCert.Sha1Fingerprint, - } - - certs = append(certs, data) - } - } - - return certs -} - -func flattenInsightsConfig(insightsConfig *resource_sql_database_instance_sqladminsqladmin.InsightsConfig) interface{} { - data := map[string]interface{}{ - "query_insights_enabled": insightsConfig.QueryInsightsEnabled, - "query_string_length": insightsConfig.QueryStringLength, - "record_application_tags": insightsConfig.RecordApplicationTags, - "record_client_address": insightsConfig.RecordClientAddress, - } - - return []map[string]interface{}{data} -} - -func instanceMutexKey(project, instance_name string) string { - return resource_sql_database_instance_fmt.Sprintf("google-sql-database-instance-%s-%s", project, instance_name) -} - -func sqlDatabaseIsMaster(d *resource_sql_database_instance_schema.ResourceData) bool { - _, ok := d.GetOk("master_instance_name") - return !ok -} - -func sqlDatabaseInstanceServiceNetworkPrecheck(d *resource_sql_database_instance_schema.ResourceData, config *Config, userAgent, network string) error { - resource_sql_database_instance_log.Printf("[DEBUG] checking network %q for at least one service networking connection", network) - - serviceNetworkingNetworkName, err := retrieveServiceNetworkingNetworkName(d, config, network, userAgent) - if err != nil { - var gerr *resource_sql_database_instance_googleapi.Error - if resource_sql_database_instance_errors.As(err, &gerr) { - resource_sql_database_instance_log.Printf("[DEBUG] retrieved googleapi error while creating sn name for %q. precheck skipped. 
code %v and message: %s", network, gerr.Code, gerr.Body) - return nil - } - - return err - } - - response, err := config.NewServiceNetworkingClient(userAgent).Services.Connections.List("services/servicenetworking.googleapis.com").Network(serviceNetworkingNetworkName).Do() - if err != nil { - - resource_sql_database_instance_log.Printf("[WARNING] Failed to list Service Networking of the project. Skipped Service Networking precheck.") - return nil - } - - if len(response.Connections) < 1 { - return resource_sql_database_instance_fmt.Errorf("Error, failed to create instance because the network doesn't have at least 1 private services connection. Please see https://cloud.google.com/sql/docs/mysql/private-ip#network_requirements for how to create this connection.") - } - - return nil -} - -func expandRestoreBackupContext(configured []interface{}) *resource_sql_database_instance_sqladminsqladmin.RestoreBackupContext { - if len(configured) == 0 || configured[0] == nil { - return nil - } - - _rc := configured[0].(map[string]interface{}) - return &resource_sql_database_instance_sqladminsqladmin.RestoreBackupContext{ - BackupRunId: int64(_rc["backup_run_id"].(int)), - InstanceId: _rc["instance_id"].(string), - Project: _rc["project"].(string), - } -} - -func sqlDatabaseInstanceRestoreFromBackup(d *resource_sql_database_instance_schema.ResourceData, config *Config, userAgent, project, instanceId string, r interface{}) error { - resource_sql_database_instance_log.Printf("[DEBUG] Initiating SQL database instance backup restore") - restoreContext := r.([]interface{}) - - backupRequest := &resource_sql_database_instance_sqladminsqladmin.InstancesRestoreBackupRequest{ - RestoreBackupContext: expandRestoreBackupContext(restoreContext), - } - - var op *resource_sql_database_instance_sqladminsqladmin.Operation - err := retryTimeDuration(func() (operr error) { - op, operr = config.NewSqlAdminClient(userAgent).Instances.RestoreBackup(project, instanceId, backupRequest).Do() - return 
operr - }, d.Timeout(resource_sql_database_instance_schema.TimeoutUpdate), isSqlOperationInProgressError) - if err != nil { - return resource_sql_database_instance_fmt.Errorf("Error, failed to restore instance from backup %s: %s", instanceId, err) - } - - err = sqlAdminOperationWaitTime(config, op, project, "Restore Backup", userAgent, d.Timeout(resource_sql_database_instance_schema.TimeoutUpdate)) - if err != nil { - return err - } - - return nil -} - -func resourceSQLSourceRepresentationInstance() *resource_sql_source_representation_instance_schema.Resource { - return &resource_sql_source_representation_instance_schema.Resource{ - Create: resourceSQLSourceRepresentationInstanceCreate, - Read: resourceSQLSourceRepresentationInstanceRead, - Delete: resourceSQLSourceRepresentationInstanceDelete, - - Importer: &resource_sql_source_representation_instance_schema.ResourceImporter{ - State: resourceSQLSourceRepresentationInstanceImport, - }, - - Timeouts: &resource_sql_source_representation_instance_schema.ResourceTimeout{ - Create: resource_sql_source_representation_instance_schema.DefaultTimeout(4 * resource_sql_source_representation_instance_time.Minute), - Delete: resource_sql_source_representation_instance_schema.DefaultTimeout(4 * resource_sql_source_representation_instance_time.Minute), - }, - - Schema: map[string]*resource_sql_source_representation_instance_schema.Schema{ - "database_version": { - Type: resource_sql_source_representation_instance_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_sql_source_representation_instance_validation.StringInSlice([]string{"MYSQL_5_5", "MYSQL_5_6", "MYSQL_5_7", "MYSQL_8_0"}, false), - Description: `The MySQL version running on your source database server. 
Possible values: ["MYSQL_5_5", "MYSQL_5_6", "MYSQL_5_7", "MYSQL_8_0"]`, - }, - "name": { - Type: resource_sql_source_representation_instance_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the source representation instance. Use any valid Cloud SQL instance name.`, - }, - "host": { - Type: resource_sql_source_representation_instance_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validateIpAddress, - Description: `The externally accessible IPv4 address for the source database server.`, - }, - "port": { - Type: resource_sql_source_representation_instance_schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: resource_sql_source_representation_instance_validation.IntBetween(0, 65535), - Description: `The externally accessible port for the source database server. -Defaults to 3306.`, - Default: 3306, - }, - - "region": { - Type: resource_sql_source_representation_instance_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The Region in which the created instance should reside. 
-If it is not provided, the provider region is used.`, - }, - "project": { - Type: resource_sql_source_representation_instance_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSQLSourceRepresentationInstanceCreate(d *resource_sql_source_representation_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandSQLSourceRepresentationInstanceName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_sql_source_representation_instance_reflect.ValueOf(nameProp)) && (ok || !resource_sql_source_representation_instance_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - regionProp, err := expandSQLSourceRepresentationInstanceRegion(d.Get("region"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("region"); !isEmptyValue(resource_sql_source_representation_instance_reflect.ValueOf(regionProp)) && (ok || !resource_sql_source_representation_instance_reflect.DeepEqual(v, regionProp)) { - obj["region"] = regionProp - } - databaseVersionProp, err := expandSQLSourceRepresentationInstanceDatabaseVersion(d.Get("database_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("database_version"); !isEmptyValue(resource_sql_source_representation_instance_reflect.ValueOf(databaseVersionProp)) && (ok || !resource_sql_source_representation_instance_reflect.DeepEqual(v, databaseVersionProp)) { - obj["databaseVersion"] = databaseVersionProp - } - onPremisesConfigurationProp, err := expandSQLSourceRepresentationInstanceOnPremisesConfiguration(nil, d, config) - if err != nil { - return err - } else if 
!isEmptyValue(resource_sql_source_representation_instance_reflect.ValueOf(onPremisesConfigurationProp)) { - obj["onPremisesConfiguration"] = onPremisesConfigurationProp - } - - obj, err = resourceSQLSourceRepresentationInstanceEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances") - if err != nil { - return err - } - - resource_sql_source_representation_instance_log.Printf("[DEBUG] Creating new SourceRepresentationInstance: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_sql_source_representation_instance_fmt.Errorf("Error fetching project for SourceRepresentationInstance: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_sql_source_representation_instance_schema.TimeoutCreate)) - if err != nil { - return resource_sql_source_representation_instance_fmt.Errorf("Error creating SourceRepresentationInstance: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") - if err != nil { - return resource_sql_source_representation_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - err = sqlAdminOperationWaitTime( - config, res, project, "Creating SourceRepresentationInstance", userAgent, - d.Timeout(resource_sql_source_representation_instance_schema.TimeoutCreate)) - - if err != nil { - - d.SetId("") - return resource_sql_source_representation_instance_fmt.Errorf("Error waiting to create SourceRepresentationInstance: %s", err) - } - - resource_sql_source_representation_instance_log.Printf("[DEBUG] Finished creating SourceRepresentationInstance %q: %#v", d.Id(), res) - - return resourceSQLSourceRepresentationInstanceRead(d, meta) -} - -func 
resourceSQLSourceRepresentationInstanceRead(d *resource_sql_source_representation_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_sql_source_representation_instance_fmt.Errorf("Error fetching project for SourceRepresentationInstance: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_sql_source_representation_instance_fmt.Sprintf("SQLSourceRepresentationInstance %q", d.Id())) - } - - res, err = resourceSQLSourceRepresentationInstanceDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_sql_source_representation_instance_log.Printf("[DEBUG] Removing SQLSourceRepresentationInstance because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_sql_source_representation_instance_fmt.Errorf("Error reading SourceRepresentationInstance: %s", err) - } - - if err := d.Set("name", flattenSQLSourceRepresentationInstanceName(res["name"], d, config)); err != nil { - return resource_sql_source_representation_instance_fmt.Errorf("Error reading SourceRepresentationInstance: %s", err) - } - if err := d.Set("region", flattenSQLSourceRepresentationInstanceRegion(res["region"], d, config)); err != nil { - return resource_sql_source_representation_instance_fmt.Errorf("Error reading SourceRepresentationInstance: %s", err) - } - if err := d.Set("database_version", 
flattenSQLSourceRepresentationInstanceDatabaseVersion(res["databaseVersion"], d, config)); err != nil { - return resource_sql_source_representation_instance_fmt.Errorf("Error reading SourceRepresentationInstance: %s", err) - } - - if flattenedProp := flattenSQLSourceRepresentationInstanceOnPremisesConfiguration(res["onPremisesConfiguration"], d, config); flattenedProp != nil { - if gerr, ok := flattenedProp.(*resource_sql_source_representation_instance_googleapi.Error); ok { - return resource_sql_source_representation_instance_fmt.Errorf("Error reading SourceRepresentationInstance: %s", gerr) - } - casted := flattenedProp.([]interface{})[0] - if casted != nil { - for k, v := range casted.(map[string]interface{}) { - if err := d.Set(k, v); err != nil { - return resource_sql_source_representation_instance_fmt.Errorf("Error setting %s: %s", k, err) - } - } - } - } - - return nil -} - -func resourceSQLSourceRepresentationInstanceDelete(d *resource_sql_source_representation_instance_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_sql_source_representation_instance_fmt.Errorf("Error fetching project for SourceRepresentationInstance: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{SQLBasePath}}projects/{{project}}/instances/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_sql_source_representation_instance_log.Printf("[DEBUG] Deleting SourceRepresentationInstance %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_sql_source_representation_instance_schema.TimeoutDelete)) - if err != nil { - return 
handleNotFoundError(err, d, "SourceRepresentationInstance") - } - - err = sqlAdminOperationWaitTime( - config, res, project, "Deleting SourceRepresentationInstance", userAgent, - d.Timeout(resource_sql_source_representation_instance_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_sql_source_representation_instance_log.Printf("[DEBUG] Finished deleting SourceRepresentationInstance %q: %#v", d.Id(), res) - return nil -} - -func resourceSQLSourceRepresentationInstanceImport(d *resource_sql_source_representation_instance_schema.ResourceData, meta interface{}) ([]*resource_sql_source_representation_instance_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/instances/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/instances/{{name}}") - if err != nil { - return nil, resource_sql_source_representation_instance_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_sql_source_representation_instance_schema.ResourceData{d}, nil -} - -func flattenSQLSourceRepresentationInstanceName(v interface{}, d *resource_sql_source_representation_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLSourceRepresentationInstanceRegion(v interface{}, d *resource_sql_source_representation_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLSourceRepresentationInstanceDatabaseVersion(v interface{}, d *resource_sql_source_representation_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLSourceRepresentationInstanceOnPremisesConfiguration(v interface{}, d *resource_sql_source_representation_instance_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 
0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["host"] = - flattenSQLSourceRepresentationInstanceOnPremisesConfigurationHost(original["host"], d, config) - transformed["port"] = - flattenSQLSourceRepresentationInstanceOnPremisesConfigurationPort(original["port"], d, config) - return []interface{}{transformed} -} - -func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationHost(v interface{}, d *resource_sql_source_representation_instance_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenSQLSourceRepresentationInstanceOnPremisesConfigurationPort(v interface{}, d *resource_sql_source_representation_instance_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_sql_source_representation_instance_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func expandSQLSourceRepresentationInstanceName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLSourceRepresentationInstanceRegion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLSourceRepresentationInstanceDatabaseVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLSourceRepresentationInstanceOnPremisesConfiguration(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - transformed := make(map[string]interface{}) - transformedHost, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationHost(d.Get("host"), d, config) - if err != nil { - return nil, err - } else if val := resource_sql_source_representation_instance_reflect.ValueOf(transformedHost); val.IsValid() && !isEmptyValue(val) { - transformed["host"] = transformedHost - } - - 
transformedPort, err := expandSQLSourceRepresentationInstanceOnPremisesConfigurationPort(d.Get("port"), d, config) - if err != nil { - return nil, err - } else if val := resource_sql_source_representation_instance_reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { - transformed["port"] = transformedPort - } - - return transformed, nil -} - -func expandSQLSourceRepresentationInstanceOnPremisesConfigurationHost(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandSQLSourceRepresentationInstanceOnPremisesConfigurationPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceSQLSourceRepresentationInstanceEncoder(d *resource_sql_source_representation_instance_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - opc := obj["onPremisesConfiguration"].(map[string]interface{}) - opc["hostPort"] = resource_sql_source_representation_instance_fmt.Sprintf("%v:%v", opc["host"], opc["port"]) - delete(opc, "host") - delete(opc, "port") - return obj, nil -} - -func resourceSQLSourceRepresentationInstanceDecoder(d *resource_sql_source_representation_instance_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if v, ok := res["onPremisesConfiguration"]; ok { - opc := v.(map[string]interface{}) - hostPort := opc["hostPort"] - spl := resource_sql_source_representation_instance_strings.Split(hostPort.(string), ":") - if len(spl) != 2 { - return nil, resource_sql_source_representation_instance_fmt.Errorf("unexpected value for hostPort, expected [host]:[port], got %q", hostPort) - } - opc["host"] = spl[0] - p, err := resource_sql_source_representation_instance_strconv.Atoi(spl[1]) - if err != nil { - return nil, resource_sql_source_representation_instance_fmt.Errorf("error converting port %q to int: %v", spl[1], err) - } - opc["port"] = p - 
delete(opc, "hostPort") - } - return res, nil -} - -func resourceSqlSslCert() *resource_sql_ssl_cert_schema.Resource { - return &resource_sql_ssl_cert_schema.Resource{ - Create: resourceSqlSslCertCreate, - Read: resourceSqlSslCertRead, - Delete: resourceSqlSslCertDelete, - - SchemaVersion: 1, - - Timeouts: &resource_sql_ssl_cert_schema.ResourceTimeout{ - Create: resource_sql_ssl_cert_schema.DefaultTimeout(10 * resource_sql_ssl_cert_time.Minute), - Delete: resource_sql_ssl_cert_schema.DefaultTimeout(10 * resource_sql_ssl_cert_time.Minute), - }, - - Schema: map[string]*resource_sql_ssl_cert_schema.Schema{ - "common_name": { - Type: resource_sql_ssl_cert_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The common name to be used in the certificate to identify the client. Constrained to [a-zA-Z.-_ ]+. Changing this forces a new resource to be created.`, - }, - - "instance": { - Type: resource_sql_ssl_cert_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the Cloud SQL instance. Changing this forces a new resource to be created.`, - }, - - "project": { - Type: resource_sql_ssl_cert_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, - }, - - "cert": { - Type: resource_sql_ssl_cert_schema.TypeString, - Computed: true, - Description: `The actual certificate data for this client certificate.`, - }, - - "cert_serial_number": { - Type: resource_sql_ssl_cert_schema.TypeString, - Computed: true, - Description: `The serial number extracted from the certificate data.`, - }, - - "create_time": { - Type: resource_sql_ssl_cert_schema.TypeString, - Computed: true, - Description: `The time when the certificate was created in RFC 3339 format, for example 2012-11-15T16:19:00.094Z.`, - }, - - "expiration_time": { - Type: resource_sql_ssl_cert_schema.TypeString, - Computed: true, - Description: `The time when the certificate expires in RFC 3339 format, for example 2012-11-15T16:19:00.094Z.`, - }, - - "private_key": { - Type: resource_sql_ssl_cert_schema.TypeString, - Computed: true, - Sensitive: true, - Description: `The private key associated with the client certificate.`, - }, - - "server_ca_cert": { - Type: resource_sql_ssl_cert_schema.TypeString, - Computed: true, - Description: `The CA cert of the server this client cert was generated from.`, - }, - - "sha1_fingerprint": { - Type: resource_sql_ssl_cert_schema.TypeString, - Computed: true, - Description: `The SHA1 Fingerprint of the certificate.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceSqlSslCertCreate(d *resource_sql_ssl_cert_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - instance := d.Get("instance").(string) - commonName := d.Get("common_name").(string) - - sslCertsInsertRequest := &resource_sql_ssl_cert_sqladminsqladmin.SslCertsInsertRequest{ - CommonName: commonName, - } - - mutexKV.Lock(instanceMutexKey(project, instance)) - defer mutexKV.Unlock(instanceMutexKey(project, 
instance)) - resp, err := config.NewSqlAdminClient(userAgent).SslCerts.Insert(project, instance, sslCertsInsertRequest).Do() - if err != nil { - return resource_sql_ssl_cert_fmt.Errorf("Error, failed to insert "+ - "ssl cert %s into instance %s: %s", commonName, instance, err) - } - - err = sqlAdminOperationWaitTime(config, resp.Operation, project, "Create Ssl Cert", userAgent, d.Timeout(resource_sql_ssl_cert_schema.TimeoutCreate)) - if err != nil { - return resource_sql_ssl_cert_fmt.Errorf("Error, failure waiting for creation of %q "+ - "in %q: %s", commonName, instance, err) - } - - fingerprint := resp.ClientCert.CertInfo.Sha1Fingerprint - d.SetId(resource_sql_ssl_cert_fmt.Sprintf("projects/%s/instances/%s/sslCerts/%s", project, instance, fingerprint)) - if err := d.Set("sha1_fingerprint", fingerprint); err != nil { - return resource_sql_ssl_cert_fmt.Errorf("Error setting sha1_fingerprint: %s", err) - } - - if err := d.Set("private_key", resp.ClientCert.CertPrivateKey); err != nil { - return resource_sql_ssl_cert_fmt.Errorf("Error setting private_key: %s", err) - } - if err := d.Set("server_ca_cert", resp.ServerCaCert.Cert); err != nil { - return resource_sql_ssl_cert_fmt.Errorf("Error setting server_ca_cert: %s", err) - } - - return resourceSqlSslCertRead(d, meta) -} - -func resourceSqlSslCertRead(d *resource_sql_ssl_cert_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - instance := d.Get("instance").(string) - commonName := d.Get("common_name").(string) - fingerprint := d.Get("sha1_fingerprint").(string) - - sslCerts, err := config.NewSqlAdminClient(userAgent).SslCerts.Get(project, instance, fingerprint).Do() - if err != nil { - return handleNotFoundError(err, d, resource_sql_ssl_cert_fmt.Sprintf("SQL Ssl Cert %q in instance %q", commonName, instance)) - } - - 
if sslCerts == nil { - resource_sql_ssl_cert_log.Printf("[WARN] Removing SQL Ssl Cert %q because it's gone", commonName) - d.SetId("") - - return nil - } - - if err := d.Set("instance", sslCerts.Instance); err != nil { - return resource_sql_ssl_cert_fmt.Errorf("Error setting instance: %s", err) - } - if err := d.Set("project", project); err != nil { - return resource_sql_ssl_cert_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("sha1_fingerprint", sslCerts.Sha1Fingerprint); err != nil { - return resource_sql_ssl_cert_fmt.Errorf("Error setting sha1_fingerprint: %s", err) - } - if err := d.Set("common_name", sslCerts.CommonName); err != nil { - return resource_sql_ssl_cert_fmt.Errorf("Error setting common_name: %s", err) - } - if err := d.Set("cert", sslCerts.Cert); err != nil { - return resource_sql_ssl_cert_fmt.Errorf("Error setting cert: %s", err) - } - if err := d.Set("cert_serial_number", sslCerts.CertSerialNumber); err != nil { - return resource_sql_ssl_cert_fmt.Errorf("Error setting cert_serial_number: %s", err) - } - if err := d.Set("create_time", sslCerts.CreateTime); err != nil { - return resource_sql_ssl_cert_fmt.Errorf("Error setting create_time: %s", err) - } - if err := d.Set("expiration_time", sslCerts.ExpirationTime); err != nil { - return resource_sql_ssl_cert_fmt.Errorf("Error setting expiration_time: %s", err) - } - - d.SetId(resource_sql_ssl_cert_fmt.Sprintf("projects/%s/instances/%s/sslCerts/%s", project, instance, fingerprint)) - return nil -} - -func resourceSqlSslCertDelete(d *resource_sql_ssl_cert_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - instance := d.Get("instance").(string) - commonName := d.Get("common_name").(string) - fingerprint := d.Get("sha1_fingerprint").(string) - - mutexKV.Lock(instanceMutexKey(project, 
instance)) - defer mutexKV.Unlock(instanceMutexKey(project, instance)) - op, err := config.NewSqlAdminClient(userAgent).SslCerts.Delete(project, instance, fingerprint).Do() - - if err != nil { - return resource_sql_ssl_cert_fmt.Errorf("Error, failed to delete "+ - "ssl cert %q in instance %q: %s", commonName, - instance, err) - } - - err = sqlAdminOperationWaitTime(config, op, project, "Delete Ssl Cert", userAgent, d.Timeout(resource_sql_ssl_cert_schema.TimeoutDelete)) - - if err != nil { - return resource_sql_ssl_cert_fmt.Errorf("Error, failure waiting for deletion of ssl cert %q "+ - "in %q: %s", commonName, instance, err) - } - - return nil -} - -func diffSuppressIamUserName(_, old, new string, d *resource_sql_user_schema.ResourceData) bool { - strippedName := resource_sql_user_strings.Split(new, "@")[0] - - userType := d.Get("type").(string) - - if old == strippedName && resource_sql_user_strings.Contains(userType, "IAM") { - return true - } - - return false -} - -func resourceSqlUser() *resource_sql_user_schema.Resource { - return &resource_sql_user_schema.Resource{ - Create: resourceSqlUserCreate, - Read: resourceSqlUserRead, - Update: resourceSqlUserUpdate, - Delete: resourceSqlUserDelete, - Importer: &resource_sql_user_schema.ResourceImporter{ - State: resourceSqlUserImporter, - }, - - Timeouts: &resource_sql_user_schema.ResourceTimeout{ - Create: resource_sql_user_schema.DefaultTimeout(10 * resource_sql_user_time.Minute), - Update: resource_sql_user_schema.DefaultTimeout(10 * resource_sql_user_time.Minute), - Delete: resource_sql_user_schema.DefaultTimeout(10 * resource_sql_user_time.Minute), - }, - - SchemaVersion: 1, - MigrateState: resourceSqlUserMigrateState, - - Schema: map[string]*resource_sql_user_schema.Schema{ - "host": { - Type: resource_sql_user_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The host the user can connect from. This is only supported for MySQL instances. 
Don't set this field for PostgreSQL instances. Can be an IP address. Changing this forces a new resource to be created.`, - }, - - "instance": { - Type: resource_sql_user_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the Cloud SQL instance. Changing this forces a new resource to be created.`, - }, - - "name": { - Type: resource_sql_user_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: diffSuppressIamUserName, - Description: `The name of the user. Changing this forces a new resource to be created.`, - }, - - "password": { - Type: resource_sql_user_schema.TypeString, - Optional: true, - Sensitive: true, - Description: `The password for the user. Can be updated. For Postgres instances this is a Required field, unless type is set to - either CLOUD_IAM_USER or CLOUD_IAM_SERVICE_ACCOUNT.`, - }, - - "type": { - Type: resource_sql_user_schema.TypeString, - Optional: true, - ForceNew: true, - DiffSuppressFunc: emptyOrDefaultStringSuppress("BUILT_IN"), - Description: `The user type. It determines the method to authenticate the user during login. - The default is the database's built-in user type. Flags include "BUILT_IN", "CLOUD_IAM_USER", or "CLOUD_IAM_SERVICE_ACCOUNT".`, - ValidateFunc: resource_sql_user_validation.StringInSlice([]string{"BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", ""}, false), - }, - - "project": { - Type: resource_sql_user_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, - }, - - "deletion_policy": { - Type: resource_sql_user_schema.TypeString, - Optional: true, - Description: `The deletion policy for the user. Setting ABANDON allows the resource - to be abandoned rather than deleted. This is useful for Postgres, where users cannot be deleted from the API if they - have been granted SQL roles. 
Possible values are: "ABANDON".`, - ValidateFunc: resource_sql_user_validation.StringInSlice([]string{"ABANDON", ""}, false), - }, - }, - UseJSONNumber: true, - } -} - -func resourceSqlUserCreate(d *resource_sql_user_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - instance := d.Get("instance").(string) - password := d.Get("password").(string) - host := d.Get("host").(string) - typ := d.Get("type").(string) - - user := &resource_sql_user_sqladminsqladmin.User{ - Name: name, - Instance: instance, - Password: password, - Host: host, - Type: typ, - } - - mutexKV.Lock(instanceMutexKey(project, instance)) - defer mutexKV.Unlock(instanceMutexKey(project, instance)) - var op *resource_sql_user_sqladminsqladmin.Operation - insertFunc := func() error { - op, err = config.NewSqlAdminClient(userAgent).Users.Insert(project, instance, - user).Do() - return err - } - err = retryTimeDuration(insertFunc, d.Timeout(resource_sql_user_schema.TimeoutCreate)) - - if err != nil { - return resource_sql_user_fmt.Errorf("Error, failed to insert "+ - "user %s into instance %s: %s", name, instance, err) - } - - d.SetId(resource_sql_user_fmt.Sprintf("%s/%s/%s", user.Name, user.Host, user.Instance)) - - err = sqlAdminOperationWaitTime(config, op, project, "Insert User", userAgent, d.Timeout(resource_sql_user_schema.TimeoutCreate)) - - if err != nil { - return resource_sql_user_fmt.Errorf("Error, failure waiting for insertion of %s "+ - "into %s: %s", name, instance, err) - } - - return resourceSqlUserRead(d, meta) -} - -func resourceSqlUserRead(d *resource_sql_user_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := 
getProject(d, config) - if err != nil { - return err - } - - instance := d.Get("instance").(string) - name := d.Get("name").(string) - host := d.Get("host").(string) - - var users *resource_sql_user_sqladminsqladmin.UsersListResponse - err = nil - err = retryTime(func() error { - users, err = config.NewSqlAdminClient(userAgent).Users.List(project, instance).Do() - return err - }, 5) - if err != nil { - return handleNotFoundError(err, d, resource_sql_user_fmt.Sprintf("SQL User %q in instance %q", name, instance)) - } - - var user *resource_sql_user_sqladminsqladmin.User - databaseInstance, err := config.NewSqlAdminClient(userAgent).Instances.Get(project, instance).Do() - if err != nil { - return err - } - - for _, currentUser := range users.Items { - if !resource_sql_user_strings.Contains(databaseInstance.DatabaseVersion, "POSTGRES") { - name = resource_sql_user_strings.Split(name, "@")[0] - } - - if currentUser.Name == name { - - if host == "" || currentUser.Host == host { - user = currentUser - break - } - } - } - - if user == nil { - resource_sql_user_log.Printf("[WARN] Removing SQL User %q because it's gone", d.Get("name").(string)) - d.SetId("") - - return nil - } - - if err := d.Set("host", user.Host); err != nil { - return resource_sql_user_fmt.Errorf("Error setting host: %s", err) - } - if err := d.Set("instance", user.Instance); err != nil { - return resource_sql_user_fmt.Errorf("Error setting instance: %s", err) - } - if err := d.Set("name", user.Name); err != nil { - return resource_sql_user_fmt.Errorf("Error setting name: %s", err) - } - if err := d.Set("type", user.Type); err != nil { - return resource_sql_user_fmt.Errorf("Error setting type: %s", err) - } - if err := d.Set("project", project); err != nil { - return resource_sql_user_fmt.Errorf("Error setting project: %s", err) - } - d.SetId(resource_sql_user_fmt.Sprintf("%s/%s/%s", user.Name, user.Host, user.Instance)) - return nil -} - -func resourceSqlUserUpdate(d 
*resource_sql_user_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - if d.HasChange("password") { - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - instance := d.Get("instance").(string) - password := d.Get("password").(string) - host := d.Get("host").(string) - - user := &resource_sql_user_sqladminsqladmin.User{ - Name: name, - Instance: instance, - Password: password, - } - - mutexKV.Lock(instanceMutexKey(project, instance)) - defer mutexKV.Unlock(instanceMutexKey(project, instance)) - var op *resource_sql_user_sqladminsqladmin.Operation - updateFunc := func() error { - op, err = config.NewSqlAdminClient(userAgent).Users.Update(project, instance, user).Host(host).Name(name).Do() - return err - } - err = retryTimeDuration(updateFunc, d.Timeout(resource_sql_user_schema.TimeoutUpdate)) - - if err != nil { - return resource_sql_user_fmt.Errorf("Error, failed to update"+ - "user %s into user %s: %s", name, instance, err) - } - - err = sqlAdminOperationWaitTime(config, op, project, "Insert User", userAgent, d.Timeout(resource_sql_user_schema.TimeoutUpdate)) - - if err != nil { - return resource_sql_user_fmt.Errorf("Error, failure waiting for update of %s "+ - "in %s: %s", name, instance, err) - } - - return resourceSqlUserRead(d, meta) - } - - return nil -} - -func resourceSqlUserDelete(d *resource_sql_user_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - if deletionPolicy := d.Get("deletion_policy"); deletionPolicy == "ABANDON" { - - return nil - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - host := d.Get("host").(string) - instance := d.Get("instance").(string) - - 
mutexKV.Lock(instanceMutexKey(project, instance)) - defer mutexKV.Unlock(instanceMutexKey(project, instance)) - - var op *resource_sql_user_sqladminsqladmin.Operation - err = retryTimeDuration(func() error { - op, err = config.NewSqlAdminClient(userAgent).Users.Delete(project, instance).Host(host).Name(name).Do() - if err != nil { - return err - } - - if err := sqlAdminOperationWaitTime(config, op, project, "Delete User", userAgent, d.Timeout(resource_sql_user_schema.TimeoutDelete)); err != nil { - return err - } - return nil - }, d.Timeout(resource_sql_user_schema.TimeoutDelete), isSqlOperationInProgressError, isSqlInternalError) - - if err != nil { - return resource_sql_user_fmt.Errorf("Error, failed to delete"+ - "user %s in instance %s: %s", name, - instance, err) - } - - return nil -} - -func resourceSqlUserImporter(d *resource_sql_user_schema.ResourceData, meta interface{}) ([]*resource_sql_user_schema.ResourceData, error) { - parts := resource_sql_user_strings.Split(d.Id(), "/") - - if len(parts) == 3 { - if err := d.Set("project", parts[0]); err != nil { - return nil, resource_sql_user_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("instance", parts[1]); err != nil { - return nil, resource_sql_user_fmt.Errorf("Error setting instance: %s", err) - } - if err := d.Set("name", parts[2]); err != nil { - return nil, resource_sql_user_fmt.Errorf("Error setting name: %s", err) - } - } else if len(parts) == 4 { - if err := d.Set("project", parts[0]); err != nil { - return nil, resource_sql_user_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("instance", parts[1]); err != nil { - return nil, resource_sql_user_fmt.Errorf("Error setting instance: %s", err) - } - if err := d.Set("host", parts[2]); err != nil { - return nil, resource_sql_user_fmt.Errorf("Error setting host: %s", err) - } - if err := d.Set("name", parts[3]); err != nil { - return nil, resource_sql_user_fmt.Errorf("Error setting name: %s", err) - } - } else { - return 
nil, resource_sql_user_fmt.Errorf("Invalid specifier. Expecting {project}/{instance}/{name} for postgres instance and {project}/{instance}/{host}/{name} for MySQL instance") - } - - return []*resource_sql_user_schema.ResourceData{d}, nil -} - -func resourceSqlUserMigrateState( - v int, is *resource_sql_user_migrate_terraform.InstanceState, meta interface{}) (*resource_sql_user_migrate_terraform.InstanceState, error) { - if is.Empty() { - resource_sql_user_migrate_log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") - return is, nil - } - - switch v { - case 0: - resource_sql_user_migrate_log.Println("[INFO] Found Google Sql User State v0; migrating to v1") - is, err := migrateSqlUserStateV0toV1(is) - if err != nil { - return is, err - } - return is, nil - default: - return is, resource_sql_user_migrate_fmt.Errorf("Unexpected schema version: %d", v) - } -} - -func migrateSqlUserStateV0toV1(is *resource_sql_user_migrate_terraform.InstanceState) (*resource_sql_user_migrate_terraform.InstanceState, error) { - resource_sql_user_migrate_log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) - - name := is.Attributes["name"] - instance := is.Attributes["instance"] - is.ID = resource_sql_user_migrate_fmt.Sprintf("%s/%s", instance, name) - - resource_sql_user_migrate_log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) - return is, nil -} - -func resourceStorageBucket() *resource_storage_bucket_schema.Resource { - return &resource_storage_bucket_schema.Resource{ - Create: resourceStorageBucketCreate, - Read: resourceStorageBucketRead, - Update: resourceStorageBucketUpdate, - Delete: resourceStorageBucketDelete, - Importer: &resource_storage_bucket_schema.ResourceImporter{ - State: resourceStorageBucketStateImporter, - }, - CustomizeDiff: resource_storage_bucket_customdiff.All( - resource_storage_bucket_customdiff.ForceNewIfChange("retention_policy.0.is_locked", isPolicyLocked), - ), - - Schema: 
map[string]*resource_storage_bucket_schema.Schema{ - "name": { - Type: resource_storage_bucket_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the bucket.`, - }, - - "encryption": { - Type: resource_storage_bucket_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &resource_storage_bucket_schema.Resource{ - Schema: map[string]*resource_storage_bucket_schema.Schema{ - "default_kms_key_name": { - Type: resource_storage_bucket_schema.TypeString, - Required: true, - Description: `A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified. You must pay attention to whether the crypto key is available in the location that this bucket is created in. See the docs for more details.`, - }, - }, - }, - Description: `The bucket's encryption configuration.`, - }, - - "requester_pays": { - Type: resource_storage_bucket_schema.TypeBool, - Optional: true, - Description: `Enables Requester Pays on a storage bucket.`, - }, - - "force_destroy": { - Type: resource_storage_bucket_schema.TypeBool, - Optional: true, - Default: false, - Description: `When deleting a bucket, this boolean option will delete all contained objects. 
If you try to delete a bucket that contains objects, Terraform will fail that run.`, - }, - - "labels": { - Type: resource_storage_bucket_schema.TypeMap, - Optional: true, - Elem: &resource_storage_bucket_schema.Schema{Type: resource_storage_bucket_schema.TypeString}, - Description: `A set of key/value label pairs to assign to the bucket.`, - }, - - "location": { - Type: resource_storage_bucket_schema.TypeString, - Required: true, - ForceNew: true, - StateFunc: func(s interface{}) string { - return resource_storage_bucket_strings.ToUpper(s.(string)) - }, - Description: `The Google Cloud Storage location`, - }, - - "project": { - Type: resource_storage_bucket_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, - }, - - "self_link": { - Type: resource_storage_bucket_schema.TypeString, - Computed: true, - Description: `The URI of the created resource.`, - }, - - "url": { - Type: resource_storage_bucket_schema.TypeString, - Computed: true, - Description: `The base URL of the bucket, in the format gs://.`, - }, - - "storage_class": { - Type: resource_storage_bucket_schema.TypeString, - Optional: true, - Default: "STANDARD", - Description: `The Storage Class of the new bucket. 
Supported values include: STANDARD, MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE.`, - }, - - "lifecycle_rule": { - Type: resource_storage_bucket_schema.TypeList, - Optional: true, - MaxItems: 100, - Elem: &resource_storage_bucket_schema.Resource{ - Schema: map[string]*resource_storage_bucket_schema.Schema{ - "action": { - Type: resource_storage_bucket_schema.TypeSet, - Required: true, - MinItems: 1, - MaxItems: 1, - Set: resourceGCSBucketLifecycleRuleActionHash, - Elem: &resource_storage_bucket_schema.Resource{ - Schema: map[string]*resource_storage_bucket_schema.Schema{ - "type": { - Type: resource_storage_bucket_schema.TypeString, - Required: true, - Description: `The type of the action of this Lifecycle Rule. Supported values include: Delete and SetStorageClass.`, - }, - "storage_class": { - Type: resource_storage_bucket_schema.TypeString, - Optional: true, - Description: `The target Storage Class of objects affected by this Lifecycle Rule. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE.`, - }, - }, - }, - Description: `The Lifecycle Rule's action configuration. A single block of this type is supported.`, - }, - "condition": { - Type: resource_storage_bucket_schema.TypeSet, - Required: true, - MinItems: 1, - MaxItems: 1, - Set: resourceGCSBucketLifecycleRuleConditionHash, - Elem: &resource_storage_bucket_schema.Resource{ - Schema: map[string]*resource_storage_bucket_schema.Schema{ - "age": { - Type: resource_storage_bucket_schema.TypeInt, - Optional: true, - Description: `Minimum age of an object in days to satisfy this condition.`, - }, - "created_before": { - Type: resource_storage_bucket_schema.TypeString, - Optional: true, - Description: `Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.`, - }, - "custom_time_before": { - Type: resource_storage_bucket_schema.TypeString, - Optional: true, - Description: `Creation date of an object in RFC 3339 (e.g. 
2017-06-13) to satisfy this condition.`, - }, - "days_since_custom_time": { - Type: resource_storage_bucket_schema.TypeInt, - Optional: true, - Description: `Number of days elapsed since the user-specified timestamp set on an object.`, - }, - "days_since_noncurrent_time": { - Type: resource_storage_bucket_schema.TypeInt, - Optional: true, - Description: `Number of days elapsed since the noncurrent timestamp of an object. This - condition is relevant only for versioned objects.`, - }, - "noncurrent_time_before": { - Type: resource_storage_bucket_schema.TypeString, - Optional: true, - Description: `Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.`, - }, - "with_state": { - Type: resource_storage_bucket_schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: resource_storage_bucket_validation.StringInSlice([]string{"LIVE", "ARCHIVED", "ANY", ""}, false), - Description: `Match to live and/or archived objects. Unversioned buckets have only live objects. Supported values include: "LIVE", "ARCHIVED", "ANY".`, - }, - "matches_storage_class": { - Type: resource_storage_bucket_schema.TypeList, - Optional: true, - Elem: &resource_storage_bucket_schema.Schema{Type: resource_storage_bucket_schema.TypeString}, - Description: `Storage Class of objects to satisfy this condition. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, DURABLE_REDUCED_AVAILABILITY.`, - }, - "num_newer_versions": { - Type: resource_storage_bucket_schema.TypeInt, - Optional: true, - Description: `Relevant only for versioned objects. 
The number of newer versions of an object to satisfy this condition.`, - }, - }, - }, - Description: `The Lifecycle Rule's condition configuration.`, - }, - }, - }, - Description: `The bucket's Lifecycle Rules configuration.`, - }, - - "versioning": { - Type: resource_storage_bucket_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &resource_storage_bucket_schema.Resource{ - Schema: map[string]*resource_storage_bucket_schema.Schema{ - "enabled": { - Type: resource_storage_bucket_schema.TypeBool, - Required: true, - Description: `While set to true, versioning is fully enabled for this bucket.`, - }, - }, - }, - Description: `The bucket's Versioning configuration.`, - }, - - "website": { - Type: resource_storage_bucket_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &resource_storage_bucket_schema.Resource{ - Schema: map[string]*resource_storage_bucket_schema.Schema{ - "main_page_suffix": { - Type: resource_storage_bucket_schema.TypeString, - Optional: true, - AtLeastOneOf: []string{"website.0.not_found_page", "website.0.main_page_suffix"}, - Description: `Behaves as the bucket's directory index where missing objects are treated as potential directories.`, - }, - "not_found_page": { - Type: resource_storage_bucket_schema.TypeString, - Optional: true, - AtLeastOneOf: []string{"website.0.main_page_suffix", "website.0.not_found_page"}, - Description: `The custom object to return when a requested resource is not found.`, - }, - }, - }, - Description: `Configuration if the bucket acts as a website.`, - }, - - "retention_policy": { - Type: resource_storage_bucket_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &resource_storage_bucket_schema.Resource{ - Schema: map[string]*resource_storage_bucket_schema.Schema{ - "is_locked": { - Type: resource_storage_bucket_schema.TypeBool, - Optional: true, - Default: false, - Description: `If set to true, the bucket will be locked and permanently restrict edits to the bucket's retention policy. 
Caution: Locking a bucket is an irreversible action.`, - }, - "retention_period": { - Type: resource_storage_bucket_schema.TypeInt, - Required: true, - ValidateFunc: resource_storage_bucket_validation.IntBetween(1, resource_storage_bucket_math.MaxInt32), - Description: `The period of time, in seconds, that objects in the bucket must be retained and cannot be deleted, overwritten, or archived. The value must be less than 3,155,760,000 seconds.`, - }, - }, - }, - Description: `Configuration of the bucket's data retention policy for how long objects in the bucket should be retained.`, - }, - - "cors": { - Type: resource_storage_bucket_schema.TypeList, - Optional: true, - Elem: &resource_storage_bucket_schema.Resource{ - Schema: map[string]*resource_storage_bucket_schema.Schema{ - "origin": { - Type: resource_storage_bucket_schema.TypeList, - Optional: true, - Elem: &resource_storage_bucket_schema.Schema{ - Type: resource_storage_bucket_schema.TypeString, - }, - Description: `The list of Origins eligible to receive CORS response headers. 
Note: "*" is permitted in the list of origins, and means "any Origin".`, - }, - "method": { - Type: resource_storage_bucket_schema.TypeList, - Optional: true, - Elem: &resource_storage_bucket_schema.Schema{ - Type: resource_storage_bucket_schema.TypeString, - }, - Description: `The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list of methods, and means "any method".`, - }, - "response_header": { - Type: resource_storage_bucket_schema.TypeList, - Optional: true, - Elem: &resource_storage_bucket_schema.Schema{ - Type: resource_storage_bucket_schema.TypeString, - }, - Description: `The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.`, - }, - "max_age_seconds": { - Type: resource_storage_bucket_schema.TypeInt, - Optional: true, - Description: `The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.`, - }, - }, - }, - Description: `The bucket's Cross-Origin Resource Sharing (CORS) configuration.`, - }, - - "default_event_based_hold": { - Type: resource_storage_bucket_schema.TypeBool, - Optional: true, - }, - - "logging": { - Type: resource_storage_bucket_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &resource_storage_bucket_schema.Resource{ - Schema: map[string]*resource_storage_bucket_schema.Schema{ - "log_bucket": { - Type: resource_storage_bucket_schema.TypeString, - Required: true, - Description: `The bucket that will receive log objects.`, - }, - "log_object_prefix": { - Type: resource_storage_bucket_schema.TypeString, - Optional: true, - Computed: true, - Description: `The object prefix for log objects. 
If it's not provided, by default Google Cloud Storage sets this to this bucket's name.`, - }, - }, - }, - Description: `The bucket's Access & Storage Logs configuration.`, - }, - "uniform_bucket_level_access": { - Type: resource_storage_bucket_schema.TypeBool, - Optional: true, - Computed: true, - Description: `Enables uniform bucket-level access on a bucket.`, - }, - }, - UseJSONNumber: true, - } -} - -func isPolicyLocked(_ resource_storage_bucket_context.Context, old, new, _ interface{}) bool { - if old == nil || new == nil { - return false - } - - if old.(bool) && !new.(bool) { - return true - } - - return false -} - -func resourceStorageBucketCreate(d *resource_storage_bucket_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - bucket := d.Get("name").(string) - location := d.Get("location").(string) - - sb := &resource_storage_bucket_storage.Bucket{ - Name: bucket, - Labels: expandLabels(d), - Location: location, - IamConfiguration: expandIamConfiguration(d), - } - - if v, ok := d.GetOk("storage_class"); ok { - sb.StorageClass = v.(string) - } - - lifecycle, err := expandStorageBucketLifecycle(d.Get("lifecycle_rule")) - if err != nil { - return err - } - sb.Lifecycle = lifecycle - - if v, ok := d.GetOk("versioning"); ok { - sb.Versioning = expandBucketVersioning(v) - } - - if v, ok := d.GetOk("website"); ok { - sb.Website = expandBucketWebsite(v.([]interface{})) - } - - if v, ok := d.GetOk("retention_policy"); ok { - - retention_policies := v.([]interface{}) - - if len(retention_policies) > 0 { - sb.RetentionPolicy = &resource_storage_bucket_storage.BucketRetentionPolicy{} - - retentionPolicy := retention_policies[0].(map[string]interface{}) - - if v, ok := retentionPolicy["retention_period"]; ok { - sb.RetentionPolicy.RetentionPeriod = int64(v.(int)) - } - } - 
} - - if v, ok := d.GetOk("default_event_based_hold"); ok { - sb.DefaultEventBasedHold = v.(bool) - } - - if v, ok := d.GetOk("cors"); ok { - sb.Cors = expandCors(v.([]interface{})) - } - - if v, ok := d.GetOk("logging"); ok { - sb.Logging = expandBucketLogging(v.([]interface{})) - } - - if v, ok := d.GetOk("encryption"); ok { - sb.Encryption = expandBucketEncryption(v.([]interface{})) - } - - if v, ok := d.GetOk("requester_pays"); ok { - sb.Billing = &resource_storage_bucket_storage.BucketBilling{ - RequesterPays: v.(bool), - } - } - - var res *resource_storage_bucket_storage.Bucket - - err = retry(func() error { - res, err = config.NewStorageClient(userAgent).Buckets.Insert(project, sb).Do() - return err - }) - - if err != nil { - resource_storage_bucket_fmt.Printf("Error creating bucket %s: %v", bucket, err) - return err - } - - resource_storage_bucket_log.Printf("[DEBUG] Created bucket %v at location %v\n\n", res.Name, res.SelfLink) - d.SetId(res.Id) - - err = retryTimeDuration(func() (operr error) { - _, retryErr := config.NewStorageClient(userAgent).Buckets.Get(res.Name).Do() - return retryErr - }, d.Timeout(resource_storage_bucket_schema.TimeoutCreate), isNotFoundRetryableError("bucket creation")) - - if err != nil { - return resource_storage_bucket_fmt.Errorf("Error reading bucket after creation: %s", err) - } - - if v, ok := d.GetOk("retention_policy"); ok && !res.RetentionPolicy.IsLocked { - retention_policies := v.([]interface{}) - - sb.RetentionPolicy = &resource_storage_bucket_storage.BucketRetentionPolicy{} - - retentionPolicy := retention_policies[0].(map[string]interface{}) - - if locked, ok := retentionPolicy["is_locked"]; ok && locked.(bool) { - err = lockRetentionPolicy(config.NewStorageClient(userAgent).Buckets, bucket, res.Metageneration) - if err != nil { - return err - } - - resource_storage_bucket_log.Printf("[DEBUG] Locked bucket %v at location %v\n\n", res.Name, res.SelfLink) - } - } - - return resourceStorageBucketRead(d, meta) -} - -func 
resourceStorageBucketUpdate(d *resource_storage_bucket_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - sb := &resource_storage_bucket_storage.Bucket{} - - if detectLifecycleChange(d) { - lifecycle, err := expandStorageBucketLifecycle(d.Get("lifecycle_rule")) - if err != nil { - return err - } - sb.Lifecycle = lifecycle - } - - if d.HasChange("requester_pays") { - v := d.Get("requester_pays") - sb.Billing = &resource_storage_bucket_storage.BucketBilling{ - RequesterPays: v.(bool), - ForceSendFields: []string{"RequesterPays"}, - } - } - - if d.HasChange("versioning") { - if v, ok := d.GetOk("versioning"); ok { - sb.Versioning = expandBucketVersioning(v) - } - } - - if d.HasChange("website") { - sb.Website = expandBucketWebsite(d.Get("website")) - } - - if d.HasChange("retention_policy") { - if v, ok := d.GetOk("retention_policy"); ok { - sb.RetentionPolicy = expandBucketRetentionPolicy(v.([]interface{})) - } else { - sb.NullFields = append(sb.NullFields, "RetentionPolicy") - } - } - - if d.HasChange("cors") { - if v, ok := d.GetOk("cors"); ok { - sb.Cors = expandCors(v.([]interface{})) - } else { - sb.NullFields = append(sb.NullFields, "Cors") - } - } - - if d.HasChange("default_event_based_hold") { - v := d.Get("default_event_based_hold") - sb.DefaultEventBasedHold = v.(bool) - sb.ForceSendFields = append(sb.ForceSendFields, "DefaultEventBasedHold") - } - - if d.HasChange("logging") { - if v, ok := d.GetOk("logging"); ok { - sb.Logging = expandBucketLogging(v.([]interface{})) - } else { - sb.NullFields = append(sb.NullFields, "Logging") - } - } - - if d.HasChange("encryption") { - if v, ok := d.GetOk("encryption"); ok { - sb.Encryption = expandBucketEncryption(v.([]interface{})) - } else { - sb.NullFields = append(sb.NullFields, "Encryption") - } - } - - if d.HasChange("labels") { - sb.Labels = expandLabels(d) - if len(sb.Labels) == 0 { - 
sb.NullFields = append(sb.NullFields, "Labels") - } - - old, _ := d.GetChange("labels") - for k := range old.(map[string]interface{}) { - if _, ok := sb.Labels[k]; !ok { - sb.NullFields = append(sb.NullFields, resource_storage_bucket_fmt.Sprintf("Labels.%s", k)) - } - } - } - - if d.HasChange("storage_class") { - if v, ok := d.GetOk("storage_class"); ok { - sb.StorageClass = v.(string) - } - } - - if d.HasChange("uniform_bucket_level_access") { - sb.IamConfiguration = expandIamConfiguration(d) - } - - res, err := config.NewStorageClient(userAgent).Buckets.Patch(d.Get("name").(string), sb).Do() - - if err != nil { - return err - } - - if err := d.Set("self_link", res.SelfLink); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting self_link: %s", err) - } - - err = retryTimeDuration(func() (operr error) { - _, retryErr := config.NewStorageClient(userAgent).Buckets.Get(res.Name).Do() - return retryErr - }, d.Timeout(resource_storage_bucket_schema.TimeoutUpdate), isNotFoundRetryableError("bucket update")) - - if err != nil { - return resource_storage_bucket_fmt.Errorf("Error reading bucket after update: %s", err) - } - - if d.HasChange("retention_policy") { - if v, ok := d.GetOk("retention_policy"); ok { - retention_policies := v.([]interface{}) - - sb.RetentionPolicy = &resource_storage_bucket_storage.BucketRetentionPolicy{} - - retentionPolicy := retention_policies[0].(map[string]interface{}) - - if locked, ok := retentionPolicy["is_locked"]; ok && locked.(bool) && d.HasChange("retention_policy.0.is_locked") { - err = lockRetentionPolicy(config.NewStorageClient(userAgent).Buckets, d.Get("name").(string), res.Metageneration) - if err != nil { - return err - } - } - } - } - - resource_storage_bucket_log.Printf("[DEBUG] Patched bucket %v at location %v\n\n", res.Name, res.SelfLink) - - d.SetId(res.Id) - - return nil -} - -func resourceStorageBucketRead(d *resource_storage_bucket_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) 
- userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("name").(string) - - var res *resource_storage_bucket_storage.Bucket - - err = retryTimeDuration(func() (operr error) { - var retryErr error - res, retryErr = config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() - return retryErr - }, d.Timeout(resource_storage_bucket_schema.TimeoutCreate), isNotFoundRetryableError("bucket creation")) - - if err != nil { - return handleNotFoundError(err, d, resource_storage_bucket_fmt.Sprintf("Storage Bucket %q", d.Get("name").(string))) - } - resource_storage_bucket_log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink) - - if d.Get("project") == "" { - project, _ := getProject(d, config) - if err := d.Set("project", project); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting project: %s", err) - } - } - if d.Get("project") == "" { - proj, err := config.NewComputeClient(userAgent).Projects.Get(resource_storage_bucket_strconv.FormatUint(res.ProjectNumber, 10)).Do() - if err != nil { - return err - } - resource_storage_bucket_log.Printf("[DEBUG] Bucket %v is in project number %v, which is project ID %s.\n", res.Name, res.ProjectNumber, proj.Name) - if err := d.Set("project", proj.Name); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting project: %s", err) - } - } - - if err := d.Set("self_link", res.SelfLink); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("url", resource_storage_bucket_fmt.Sprintf("gs://%s", bucket)); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting url: %s", err) - } - if err := d.Set("storage_class", res.StorageClass); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting storage_class: %s", err) - } - if err := d.Set("encryption", flattenBucketEncryption(res.Encryption)); err != nil { - return 
resource_storage_bucket_fmt.Errorf("Error setting encryption: %s", err) - } - if err := d.Set("location", res.Location); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting location: %s", err) - } - if err := d.Set("cors", flattenCors(res.Cors)); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting cors: %s", err) - } - if err := d.Set("default_event_based_hold", res.DefaultEventBasedHold); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting default_event_based_hold: %s", err) - } - if err := d.Set("logging", flattenBucketLogging(res.Logging)); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting logging: %s", err) - } - if err := d.Set("versioning", flattenBucketVersioning(res.Versioning)); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting versioning: %s", err) - } - if err := d.Set("lifecycle_rule", flattenBucketLifecycle(res.Lifecycle)); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting lifecycle_rule: %s", err) - } - if err := d.Set("labels", res.Labels); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting labels: %s", err) - } - if err := d.Set("website", flattenBucketWebsite(res.Website)); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting website: %s", err) - } - if err := d.Set("retention_policy", flattenBucketRetentionPolicy(res.RetentionPolicy)); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting retention_policy: %s", err) - } - - if res.IamConfiguration != nil && res.IamConfiguration.UniformBucketLevelAccess != nil { - if err := d.Set("uniform_bucket_level_access", res.IamConfiguration.UniformBucketLevelAccess.Enabled); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting uniform_bucket_level_access: %s", err) - } - } else { - if err := d.Set("uniform_bucket_level_access", false); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting 
uniform_bucket_level_access: %s", err) - } - } - - if res.Billing == nil { - if err := d.Set("requester_pays", nil); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting requester_pays: %s", err) - } - } else { - if err := d.Set("requester_pays", res.Billing.RequesterPays); err != nil { - return resource_storage_bucket_fmt.Errorf("Error setting requester_pays: %s", err) - } - } - - d.SetId(res.Id) - return nil -} - -func resourceStorageBucketDelete(d *resource_storage_bucket_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("name").(string) - - var listError, deleteObjectError error - for deleteObjectError == nil { - res, err := config.NewStorageClient(userAgent).Objects.List(bucket).Versions(true).Do() - if err != nil { - resource_storage_bucket_log.Printf("Error listing contents of bucket %s: %v", bucket, err) - - listError = err - break - } - - if len(res.Items) == 0 { - break - } - - if d.Get("retention_policy.0.is_locked").(bool) { - for _, item := range res.Items { - expiration, err := resource_storage_bucket_time.Parse(resource_storage_bucket_time.RFC3339, item.RetentionExpirationTime) - if err != nil { - return err - } - if expiration.After(resource_storage_bucket_time.Now()) { - deleteErr := resource_storage_bucket_errors.New("Bucket '" + d.Get("name").(string) + "' contains objects that have not met the retention period yet and cannot be deleted.") - resource_storage_bucket_log.Printf("Error! %s : %s\n\n", bucket, deleteErr) - return deleteErr - } - } - } - - if !d.Get("force_destroy").(bool) { - deleteErr := resource_storage_bucket_fmt.Errorf("Error trying to delete bucket %s containing objects without `force_destroy` set to true", bucket) - resource_storage_bucket_log.Printf("Error! 
%s : %s\n\n", bucket, deleteErr) - return deleteErr - } - - resource_storage_bucket_log.Printf("[DEBUG] GCS Bucket attempting to forceDestroy\n\n") - - wp := resource_storage_bucket_workerpool.New(resource_storage_bucket_runtime.NumCPU() - 1) - - for _, object := range res.Items { - resource_storage_bucket_log.Printf("[DEBUG] Found %s", object.Name) - object := object - - wp.Submit(func() { - resource_storage_bucket_log.Printf("[TRACE] Attempting to delete %s", object.Name) - if err := config.NewStorageClient(userAgent).Objects.Delete(bucket, object.Name).Generation(object.Generation).Do(); err != nil { - deleteObjectError = err - resource_storage_bucket_log.Printf("[ERR] Failed to delete storage object %s: %s", object.Name, err) - } else { - resource_storage_bucket_log.Printf("[TRACE] Successfully deleted %s", object.Name) - } - }) - } - - wp.StopWait() - } - - err = resource_storage_bucket_resource.Retry(1*resource_storage_bucket_time.Minute, func() *resource_storage_bucket_resource.RetryError { - err := config.NewStorageClient(userAgent).Buckets.Delete(bucket).Do() - if err == nil { - return nil - } - if gerr, ok := err.(*resource_storage_bucket_googleapi.Error); ok && gerr.Code == 429 { - return resource_storage_bucket_resource.RetryableError(gerr) - } - return resource_storage_bucket_resource.NonRetryableError(err) - }) - if gerr, ok := err.(*resource_storage_bucket_googleapi.Error); ok && gerr.Code == 409 && resource_storage_bucket_strings.Contains(gerr.Message, "not empty") && listError != nil { - return resource_storage_bucket_fmt.Errorf("could not delete non-empty bucket due to error when listing contents: %v", listError) - } - if gerr, ok := err.(*resource_storage_bucket_googleapi.Error); ok && gerr.Code == 409 && resource_storage_bucket_strings.Contains(gerr.Message, "not empty") && deleteObjectError != nil { - return resource_storage_bucket_fmt.Errorf("could not delete non-empty bucket due to error when deleting contents: %v", deleteObjectError) - } - 
if err != nil { - resource_storage_bucket_log.Printf("Error deleting bucket %s: %v", bucket, err) - return err - } - resource_storage_bucket_log.Printf("[DEBUG] Deleted bucket %v\n\n", bucket) - - return nil -} - -func resourceStorageBucketStateImporter(d *resource_storage_bucket_schema.ResourceData, meta interface{}) ([]*resource_storage_bucket_schema.ResourceData, error) { - - parts := resource_storage_bucket_strings.Split(d.Id(), "/") - if len(parts) == 1 { - if err := d.Set("name", parts[0]); err != nil { - return nil, resource_storage_bucket_fmt.Errorf("Error setting name: %s", err) - } - } else if len(parts) > 1 { - if err := d.Set("project", parts[0]); err != nil { - return nil, resource_storage_bucket_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("name", parts[1]); err != nil { - return nil, resource_storage_bucket_fmt.Errorf("Error setting name: %s", err) - } - } - - if err := d.Set("force_destroy", false); err != nil { - return nil, resource_storage_bucket_fmt.Errorf("Error setting force_destroy: %s", err) - } - return []*resource_storage_bucket_schema.ResourceData{d}, nil -} - -func expandCors(configured []interface{}) []*resource_storage_bucket_storage.BucketCors { - if len(configured) == 0 { - return nil - } - corsRules := make([]*resource_storage_bucket_storage.BucketCors, 0, len(configured)) - for _, raw := range configured { - data := raw.(map[string]interface{}) - corsRule := resource_storage_bucket_storage.BucketCors{ - Origin: convertStringArr(data["origin"].([]interface{})), - Method: convertStringArr(data["method"].([]interface{})), - ResponseHeader: convertStringArr(data["response_header"].([]interface{})), - MaxAgeSeconds: int64(data["max_age_seconds"].(int)), - } - - corsRules = append(corsRules, &corsRule) - } - return corsRules -} - -func flattenCors(corsRules []*resource_storage_bucket_storage.BucketCors) []map[string]interface{} { - corsRulesSchema := make([]map[string]interface{}, 0, len(corsRules)) - for _, 
corsRule := range corsRules { - data := map[string]interface{}{ - "origin": corsRule.Origin, - "method": corsRule.Method, - "response_header": corsRule.ResponseHeader, - "max_age_seconds": corsRule.MaxAgeSeconds, - } - - corsRulesSchema = append(corsRulesSchema, data) - } - return corsRulesSchema -} - -func expandBucketEncryption(configured interface{}) *resource_storage_bucket_storage.BucketEncryption { - encs := configured.([]interface{}) - if len(encs) == 0 || encs[0] == nil { - return nil - } - enc := encs[0].(map[string]interface{}) - keyname := enc["default_kms_key_name"] - if keyname == nil || keyname.(string) == "" { - return nil - } - bucketenc := &resource_storage_bucket_storage.BucketEncryption{ - DefaultKmsKeyName: keyname.(string), - } - return bucketenc -} - -func flattenBucketEncryption(enc *resource_storage_bucket_storage.BucketEncryption) []map[string]interface{} { - encryption := make([]map[string]interface{}, 0, 1) - - if enc == nil { - return encryption - } - - encryption = append(encryption, map[string]interface{}{ - "default_kms_key_name": enc.DefaultKmsKeyName, - }) - - return encryption -} - -func expandBucketLogging(configured interface{}) *resource_storage_bucket_storage.BucketLogging { - loggings := configured.([]interface{}) - if len(loggings) == 0 { - return nil - } - - logging := loggings[0].(map[string]interface{}) - - bucketLogging := &resource_storage_bucket_storage.BucketLogging{ - LogBucket: logging["log_bucket"].(string), - LogObjectPrefix: logging["log_object_prefix"].(string), - } - - return bucketLogging -} - -func flattenBucketLogging(bucketLogging *resource_storage_bucket_storage.BucketLogging) []map[string]interface{} { - loggings := make([]map[string]interface{}, 0, 1) - - if bucketLogging == nil { - return loggings - } - - logging := map[string]interface{}{ - "log_bucket": bucketLogging.LogBucket, - "log_object_prefix": bucketLogging.LogObjectPrefix, - } - - loggings = append(loggings, logging) - return loggings -} - 
-func expandBucketRetentionPolicy(configured interface{}) *resource_storage_bucket_storage.BucketRetentionPolicy { - retentionPolicies := configured.([]interface{}) - if len(retentionPolicies) == 0 { - return nil - } - retentionPolicy := retentionPolicies[0].(map[string]interface{}) - - bucketRetentionPolicy := &resource_storage_bucket_storage.BucketRetentionPolicy{ - IsLocked: retentionPolicy["is_locked"].(bool), - RetentionPeriod: int64(retentionPolicy["retention_period"].(int)), - } - - return bucketRetentionPolicy -} - -func flattenBucketRetentionPolicy(bucketRetentionPolicy *resource_storage_bucket_storage.BucketRetentionPolicy) []map[string]interface{} { - bucketRetentionPolicies := make([]map[string]interface{}, 0, 1) - - if bucketRetentionPolicy == nil { - return bucketRetentionPolicies - } - - retentionPolicy := map[string]interface{}{ - "is_locked": bucketRetentionPolicy.IsLocked, - "retention_period": bucketRetentionPolicy.RetentionPeriod, - } - - bucketRetentionPolicies = append(bucketRetentionPolicies, retentionPolicy) - return bucketRetentionPolicies -} - -func expandBucketVersioning(configured interface{}) *resource_storage_bucket_storage.BucketVersioning { - versionings := configured.([]interface{}) - if len(versionings) == 0 { - return nil - } - - versioning := versionings[0].(map[string]interface{}) - - bucketVersioning := &resource_storage_bucket_storage.BucketVersioning{} - - bucketVersioning.Enabled = versioning["enabled"].(bool) - bucketVersioning.ForceSendFields = append(bucketVersioning.ForceSendFields, "Enabled") - - return bucketVersioning -} - -func flattenBucketVersioning(bucketVersioning *resource_storage_bucket_storage.BucketVersioning) []map[string]interface{} { - versionings := make([]map[string]interface{}, 0, 1) - - if bucketVersioning == nil { - return versionings - } - - versioning := map[string]interface{}{ - "enabled": bucketVersioning.Enabled, - } - versionings = append(versionings, versioning) - return versionings -} - -func 
flattenBucketLifecycle(lifecycle *resource_storage_bucket_storage.BucketLifecycle) []map[string]interface{} { - if lifecycle == nil || lifecycle.Rule == nil { - return []map[string]interface{}{} - } - - rules := make([]map[string]interface{}, 0, len(lifecycle.Rule)) - - for _, rule := range lifecycle.Rule { - rules = append(rules, map[string]interface{}{ - "action": resource_storage_bucket_schema.NewSet(resourceGCSBucketLifecycleRuleActionHash, []interface{}{flattenBucketLifecycleRuleAction(rule.Action)}), - "condition": resource_storage_bucket_schema.NewSet(resourceGCSBucketLifecycleRuleConditionHash, []interface{}{flattenBucketLifecycleRuleCondition(rule.Condition)}), - }) - } - - return rules -} - -func flattenBucketLifecycleRuleAction(action *resource_storage_bucket_storage.BucketLifecycleRuleAction) map[string]interface{} { - return map[string]interface{}{ - "type": action.Type, - "storage_class": action.StorageClass, - } -} - -func flattenBucketLifecycleRuleCondition(condition *resource_storage_bucket_storage.BucketLifecycleRuleCondition) map[string]interface{} { - ruleCondition := map[string]interface{}{ - "age": int(condition.Age), - "created_before": condition.CreatedBefore, - "matches_storage_class": convertStringArrToInterface(condition.MatchesStorageClass), - "num_newer_versions": int(condition.NumNewerVersions), - "custom_time_before": condition.CustomTimeBefore, - "days_since_custom_time": int(condition.DaysSinceCustomTime), - "days_since_noncurrent_time": int(condition.DaysSinceNoncurrentTime), - "noncurrent_time_before": condition.NoncurrentTimeBefore, - } - if condition.IsLive == nil { - ruleCondition["with_state"] = "ANY" - } else { - if *condition.IsLive { - ruleCondition["with_state"] = "LIVE" - } else { - ruleCondition["with_state"] = "ARCHIVED" - } - } - return ruleCondition -} - -func flattenBucketWebsite(website *resource_storage_bucket_storage.BucketWebsite) []map[string]interface{} { - if website == nil { - return nil - } - websites := 
make([]map[string]interface{}, 0, 1) - websites = append(websites, map[string]interface{}{ - "main_page_suffix": website.MainPageSuffix, - "not_found_page": website.NotFoundPage, - }) - - return websites -} - -func expandBucketWebsite(v interface{}) *resource_storage_bucket_storage.BucketWebsite { - if v == nil { - return nil - } - vs := v.([]interface{}) - - if len(vs) < 1 || vs[0] == nil { - return nil - } - - website := vs[0].(map[string]interface{}) - w := &resource_storage_bucket_storage.BucketWebsite{} - - if v := website["not_found_page"]; v != "" { - w.NotFoundPage = v.(string) - } - - if v := website["main_page_suffix"]; v != "" { - w.MainPageSuffix = v.(string) - } - return w -} - -func expandIamConfiguration(d *resource_storage_bucket_schema.ResourceData) *resource_storage_bucket_storage.BucketIamConfiguration { - return &resource_storage_bucket_storage.BucketIamConfiguration{ - ForceSendFields: []string{"UniformBucketLevelAccess"}, - UniformBucketLevelAccess: &resource_storage_bucket_storage.BucketIamConfigurationUniformBucketLevelAccess{ - Enabled: d.Get("uniform_bucket_level_access").(bool), - ForceSendFields: []string{"Enabled"}, - }, - } -} - -func expandStorageBucketLifecycle(v interface{}) (*resource_storage_bucket_storage.BucketLifecycle, error) { - if v == nil { - return &resource_storage_bucket_storage.BucketLifecycle{ - ForceSendFields: []string{"Rule"}, - }, nil - } - lifecycleRules := v.([]interface{}) - transformedRules := make([]*resource_storage_bucket_storage.BucketLifecycleRule, 0, len(lifecycleRules)) - - for _, v := range lifecycleRules { - rule, err := expandStorageBucketLifecycleRule(v) - if err != nil { - return nil, err - } - transformedRules = append(transformedRules, rule) - } - - if len(transformedRules) == 0 { - return &resource_storage_bucket_storage.BucketLifecycle{ - ForceSendFields: []string{"Rule"}, - }, nil - } - - return &resource_storage_bucket_storage.BucketLifecycle{ - Rule: transformedRules, - }, nil -} - -func 
expandStorageBucketLifecycleRule(v interface{}) (*resource_storage_bucket_storage.BucketLifecycleRule, error) { - if v == nil { - return nil, nil - } - - rule := v.(map[string]interface{}) - transformed := &resource_storage_bucket_storage.BucketLifecycleRule{} - - if v, ok := rule["action"]; ok { - action, err := expandStorageBucketLifecycleRuleAction(v) - if err != nil { - return nil, err - } - transformed.Action = action - } else { - return nil, resource_storage_bucket_fmt.Errorf("exactly one action is required for lifecycle_rule") - } - - if v, ok := rule["condition"]; ok { - cond, err := expandStorageBucketLifecycleRuleCondition(v) - if err != nil { - return nil, err - } - transformed.Condition = cond - } - - return transformed, nil -} - -func expandStorageBucketLifecycleRuleAction(v interface{}) (*resource_storage_bucket_storage.BucketLifecycleRuleAction, error) { - if v == nil { - return nil, resource_storage_bucket_fmt.Errorf("exactly one action is required for lifecycle_rule") - } - - actions := v.(*resource_storage_bucket_schema.Set).List() - if len(actions) != 1 { - return nil, resource_storage_bucket_fmt.Errorf("exactly one action is required for lifecycle_rule") - } - - action := actions[0].(map[string]interface{}) - transformed := &resource_storage_bucket_storage.BucketLifecycleRuleAction{} - - if v, ok := action["type"]; ok { - transformed.Type = v.(string) - } - - if v, ok := action["storage_class"]; ok { - transformed.StorageClass = v.(string) - } - - return transformed, nil -} - -func expandStorageBucketLifecycleRuleCondition(v interface{}) (*resource_storage_bucket_storage.BucketLifecycleRuleCondition, error) { - if v == nil { - return nil, nil - } - conditions := v.(*resource_storage_bucket_schema.Set).List() - if len(conditions) != 1 { - return nil, resource_storage_bucket_fmt.Errorf("One and only one condition can be provided per lifecycle_rule") - } - - condition := conditions[0].(map[string]interface{}) - transformed := 
&resource_storage_bucket_storage.BucketLifecycleRuleCondition{} - - if v, ok := condition["age"]; ok { - transformed.Age = int64(v.(int)) - } - - if v, ok := condition["created_before"]; ok { - transformed.CreatedBefore = v.(string) - } - - withStateV, withStateOk := condition["with_state"] - - if withStateOk { - switch withStateV.(string) { - case "LIVE": - transformed.IsLive = resource_storage_bucket_googleapi.Bool(true) - case "ARCHIVED": - transformed.IsLive = resource_storage_bucket_googleapi.Bool(false) - case "ANY", "": - - transformed.IsLive = nil - default: - return nil, resource_storage_bucket_fmt.Errorf("unexpected value %q for condition.with_state", withStateV.(string)) - } - } - - if v, ok := condition["matches_storage_class"]; ok { - classes := v.([]interface{}) - transformedClasses := make([]string, 0, len(classes)) - - for _, v := range classes { - transformedClasses = append(transformedClasses, v.(string)) - } - transformed.MatchesStorageClass = transformedClasses - } - - if v, ok := condition["num_newer_versions"]; ok { - transformed.NumNewerVersions = int64(v.(int)) - } - - if v, ok := condition["custom_time_before"]; ok { - transformed.CustomTimeBefore = v.(string) - } - - if v, ok := condition["days_since_custom_time"]; ok { - transformed.DaysSinceCustomTime = int64(v.(int)) - } - - if v, ok := condition["days_since_noncurrent_time"]; ok { - transformed.DaysSinceNoncurrentTime = int64(v.(int)) - } - - if v, ok := condition["noncurrent_time_before"]; ok { - transformed.NoncurrentTimeBefore = v.(string) - } - - return transformed, nil -} - -func resourceGCSBucketLifecycleRuleActionHash(v interface{}) int { - if v == nil { - return 0 - } - - var buf resource_storage_bucket_bytes.Buffer - m := v.(map[string]interface{}) - - buf.WriteString(resource_storage_bucket_fmt.Sprintf("%s-", m["type"].(string))) - - if v, ok := m["storage_class"]; ok { - buf.WriteString(resource_storage_bucket_fmt.Sprintf("%s-", v.(string))) - } - - return 
hashcode(buf.String()) -} - -func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { - if v == nil { - return 0 - } - - var buf resource_storage_bucket_bytes.Buffer - m := v.(map[string]interface{}) - - if v, ok := m["age"]; ok { - buf.WriteString(resource_storage_bucket_fmt.Sprintf("%d-", v.(int))) - } - - if v, ok := m["days_since_noncurrent_time"]; ok { - buf.WriteString(resource_storage_bucket_fmt.Sprintf("%d-", v.(int))) - } - - if v, ok := m["created_before"]; ok { - buf.WriteString(resource_storage_bucket_fmt.Sprintf("%s-", v.(string))) - } - - withStateV, withStateOk := m["with_state"] - if withStateOk { - switch withStateV.(string) { - case "LIVE": - buf.WriteString(resource_storage_bucket_fmt.Sprintf("%t-", true)) - case "ARCHIVED": - buf.WriteString(resource_storage_bucket_fmt.Sprintf("%t-", false)) - } - } - - if v, ok := m["matches_storage_class"]; ok { - matches_storage_classes := v.([]interface{}) - for _, matches_storage_class := range matches_storage_classes { - buf.WriteString(resource_storage_bucket_fmt.Sprintf("%s-", matches_storage_class)) - } - } - - if v, ok := m["num_newer_versions"]; ok { - buf.WriteString(resource_storage_bucket_fmt.Sprintf("%d-", v.(int))) - } - - return hashcode(buf.String()) -} - -func lockRetentionPolicy(bucketsService *resource_storage_bucket_storage.BucketsService, bucketName string, metageneration int64) error { - lockPolicyCall := bucketsService.LockRetentionPolicy(bucketName, metageneration) - if _, err := lockPolicyCall.Do(); err != nil { - return err - } - - return nil -} - -func detectLifecycleChange(d *resource_storage_bucket_schema.ResourceData) bool { - if d.HasChange("lifecycle_rule.#") { - return true - } - - if l, ok := d.GetOk("lifecycle_rule"); ok { - lifecycleRules := l.([]interface{}) - for i := range lifecycleRules { - if d.HasChange(resource_storage_bucket_fmt.Sprintf("lifecycle_rule.%d.action", i)) || d.HasChange(resource_storage_bucket_fmt.Sprintf("lifecycle_rule.%d.condition", i)) { 
- return true - } - } - } - - return false -} - -func resourceStorageBucketAccessControl() *resource_storage_bucket_access_control_schema.Resource { - return &resource_storage_bucket_access_control_schema.Resource{ - Create: resourceStorageBucketAccessControlCreate, - Read: resourceStorageBucketAccessControlRead, - Update: resourceStorageBucketAccessControlUpdate, - Delete: resourceStorageBucketAccessControlDelete, - - Importer: &resource_storage_bucket_access_control_schema.ResourceImporter{ - State: resourceStorageBucketAccessControlImport, - }, - - Timeouts: &resource_storage_bucket_access_control_schema.ResourceTimeout{ - Create: resource_storage_bucket_access_control_schema.DefaultTimeout(4 * resource_storage_bucket_access_control_time.Minute), - Update: resource_storage_bucket_access_control_schema.DefaultTimeout(4 * resource_storage_bucket_access_control_time.Minute), - Delete: resource_storage_bucket_access_control_schema.DefaultTimeout(4 * resource_storage_bucket_access_control_time.Minute), - }, - - Schema: map[string]*resource_storage_bucket_access_control_schema.Schema{ - "bucket": { - Type: resource_storage_bucket_access_control_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the bucket.`, - }, - "entity": { - Type: resource_storage_bucket_access_control_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The entity holding the permission, in one of the following forms: - user-userId - user-email - group-groupId - group-email - domain-domain - project-team-projectId - allUsers - allAuthenticatedUsers -Examples: - The user liz@example.com would be user-liz@example.com. - The group example@googlegroups.com would be - group-example@googlegroups.com. 
- To refer to all members of the Google Apps for Business domain - example.com, the entity would be domain-example.com.`, - }, - "role": { - Type: resource_storage_bucket_access_control_schema.TypeString, - Optional: true, - ValidateFunc: resource_storage_bucket_access_control_validation.StringInSlice([]string{"OWNER", "READER", "WRITER", ""}, false), - Description: `The access permission for the entity. Possible values: ["OWNER", "READER", "WRITER"]`, - }, - "domain": { - Type: resource_storage_bucket_access_control_schema.TypeString, - Computed: true, - Description: `The domain associated with the entity.`, - }, - "email": { - Type: resource_storage_bucket_access_control_schema.TypeString, - Computed: true, - Description: `The email address associated with the entity.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceStorageBucketAccessControlCreate(d *resource_storage_bucket_access_control_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - bucketProp, err := expandStorageBucketAccessControlBucket(d.Get("bucket"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket"); !isEmptyValue(resource_storage_bucket_access_control_reflect.ValueOf(bucketProp)) && (ok || !resource_storage_bucket_access_control_reflect.DeepEqual(v, bucketProp)) { - obj["bucket"] = bucketProp - } - entityProp, err := expandStorageBucketAccessControlEntity(d.Get("entity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entity"); !isEmptyValue(resource_storage_bucket_access_control_reflect.ValueOf(entityProp)) && (ok || !resource_storage_bucket_access_control_reflect.DeepEqual(v, entityProp)) { - obj["entity"] = entityProp - } - roleProp, err := expandStorageBucketAccessControlRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := 
d.GetOkExists("role"); !isEmptyValue(resource_storage_bucket_access_control_reflect.ValueOf(roleProp)) && (ok || !resource_storage_bucket_access_control_reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp - } - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/acl") - if err != nil { - return err - } - - resource_storage_bucket_access_control_log.Printf("[DEBUG] Creating new BucketAccessControl: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_storage_bucket_access_control_schema.TimeoutCreate)) - if err != nil { - return resource_storage_bucket_access_control_fmt.Errorf("Error creating BucketAccessControl: %s", err) - } - - id, err := replaceVars(d, config, "{{bucket}}/{{entity}}") - if err != nil { - return resource_storage_bucket_access_control_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_storage_bucket_access_control_log.Printf("[DEBUG] Finished creating BucketAccessControl %q: %#v", d.Id(), res) - - return resourceStorageBucketAccessControlRead(d, meta) -} - -func resourceStorageBucketAccessControlRead(d *resource_storage_bucket_access_control_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/acl/{{entity}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return 
handleNotFoundError(err, d, resource_storage_bucket_access_control_fmt.Sprintf("StorageBucketAccessControl %q", d.Id())) - } - - if err := d.Set("bucket", flattenStorageBucketAccessControlBucket(res["bucket"], d, config)); err != nil { - return resource_storage_bucket_access_control_fmt.Errorf("Error reading BucketAccessControl: %s", err) - } - if err := d.Set("domain", flattenStorageBucketAccessControlDomain(res["domain"], d, config)); err != nil { - return resource_storage_bucket_access_control_fmt.Errorf("Error reading BucketAccessControl: %s", err) - } - if err := d.Set("email", flattenStorageBucketAccessControlEmail(res["email"], d, config)); err != nil { - return resource_storage_bucket_access_control_fmt.Errorf("Error reading BucketAccessControl: %s", err) - } - if err := d.Set("entity", flattenStorageBucketAccessControlEntity(res["entity"], d, config)); err != nil { - return resource_storage_bucket_access_control_fmt.Errorf("Error reading BucketAccessControl: %s", err) - } - if err := d.Set("role", flattenStorageBucketAccessControlRole(res["role"], d, config)); err != nil { - return resource_storage_bucket_access_control_fmt.Errorf("Error reading BucketAccessControl: %s", err) - } - - return nil -} - -func resourceStorageBucketAccessControlUpdate(d *resource_storage_bucket_access_control_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - bucketProp, err := expandStorageBucketAccessControlBucket(d.Get("bucket"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket"); !isEmptyValue(resource_storage_bucket_access_control_reflect.ValueOf(v)) && (ok || !resource_storage_bucket_access_control_reflect.DeepEqual(v, bucketProp)) { - obj["bucket"] = bucketProp - } - entityProp, err := expandStorageBucketAccessControlEntity(d.Get("entity"), d, 
config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entity"); !isEmptyValue(resource_storage_bucket_access_control_reflect.ValueOf(v)) && (ok || !resource_storage_bucket_access_control_reflect.DeepEqual(v, entityProp)) { - obj["entity"] = entityProp - } - roleProp, err := expandStorageBucketAccessControlRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("role"); !isEmptyValue(resource_storage_bucket_access_control_reflect.ValueOf(v)) && (ok || !resource_storage_bucket_access_control_reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp - } - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/acl/{{entity}}") - if err != nil { - return err - } - - resource_storage_bucket_access_control_log.Printf("[DEBUG] Updating BucketAccessControl %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_storage_bucket_access_control_schema.TimeoutUpdate)) - - if err != nil { - return resource_storage_bucket_access_control_fmt.Errorf("Error updating BucketAccessControl %q: %s", d.Id(), err) - } else { - resource_storage_bucket_access_control_log.Printf("[DEBUG] Finished updating BucketAccessControl %q: %#v", d.Id(), res) - } - - return resourceStorageBucketAccessControlRead(d, meta) -} - -func resourceStorageBucketAccessControlDelete(d *resource_storage_bucket_access_control_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - 
return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/acl/{{entity}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_storage_bucket_access_control_log.Printf("[DEBUG] Deleting BucketAccessControl %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_storage_bucket_access_control_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "BucketAccessControl") - } - - resource_storage_bucket_access_control_log.Printf("[DEBUG] Finished deleting BucketAccessControl %q: %#v", d.Id(), res) - return nil -} - -func resourceStorageBucketAccessControlImport(d *resource_storage_bucket_access_control_schema.ResourceData, meta interface{}) ([]*resource_storage_bucket_access_control_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{bucket}}/{{entity}}") - if err != nil { - return nil, resource_storage_bucket_access_control_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_storage_bucket_access_control_schema.ResourceData{d}, nil -} - -func flattenStorageBucketAccessControlBucket(v interface{}, d *resource_storage_bucket_access_control_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenStorageBucketAccessControlDomain(v interface{}, d *resource_storage_bucket_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageBucketAccessControlEmail(v interface{}, d *resource_storage_bucket_access_control_schema.ResourceData, config *Config) interface{} { - return 
v -} - -func flattenStorageBucketAccessControlEntity(v interface{}, d *resource_storage_bucket_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageBucketAccessControlRole(v interface{}, d *resource_storage_bucket_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandStorageBucketAccessControlBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageBucketAccessControlEntity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageBucketAccessControlRole(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceStorageBucketAcl() *resource_storage_bucket_acl_schema.Resource { - return &resource_storage_bucket_acl_schema.Resource{ - Create: resourceStorageBucketAclCreate, - Read: resourceStorageBucketAclRead, - Update: resourceStorageBucketAclUpdate, - Delete: resourceStorageBucketAclDelete, - CustomizeDiff: resourceStorageRoleEntityCustomizeDiff, - - Schema: map[string]*resource_storage_bucket_acl_schema.Schema{ - "bucket": { - Type: resource_storage_bucket_acl_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the bucket it applies to.`, - }, - - "default_acl": { - Type: resource_storage_bucket_acl_schema.TypeString, - Optional: true, - Description: `Configure this ACL to be the default ACL.`, - }, - - "predefined_acl": { - Type: resource_storage_bucket_acl_schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"role_entity"}, - Description: `The canned GCS ACL to apply. 
Must be set if role_entity is not.`, - }, - - "role_entity": { - Type: resource_storage_bucket_acl_schema.TypeList, - Optional: true, - Computed: true, - Elem: &resource_storage_bucket_acl_schema.Schema{Type: resource_storage_bucket_acl_schema.TypeString}, - ConflictsWith: []string{"predefined_acl"}, - Description: `List of role/entity pairs in the form ROLE:entity. See GCS Bucket ACL documentation for more details. Must be set if predefined_acl is not.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceStorageRoleEntityCustomizeDiff(_ resource_storage_bucket_acl_context.Context, diff *resource_storage_bucket_acl_schema.ResourceDiff, meta interface{}) error { - keys := diff.GetChangedKeysPrefix("role_entity") - if len(keys) < 1 { - return nil - } - count := diff.Get("role_entity.#").(int) - if count < 1 { - return nil - } - state := map[string]struct{}{} - conf := map[string]struct{}{} - for i := 0; i < count; i++ { - old, new := diff.GetChange(resource_storage_bucket_acl_fmt.Sprintf("role_entity.%d", i)) - state[old.(string)] = struct{}{} - conf[new.(string)] = struct{}{} - } - if len(state) != len(conf) { - return nil - } - for k := range state { - if _, ok := conf[k]; !ok { - - if resource_storage_bucket_acl_strings.Contains(k, "OWNER:project-owners-") { - continue - } - return nil - } - } - return diff.Clear("role_entity") -} - -type RoleEntity struct { - Role string - Entity string -} - -func getBucketAclId(bucket string) string { - return bucket + "-acl" -} - -func getRoleEntityPair(role_entity string) (*RoleEntity, error) { - split := resource_storage_bucket_acl_strings.Split(role_entity, ":") - if len(split) != 2 { - return nil, resource_storage_bucket_acl_fmt.Errorf("Error, each role entity pair must be " + - "formatted as ROLE:entity") - } - - return &RoleEntity{Role: split[0], Entity: split[1]}, nil -} - -func resourceStorageBucketAclCreate(d *resource_storage_bucket_acl_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - 
userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - predefined_acl := "" - default_acl := "" - role_entity := make([]interface{}, 0) - - if v, ok := d.GetOk("predefined_acl"); ok { - predefined_acl = v.(string) - } - - if v, ok := d.GetOk("role_entity"); ok { - role_entity = v.([]interface{}) - } - - if v, ok := d.GetOk("default_acl"); ok { - default_acl = v.(string) - } - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - if len(predefined_acl) > 0 { - res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() - - if err != nil { - return resource_storage_bucket_acl_fmt.Errorf("Error reading bucket %s: %v", bucket, err) - } - - _, err = config.NewStorageClient(userAgent).Buckets.Update(bucket, - res).PredefinedAcl(predefined_acl).Do() - - if err != nil { - return resource_storage_bucket_acl_fmt.Errorf("Error updating bucket %s: %v", bucket, err) - } - - } - - if len(role_entity) > 0 { - current, err := config.NewStorageClient(userAgent).BucketAccessControls.List(bucket).Do() - if err != nil { - return resource_storage_bucket_acl_fmt.Errorf("Error retrieving current ACLs: %s", err) - } - for _, v := range role_entity { - pair, err := getRoleEntityPair(v.(string)) - if err != nil { - return err - } - var alreadyInserted bool - for _, cur := range current.Items { - if cur.Entity == pair.Entity && cur.Role == pair.Role { - alreadyInserted = true - break - } - } - if alreadyInserted { - resource_storage_bucket_acl_log.Printf("[DEBUG]: pair %s-%s already exists, not trying to insert again\n", pair.Role, pair.Entity) - continue - } - bucketAccessControl := &resource_storage_bucket_acl_storage.BucketAccessControl{ - Role: pair.Role, - Entity: pair.Entity, - } - - resource_storage_bucket_acl_log.Printf("[DEBUG]: storing re %s-%s", pair.Role, pair.Entity) - - 
_, err = config.NewStorageClient(userAgent).BucketAccessControls.Insert(bucket, bucketAccessControl).Do() - - if err != nil { - return resource_storage_bucket_acl_fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) - } - } - - } - - if len(default_acl) > 0 { - res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() - - if err != nil { - return resource_storage_bucket_acl_fmt.Errorf("Error reading bucket %s: %v", bucket, err) - } - - _, err = config.NewStorageClient(userAgent).Buckets.Update(bucket, - res).PredefinedDefaultObjectAcl(default_acl).Do() - - if err != nil { - return resource_storage_bucket_acl_fmt.Errorf("Error updating bucket %s: %v", bucket, err) - } - - } - - d.SetId(getBucketAclId(bucket)) - return resourceStorageBucketAclRead(d, meta) -} - -func resourceStorageBucketAclRead(d *resource_storage_bucket_acl_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - - if _, ok := d.GetOk("role_entity"); ok { - res, err := config.NewStorageClient(userAgent).BucketAccessControls.List(bucket).Do() - - if err != nil { - return handleNotFoundError(err, d, resource_storage_bucket_acl_fmt.Sprintf("Storage Bucket ACL for bucket %q", d.Get("bucket").(string))) - } - entities := make([]string, 0, len(res.Items)) - for _, item := range res.Items { - entities = append(entities, item.Role+":"+item.Entity) - } - - if err := d.Set("role_entity", entities); err != nil { - return resource_storage_bucket_acl_fmt.Errorf("Error setting role_entity: %s", err) - } - } else { - - if err := d.Set("role_entity", nil); err != nil { - return resource_storage_bucket_acl_fmt.Errorf("Error setting role_entity: %s", err) - } - } - - return nil -} - -func resourceStorageBucketAclUpdate(d *resource_storage_bucket_acl_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, 
err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - if d.HasChange("role_entity") { - bkt, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() - if err != nil { - return resource_storage_bucket_acl_fmt.Errorf("Error reading bucket %q: %v", bucket, err) - } - - project := resource_storage_bucket_acl_strconv.FormatUint(bkt.ProjectNumber, 10) - o, n := d.GetChange("role_entity") - old_re, new_re := o.([]interface{}), n.([]interface{}) - - old_re_map := make(map[string]string) - for _, v := range old_re { - res, err := getRoleEntityPair(v.(string)) - - if err != nil { - return resource_storage_bucket_acl_fmt.Errorf( - "Old state has malformed Role/Entity pair: %v", err) - } - - old_re_map[res.Entity] = res.Role - } - - for _, v := range new_re { - pair, err := getRoleEntityPair(v.(string)) - - bucketAccessControl := &resource_storage_bucket_acl_storage.BucketAccessControl{ - Role: pair.Role, - Entity: pair.Entity, - } - - if old_re_map[pair.Entity] != bucketAccessControl.Role { - _, err = config.NewStorageClient(userAgent).BucketAccessControls.Insert( - bucket, bucketAccessControl).Do() - } - - delete(old_re_map, pair.Entity) - - if err != nil { - return resource_storage_bucket_acl_fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) - } - } - - for entity, role := range old_re_map { - if entity == resource_storage_bucket_acl_fmt.Sprintf("project-owners-%s", project) && role == "OWNER" { - resource_storage_bucket_acl_log.Printf("[WARN]: Skipping %s-%s; not deleting owner ACL.", role, entity) - continue - } - resource_storage_bucket_acl_log.Printf("[DEBUG]: removing entity %s", entity) - err := config.NewStorageClient(userAgent).BucketAccessControls.Delete(bucket, entity).Do() - - if err != nil { - 
return resource_storage_bucket_acl_fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) - } - } - - return resourceStorageBucketAclRead(d, meta) - } - - if d.HasChange("default_acl") { - default_acl := d.Get("default_acl").(string) - - res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() - - if err != nil { - return resource_storage_bucket_acl_fmt.Errorf("Error reading bucket %s: %v", bucket, err) - } - - _, err = config.NewStorageClient(userAgent).Buckets.Update(bucket, - res).PredefinedDefaultObjectAcl(default_acl).Do() - - if err != nil { - return resource_storage_bucket_acl_fmt.Errorf("Error updating bucket %s: %v", bucket, err) - } - - return resourceStorageBucketAclRead(d, meta) - } - - return nil -} - -func resourceStorageBucketAclDelete(d *resource_storage_bucket_acl_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - bkt, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() - if err != nil { - return resource_storage_bucket_acl_fmt.Errorf("Error retrieving bucket %q: %v", bucket, err) - } - project := resource_storage_bucket_acl_strconv.FormatUint(bkt.ProjectNumber, 10) - - re_local := d.Get("role_entity").([]interface{}) - for _, v := range re_local { - res, err := getRoleEntityPair(v.(string)) - if err != nil { - return err - } - - if res.Entity == resource_storage_bucket_acl_fmt.Sprintf("project-owners-%s", project) && res.Role == "OWNER" { - resource_storage_bucket_acl_log.Printf("[WARN]: Skipping %s-%s; not deleting owner ACL.", res.Role, res.Entity) - continue - } - - resource_storage_bucket_acl_log.Printf("[DEBUG]: removing entity %s", res.Entity) - - err = 
config.NewStorageClient(userAgent).BucketAccessControls.Delete(bucket, res.Entity).Do() - - if err != nil { - return resource_storage_bucket_acl_fmt.Errorf("Error deleting entity %s ACL: %s", res.Entity, err) - } - } - - return nil -} - -func resourceStorageBucketObject() *resource_storage_bucket_object_schema.Resource { - return &resource_storage_bucket_object_schema.Resource{ - Create: resourceStorageBucketObjectCreate, - Read: resourceStorageBucketObjectRead, - Update: resourceStorageBucketObjectUpdate, - Delete: resourceStorageBucketObjectDelete, - - Timeouts: &resource_storage_bucket_object_schema.ResourceTimeout{ - Create: resource_storage_bucket_object_schema.DefaultTimeout(4 * resource_storage_bucket_object_time.Minute), - Update: resource_storage_bucket_object_schema.DefaultTimeout(4 * resource_storage_bucket_object_time.Minute), - Delete: resource_storage_bucket_object_schema.DefaultTimeout(4 * resource_storage_bucket_object_time.Minute), - }, - - Schema: map[string]*resource_storage_bucket_object_schema.Schema{ - "bucket": { - Type: resource_storage_bucket_object_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the containing bucket.`, - }, - - "name": { - Type: resource_storage_bucket_object_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the object. If you're interpolating the name of this object, see output_name instead.`, - }, - - "cache_control": { - Type: resource_storage_bucket_object_schema.TypeString, - ForceNew: true, - Optional: true, - Description: `Cache-Control directive to specify caching behavior of object data. 
If omitted and object is accessible to all anonymous users, the default will be public, max-age=3600`, - }, - - "content_disposition": { - Type: resource_storage_bucket_object_schema.TypeString, - ForceNew: true, - Optional: true, - Description: `Content-Disposition of the object data.`, - }, - - "content_encoding": { - Type: resource_storage_bucket_object_schema.TypeString, - ForceNew: true, - Optional: true, - Description: `Content-Encoding of the object data.`, - }, - - "content_language": { - Type: resource_storage_bucket_object_schema.TypeString, - ForceNew: true, - Optional: true, - Description: `Content-Language of the object data.`, - }, - - "content_type": { - Type: resource_storage_bucket_object_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: `Content-Type of the object data. Defaults to "application/octet-stream" or "text/plain; charset=utf-8".`, - }, - - "content": { - Type: resource_storage_bucket_object_schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"source"}, - Sensitive: true, - Description: `Data as string to be uploaded. Must be defined if source is not. Note: The content field is marked as sensitive. To view the raw contents of the object, please define an output.`, - }, - - "crc32c": { - Type: resource_storage_bucket_object_schema.TypeString, - Computed: true, - Description: `Base 64 CRC32 hash of the uploaded data.`, - }, - - "md5hash": { - Type: resource_storage_bucket_object_schema.TypeString, - Computed: true, - Description: `Base 64 MD5 hash of the uploaded data.`, - }, - - "source": { - Type: resource_storage_bucket_object_schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"content"}, - Description: `A path to the data you want to upload. 
Must be defined if content is not.`, - }, - - "detect_md5hash": { - Type: resource_storage_bucket_object_schema.TypeString, - - Optional: true, - ForceNew: true, - - Default: "different hash", - - DiffSuppressFunc: func(k, old, new string, d *resource_storage_bucket_object_schema.ResourceData) bool { - localMd5Hash := "" - if source, ok := d.GetOkExists("source"); ok { - localMd5Hash = getFileMd5Hash(source.(string)) - } - - if content, ok := d.GetOkExists("content"); ok { - localMd5Hash = getContentMd5Hash([]byte(content.(string))) - } - - if localMd5Hash == "" { - return false - } - - if old != localMd5Hash { - return false - } - - return true - }, - }, - - "storage_class": { - Type: resource_storage_bucket_object_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - Description: `The StorageClass of the new bucket object. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE. If not provided, this defaults to the bucket's default storage class or to a standard class.`, - }, - - "kms_key_name": { - Type: resource_storage_bucket_object_schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - ConflictsWith: []string{"customer_encryption"}, - DiffSuppressFunc: compareCryptoKeyVersions, - Description: `Resource name of the Cloud KMS key that will be used to encrypt the object. Overrides the object metadata's kmsKeyName value, if any.`, - }, - - "customer_encryption": { - Type: resource_storage_bucket_object_schema.TypeList, - MaxItems: 1, - Optional: true, - Sensitive: true, - ConflictsWith: []string{"kms_key_name"}, - Description: `Encryption key; encoded using base64.`, - Elem: &resource_storage_bucket_object_schema.Resource{ - Schema: map[string]*resource_storage_bucket_object_schema.Schema{ - "encryption_algorithm": { - Type: resource_storage_bucket_object_schema.TypeString, - Optional: true, - Default: "AES256", - ForceNew: true, - Description: `The encryption algorithm. 
Default: AES256`, - }, - "encryption_key": { - Type: resource_storage_bucket_object_schema.TypeString, - Required: true, - ForceNew: true, - Sensitive: true, - Description: `Base64 encoded customer supplied encryption key.`, - ValidateFunc: func(val interface{}, key string) (warns []string, errs []error) { - _, err := resource_storage_bucket_object_base64.StdEncoding.DecodeString(val.(string)) - if err != nil { - errs = append(errs, resource_storage_bucket_object_fmt.Errorf("Failed to decode (base64) customer_encryption, expecting valid base64 encoded key")) - } - return - }, - }, - }, - }, - }, - - "event_based_hold": { - Type: resource_storage_bucket_object_schema.TypeBool, - Optional: true, - Description: `Whether an object is under event-based hold. Event-based hold is a way to retain objects until an event occurs, which is signified by the hold's release (i.e. this value is set to false). After being released (set to false), such objects will be subject to bucket-level retention (if any).`, - }, - - "temporary_hold": { - Type: resource_storage_bucket_object_schema.TypeBool, - Optional: true, - Description: `Whether an object is under temporary hold. While this flag is set to true, the object is protected against deletion and overwrites.`, - }, - - "metadata": { - Type: resource_storage_bucket_object_schema.TypeMap, - Optional: true, - ForceNew: true, - Elem: &resource_storage_bucket_object_schema.Schema{Type: resource_storage_bucket_object_schema.TypeString}, - Description: `User-provided metadata, in key/value pairs.`, - }, - - "self_link": { - Type: resource_storage_bucket_object_schema.TypeString, - Computed: true, - Description: `A url reference to this object.`, - }, - - "output_name": { - Type: resource_storage_bucket_object_schema.TypeString, - Computed: true, - Description: `The name of the object. 
Use this field in interpolations with google_storage_object_acl to recreate google_storage_object_acl resources when your google_storage_bucket_object is recreated.`, - }, - - "media_link": { - Type: resource_storage_bucket_object_schema.TypeString, - Computed: true, - Description: `A url reference to download this object.`, - }, - }, - UseJSONNumber: true, - } -} - -func objectGetID(object *resource_storage_bucket_object_storage.Object) string { - return object.Bucket + "-" + object.Name -} - -func compareCryptoKeyVersions(_, old, new string, _ *resource_storage_bucket_object_schema.ResourceData) bool { - - kmsKeyWithoutVersions := resource_storage_bucket_object_strings.Split(old, "/cryptoKeyVersions")[0] - if kmsKeyWithoutVersions == new { - return true - } - - return false -} - -func resourceStorageBucketObjectCreate(d *resource_storage_bucket_object_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - name := d.Get("name").(string) - var media resource_storage_bucket_object_io.Reader - - if v, ok := d.GetOk("source"); ok { - var err error - media, err = resource_storage_bucket_object_os.Open(v.(string)) - if err != nil { - return err - } - } else if v, ok := d.GetOk("content"); ok { - media = resource_storage_bucket_object_bytes.NewReader([]byte(v.(string))) - } else { - return resource_storage_bucket_object_fmt.Errorf("Error, either \"content\" or \"source\" must be specified") - } - - objectsService := resource_storage_bucket_object_storage.NewObjectsService(config.NewStorageClientWithTimeoutOverride(userAgent, d.Timeout(resource_storage_bucket_object_schema.TimeoutCreate))) - object := &resource_storage_bucket_object_storage.Object{Bucket: bucket} - - if v, ok := d.GetOk("cache_control"); ok { - object.CacheControl = v.(string) - } - - if v, ok := d.GetOk("content_disposition"); ok { - 
object.ContentDisposition = v.(string) - } - - if v, ok := d.GetOk("content_encoding"); ok { - object.ContentEncoding = v.(string) - } - - if v, ok := d.GetOk("content_language"); ok { - object.ContentLanguage = v.(string) - } - - if v, ok := d.GetOk("content_type"); ok { - object.ContentType = v.(string) - } - - if v, ok := d.GetOk("metadata"); ok { - object.Metadata = convertStringMap(v.(map[string]interface{})) - } - - if v, ok := d.GetOk("storage_class"); ok { - object.StorageClass = v.(string) - } - - if v, ok := d.GetOk("kms_key_name"); ok { - object.KmsKeyName = v.(string) - } - - if v, ok := d.GetOk("event_based_hold"); ok { - object.EventBasedHold = v.(bool) - } - - if v, ok := d.GetOk("temporary_hold"); ok { - object.TemporaryHold = v.(bool) - } - - insertCall := objectsService.Insert(bucket, object) - insertCall.Name(name) - insertCall.Media(media) - - if v, ok := d.GetOk("customer_encryption"); ok { - customerEncryption := expandCustomerEncryption(v.([]interface{})) - setEncryptionHeaders(customerEncryption, insertCall.Header()) - } - - _, err = insertCall.Do() - - if err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error uploading object %s: %s", name, err) - } - - return resourceStorageBucketObjectRead(d, meta) -} - -func resourceStorageBucketObjectUpdate(d *resource_storage_bucket_object_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - name := d.Get("name").(string) - - objectsService := resource_storage_bucket_object_storage.NewObjectsService(config.NewStorageClientWithTimeoutOverride(userAgent, d.Timeout(resource_storage_bucket_object_schema.TimeoutUpdate))) - getCall := objectsService.Get(bucket, name) - - res, err := getCall.Do() - if err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error retrieving object during update %s: %s", name, err) - } - - if 
d.HasChange("event_based_hold") { - v := d.Get("event_based_hold") - res.EventBasedHold = v.(bool) - } - - if d.HasChange("temporary_hold") { - v := d.Get("temporary_hold") - res.TemporaryHold = v.(bool) - } - - updateCall := objectsService.Update(bucket, name, res) - _, err = updateCall.Do() - - if err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error updating object %s: %s", name, err) - } - - return nil -} - -func resourceStorageBucketObjectRead(d *resource_storage_bucket_object_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - name := d.Get("name").(string) - - objectsService := resource_storage_bucket_object_storage.NewObjectsService(config.NewStorageClientWithTimeoutOverride(userAgent, d.Timeout(resource_storage_bucket_object_schema.TimeoutRead))) - getCall := objectsService.Get(bucket, name) - - if v, ok := d.GetOk("customer_encryption"); ok { - customerEncryption := expandCustomerEncryption(v.([]interface{})) - setEncryptionHeaders(customerEncryption, getCall.Header()) - } - - res, err := getCall.Do() - - if err != nil { - return handleNotFoundError(err, d, resource_storage_bucket_object_fmt.Sprintf("Storage Bucket Object %q", d.Get("name").(string))) - } - - if err := d.Set("md5hash", res.Md5Hash); err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error setting md5hash: %s", err) - } - if err := d.Set("detect_md5hash", res.Md5Hash); err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error setting detect_md5hash: %s", err) - } - if err := d.Set("crc32c", res.Crc32c); err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error setting crc32c: %s", err) - } - if err := d.Set("cache_control", res.CacheControl); err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error setting cache_control: %s", err) - } - if err := 
d.Set("content_disposition", res.ContentDisposition); err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error setting content_disposition: %s", err) - } - if err := d.Set("content_encoding", res.ContentEncoding); err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error setting content_encoding: %s", err) - } - if err := d.Set("content_language", res.ContentLanguage); err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error setting content_language: %s", err) - } - if err := d.Set("content_type", res.ContentType); err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error setting content_type: %s", err) - } - if err := d.Set("storage_class", res.StorageClass); err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error setting storage_class: %s", err) - } - if err := d.Set("kms_key_name", res.KmsKeyName); err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error setting kms_key_name: %s", err) - } - if err := d.Set("self_link", res.SelfLink); err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("output_name", res.Name); err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error setting output_name: %s", err) - } - if err := d.Set("metadata", res.Metadata); err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error setting metadata: %s", err) - } - if err := d.Set("media_link", res.MediaLink); err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error setting media_link: %s", err) - } - if err := d.Set("event_based_hold", res.EventBasedHold); err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error setting event_based_hold: %s", err) - } - if err := d.Set("temporary_hold", res.TemporaryHold); err != nil { - return resource_storage_bucket_object_fmt.Errorf("Error setting temporary_hold: %s", err) - } - - d.SetId(objectGetID(res)) - - return nil -} - -func 
resourceStorageBucketObjectDelete(d *resource_storage_bucket_object_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - name := d.Get("name").(string) - - objectsService := resource_storage_bucket_object_storage.NewObjectsService(config.NewStorageClientWithTimeoutOverride(userAgent, d.Timeout(resource_storage_bucket_object_schema.TimeoutDelete))) - - DeleteCall := objectsService.Delete(bucket, name) - err = DeleteCall.Do() - - if err != nil { - if gerr, ok := err.(*resource_storage_bucket_object_googleapi.Error); ok && gerr.Code == 404 { - resource_storage_bucket_object_log.Printf("[WARN] Removing Bucket Object %q because it's gone", name) - - d.SetId("") - - return nil - } - - return resource_storage_bucket_object_fmt.Errorf("Error deleting contents of object %s: %s", name, err) - } - - return nil -} - -func setEncryptionHeaders(customerEncryption map[string]string, headers resource_storage_bucket_object_http.Header) { - decodedKey, _ := resource_storage_bucket_object_base64.StdEncoding.DecodeString(customerEncryption["encryption_key"]) - keyHash := resource_storage_bucket_object_sha256.Sum256(decodedKey) - headers.Set("x-goog-encryption-algorithm", customerEncryption["encryption_algorithm"]) - headers.Set("x-goog-encryption-key", customerEncryption["encryption_key"]) - headers.Set("x-goog-encryption-key-sha256", resource_storage_bucket_object_base64.StdEncoding.EncodeToString(keyHash[:])) -} - -func getFileMd5Hash(filename string) string { - data, err := resource_storage_bucket_object_ioutil.ReadFile(filename) - if err != nil { - resource_storage_bucket_object_log.Printf("[WARN] Failed to read source file %q. 
Cannot compute md5 hash for it.", filename) - return "" - } - return getContentMd5Hash(data) -} - -func getContentMd5Hash(content []byte) string { - h := resource_storage_bucket_object_md5.New() - if _, err := h.Write(content); err != nil { - resource_storage_bucket_object_log.Printf("[WARN] Failed to compute md5 hash for content: %v", err) - } - return resource_storage_bucket_object_base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - -func expandCustomerEncryption(input []interface{}) map[string]string { - expanded := make(map[string]string) - if input == nil { - return expanded - } - for _, v := range input { - original := v.(map[string]interface{}) - expanded["encryption_key"] = original["encryption_key"].(string) - expanded["encryption_algorithm"] = original["encryption_algorithm"].(string) - } - return expanded -} - -func resourceStorageDefaultObjectAccessControl() *resource_storage_default_object_access_control_schema.Resource { - return &resource_storage_default_object_access_control_schema.Resource{ - Create: resourceStorageDefaultObjectAccessControlCreate, - Read: resourceStorageDefaultObjectAccessControlRead, - Update: resourceStorageDefaultObjectAccessControlUpdate, - Delete: resourceStorageDefaultObjectAccessControlDelete, - - Importer: &resource_storage_default_object_access_control_schema.ResourceImporter{ - State: resourceStorageDefaultObjectAccessControlImport, - }, - - Timeouts: &resource_storage_default_object_access_control_schema.ResourceTimeout{ - Create: resource_storage_default_object_access_control_schema.DefaultTimeout(4 * resource_storage_default_object_access_control_time.Minute), - Update: resource_storage_default_object_access_control_schema.DefaultTimeout(4 * resource_storage_default_object_access_control_time.Minute), - Delete: resource_storage_default_object_access_control_schema.DefaultTimeout(4 * resource_storage_default_object_access_control_time.Minute), - }, - - Schema: 
map[string]*resource_storage_default_object_access_control_schema.Schema{ - "bucket": { - Type: resource_storage_default_object_access_control_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the bucket.`, - }, - "entity": { - Type: resource_storage_default_object_access_control_schema.TypeString, - Required: true, - Description: `The entity holding the permission, in one of the following forms: - * user-{{userId}} - * user-{{email}} (such as "user-liz@example.com") - * group-{{groupId}} - * group-{{email}} (such as "group-example@googlegroups.com") - * domain-{{domain}} (such as "domain-example.com") - * project-team-{{projectId}} - * allUsers - * allAuthenticatedUsers`, - }, - "role": { - Type: resource_storage_default_object_access_control_schema.TypeString, - Required: true, - ValidateFunc: resource_storage_default_object_access_control_validation.StringInSlice([]string{"OWNER", "READER"}, false), - Description: `The access permission for the entity. 
Possible values: ["OWNER", "READER"]`, - }, - "object": { - Type: resource_storage_default_object_access_control_schema.TypeString, - Optional: true, - Description: `The name of the object, if applied to an object.`, - }, - "domain": { - Type: resource_storage_default_object_access_control_schema.TypeString, - Computed: true, - Description: `The domain associated with the entity.`, - }, - "email": { - Type: resource_storage_default_object_access_control_schema.TypeString, - Computed: true, - Description: `The email address associated with the entity.`, - }, - "entity_id": { - Type: resource_storage_default_object_access_control_schema.TypeString, - Computed: true, - Description: `The ID for the entity`, - }, - "generation": { - Type: resource_storage_default_object_access_control_schema.TypeInt, - Computed: true, - Description: `The content generation of the object, if applied to an object.`, - }, - "project_team": { - Type: resource_storage_default_object_access_control_schema.TypeList, - Computed: true, - Description: `The project team associated with the entity`, - Elem: &resource_storage_default_object_access_control_schema.Resource{ - Schema: map[string]*resource_storage_default_object_access_control_schema.Schema{ - "project_number": { - Type: resource_storage_default_object_access_control_schema.TypeString, - Optional: true, - Description: `The project team associated with the entity`, - }, - "team": { - Type: resource_storage_default_object_access_control_schema.TypeString, - Optional: true, - ValidateFunc: resource_storage_default_object_access_control_validation.StringInSlice([]string{"editors", "owners", "viewers", ""}, false), - Description: `The team. 
Possible values: ["editors", "owners", "viewers"]`, - }, - }, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceStorageDefaultObjectAccessControlCreate(d *resource_storage_default_object_access_control_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - bucketProp, err := expandStorageDefaultObjectAccessControlBucket(d.Get("bucket"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket"); !isEmptyValue(resource_storage_default_object_access_control_reflect.ValueOf(bucketProp)) && (ok || !resource_storage_default_object_access_control_reflect.DeepEqual(v, bucketProp)) { - obj["bucket"] = bucketProp - } - entityProp, err := expandStorageDefaultObjectAccessControlEntity(d.Get("entity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entity"); !isEmptyValue(resource_storage_default_object_access_control_reflect.ValueOf(entityProp)) && (ok || !resource_storage_default_object_access_control_reflect.DeepEqual(v, entityProp)) { - obj["entity"] = entityProp - } - objectProp, err := expandStorageDefaultObjectAccessControlObject(d.Get("object"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("object"); !isEmptyValue(resource_storage_default_object_access_control_reflect.ValueOf(objectProp)) && (ok || !resource_storage_default_object_access_control_reflect.DeepEqual(v, objectProp)) { - obj["object"] = objectProp - } - roleProp, err := expandStorageDefaultObjectAccessControlRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("role"); !isEmptyValue(resource_storage_default_object_access_control_reflect.ValueOf(roleProp)) && (ok || !resource_storage_default_object_access_control_reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp - } - - lockName, err := 
replaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/defaultObjectAcl") - if err != nil { - return err - } - - resource_storage_default_object_access_control_log.Printf("[DEBUG] Creating new DefaultObjectAccessControl: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_storage_default_object_access_control_schema.TimeoutCreate)) - if err != nil { - return resource_storage_default_object_access_control_fmt.Errorf("Error creating DefaultObjectAccessControl: %s", err) - } - - id, err := replaceVars(d, config, "{{bucket}}/{{entity}}") - if err != nil { - return resource_storage_default_object_access_control_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_storage_default_object_access_control_log.Printf("[DEBUG] Finished creating DefaultObjectAccessControl %q: %#v", d.Id(), res) - - return resourceStorageDefaultObjectAccessControlRead(d, meta) -} - -func resourceStorageDefaultObjectAccessControlRead(d *resource_storage_default_object_access_control_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/defaultObjectAcl/{{entity}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_storage_default_object_access_control_fmt.Sprintf("StorageDefaultObjectAccessControl %q", d.Id())) - 
} - - if err := d.Set("domain", flattenStorageDefaultObjectAccessControlDomain(res["domain"], d, config)); err != nil { - return resource_storage_default_object_access_control_fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) - } - if err := d.Set("email", flattenStorageDefaultObjectAccessControlEmail(res["email"], d, config)); err != nil { - return resource_storage_default_object_access_control_fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) - } - if err := d.Set("entity", flattenStorageDefaultObjectAccessControlEntity(res["entity"], d, config)); err != nil { - return resource_storage_default_object_access_control_fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) - } - if err := d.Set("entity_id", flattenStorageDefaultObjectAccessControlEntityId(res["entityId"], d, config)); err != nil { - return resource_storage_default_object_access_control_fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) - } - if err := d.Set("generation", flattenStorageDefaultObjectAccessControlGeneration(res["generation"], d, config)); err != nil { - return resource_storage_default_object_access_control_fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) - } - if err := d.Set("object", flattenStorageDefaultObjectAccessControlObject(res["object"], d, config)); err != nil { - return resource_storage_default_object_access_control_fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) - } - if err := d.Set("project_team", flattenStorageDefaultObjectAccessControlProjectTeam(res["projectTeam"], d, config)); err != nil { - return resource_storage_default_object_access_control_fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) - } - if err := d.Set("role", flattenStorageDefaultObjectAccessControlRole(res["role"], d, config)); err != nil { - return resource_storage_default_object_access_control_fmt.Errorf("Error reading DefaultObjectAccessControl: %s", err) - } - - return nil -} - -func 
resourceStorageDefaultObjectAccessControlUpdate(d *resource_storage_default_object_access_control_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - bucketProp, err := expandStorageDefaultObjectAccessControlBucket(d.Get("bucket"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket"); !isEmptyValue(resource_storage_default_object_access_control_reflect.ValueOf(v)) && (ok || !resource_storage_default_object_access_control_reflect.DeepEqual(v, bucketProp)) { - obj["bucket"] = bucketProp - } - entityProp, err := expandStorageDefaultObjectAccessControlEntity(d.Get("entity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entity"); !isEmptyValue(resource_storage_default_object_access_control_reflect.ValueOf(v)) && (ok || !resource_storage_default_object_access_control_reflect.DeepEqual(v, entityProp)) { - obj["entity"] = entityProp - } - objectProp, err := expandStorageDefaultObjectAccessControlObject(d.Get("object"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("object"); !isEmptyValue(resource_storage_default_object_access_control_reflect.ValueOf(v)) && (ok || !resource_storage_default_object_access_control_reflect.DeepEqual(v, objectProp)) { - obj["object"] = objectProp - } - roleProp, err := expandStorageDefaultObjectAccessControlRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("role"); !isEmptyValue(resource_storage_default_object_access_control_reflect.ValueOf(v)) && (ok || !resource_storage_default_object_access_control_reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp - } - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer 
mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/defaultObjectAcl/{{entity}}") - if err != nil { - return err - } - - resource_storage_default_object_access_control_log.Printf("[DEBUG] Updating DefaultObjectAccessControl %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_storage_default_object_access_control_schema.TimeoutUpdate)) - - if err != nil { - return resource_storage_default_object_access_control_fmt.Errorf("Error updating DefaultObjectAccessControl %q: %s", d.Id(), err) - } else { - resource_storage_default_object_access_control_log.Printf("[DEBUG] Finished updating DefaultObjectAccessControl %q: %#v", d.Id(), res) - } - - return resourceStorageDefaultObjectAccessControlRead(d, meta) -} - -func resourceStorageDefaultObjectAccessControlDelete(d *resource_storage_default_object_access_control_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/defaultObjectAcl/{{entity}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_storage_default_object_access_control_log.Printf("[DEBUG] Deleting DefaultObjectAccessControl %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_storage_default_object_access_control_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, 
d, "DefaultObjectAccessControl") - } - - resource_storage_default_object_access_control_log.Printf("[DEBUG] Finished deleting DefaultObjectAccessControl %q: %#v", d.Id(), res) - return nil -} - -func resourceStorageDefaultObjectAccessControlImport(d *resource_storage_default_object_access_control_schema.ResourceData, meta interface{}) ([]*resource_storage_default_object_access_control_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P[^/]+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{bucket}}/{{entity}}") - if err != nil { - return nil, resource_storage_default_object_access_control_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_storage_default_object_access_control_schema.ResourceData{d}, nil -} - -func flattenStorageDefaultObjectAccessControlDomain(v interface{}, d *resource_storage_default_object_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageDefaultObjectAccessControlEmail(v interface{}, d *resource_storage_default_object_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageDefaultObjectAccessControlEntity(v interface{}, d *resource_storage_default_object_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageDefaultObjectAccessControlEntityId(v interface{}, d *resource_storage_default_object_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageDefaultObjectAccessControlGeneration(v interface{}, d *resource_storage_default_object_access_control_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_storage_default_object_access_control_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := 
int(floatVal) - return intVal - } - - return v -} - -func flattenStorageDefaultObjectAccessControlObject(v interface{}, d *resource_storage_default_object_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageDefaultObjectAccessControlProjectTeam(v interface{}, d *resource_storage_default_object_access_control_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_number"] = - flattenStorageDefaultObjectAccessControlProjectTeamProjectNumber(original["projectNumber"], d, config) - transformed["team"] = - flattenStorageDefaultObjectAccessControlProjectTeamTeam(original["team"], d, config) - return []interface{}{transformed} -} - -func flattenStorageDefaultObjectAccessControlProjectTeamProjectNumber(v interface{}, d *resource_storage_default_object_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageDefaultObjectAccessControlProjectTeamTeam(v interface{}, d *resource_storage_default_object_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageDefaultObjectAccessControlRole(v interface{}, d *resource_storage_default_object_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandStorageDefaultObjectAccessControlBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageDefaultObjectAccessControlEntity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageDefaultObjectAccessControlObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageDefaultObjectAccessControlRole(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} - -func resourceStorageDefaultObjectAcl() *resource_storage_default_object_acl_schema.Resource { - return &resource_storage_default_object_acl_schema.Resource{ - Create: resourceStorageDefaultObjectAclCreateUpdate, - Read: resourceStorageDefaultObjectAclRead, - Update: resourceStorageDefaultObjectAclCreateUpdate, - Delete: resourceStorageDefaultObjectAclDelete, - - Schema: map[string]*resource_storage_default_object_acl_schema.Schema{ - "bucket": { - Type: resource_storage_default_object_acl_schema.TypeString, - Required: true, - ForceNew: true, - }, - - "role_entity": { - Type: resource_storage_default_object_acl_schema.TypeSet, - Optional: true, - Computed: true, - Elem: &resource_storage_default_object_acl_schema.Schema{ - Type: resource_storage_default_object_acl_schema.TypeString, - ValidateFunc: validateRoleEntityPair, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceStorageDefaultObjectAclCreateUpdate(d *resource_storage_default_object_acl_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - defaultObjectAcl := []*resource_storage_default_object_acl_storage.ObjectAccessControl{} - for _, v := range d.Get("role_entity").(*resource_storage_default_object_acl_schema.Set).List() { - pair := getValidatedRoleEntityPair(v.(string)) - defaultObjectAcl = append(defaultObjectAcl, &resource_storage_default_object_acl_storage.ObjectAccessControl{ - Role: pair.Role, - Entity: pair.Entity, - }) - } - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() - if err != nil { - return resource_storage_default_object_acl_fmt.Errorf("Error reading bucket %s: %v", bucket, 
err) - } - - if len(defaultObjectAcl) == 0 { - _, err = config.NewStorageClient(userAgent).Buckets.Update(bucket, res).IfMetagenerationMatch(res.Metageneration).PredefinedDefaultObjectAcl("private").Do() - if err != nil { - return resource_storage_default_object_acl_fmt.Errorf("Error updating default object acl to empty for bucket %s: %v", bucket, err) - } - } else { - res.DefaultObjectAcl = defaultObjectAcl - _, err = config.NewStorageClient(userAgent).Buckets.Update(bucket, res).IfMetagenerationMatch(res.Metageneration).Do() - if err != nil { - return resource_storage_default_object_acl_fmt.Errorf("Error updating default object acl for bucket %s: %v", bucket, err) - } - } - - return resourceStorageDefaultObjectAclRead(d, meta) -} - -func resourceStorageDefaultObjectAclRead(d *resource_storage_default_object_acl_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Projection("full").Do() - if err != nil { - return handleNotFoundError(err, d, resource_storage_default_object_acl_fmt.Sprintf("Default Storage Object ACL for Bucket %q", d.Get("bucket").(string))) - } - - var roleEntities []string - for _, roleEntity := range res.DefaultObjectAcl { - role := roleEntity.Role - entity := roleEntity.Entity - roleEntities = append(roleEntities, resource_storage_default_object_acl_fmt.Sprintf("%s:%s", role, entity)) - } - - err = d.Set("role_entity", roleEntities) - if err != nil { - return err - } - - d.SetId(bucket) - return nil -} - -func resourceStorageDefaultObjectAclDelete(d *resource_storage_default_object_acl_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - lockName, err := replaceVars(d, config, 
"storage/buckets/{{bucket}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - bucket := d.Get("bucket").(string) - res, err := config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() - if err != nil { - return resource_storage_default_object_acl_fmt.Errorf("Error reading bucket %s: %v", bucket, err) - } - - _, err = config.NewStorageClient(userAgent).Buckets.Update(bucket, res).IfMetagenerationMatch(res.Metageneration).PredefinedDefaultObjectAcl("private").Do() - if err != nil { - return resource_storage_default_object_acl_fmt.Errorf("Error deleting (updating to private) default object acl for bucket %s: %v", bucket, err) - } - - return nil -} - -func resourceStorageHmacKey() *resource_storage_hmac_key_schema.Resource { - return &resource_storage_hmac_key_schema.Resource{ - Create: resourceStorageHmacKeyCreate, - Read: resourceStorageHmacKeyRead, - Update: resourceStorageHmacKeyUpdate, - Delete: resourceStorageHmacKeyDelete, - - Importer: &resource_storage_hmac_key_schema.ResourceImporter{ - State: resourceStorageHmacKeyImport, - }, - - Timeouts: &resource_storage_hmac_key_schema.ResourceTimeout{ - Create: resource_storage_hmac_key_schema.DefaultTimeout(4 * resource_storage_hmac_key_time.Minute), - Update: resource_storage_hmac_key_schema.DefaultTimeout(4 * resource_storage_hmac_key_time.Minute), - Delete: resource_storage_hmac_key_schema.DefaultTimeout(4 * resource_storage_hmac_key_time.Minute), - }, - - Schema: map[string]*resource_storage_hmac_key_schema.Schema{ - "service_account_email": { - Type: resource_storage_hmac_key_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The email address of the key's associated service account.`, - }, - "state": { - Type: resource_storage_hmac_key_schema.TypeString, - Optional: true, - ValidateFunc: resource_storage_hmac_key_validation.StringInSlice([]string{"ACTIVE", "INACTIVE", ""}, false), - Description: `The state of the key. 
Can be set to one of ACTIVE, INACTIVE. Default value: "ACTIVE" Possible values: ["ACTIVE", "INACTIVE"]`, - Default: "ACTIVE", - }, - "access_id": { - Type: resource_storage_hmac_key_schema.TypeString, - Computed: true, - Description: `The access ID of the HMAC Key.`, - }, - "secret": { - Type: resource_storage_hmac_key_schema.TypeString, - Computed: true, - Description: `HMAC secret key material.`, - Sensitive: true, - }, - "time_created": { - Type: resource_storage_hmac_key_schema.TypeString, - Computed: true, - Description: `'The creation time of the HMAC key in RFC 3339 format. '`, - }, - "updated": { - Type: resource_storage_hmac_key_schema.TypeString, - Computed: true, - Description: `'The last modification time of the HMAC key metadata in RFC 3339 format.'`, - }, - "project": { - Type: resource_storage_hmac_key_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceStorageHmacKeyCreate(d *resource_storage_hmac_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - serviceAccountEmailProp, err := expandStorageHmacKeyServiceAccountEmail(d.Get("service_account_email"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_account_email"); !isEmptyValue(resource_storage_hmac_key_reflect.ValueOf(serviceAccountEmailProp)) && (ok || !resource_storage_hmac_key_reflect.DeepEqual(v, serviceAccountEmailProp)) { - obj["serviceAccountEmail"] = serviceAccountEmailProp - } - stateProp, err := expandStorageHmacKeyState(d.Get("state"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("state"); !isEmptyValue(resource_storage_hmac_key_reflect.ValueOf(stateProp)) && (ok || !resource_storage_hmac_key_reflect.DeepEqual(v, stateProp)) { - obj["state"] = stateProp - } - - url, err := 
replaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys?serviceAccountEmail={{service_account_email}}") - if err != nil { - return err - } - - resource_storage_hmac_key_log.Printf("[DEBUG] Creating new HmacKey: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error fetching project for HmacKey: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_storage_hmac_key_schema.TimeoutCreate)) - if err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error creating HmacKey: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - secret, ok := res["secret"].(string) - if !ok { - return resource_storage_hmac_key_fmt.Errorf("The response to CREATE was missing an expected field. Your create did not work.") - } - - if err := d.Set("secret", secret); err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error setting secret: %s", err) - } - - metadata := res["metadata"].(map[string]interface{}) - accessId, ok := metadata["accessId"].(string) - if !ok { - return resource_storage_hmac_key_fmt.Errorf("The response to CREATE was missing an expected field. 
Your create did not work.") - } - - if err := d.Set("access_id", accessId); err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error setting access_id: %s", err) - } - - id, err = replaceVars(d, config, "projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error constructing id: %s", err) - } - - d.SetId(id) - - err = PollingWaitTime(resourceStorageHmacKeyPollRead(d, meta), PollCheckForExistence, "Creating HmacKey", d.Timeout(resource_storage_hmac_key_schema.TimeoutCreate), 1) - if err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error waiting to create HmacKey: %s", err) - } - - resource_storage_hmac_key_log.Printf("[DEBUG] Finished creating HmacKey %q: %#v", d.Id(), res) - - return resourceStorageHmacKeyRead(d, meta) -} - -func resourceStorageHmacKeyPollRead(d *resource_storage_hmac_key_schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - - url, err := replaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return nil, err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return nil, resource_storage_hmac_key_fmt.Errorf("Error fetching project for HmacKey: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return res, err - } - res, err = resourceStorageHmacKeyDecoder(d, meta, res) - if err != nil { - return nil, err - } - if res == nil { - - return nil, &resource_storage_hmac_key_googleapi.Error{ - Code: 404, - Message: "could not find object StorageHmacKey", - } - } - - return res, nil - } -} - -func resourceStorageHmacKeyRead(d 
*resource_storage_hmac_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error fetching project for HmacKey: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_storage_hmac_key_fmt.Sprintf("StorageHmacKey %q", d.Id())) - } - - res, err = resourceStorageHmacKeyDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_storage_hmac_key_log.Printf("[DEBUG] Removing StorageHmacKey because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error reading HmacKey: %s", err) - } - - if err := d.Set("service_account_email", flattenStorageHmacKeyServiceAccountEmail(res["serviceAccountEmail"], d, config)); err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error reading HmacKey: %s", err) - } - if err := d.Set("state", flattenStorageHmacKeyState(res["state"], d, config)); err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error reading HmacKey: %s", err) - } - if err := d.Set("access_id", flattenStorageHmacKeyAccessId(res["accessId"], d, config)); err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error reading HmacKey: %s", err) - } - if err := d.Set("time_created", flattenStorageHmacKeyTimeCreated(res["timeCreated"], d, config)); err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error reading 
HmacKey: %s", err) - } - if err := d.Set("updated", flattenStorageHmacKeyUpdated(res["updated"], d, config)); err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error reading HmacKey: %s", err) - } - - return nil -} - -func resourceStorageHmacKeyUpdate(d *resource_storage_hmac_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error fetching project for HmacKey: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("state") { - obj := make(map[string]interface{}) - - getUrl, err := replaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - getRes, err := sendRequest(config, "GET", billingProject, getUrl, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_storage_hmac_key_fmt.Sprintf("StorageHmacKey %q", d.Id())) - } - - obj["etag"] = getRes["etag"] - - stateProp, err := expandStorageHmacKeyState(d.Get("state"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("state"); !isEmptyValue(resource_storage_hmac_key_reflect.ValueOf(v)) && (ok || !resource_storage_hmac_key_reflect.DeepEqual(v, stateProp)) { - obj["state"] = stateProp - } - - url, err := replaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_storage_hmac_key_schema.TimeoutUpdate)) - if err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error 
updating HmacKey %q: %s", d.Id(), err) - } else { - resource_storage_hmac_key_log.Printf("[DEBUG] Finished updating HmacKey %q: %#v", d.Id(), res) - } - - } - - d.Partial(false) - - return resourceStorageHmacKeyRead(d, meta) -} - -func resourceStorageHmacKeyDelete(d *resource_storage_hmac_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error fetching project for HmacKey: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return err - } - - var obj map[string]interface{} - getUrl, err := replaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return err - } - - getRes, err := sendRequest(config, "GET", project, getUrl, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_storage_hmac_key_fmt.Sprintf("StorageHmacKey %q", d.Id())) - } - - if v := getRes["state"]; v == "ACTIVE" { - getRes["state"] = "INACTIVE" - updateUrl, err := replaceVars(d, config, "{{StorageBasePath}}projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return err - } - - resource_storage_hmac_key_log.Printf("[DEBUG] Deactivating HmacKey %q: %#v", d.Id(), getRes) - _, err = sendRequestWithTimeout(config, "PUT", project, updateUrl, userAgent, getRes, d.Timeout(resource_storage_hmac_key_schema.TimeoutUpdate)) - if err != nil { - return resource_storage_hmac_key_fmt.Errorf("Error deactivating HmacKey %q: %s", d.Id(), err) - } - } - resource_storage_hmac_key_log.Printf("[DEBUG] Deleting HmacKey %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, 
"DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_storage_hmac_key_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "HmacKey") - } - - resource_storage_hmac_key_log.Printf("[DEBUG] Finished deleting HmacKey %q: %#v", d.Id(), res) - return nil -} - -func resourceStorageHmacKeyImport(d *resource_storage_hmac_key_schema.ResourceData, meta interface{}) ([]*resource_storage_hmac_key_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/hmacKeys/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/hmacKeys/{{access_id}}") - if err != nil { - return nil, resource_storage_hmac_key_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_storage_hmac_key_schema.ResourceData{d}, nil -} - -func flattenStorageHmacKeyServiceAccountEmail(v interface{}, d *resource_storage_hmac_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageHmacKeyState(v interface{}, d *resource_storage_hmac_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageHmacKeyAccessId(v interface{}, d *resource_storage_hmac_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageHmacKeyTimeCreated(v interface{}, d *resource_storage_hmac_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageHmacKeyUpdated(v interface{}, d *resource_storage_hmac_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandStorageHmacKeyServiceAccountEmail(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageHmacKeyState(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceStorageHmacKeyDecoder(d 
*resource_storage_hmac_key_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - if v := res["state"]; v == "DELETED" { - return nil, nil - } - - return res, nil -} - -func resourceStorageNotification() *resource_storage_notification_schema.Resource { - return &resource_storage_notification_schema.Resource{ - Create: resourceStorageNotificationCreate, - Read: resourceStorageNotificationRead, - Delete: resourceStorageNotificationDelete, - Importer: &resource_storage_notification_schema.ResourceImporter{ - State: resource_storage_notification_schema.ImportStatePassthrough, - }, - - Schema: map[string]*resource_storage_notification_schema.Schema{ - "bucket": { - Type: resource_storage_notification_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the bucket.`, - }, - - "payload_format": { - Type: resource_storage_notification_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_storage_notification_validation.StringInSlice([]string{"JSON_API_V1", "NONE"}, false), - Description: `The desired content of the Payload. One of "JSON_API_V1" or "NONE".`, - }, - - "topic": { - Type: resource_storage_notification_schema.TypeString, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The Cloud Pub/Sub topic to which this subscription publishes. Expects either the topic name, assumed to belong to the default GCP provider project, or the project-level name, i.e. projects/my-gcp-project/topics/my-topic or my-topic. 
If the project is not set in the provider, you will need to use the project-level name.`, - }, - - "custom_attributes": { - Type: resource_storage_notification_schema.TypeMap, - Optional: true, - ForceNew: true, - Elem: &resource_storage_notification_schema.Schema{ - Type: resource_storage_notification_schema.TypeString, - }, - Description: ` A set of key/value attribute pairs to attach to each Cloud Pub/Sub message published for this notification subscription`, - }, - - "event_types": { - Type: resource_storage_notification_schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &resource_storage_notification_schema.Schema{ - Type: resource_storage_notification_schema.TypeString, - ValidateFunc: resource_storage_notification_validation.StringInSlice([]string{ - "OBJECT_FINALIZE", "OBJECT_METADATA_UPDATE", "OBJECT_DELETE", "OBJECT_ARCHIVE"}, - false), - }, - Description: `List of event type filters for this notification config. If not specified, Cloud Storage will send notifications for all event types. The valid types are: "OBJECT_FINALIZE", "OBJECT_METADATA_UPDATE", "OBJECT_DELETE", "OBJECT_ARCHIVE"`, - }, - - "object_name_prefix": { - Type: resource_storage_notification_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Specifies a prefix path filter for this notification config. 
Cloud Storage will only send notifications for objects in this bucket whose names begin with the specified prefix.`, - }, - - "notification_id": { - Type: resource_storage_notification_schema.TypeString, - Computed: true, - Description: `The ID of the created notification.`, - }, - - "self_link": { - Type: resource_storage_notification_schema.TypeString, - Computed: true, - Description: `The URI of the created resource.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceStorageNotificationCreate(d *resource_storage_notification_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - - topicName := d.Get("topic").(string) - computedTopicName := getComputedTopicName("", topicName) - if computedTopicName != topicName { - project, err := getProject(d, config) - if err != nil { - return err - } - computedTopicName = getComputedTopicName(project, topicName) - } - - storageNotification := &resource_storage_notification_storage.Notification{ - CustomAttributes: expandStringMap(d, "custom_attributes"), - EventTypes: convertStringSet(d.Get("event_types").(*resource_storage_notification_schema.Set)), - ObjectNamePrefix: d.Get("object_name_prefix").(string), - PayloadFormat: d.Get("payload_format").(string), - Topic: computedTopicName, - } - - res, err := config.NewStorageClient(userAgent).Notifications.Insert(bucket, storageNotification).Do() - if err != nil { - return resource_storage_notification_fmt.Errorf("Error creating notification config for bucket %s: %v", bucket, err) - } - - d.SetId(resource_storage_notification_fmt.Sprintf("%s/notificationConfigs/%s", bucket, res.Id)) - - return resourceStorageNotificationRead(d, meta) -} - -func resourceStorageNotificationRead(d *resource_storage_notification_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := 
generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket, notificationID := resourceStorageNotificationParseID(d.Id()) - - res, err := config.NewStorageClient(userAgent).Notifications.Get(bucket, notificationID).Do() - if err != nil { - return handleNotFoundError(err, d, resource_storage_notification_fmt.Sprintf("Notification configuration %s for bucket %s", notificationID, bucket)) - } - - if err := d.Set("bucket", bucket); err != nil { - return resource_storage_notification_fmt.Errorf("Error setting bucket: %s", err) - } - if err := d.Set("payload_format", res.PayloadFormat); err != nil { - return resource_storage_notification_fmt.Errorf("Error setting payload_format: %s", err) - } - if err := d.Set("topic", res.Topic); err != nil { - return resource_storage_notification_fmt.Errorf("Error setting topic: %s", err) - } - if err := d.Set("object_name_prefix", res.ObjectNamePrefix); err != nil { - return resource_storage_notification_fmt.Errorf("Error setting object_name_prefix: %s", err) - } - if err := d.Set("event_types", res.EventTypes); err != nil { - return resource_storage_notification_fmt.Errorf("Error setting event_types: %s", err) - } - if err := d.Set("notification_id", notificationID); err != nil { - return resource_storage_notification_fmt.Errorf("Error setting notification_id: %s", err) - } - if err := d.Set("self_link", res.SelfLink); err != nil { - return resource_storage_notification_fmt.Errorf("Error setting self_link: %s", err) - } - if err := d.Set("custom_attributes", res.CustomAttributes); err != nil { - return resource_storage_notification_fmt.Errorf("Error setting custom_attributes: %s", err) - } - - return nil -} - -func resourceStorageNotificationDelete(d *resource_storage_notification_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket, notificationID := 
resourceStorageNotificationParseID(d.Id()) - - err = config.NewStorageClient(userAgent).Notifications.Delete(bucket, notificationID).Do() - if err != nil { - return resource_storage_notification_fmt.Errorf("Error deleting notification configuration %s for bucket %s: %v", notificationID, bucket, err) - } - - return nil -} - -func resourceStorageNotificationParseID(id string) (string, string) { - - parts := resource_storage_notification_strings.Split(id, "/") - - return parts[0], parts[2] -} - -func resourceStorageObjectAccessControl() *resource_storage_object_access_control_schema.Resource { - return &resource_storage_object_access_control_schema.Resource{ - Create: resourceStorageObjectAccessControlCreate, - Read: resourceStorageObjectAccessControlRead, - Update: resourceStorageObjectAccessControlUpdate, - Delete: resourceStorageObjectAccessControlDelete, - - Importer: &resource_storage_object_access_control_schema.ResourceImporter{ - State: resourceStorageObjectAccessControlImport, - }, - - Timeouts: &resource_storage_object_access_control_schema.ResourceTimeout{ - Create: resource_storage_object_access_control_schema.DefaultTimeout(4 * resource_storage_object_access_control_time.Minute), - Update: resource_storage_object_access_control_schema.DefaultTimeout(4 * resource_storage_object_access_control_time.Minute), - Delete: resource_storage_object_access_control_schema.DefaultTimeout(4 * resource_storage_object_access_control_time.Minute), - }, - - Schema: map[string]*resource_storage_object_access_control_schema.Schema{ - "bucket": { - Type: resource_storage_object_access_control_schema.TypeString, - Required: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of the bucket.`, - }, - "entity": { - Type: resource_storage_object_access_control_schema.TypeString, - Required: true, - Description: `The entity holding the permission, in one of the following forms: - * user-{{userId}} - * user-{{email}} (such as "user-liz@example.com") - * 
group-{{groupId}} - * group-{{email}} (such as "group-example@googlegroups.com") - * domain-{{domain}} (such as "domain-example.com") - * project-team-{{projectId}} - * allUsers - * allAuthenticatedUsers`, - }, - "object": { - Type: resource_storage_object_access_control_schema.TypeString, - Required: true, - Description: `The name of the object to apply the access control to.`, - }, - "role": { - Type: resource_storage_object_access_control_schema.TypeString, - Required: true, - ValidateFunc: resource_storage_object_access_control_validation.StringInSlice([]string{"OWNER", "READER"}, false), - Description: `The access permission for the entity. Possible values: ["OWNER", "READER"]`, - }, - "domain": { - Type: resource_storage_object_access_control_schema.TypeString, - Computed: true, - Description: `The domain associated with the entity.`, - }, - "email": { - Type: resource_storage_object_access_control_schema.TypeString, - Computed: true, - Description: `The email address associated with the entity.`, - }, - "entity_id": { - Type: resource_storage_object_access_control_schema.TypeString, - Computed: true, - Description: `The ID for the entity`, - }, - "generation": { - Type: resource_storage_object_access_control_schema.TypeInt, - Computed: true, - Description: `The content generation of the object, if applied to an object.`, - }, - "project_team": { - Type: resource_storage_object_access_control_schema.TypeList, - Computed: true, - Description: `The project team associated with the entity`, - Elem: &resource_storage_object_access_control_schema.Resource{ - Schema: map[string]*resource_storage_object_access_control_schema.Schema{ - "project_number": { - Type: resource_storage_object_access_control_schema.TypeString, - Optional: true, - Description: `The project team associated with the entity`, - }, - "team": { - Type: resource_storage_object_access_control_schema.TypeString, - Optional: true, - ValidateFunc: 
resource_storage_object_access_control_validation.StringInSlice([]string{"editors", "owners", "viewers", ""}, false), - Description: `The team. Possible values: ["editors", "owners", "viewers"]`, - }, - }, - }, - }, - }, - UseJSONNumber: true, - } -} - -func resourceStorageObjectAccessControlCreate(d *resource_storage_object_access_control_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - bucketProp, err := expandStorageObjectAccessControlBucket(d.Get("bucket"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket"); !isEmptyValue(resource_storage_object_access_control_reflect.ValueOf(bucketProp)) && (ok || !resource_storage_object_access_control_reflect.DeepEqual(v, bucketProp)) { - obj["bucket"] = bucketProp - } - entityProp, err := expandStorageObjectAccessControlEntity(d.Get("entity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entity"); !isEmptyValue(resource_storage_object_access_control_reflect.ValueOf(entityProp)) && (ok || !resource_storage_object_access_control_reflect.DeepEqual(v, entityProp)) { - obj["entity"] = entityProp - } - objectProp, err := expandStorageObjectAccessControlObject(d.Get("object"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("object"); !isEmptyValue(resource_storage_object_access_control_reflect.ValueOf(objectProp)) && (ok || !resource_storage_object_access_control_reflect.DeepEqual(v, objectProp)) { - obj["object"] = objectProp - } - roleProp, err := expandStorageObjectAccessControlRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("role"); !isEmptyValue(resource_storage_object_access_control_reflect.ValueOf(roleProp)) && (ok || !resource_storage_object_access_control_reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp 
- } - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/o/{{%object}}/acl") - if err != nil { - return err - } - - resource_storage_object_access_control_log.Printf("[DEBUG] Creating new ObjectAccessControl: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_storage_object_access_control_schema.TimeoutCreate)) - if err != nil { - return resource_storage_object_access_control_fmt.Errorf("Error creating ObjectAccessControl: %s", err) - } - - id, err := replaceVars(d, config, "{{bucket}}/{{object}}/{{entity}}") - if err != nil { - return resource_storage_object_access_control_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_storage_object_access_control_log.Printf("[DEBUG] Finished creating ObjectAccessControl %q: %#v", d.Id(), res) - - return resourceStorageObjectAccessControlRead(d, meta) -} - -func resourceStorageObjectAccessControlRead(d *resource_storage_object_access_control_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/o/{{%object}}/acl/{{entity}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_storage_object_access_control_fmt.Sprintf("StorageObjectAccessControl %q", d.Id())) - } - - if err := d.Set("bucket", 
flattenStorageObjectAccessControlBucket(res["bucket"], d, config)); err != nil { - return resource_storage_object_access_control_fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - if err := d.Set("domain", flattenStorageObjectAccessControlDomain(res["domain"], d, config)); err != nil { - return resource_storage_object_access_control_fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - if err := d.Set("email", flattenStorageObjectAccessControlEmail(res["email"], d, config)); err != nil { - return resource_storage_object_access_control_fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - if err := d.Set("entity", flattenStorageObjectAccessControlEntity(res["entity"], d, config)); err != nil { - return resource_storage_object_access_control_fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - if err := d.Set("entity_id", flattenStorageObjectAccessControlEntityId(res["entityId"], d, config)); err != nil { - return resource_storage_object_access_control_fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - if err := d.Set("generation", flattenStorageObjectAccessControlGeneration(res["generation"], d, config)); err != nil { - return resource_storage_object_access_control_fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - if err := d.Set("object", flattenStorageObjectAccessControlObject(res["object"], d, config)); err != nil { - return resource_storage_object_access_control_fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - if err := d.Set("project_team", flattenStorageObjectAccessControlProjectTeam(res["projectTeam"], d, config)); err != nil { - return resource_storage_object_access_control_fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - if err := d.Set("role", flattenStorageObjectAccessControlRole(res["role"], d, config)); err != nil { - return resource_storage_object_access_control_fmt.Errorf("Error reading ObjectAccessControl: %s", err) - } - - return nil -} - -func 
resourceStorageObjectAccessControlUpdate(d *resource_storage_object_access_control_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - bucketProp, err := expandStorageObjectAccessControlBucket(d.Get("bucket"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("bucket"); !isEmptyValue(resource_storage_object_access_control_reflect.ValueOf(v)) && (ok || !resource_storage_object_access_control_reflect.DeepEqual(v, bucketProp)) { - obj["bucket"] = bucketProp - } - entityProp, err := expandStorageObjectAccessControlEntity(d.Get("entity"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("entity"); !isEmptyValue(resource_storage_object_access_control_reflect.ValueOf(v)) && (ok || !resource_storage_object_access_control_reflect.DeepEqual(v, entityProp)) { - obj["entity"] = entityProp - } - objectProp, err := expandStorageObjectAccessControlObject(d.Get("object"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("object"); !isEmptyValue(resource_storage_object_access_control_reflect.ValueOf(v)) && (ok || !resource_storage_object_access_control_reflect.DeepEqual(v, objectProp)) { - obj["object"] = objectProp - } - roleProp, err := expandStorageObjectAccessControlRole(d.Get("role"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("role"); !isEmptyValue(resource_storage_object_access_control_reflect.ValueOf(v)) && (ok || !resource_storage_object_access_control_reflect.DeepEqual(v, roleProp)) { - obj["role"] = roleProp - } - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, 
"{{StorageBasePath}}b/{{bucket}}/o/{{%object}}/acl/{{entity}}") - if err != nil { - return err - } - - resource_storage_object_access_control_log.Printf("[DEBUG] Updating ObjectAccessControl %q: %#v", d.Id(), obj) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PUT", billingProject, url, userAgent, obj, d.Timeout(resource_storage_object_access_control_schema.TimeoutUpdate)) - - if err != nil { - return resource_storage_object_access_control_fmt.Errorf("Error updating ObjectAccessControl %q: %s", d.Id(), err) - } else { - resource_storage_object_access_control_log.Printf("[DEBUG] Finished updating ObjectAccessControl %q: %#v", d.Id(), res) - } - - return resourceStorageObjectAccessControlRead(d, meta) -} - -func resourceStorageObjectAccessControlDelete(d *resource_storage_object_access_control_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{StorageBasePath}}b/{{bucket}}/o/{{%object}}/acl/{{entity}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_storage_object_access_control_log.Printf("[DEBUG] Deleting ObjectAccessControl %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_storage_object_access_control_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "ObjectAccessControl") - } - - resource_storage_object_access_control_log.Printf("[DEBUG] Finished deleting ObjectAccessControl %q: %#v", 
d.Id(), res) - return nil -} - -func resourceStorageObjectAccessControlImport(d *resource_storage_object_access_control_schema.ResourceData, meta interface{}) ([]*resource_storage_object_access_control_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "(?P[^/]+)/(?P.+)/(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "{{bucket}}/{{object}}/{{entity}}") - if err != nil { - return nil, resource_storage_object_access_control_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_storage_object_access_control_schema.ResourceData{d}, nil -} - -func flattenStorageObjectAccessControlBucket(v interface{}, d *resource_storage_object_access_control_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return ConvertSelfLinkToV1(v.(string)) -} - -func flattenStorageObjectAccessControlDomain(v interface{}, d *resource_storage_object_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageObjectAccessControlEmail(v interface{}, d *resource_storage_object_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageObjectAccessControlEntity(v interface{}, d *resource_storage_object_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageObjectAccessControlEntityId(v interface{}, d *resource_storage_object_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageObjectAccessControlGeneration(v interface{}, d *resource_storage_object_access_control_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_storage_object_access_control_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - 
- return v -} - -func flattenStorageObjectAccessControlObject(v interface{}, d *resource_storage_object_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageObjectAccessControlProjectTeam(v interface{}, d *resource_storage_object_access_control_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["project_number"] = - flattenStorageObjectAccessControlProjectTeamProjectNumber(original["projectNumber"], d, config) - transformed["team"] = - flattenStorageObjectAccessControlProjectTeamTeam(original["team"], d, config) - return []interface{}{transformed} -} - -func flattenStorageObjectAccessControlProjectTeamProjectNumber(v interface{}, d *resource_storage_object_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageObjectAccessControlProjectTeamTeam(v interface{}, d *resource_storage_object_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenStorageObjectAccessControlRole(v interface{}, d *resource_storage_object_access_control_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandStorageObjectAccessControlBucket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageObjectAccessControlEntity(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageObjectAccessControlObject(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandStorageObjectAccessControlRole(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceStorageObjectAcl() *resource_storage_object_acl_schema.Resource { - return 
&resource_storage_object_acl_schema.Resource{ - Create: resourceStorageObjectAclCreate, - Read: resourceStorageObjectAclRead, - Update: resourceStorageObjectAclUpdate, - Delete: resourceStorageObjectAclDelete, - CustomizeDiff: resourceStorageObjectAclDiff, - - Schema: map[string]*resource_storage_object_acl_schema.Schema{ - "bucket": { - Type: resource_storage_object_acl_schema.TypeString, - Required: true, - ForceNew: true, - }, - - "object": { - Type: resource_storage_object_acl_schema.TypeString, - Required: true, - ForceNew: true, - }, - - "predefined_acl": { - Type: resource_storage_object_acl_schema.TypeString, - Optional: true, - ConflictsWith: []string{"role_entity"}, - ValidateFunc: resource_storage_object_acl_validation.StringInSlice([]string{"private", "bucketOwnerRead", "bucketOwnerFullControl", "projectPrivate", "authenticatedRead", "publicRead", ""}, false), - }, - - "role_entity": { - Type: resource_storage_object_acl_schema.TypeSet, - Optional: true, - Computed: true, - Elem: &resource_storage_object_acl_schema.Schema{ - Type: resource_storage_object_acl_schema.TypeString, - ValidateFunc: validateRoleEntityPair, - }, - ConflictsWith: []string{"predefined_acl"}, - }, - }, - UseJSONNumber: true, - } -} - -func resourceStorageObjectAclDiff(_ resource_storage_object_acl_context.Context, diff *resource_storage_object_acl_schema.ResourceDiff, meta interface{}) error { - config := meta.(*Config) - bucket, ok := diff.GetOk("bucket") - if !ok { - - return nil - } - object, ok := diff.GetOk("object") - if !ok { - - return nil - } - - sObject, err := config.NewStorageClient(config.userAgent).Objects.Get(bucket.(string), object.(string)).Projection("full").Do() - if err != nil { - - return nil - } - - objectOwner := sObject.Owner.Entity - ownerRole := resource_storage_object_acl_fmt.Sprintf("%s:%s", "OWNER", objectOwner) - oldRoleEntity, newRoleEntity := diff.GetChange("role_entity") - - for _, v := range 
newRoleEntity.(*resource_storage_object_acl_schema.Set).List() { - res := getValidatedRoleEntityPair(v.(string)) - - if res.Entity == objectOwner && res.Role != "OWNER" { - return resource_storage_object_acl_fmt.Errorf("New state tried setting object owner entity (%s) to non-'OWNER' role", objectOwner) - } - } - - if oldRoleEntity.(*resource_storage_object_acl_schema.Set).Contains(ownerRole) && - !newRoleEntity.(*resource_storage_object_acl_schema.Set).Contains(ownerRole) { - newRoleEntity.(*resource_storage_object_acl_schema.Set).Add(ownerRole) - return diff.SetNew("role_entity", newRoleEntity) - } - - return nil -} - -func getObjectAclId(object string) string { - return object + "-acl" -} - -func resourceStorageObjectAclCreate(d *resource_storage_object_acl_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - object := d.Get("object").(string) - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - if predefinedAcl, ok := d.GetOk("predefined_acl"); ok { - res, err := config.NewStorageClient(userAgent).Objects.Get(bucket, object).Do() - if err != nil { - return resource_storage_object_acl_fmt.Errorf("Error reading object %s in %s: %v", object, bucket, err) - } - - _, err = config.NewStorageClient(userAgent).Objects.Update(bucket, object, res).PredefinedAcl(predefinedAcl.(string)).Do() - if err != nil { - return resource_storage_object_acl_fmt.Errorf("Error updating object %s in %s: %v", object, bucket, err) - } - - return resourceStorageObjectAclRead(d, meta) - } else if reMap := d.Get("role_entity").(*resource_storage_object_acl_schema.Set); reMap.Len() > 0 { - sObject, err := config.NewStorageClient(userAgent).Objects.Get(bucket, object).Projection("full").Do() - if err 
!= nil { - return resource_storage_object_acl_fmt.Errorf("Error reading object %s in %s: %v", object, bucket, err) - } - - objectOwner := sObject.Owner.Entity - roleEntitiesUpstream, err := getRoleEntitiesAsStringsFromApi(config, bucket, object, userAgent) - if err != nil { - return resource_storage_object_acl_fmt.Errorf("Error reading object %s in %s: %v", object, bucket, err) - } - - create, update, remove, err := getRoleEntityChange(roleEntitiesUpstream, convertStringSet(reMap), objectOwner) - if err != nil { - return resource_storage_object_acl_fmt.Errorf("Error reading object %s in %s. Invalid schema: %v", object, bucket, err) - } - - err = performStorageObjectRoleEntityOperations(create, update, remove, config, bucket, object, userAgent) - if err != nil { - return resource_storage_object_acl_fmt.Errorf("Error creating object %s in %s: %v", object, bucket, err) - } - - return resourceStorageObjectAclRead(d, meta) - } - - return resource_storage_object_acl_fmt.Errorf("Error, you must specify either \"predefined_acl\" or \"role_entity\"") -} - -func resourceStorageObjectAclRead(d *resource_storage_object_acl_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - object := d.Get("object").(string) - - roleEntities, err := getRoleEntitiesAsStringsFromApi(config, bucket, object, userAgent) - if err != nil { - return handleNotFoundError(err, d, resource_storage_object_acl_fmt.Sprintf("Storage Object ACL for Bucket %q", d.Get("bucket").(string))) - } - - err = d.Set("role_entity", roleEntities) - if err != nil { - return err - } - - d.SetId(getObjectAclId(object)) - return nil -} - -func resourceStorageObjectAclUpdate(d *resource_storage_object_acl_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - 
return err - } - - bucket := d.Get("bucket").(string) - object := d.Get("object").(string) - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - if _, ok := d.GetOk("predefined_acl"); d.HasChange("predefined_acl") && ok { - res, err := config.NewStorageClient(userAgent).Objects.Get(bucket, object).Do() - if err != nil { - return resource_storage_object_acl_fmt.Errorf("Error reading object %s in %s: %v", object, bucket, err) - } - - _, err = config.NewStorageClient(userAgent).Objects.Update(bucket, object, res).PredefinedAcl(d.Get("predefined_acl").(string)).Do() - if err != nil { - return resource_storage_object_acl_fmt.Errorf("Error updating object %s in %s: %v", object, bucket, err) - } - - return resourceStorageObjectAclRead(d, meta) - } else { - sObject, err := config.NewStorageClient(userAgent).Objects.Get(bucket, object).Projection("full").Do() - if err != nil { - return resource_storage_object_acl_fmt.Errorf("Error reading object %s in %s: %v", object, bucket, err) - } - - objectOwner := sObject.Owner.Entity - - o, n := d.GetChange("role_entity") - create, update, remove, err := getRoleEntityChange( - convertStringSet(o.(*resource_storage_object_acl_schema.Set)), - convertStringSet(n.(*resource_storage_object_acl_schema.Set)), - objectOwner) - if err != nil { - return resource_storage_object_acl_fmt.Errorf("Error reading object %s in %s. 
Invalid schema: %v", object, bucket, err) - } - - err = performStorageObjectRoleEntityOperations(create, update, remove, config, bucket, object, userAgent) - if err != nil { - return resource_storage_object_acl_fmt.Errorf("Error updating object %s in %s: %v", object, bucket, err) - } - - return resourceStorageObjectAclRead(d, meta) - } -} - -func resourceStorageObjectAclDelete(d *resource_storage_object_acl_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - bucket := d.Get("bucket").(string) - object := d.Get("object").(string) - - lockName, err := replaceVars(d, config, "storage/buckets/{{bucket}}/objects/{{object}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - res, err := config.NewStorageClient(userAgent).Objects.Get(bucket, object).Do() - if err != nil { - return resource_storage_object_acl_fmt.Errorf("Error reading object %s in %s: %v", object, bucket, err) - } - - _, err = config.NewStorageClient(userAgent).Objects.Update(bucket, object, res).PredefinedAcl("private").Do() - if err != nil { - return resource_storage_object_acl_fmt.Errorf("Error updating object %s in %s: %v", object, bucket, err) - } - - return nil -} - -func getRoleEntitiesAsStringsFromApi(config *Config, bucket, object, userAgent string) ([]string, error) { - var roleEntities []string - res, err := config.NewStorageClient(userAgent).ObjectAccessControls.List(bucket, object).Do() - if err != nil { - return nil, err - } - - for _, roleEntity := range res.Items { - role := roleEntity.Role - entity := roleEntity.Entity - roleEntities = append(roleEntities, resource_storage_object_acl_fmt.Sprintf("%s:%s", role, entity)) - } - - return roleEntities, nil -} - -func getRoleEntityChange(old []string, new []string, owner string) (create, update, remove []*RoleEntity, err error) { - newEntitiesUsed := make(map[string]struct{}) 
- for _, v := range new { - res := getValidatedRoleEntityPair(v) - if _, ok := newEntitiesUsed[res.Entity]; ok { - return nil, nil, nil, resource_storage_object_acl_fmt.Errorf("New state has duplicate Entity: %v", res.Entity) - } - - newEntitiesUsed[res.Entity] = struct{}{} - } - - oldEntitiesUsed := make(map[string]string) - for _, v := range old { - res := getValidatedRoleEntityPair(v) - - if res.Entity == owner { - continue - } - - oldEntitiesUsed[res.Entity] = res.Role - } - - for _, re := range new { - res := getValidatedRoleEntityPair(re) - - if res.Entity == owner { - continue - } - - if v, ok := oldEntitiesUsed[res.Entity]; ok { - if res.Role != v { - update = append(update, res) - } - - delete(oldEntitiesUsed, res.Entity) - } else { - create = append(create, res) - } - } - - for _, v := range old { - res := getValidatedRoleEntityPair(v) - - if _, ok := oldEntitiesUsed[res.Entity]; ok { - remove = append(remove, res) - } - } - - return create, update, remove, nil -} - -func performStorageObjectRoleEntityOperations(create []*RoleEntity, update []*RoleEntity, remove []*RoleEntity, config *Config, bucket, object, userAgent string) error { - for _, v := range create { - objectAccessControl := &resource_storage_object_acl_storage.ObjectAccessControl{ - Role: v.Role, - Entity: v.Entity, - } - _, err := config.NewStorageClient(userAgent).ObjectAccessControls.Insert(bucket, object, objectAccessControl).Do() - if err != nil { - return resource_storage_object_acl_fmt.Errorf("Error inserting ACL item %s for object %s in %s: %v", v.Entity, object, bucket, err) - } - } - - for _, v := range update { - objectAccessControl := &resource_storage_object_acl_storage.ObjectAccessControl{ - Role: v.Role, - Entity: v.Entity, - } - _, err := config.NewStorageClient(userAgent).ObjectAccessControls.Update(bucket, object, v.Entity, objectAccessControl).Do() - if err != nil { - return resource_storage_object_acl_fmt.Errorf("Error updating ACL item %s for object %s in %s: %v", 
v.Entity, object, bucket, err) - } - } - - for _, v := range remove { - err := config.NewStorageClient(userAgent).ObjectAccessControls.Delete(bucket, object, v.Entity).Do() - if err != nil { - return resource_storage_object_acl_fmt.Errorf("Error deleting ACL item %s for object %s in %s: %v", v.Entity, object, bucket, err) - } - } - - return nil -} - -func validateRoleEntityPair(v interface{}, k string) (ws []string, errors []error) { - split := resource_storage_object_acl_strings.Split(v.(string), ":") - if len(split) != 2 { - errors = append(errors, resource_storage_object_acl_fmt.Errorf("Role entity pairs must be formatted as 'ROLE:entity': %s", v)) - } - - return -} - -func getValidatedRoleEntityPair(roleEntity string) *RoleEntity { - split := resource_storage_object_acl_strings.Split(roleEntity, ":") - return &RoleEntity{Role: split[0], Entity: split[1]} -} - -var ( - objectConditionsKeys = []string{ - "transfer_spec.0.object_conditions.0.min_time_elapsed_since_last_modification", - "transfer_spec.0.object_conditions.0.max_time_elapsed_since_last_modification", - "transfer_spec.0.object_conditions.0.include_prefixes", - "transfer_spec.0.object_conditions.0.exclude_prefixes", - } - - transferOptionsKeys = []string{ - "transfer_spec.0.transfer_options.0.overwrite_objects_already_existing_in_sink", - "transfer_spec.0.transfer_options.0.delete_objects_unique_in_sink", - "transfer_spec.0.transfer_options.0.delete_objects_from_source_after_transfer", - } - - transferSpecDataSourceKeys = []string{ - "transfer_spec.0.gcs_data_source", - "transfer_spec.0.aws_s3_data_source", - "transfer_spec.0.http_data_source", - "transfer_spec.0.azure_blob_storage_data_source", - } -) - -func resourceStorageTransferJob() *resource_storage_transfer_job_schema.Resource { - return &resource_storage_transfer_job_schema.Resource{ - Create: resourceStorageTransferJobCreate, - Read: resourceStorageTransferJobRead, - Update: resourceStorageTransferJobUpdate, - Delete: 
resourceStorageTransferJobDelete, - Importer: &resource_storage_transfer_job_schema.ResourceImporter{ - State: resourceStorageTransferJobStateImporter, - }, - - Schema: map[string]*resource_storage_transfer_job_schema.Schema{ - "name": { - Type: resource_storage_transfer_job_schema.TypeString, - Computed: true, - Description: `The name of the Transfer Job.`, - }, - "description": { - Type: resource_storage_transfer_job_schema.TypeString, - Required: true, - ValidateFunc: resource_storage_transfer_job_validation.StringLenBetween(0, 1024), - Description: `Unique description to identify the Transfer Job.`, - }, - "project": { - Type: resource_storage_transfer_job_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The project in which the resource belongs. If it is not provided, the provider project is used.`, - }, - "transfer_spec": { - Type: resource_storage_transfer_job_schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &resource_storage_transfer_job_schema.Resource{ - Schema: map[string]*resource_storage_transfer_job_schema.Schema{ - "object_conditions": objectConditionsSchema(), - "transfer_options": transferOptionsSchema(), - "gcs_data_sink": { - Type: resource_storage_transfer_job_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: gcsDataSchema(), - Description: `A Google Cloud Storage data sink.`, - }, - "gcs_data_source": { - Type: resource_storage_transfer_job_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: gcsDataSchema(), - ExactlyOneOf: transferSpecDataSourceKeys, - Description: `A Google Cloud Storage data source.`, - }, - "aws_s3_data_source": { - Type: resource_storage_transfer_job_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: awsS3DataSchema(), - ExactlyOneOf: transferSpecDataSourceKeys, - Description: `An AWS S3 data source.`, - }, - "http_data_source": { - Type: resource_storage_transfer_job_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: httpDataSchema(), - 
ExactlyOneOf: transferSpecDataSourceKeys, - Description: `A HTTP URL data source.`, - }, - "azure_blob_storage_data_source": { - Type: resource_storage_transfer_job_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: azureBlobStorageDataSchema(), - ExactlyOneOf: transferSpecDataSourceKeys, - Description: `An Azure Blob Storage data source.`, - }, - }, - }, - Description: `Transfer specification.`, - }, - "schedule": { - Type: resource_storage_transfer_job_schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &resource_storage_transfer_job_schema.Resource{ - Schema: map[string]*resource_storage_transfer_job_schema.Schema{ - "schedule_start_date": { - Type: resource_storage_transfer_job_schema.TypeList, - Required: true, - ForceNew: true, - MaxItems: 1, - Elem: dateObjectSchema(), - Description: `The first day the recurring transfer is scheduled to run. If schedule_start_date is in the past, the transfer will run for the first time on the following day.`, - }, - "schedule_end_date": { - Type: resource_storage_transfer_job_schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: dateObjectSchema(), - Description: `The last day the recurring transfer will be run. If schedule_end_date is the same as schedule_start_date, the transfer will be executed only once.`, - }, - "start_time_of_day": { - Type: resource_storage_transfer_job_schema.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: timeObjectSchema(), - DiffSuppressFunc: diffSuppressEmptyStartTimeOfDay, - Description: `The time in UTC at which the transfer will be scheduled to start in a day. Transfers may start later than this time. If not specified, recurring and one-time transfers that are scheduled to run today will run immediately; recurring transfers that are scheduled to run on a future date will start at approximately midnight UTC on that date. 
Note that when configuring a transfer with the Cloud Platform Console, the transfer's start time in a day is specified in your local timezone.`, - }, - }, - }, - Description: `Schedule specification defining when the Transfer Job should be scheduled to start, end and what time to run.`, - }, - "status": { - Type: resource_storage_transfer_job_schema.TypeString, - Optional: true, - Default: "ENABLED", - ValidateFunc: resource_storage_transfer_job_validation.StringInSlice([]string{"ENABLED", "DISABLED", "DELETED"}, false), - Description: `Status of the job. Default: ENABLED. NOTE: The effect of the new job status takes place during a subsequent job run. For example, if you change the job status from ENABLED to DISABLED, and an operation spawned by the transfer is running, the status change would not affect the current operation.`, - }, - "creation_time": { - Type: resource_storage_transfer_job_schema.TypeString, - Computed: true, - Description: `When the Transfer Job was created.`, - }, - "last_modification_time": { - Type: resource_storage_transfer_job_schema.TypeString, - Computed: true, - Description: `When the Transfer Job was last modified.`, - }, - "deletion_time": { - Type: resource_storage_transfer_job_schema.TypeString, - Computed: true, - Description: `When the Transfer Job was deleted.`, - }, - }, - UseJSONNumber: true, - } -} - -func objectConditionsSchema() *resource_storage_transfer_job_schema.Schema { - return &resource_storage_transfer_job_schema.Schema{ - Type: resource_storage_transfer_job_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &resource_storage_transfer_job_schema.Resource{ - Schema: map[string]*resource_storage_transfer_job_schema.Schema{ - "min_time_elapsed_since_last_modification": { - Type: resource_storage_transfer_job_schema.TypeString, - ValidateFunc: validateDuration(), - Optional: true, - AtLeastOneOf: objectConditionsKeys, - Description: `A duration in seconds with up to nine fractional digits, terminated by 's'. 
Example: "3.5s".`, - }, - "max_time_elapsed_since_last_modification": { - Type: resource_storage_transfer_job_schema.TypeString, - ValidateFunc: validateDuration(), - Optional: true, - AtLeastOneOf: objectConditionsKeys, - Description: `A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, - }, - "include_prefixes": { - Type: resource_storage_transfer_job_schema.TypeList, - Optional: true, - AtLeastOneOf: objectConditionsKeys, - Elem: &resource_storage_transfer_job_schema.Schema{ - MaxItems: 1000, - Type: resource_storage_transfer_job_schema.TypeString, - }, - Description: `If include_refixes is specified, objects that satisfy the object conditions must have names that start with one of the include_prefixes and that do not start with any of the exclude_prefixes. If include_prefixes is not specified, all objects except those that have names starting with one of the exclude_prefixes must satisfy the object conditions.`, - }, - "exclude_prefixes": { - Type: resource_storage_transfer_job_schema.TypeList, - Optional: true, - AtLeastOneOf: objectConditionsKeys, - Elem: &resource_storage_transfer_job_schema.Schema{ - MaxItems: 1000, - Type: resource_storage_transfer_job_schema.TypeString, - }, - Description: `exclude_prefixes must follow the requirements described for include_prefixes.`, - }, - }, - }, - Description: `Only objects that satisfy these object conditions are included in the set of data source and data sink objects. 
Object conditions based on objects' last_modification_time do not exclude objects in a data sink.`, - } -} - -func transferOptionsSchema() *resource_storage_transfer_job_schema.Schema { - return &resource_storage_transfer_job_schema.Schema{ - Type: resource_storage_transfer_job_schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &resource_storage_transfer_job_schema.Resource{ - Schema: map[string]*resource_storage_transfer_job_schema.Schema{ - "overwrite_objects_already_existing_in_sink": { - Type: resource_storage_transfer_job_schema.TypeBool, - Optional: true, - AtLeastOneOf: transferOptionsKeys, - Description: `Whether overwriting objects that already exist in the sink is allowed.`, - }, - "delete_objects_unique_in_sink": { - Type: resource_storage_transfer_job_schema.TypeBool, - Optional: true, - AtLeastOneOf: transferOptionsKeys, - ConflictsWith: []string{"transfer_spec.transfer_options.delete_objects_from_source_after_transfer"}, - Description: `Whether objects that exist only in the sink should be deleted. Note that this option and delete_objects_from_source_after_transfer are mutually exclusive.`, - }, - "delete_objects_from_source_after_transfer": { - Type: resource_storage_transfer_job_schema.TypeBool, - Optional: true, - AtLeastOneOf: transferOptionsKeys, - ConflictsWith: []string{"transfer_spec.transfer_options.delete_objects_unique_in_sink"}, - Description: `Whether objects should be deleted from the source after they are transferred to the sink. Note that this option and delete_objects_unique_in_sink are mutually exclusive.`, - }, - }, - }, - Description: `Characteristics of how to treat files from datasource and sink during job. 
If the option delete_objects_unique_in_sink is true, object conditions based on objects' last_modification_time are ignored and do not exclude objects in a data source or a data sink.`, - } -} - -func timeObjectSchema() *resource_storage_transfer_job_schema.Resource { - return &resource_storage_transfer_job_schema.Resource{ - Schema: map[string]*resource_storage_transfer_job_schema.Schema{ - "hours": { - Type: resource_storage_transfer_job_schema.TypeInt, - Required: true, - ForceNew: true, - ValidateFunc: resource_storage_transfer_job_validation.IntBetween(0, 24), - Description: `Hours of day in 24 hour format. Should be from 0 to 23.`, - }, - "minutes": { - Type: resource_storage_transfer_job_schema.TypeInt, - Required: true, - ForceNew: true, - ValidateFunc: resource_storage_transfer_job_validation.IntBetween(0, 59), - Description: `Minutes of hour of day. Must be from 0 to 59.`, - }, - "seconds": { - Type: resource_storage_transfer_job_schema.TypeInt, - Required: true, - ForceNew: true, - ValidateFunc: resource_storage_transfer_job_validation.IntBetween(0, 60), - Description: `Seconds of minutes of the time. Must normally be from 0 to 59.`, - }, - "nanos": { - Type: resource_storage_transfer_job_schema.TypeInt, - Required: true, - ForceNew: true, - ValidateFunc: resource_storage_transfer_job_validation.IntBetween(0, 999999999), - Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, - }, - }, - } -} - -func dateObjectSchema() *resource_storage_transfer_job_schema.Resource { - return &resource_storage_transfer_job_schema.Resource{ - Schema: map[string]*resource_storage_transfer_job_schema.Schema{ - "year": { - Type: resource_storage_transfer_job_schema.TypeInt, - Required: true, - ForceNew: true, - ValidateFunc: resource_storage_transfer_job_validation.IntBetween(0, 9999), - Description: `Year of date. 
Must be from 1 to 9999.`, - }, - - "month": { - Type: resource_storage_transfer_job_schema.TypeInt, - Required: true, - ForceNew: true, - ValidateFunc: resource_storage_transfer_job_validation.IntBetween(1, 12), - Description: `Month of year. Must be from 1 to 12.`, - }, - - "day": { - Type: resource_storage_transfer_job_schema.TypeInt, - Required: true, - ForceNew: true, - ValidateFunc: resource_storage_transfer_job_validation.IntBetween(0, 31), - Description: `Day of month. Must be from 1 to 31 and valid for the year and month.`, - }, - }, - } -} - -func gcsDataSchema() *resource_storage_transfer_job_schema.Resource { - return &resource_storage_transfer_job_schema.Resource{ - Schema: map[string]*resource_storage_transfer_job_schema.Schema{ - "bucket_name": { - Required: true, - Type: resource_storage_transfer_job_schema.TypeString, - Description: `Google Cloud Storage bucket name.`, - }, - "path": { - Optional: true, - Computed: true, - Type: resource_storage_transfer_job_schema.TypeString, - Description: `Google Cloud Storage path in bucket to transfer`, - }, - }, - } -} - -func awsS3DataSchema() *resource_storage_transfer_job_schema.Resource { - return &resource_storage_transfer_job_schema.Resource{ - Schema: map[string]*resource_storage_transfer_job_schema.Schema{ - "bucket_name": { - Required: true, - Type: resource_storage_transfer_job_schema.TypeString, - Description: `S3 Bucket name.`, - }, - "aws_access_key": { - Type: resource_storage_transfer_job_schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &resource_storage_transfer_job_schema.Resource{ - Schema: map[string]*resource_storage_transfer_job_schema.Schema{ - "access_key_id": { - Type: resource_storage_transfer_job_schema.TypeString, - Required: true, - Sensitive: true, - Description: `AWS Key ID.`, - }, - "secret_access_key": { - Type: resource_storage_transfer_job_schema.TypeString, - Required: true, - Sensitive: true, - Description: `AWS Secret Access Key.`, - }, - }, - }, - Description: 
`AWS credentials block.`, - }, - }, - } -} - -func httpDataSchema() *resource_storage_transfer_job_schema.Resource { - return &resource_storage_transfer_job_schema.Resource{ - Schema: map[string]*resource_storage_transfer_job_schema.Schema{ - "list_url": { - Type: resource_storage_transfer_job_schema.TypeString, - Required: true, - Description: `The URL that points to the file that stores the object list entries. This file must allow public access. Currently, only URLs with HTTP and HTTPS schemes are supported.`, - }, - }, - } -} - -func azureBlobStorageDataSchema() *resource_storage_transfer_job_schema.Resource { - return &resource_storage_transfer_job_schema.Resource{ - Schema: map[string]*resource_storage_transfer_job_schema.Schema{ - "storage_account": { - Required: true, - Type: resource_storage_transfer_job_schema.TypeString, - Description: `The name of the Azure Storage account.`, - }, - "container": { - Required: true, - Type: resource_storage_transfer_job_schema.TypeString, - Description: `The container to transfer from the Azure Storage account.`, - }, - "path": { - Optional: true, - Computed: true, - Type: resource_storage_transfer_job_schema.TypeString, - Description: `Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. 
As such, it should generally not begin with a '/'.`, - }, - "azure_credentials": { - Type: resource_storage_transfer_job_schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &resource_storage_transfer_job_schema.Resource{ - Schema: map[string]*resource_storage_transfer_job_schema.Schema{ - "sas_token": { - Type: resource_storage_transfer_job_schema.TypeString, - Required: true, - Sensitive: true, - Description: `Azure shared access signature.`, - }, - }, - }, - Description: ` Credentials used to authenticate API requests to Azure.`, - }, - }, - } -} - -func diffSuppressEmptyStartTimeOfDay(k, old, new string, d *resource_storage_transfer_job_schema.ResourceData) bool { - return k == "schedule.0.start_time_of_day.#" && old == "1" && new == "0" -} - -func resourceStorageTransferJobCreate(d *resource_storage_transfer_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - transferJob := &resource_storage_transfer_job_storagetransfer.TransferJob{ - Description: d.Get("description").(string), - ProjectId: project, - Status: d.Get("status").(string), - Schedule: expandTransferSchedules(d.Get("schedule").([]interface{})), - TransferSpec: expandTransferSpecs(d.Get("transfer_spec").([]interface{})), - } - - var res *resource_storage_transfer_job_storagetransfer.TransferJob - - err = retry(func() error { - res, err = config.NewStorageTransferClient(userAgent).TransferJobs.Create(transferJob).Do() - return err - }) - - if err != nil { - resource_storage_transfer_job_fmt.Printf("Error creating transfer job %v: %v", transferJob, err) - return err - } - - if err := d.Set("name", res.Name); err != nil { - return resource_storage_transfer_job_fmt.Errorf("Error setting name: %s", err) - } - - name := GetResourceNameFromSelfLink(res.Name) - 
d.SetId(resource_storage_transfer_job_fmt.Sprintf("%s/%s", project, name)) - - return resourceStorageTransferJobRead(d, meta) -} - -func resourceStorageTransferJobRead(d *resource_storage_transfer_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - name := d.Get("name").(string) - res, err := config.NewStorageTransferClient(userAgent).TransferJobs.Get(name, project).Do() - if err != nil { - return handleNotFoundError(err, d, resource_storage_transfer_job_fmt.Sprintf("Transfer Job %q", name)) - } - resource_storage_transfer_job_log.Printf("[DEBUG] Read transfer job: %v in project: %v \n\n", res.Name, res.ProjectId) - - if err := d.Set("project", res.ProjectId); err != nil { - return resource_storage_transfer_job_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("description", res.Description); err != nil { - return resource_storage_transfer_job_fmt.Errorf("Error setting description: %s", err) - } - if err := d.Set("status", res.Status); err != nil { - return resource_storage_transfer_job_fmt.Errorf("Error setting status: %s", err) - } - if err := d.Set("last_modification_time", res.LastModificationTime); err != nil { - return resource_storage_transfer_job_fmt.Errorf("Error setting last_modification_time: %s", err) - } - if err := d.Set("creation_time", res.CreationTime); err != nil { - return resource_storage_transfer_job_fmt.Errorf("Error setting creation_time: %s", err) - } - if err := d.Set("deletion_time", res.DeletionTime); err != nil { - return resource_storage_transfer_job_fmt.Errorf("Error setting deletion_time: %s", err) - } - - err = d.Set("schedule", flattenTransferSchedule(res.Schedule)) - if err != nil { - return err - } - - err = d.Set("transfer_spec", flattenTransferSpec(res.TransferSpec, d)) - if err != nil { - return err - } - - 
return nil -} - -func resourceStorageTransferJobUpdate(d *resource_storage_transfer_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - transferJob := &resource_storage_transfer_job_storagetransfer.TransferJob{} - fieldMask := []string{} - - if d.HasChange("description") { - if v, ok := d.GetOk("description"); ok { - fieldMask = append(fieldMask, "description") - transferJob.Description = v.(string) - } - } - - if d.HasChange("status") { - if v, ok := d.GetOk("status"); ok { - fieldMask = append(fieldMask, "status") - transferJob.Status = v.(string) - } - } - - if d.HasChange("schedule") { - if v, ok := d.GetOk("schedule"); ok { - fieldMask = append(fieldMask, "schedule") - transferJob.Schedule = expandTransferSchedules(v.([]interface{})) - } - } - - if d.HasChange("transfer_spec") { - if v, ok := d.GetOk("transfer_spec"); ok { - fieldMask = append(fieldMask, "transfer_spec") - transferJob.TransferSpec = expandTransferSpecs(v.([]interface{})) - } - } - - updateRequest := &resource_storage_transfer_job_storagetransfer.UpdateTransferJobRequest{ - ProjectId: project, - TransferJob: transferJob, - } - - updateRequest.UpdateTransferJobFieldMask = resource_storage_transfer_job_strings.Join(fieldMask, ",") - - res, err := config.NewStorageTransferClient(userAgent).TransferJobs.Patch(d.Get("name").(string), updateRequest).Do() - if err != nil { - return err - } - - resource_storage_transfer_job_log.Printf("[DEBUG] Patched transfer job: %v\n\n", res.Name) - return nil -} - -func resourceStorageTransferJobDelete(d *resource_storage_transfer_job_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil 
{ - return err - } - - transferJobName := d.Get("name").(string) - - transferJob := &resource_storage_transfer_job_storagetransfer.TransferJob{ - Status: "DELETED", - } - - fieldMask := "status" - - updateRequest := &resource_storage_transfer_job_storagetransfer.UpdateTransferJobRequest{ - ProjectId: project, - TransferJob: transferJob, - } - - updateRequest.UpdateTransferJobFieldMask = fieldMask - - resource_storage_transfer_job_log.Printf("[DEBUG] Setting status to DELETE for: %v\n\n", transferJobName) - err = resource_storage_transfer_job_resource.Retry(1*resource_storage_transfer_job_time.Minute, func() *resource_storage_transfer_job_resource.RetryError { - _, err := config.NewStorageTransferClient(userAgent).TransferJobs.Patch(transferJobName, updateRequest).Do() - if err != nil { - return resource_storage_transfer_job_resource.RetryableError(err) - } - - return nil - }) - - if err != nil { - resource_storage_transfer_job_fmt.Printf("Error deleting transfer job %v: %v\n\n", transferJob, err) - return err - } - - resource_storage_transfer_job_log.Printf("[DEBUG] Deleted transfer job %v\n\n", transferJob) - - return nil -} - -func resourceStorageTransferJobStateImporter(d *resource_storage_transfer_job_schema.ResourceData, meta interface{}) ([]*resource_storage_transfer_job_schema.ResourceData, error) { - parts := resource_storage_transfer_job_strings.Split(d.Id(), "/") - switch len(parts) { - case 2: - if err := d.Set("project", parts[0]); err != nil { - return nil, resource_storage_transfer_job_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("name", resource_storage_transfer_job_fmt.Sprintf("transferJobs/%s", parts[1])); err != nil { - return nil, resource_storage_transfer_job_fmt.Errorf("Error setting name: %s", err) - } - default: - return nil, resource_storage_transfer_job_fmt.Errorf("Invalid transfer job specifier. 
Expecting {projectId}/{transferJobName}") - } - return []*resource_storage_transfer_job_schema.ResourceData{d}, nil -} - -func expandDates(dates []interface{}) *resource_storage_transfer_job_storagetransfer.Date { - if len(dates) == 0 || dates[0] == nil { - return nil - } - - dateMap := dates[0].(map[string]interface{}) - date := &resource_storage_transfer_job_storagetransfer.Date{} - if v, ok := dateMap["day"]; ok { - date.Day = int64(v.(int)) - } - - if v, ok := dateMap["month"]; ok { - date.Month = int64(v.(int)) - } - - if v, ok := dateMap["year"]; ok { - date.Year = int64(v.(int)) - } - - resource_storage_transfer_job_log.Printf("[DEBUG] not nil date: %#v", dates) - return date -} - -func flattenDate(date *resource_storage_transfer_job_storagetransfer.Date) []map[string]interface{} { - data := map[string]interface{}{ - "year": date.Year, - "month": date.Month, - "day": date.Day, - } - - return []map[string]interface{}{data} -} - -func expandTimeOfDays(times []interface{}) *resource_storage_transfer_job_storagetransfer.TimeOfDay { - if len(times) == 0 || times[0] == nil { - return nil - } - - timeMap := times[0].(map[string]interface{}) - resource_storage_transfer_job_time := &resource_storage_transfer_job_storagetransfer.TimeOfDay{} - if v, ok := timeMap["hours"]; ok { - resource_storage_transfer_job_time.Hours = int64(v.(int)) - } - - if v, ok := timeMap["minutes"]; ok { - resource_storage_transfer_job_time.Minutes = int64(v.(int)) - } - - if v, ok := timeMap["seconds"]; ok { - resource_storage_transfer_job_time.Seconds = int64(v.(int)) - } - - if v, ok := timeMap["nanos"]; ok { - resource_storage_transfer_job_time.Nanos = int64(v.(int)) - } - - return resource_storage_transfer_job_time -} - -func flattenTimeOfDay(timeOfDay *resource_storage_transfer_job_storagetransfer.TimeOfDay) []map[string]interface{} { - data := map[string]interface{}{ - "hours": timeOfDay.Hours, - "minutes": timeOfDay.Minutes, - "seconds": timeOfDay.Seconds, - "nanos": timeOfDay.Nanos, 
- } - - return []map[string]interface{}{data} -} - -func expandTransferSchedules(transferSchedules []interface{}) *resource_storage_transfer_job_storagetransfer.Schedule { - if len(transferSchedules) == 0 || transferSchedules[0] == nil { - return nil - } - - schedule := transferSchedules[0].(map[string]interface{}) - return &resource_storage_transfer_job_storagetransfer.Schedule{ - ScheduleStartDate: expandDates(schedule["schedule_start_date"].([]interface{})), - ScheduleEndDate: expandDates(schedule["schedule_end_date"].([]interface{})), - StartTimeOfDay: expandTimeOfDays(schedule["start_time_of_day"].([]interface{})), - } -} - -func flattenTransferSchedule(transferSchedule *resource_storage_transfer_job_storagetransfer.Schedule) []map[string][]map[string]interface{} { - data := map[string][]map[string]interface{}{ - "schedule_start_date": flattenDate(transferSchedule.ScheduleStartDate), - } - - if transferSchedule.ScheduleEndDate != nil { - data["schedule_end_date"] = flattenDate(transferSchedule.ScheduleEndDate) - } - - if transferSchedule.StartTimeOfDay != nil { - data["start_time_of_day"] = flattenTimeOfDay(transferSchedule.StartTimeOfDay) - } - - return []map[string][]map[string]interface{}{data} -} - -func expandGcsData(gcsDatas []interface{}) *resource_storage_transfer_job_storagetransfer.GcsData { - if len(gcsDatas) == 0 || gcsDatas[0] == nil { - return nil - } - - gcsData := gcsDatas[0].(map[string]interface{}) - var apiData = &resource_storage_transfer_job_storagetransfer.GcsData{ - BucketName: gcsData["bucket_name"].(string), - } - var path = gcsData["path"].(string) - apiData.Path = path - - return apiData -} - -func flattenGcsData(gcsData *resource_storage_transfer_job_storagetransfer.GcsData) []map[string]interface{} { - data := map[string]interface{}{ - "bucket_name": gcsData.BucketName, - "path": gcsData.Path, - } - return []map[string]interface{}{data} -} - -func expandAwsAccessKeys(awsAccessKeys []interface{}) 
*resource_storage_transfer_job_storagetransfer.AwsAccessKey { - if len(awsAccessKeys) == 0 || awsAccessKeys[0] == nil { - return nil - } - - awsAccessKey := awsAccessKeys[0].(map[string]interface{}) - return &resource_storage_transfer_job_storagetransfer.AwsAccessKey{ - AccessKeyId: awsAccessKey["access_key_id"].(string), - SecretAccessKey: awsAccessKey["secret_access_key"].(string), - } -} - -func flattenAwsAccessKeys(d *resource_storage_transfer_job_schema.ResourceData) []map[string]interface{} { - data := map[string]interface{}{ - "access_key_id": d.Get("transfer_spec.0.aws_s3_data_source.0.aws_access_key.0.access_key_id"), - "secret_access_key": d.Get("transfer_spec.0.aws_s3_data_source.0.aws_access_key.0.secret_access_key"), - } - - return []map[string]interface{}{data} -} - -func expandAwsS3Data(awsS3Datas []interface{}) *resource_storage_transfer_job_storagetransfer.AwsS3Data { - if len(awsS3Datas) == 0 || awsS3Datas[0] == nil { - return nil - } - - awsS3Data := awsS3Datas[0].(map[string]interface{}) - return &resource_storage_transfer_job_storagetransfer.AwsS3Data{ - BucketName: awsS3Data["bucket_name"].(string), - AwsAccessKey: expandAwsAccessKeys(awsS3Data["aws_access_key"].([]interface{})), - } -} - -func flattenAwsS3Data(awsS3Data *resource_storage_transfer_job_storagetransfer.AwsS3Data, d *resource_storage_transfer_job_schema.ResourceData) []map[string]interface{} { - data := map[string]interface{}{ - "bucket_name": awsS3Data.BucketName, - "aws_access_key": flattenAwsAccessKeys(d), - } - - return []map[string]interface{}{data} -} - -func expandHttpData(httpDatas []interface{}) *resource_storage_transfer_job_storagetransfer.HttpData { - if len(httpDatas) == 0 || httpDatas[0] == nil { - return nil - } - - httpData := httpDatas[0].(map[string]interface{}) - return &resource_storage_transfer_job_storagetransfer.HttpData{ - ListUrl: httpData["list_url"].(string), - } -} - -func flattenHttpData(httpData 
*resource_storage_transfer_job_storagetransfer.HttpData) []map[string]interface{} { - data := map[string]interface{}{ - "list_url": httpData.ListUrl, - } - - return []map[string]interface{}{data} -} - -func expandAzureCredentials(azureCredentials []interface{}) *resource_storage_transfer_job_storagetransfer.AzureCredentials { - if len(azureCredentials) == 0 || azureCredentials[0] == nil { - return nil - } - - azureCredential := azureCredentials[0].(map[string]interface{}) - return &resource_storage_transfer_job_storagetransfer.AzureCredentials{ - SasToken: azureCredential["sas_token"].(string), - } -} - -func flattenAzureCredentials(d *resource_storage_transfer_job_schema.ResourceData) []map[string]interface{} { - data := map[string]interface{}{ - "sas_token": d.Get("transfer_spec.0.azure_blob_storage_data_source.0.azure_credentials.0.sas_token"), - } - - return []map[string]interface{}{data} -} - -func expandAzureBlobStorageData(azureBlobStorageDatas []interface{}) *resource_storage_transfer_job_storagetransfer.AzureBlobStorageData { - if len(azureBlobStorageDatas) == 0 || azureBlobStorageDatas[0] == nil { - return nil - } - - azureBlobStorageData := azureBlobStorageDatas[0].(map[string]interface{}) - - return &resource_storage_transfer_job_storagetransfer.AzureBlobStorageData{ - Container: azureBlobStorageData["container"].(string), - Path: azureBlobStorageData["path"].(string), - StorageAccount: azureBlobStorageData["storage_account"].(string), - AzureCredentials: expandAzureCredentials(azureBlobStorageData["azure_credentials"].([]interface{})), - } -} - -func flattenAzureBlobStorageData(azureBlobStorageData *resource_storage_transfer_job_storagetransfer.AzureBlobStorageData, d *resource_storage_transfer_job_schema.ResourceData) []map[string]interface{} { - data := map[string]interface{}{ - "container": azureBlobStorageData.Container, - "path": azureBlobStorageData.Path, - "storage_account": azureBlobStorageData.StorageAccount, - "azure_credentials": 
flattenAzureCredentials(d), - } - - return []map[string]interface{}{data} -} - -func expandObjectConditions(conditions []interface{}) *resource_storage_transfer_job_storagetransfer.ObjectConditions { - if len(conditions) == 0 || conditions[0] == nil { - return nil - } - - condition := conditions[0].(map[string]interface{}) - return &resource_storage_transfer_job_storagetransfer.ObjectConditions{ - ExcludePrefixes: convertStringArr(condition["exclude_prefixes"].([]interface{})), - IncludePrefixes: convertStringArr(condition["include_prefixes"].([]interface{})), - MaxTimeElapsedSinceLastModification: condition["max_time_elapsed_since_last_modification"].(string), - MinTimeElapsedSinceLastModification: condition["min_time_elapsed_since_last_modification"].(string), - } -} - -func flattenObjectCondition(condition *resource_storage_transfer_job_storagetransfer.ObjectConditions) []map[string]interface{} { - data := map[string]interface{}{ - "exclude_prefixes": condition.ExcludePrefixes, - "include_prefixes": condition.IncludePrefixes, - "max_time_elapsed_since_last_modification": condition.MaxTimeElapsedSinceLastModification, - "min_time_elapsed_since_last_modification": condition.MinTimeElapsedSinceLastModification, - } - return []map[string]interface{}{data} -} - -func expandTransferOptions(options []interface{}) *resource_storage_transfer_job_storagetransfer.TransferOptions { - if len(options) == 0 || options[0] == nil { - return nil - } - - option := options[0].(map[string]interface{}) - return &resource_storage_transfer_job_storagetransfer.TransferOptions{ - DeleteObjectsFromSourceAfterTransfer: option["delete_objects_from_source_after_transfer"].(bool), - DeleteObjectsUniqueInSink: option["delete_objects_unique_in_sink"].(bool), - OverwriteObjectsAlreadyExistingInSink: option["overwrite_objects_already_existing_in_sink"].(bool), - } -} - -func flattenTransferOption(option *resource_storage_transfer_job_storagetransfer.TransferOptions) []map[string]interface{} { - 
data := map[string]interface{}{ - "delete_objects_from_source_after_transfer": option.DeleteObjectsFromSourceAfterTransfer, - "delete_objects_unique_in_sink": option.DeleteObjectsUniqueInSink, - "overwrite_objects_already_existing_in_sink": option.OverwriteObjectsAlreadyExistingInSink, - } - - return []map[string]interface{}{data} -} - -func expandTransferSpecs(transferSpecs []interface{}) *resource_storage_transfer_job_storagetransfer.TransferSpec { - if len(transferSpecs) == 0 || transferSpecs[0] == nil { - return nil - } - - transferSpec := transferSpecs[0].(map[string]interface{}) - return &resource_storage_transfer_job_storagetransfer.TransferSpec{ - GcsDataSink: expandGcsData(transferSpec["gcs_data_sink"].([]interface{})), - ObjectConditions: expandObjectConditions(transferSpec["object_conditions"].([]interface{})), - TransferOptions: expandTransferOptions(transferSpec["transfer_options"].([]interface{})), - GcsDataSource: expandGcsData(transferSpec["gcs_data_source"].([]interface{})), - AwsS3DataSource: expandAwsS3Data(transferSpec["aws_s3_data_source"].([]interface{})), - HttpDataSource: expandHttpData(transferSpec["http_data_source"].([]interface{})), - AzureBlobStorageDataSource: expandAzureBlobStorageData(transferSpec["azure_blob_storage_data_source"].([]interface{})), - } -} - -func flattenTransferSpec(transferSpec *resource_storage_transfer_job_storagetransfer.TransferSpec, d *resource_storage_transfer_job_schema.ResourceData) []map[string][]map[string]interface{} { - - data := map[string][]map[string]interface{}{ - "gcs_data_sink": flattenGcsData(transferSpec.GcsDataSink), - } - - if transferSpec.ObjectConditions != nil { - data["object_conditions"] = flattenObjectCondition(transferSpec.ObjectConditions) - } - if transferSpec.TransferOptions != nil { - data["transfer_options"] = flattenTransferOption(transferSpec.TransferOptions) - } - if transferSpec.GcsDataSource != nil { - data["gcs_data_source"] = flattenGcsData(transferSpec.GcsDataSource) - } 
else if transferSpec.AwsS3DataSource != nil { - data["aws_s3_data_source"] = flattenAwsS3Data(transferSpec.AwsS3DataSource, d) - } else if transferSpec.HttpDataSource != nil { - data["http_data_source"] = flattenHttpData(transferSpec.HttpDataSource) - } else if transferSpec.AzureBlobStorageDataSource != nil { - data["azure_blob_storage_data_source"] = flattenAzureBlobStorageData(transferSpec.AzureBlobStorageDataSource, d) - } - - return []map[string][]map[string]interface{}{data} -} - -func resourceTagsTagBinding() *resource_tags_tag_binding_schema.Resource { - return &resource_tags_tag_binding_schema.Resource{ - Create: resourceTagsTagBindingCreate, - Read: resourceTagsTagBindingRead, - Delete: resourceTagsTagBindingDelete, - - Importer: &resource_tags_tag_binding_schema.ResourceImporter{ - State: resourceTagsTagBindingImport, - }, - - Timeouts: &resource_tags_tag_binding_schema.ResourceTimeout{ - Create: resource_tags_tag_binding_schema.DefaultTimeout(4 * resource_tags_tag_binding_time.Minute), - Delete: resource_tags_tag_binding_schema.DefaultTimeout(4 * resource_tags_tag_binding_time.Minute), - }, - - Schema: map[string]*resource_tags_tag_binding_schema.Schema{ - "parent": { - Type: resource_tags_tag_binding_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The full resource name of the resource the TagValue is bound to. E.g. //cloudresourcemanager.googleapis.com/projects/123`, - }, - "tag_value": { - Type: resource_tags_tag_binding_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The TagValue of the TagBinding. Must be of the form tagValues/456.`, - }, - "name": { - Type: resource_tags_tag_binding_schema.TypeString, - Computed: true, - Description: `The generated id for the TagBinding. 
This is a string of the form: 'tagBindings/{full-resource-name}/{tag-value-name}'`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceTagsTagBindingCreate(d *resource_tags_tag_binding_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - parentProp, err := expandNestedTagsTagBindingParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(resource_tags_tag_binding_reflect.ValueOf(parentProp)) && (ok || !resource_tags_tag_binding_reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - tagValueProp, err := expandNestedTagsTagBindingTagValue(d.Get("tag_value"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tag_value"); !isEmptyValue(resource_tags_tag_binding_reflect.ValueOf(tagValueProp)) && (ok || !resource_tags_tag_binding_reflect.DeepEqual(v, tagValueProp)) { - obj["tagValue"] = tagValueProp - } - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagBindings") - if err != nil { - return err - } - - resource_tags_tag_binding_log.Printf("[DEBUG] Creating new TagBinding: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_tags_tag_binding_schema.TimeoutCreate)) - if err != nil { - return resource_tags_tag_binding_fmt.Errorf("Error creating TagBinding: %s", err) - } - - id, err := replaceVars(d, config, "tagBindings/{{name}}") - if err != nil { - return resource_tags_tag_binding_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = tagsOperationWaitTimeWithResponse( - config, res, &opRes, "Creating TagBinding", userAgent, - 
d.Timeout(resource_tags_tag_binding_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_tags_tag_binding_fmt.Errorf("Error waiting to create TagBinding: %s", err) - } - - if _, ok := opRes["tagBindings"]; ok { - opRes, err = flattenNestedTagsTagBinding(d, meta, opRes) - if err != nil { - return resource_tags_tag_binding_fmt.Errorf("Error getting nested object from operation response: %s", err) - } - if opRes == nil { - - return resource_tags_tag_binding_fmt.Errorf("Error decoding response from operation, could not find nested object") - } - } - if err := d.Set("name", flattenNestedTagsTagBindingName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "tagBindings/{{name}}") - if err != nil { - return resource_tags_tag_binding_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_tags_tag_binding_log.Printf("[DEBUG] Finished creating TagBinding %q: %#v", d.Id(), res) - - return resourceTagsTagBindingRead(d, meta) -} - -func resourceTagsTagBindingRead(d *resource_tags_tag_binding_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagBindings/?parent={{parent}}&pageSize=300") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_tags_tag_binding_fmt.Sprintf("TagsTagBinding %q", d.Id())) - } - - res, err = flattenNestedTagsTagBinding(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_tags_tag_binding_log.Printf("[DEBUG] Removing TagsTagBinding because it couldn't be matched.") - d.SetId("") - return nil - } - - if err := d.Set("name", 
flattenNestedTagsTagBindingName(res["name"], d, config)); err != nil { - return resource_tags_tag_binding_fmt.Errorf("Error reading TagBinding: %s", err) - } - if err := d.Set("parent", flattenNestedTagsTagBindingParent(res["parent"], d, config)); err != nil { - return resource_tags_tag_binding_fmt.Errorf("Error reading TagBinding: %s", err) - } - if err := d.Set("tag_value", flattenNestedTagsTagBindingTagValue(res["tagValue"], d, config)); err != nil { - return resource_tags_tag_binding_fmt.Errorf("Error reading TagBinding: %s", err) - } - - return nil -} - -func resourceTagsTagBindingDelete(d *resource_tags_tag_binding_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagBindings/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_tags_tag_binding_log.Printf("[DEBUG] Deleting TagBinding %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_tags_tag_binding_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TagBinding") - } - - err = tagsOperationWaitTime( - config, res, "Deleting TagBinding", userAgent, - d.Timeout(resource_tags_tag_binding_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_tags_tag_binding_log.Printf("[DEBUG] Finished deleting TagBinding %q: %#v", d.Id(), res) - return nil -} - -func resourceTagsTagBindingImport(d *resource_tags_tag_binding_schema.ResourceData, meta interface{}) ([]*resource_tags_tag_binding_schema.ResourceData, error) { - config := meta.(*Config) - - if err := parseImportId([]string{ - "tagBindings/(?P.+)", - "(?P.+)", - }, d, config); err != nil { - return nil, err - } - - name := 
d.Get("name").(string) - d.SetId(name) - - return []*resource_tags_tag_binding_schema.ResourceData{d}, nil -} - -func flattenNestedTagsTagBindingName(v interface{}, d *resource_tags_tag_binding_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - parts := resource_tags_tag_binding_strings.Split(v.(string), "/") - return resource_tags_tag_binding_strings.Join(parts[len(parts)-3:], "/") -} - -func flattenNestedTagsTagBindingParent(v interface{}, d *resource_tags_tag_binding_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenNestedTagsTagBindingTagValue(v interface{}, d *resource_tags_tag_binding_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandNestedTagsTagBindingParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandNestedTagsTagBindingTagValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func flattenNestedTagsTagBinding(d *resource_tags_tag_binding_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - var v interface{} - var ok bool - - v, ok = res["tagBindings"] - if !ok || v == nil { - return nil, nil - } - - switch v.(type) { - case []interface{}: - break - case map[string]interface{}: - - v = []interface{}{v} - default: - return nil, resource_tags_tag_binding_fmt.Errorf("expected list or map for value tagBindings. 
Actual value: %v", v) - } - - _, item, err := resourceTagsTagBindingFindNestedObjectInList(d, meta, v.([]interface{})) - if err != nil { - return nil, err - } - return item, nil -} - -func resourceTagsTagBindingFindNestedObjectInList(d *resource_tags_tag_binding_schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { - expectedName := d.Get("name") - expectedFlattenedName := flattenNestedTagsTagBindingName(expectedName, d, meta.(*Config)) - - for idx, itemRaw := range items { - if itemRaw == nil { - continue - } - item := itemRaw.(map[string]interface{}) - - itemName := flattenNestedTagsTagBindingName(item["name"], d, meta.(*Config)) - - if !(isEmptyValue(resource_tags_tag_binding_reflect.ValueOf(itemName)) && isEmptyValue(resource_tags_tag_binding_reflect.ValueOf(expectedFlattenedName))) && !resource_tags_tag_binding_reflect.DeepEqual(itemName, expectedFlattenedName) { - resource_tags_tag_binding_log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) - continue - } - resource_tags_tag_binding_log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) - return idx, item, nil - } - return -1, nil, nil -} - -func resourceTagsTagKey() *resource_tags_tag_key_schema.Resource { - return &resource_tags_tag_key_schema.Resource{ - Create: resourceTagsTagKeyCreate, - Read: resourceTagsTagKeyRead, - Update: resourceTagsTagKeyUpdate, - Delete: resourceTagsTagKeyDelete, - - Importer: &resource_tags_tag_key_schema.ResourceImporter{ - State: resourceTagsTagKeyImport, - }, - - Timeouts: &resource_tags_tag_key_schema.ResourceTimeout{ - Create: resource_tags_tag_key_schema.DefaultTimeout(4 * resource_tags_tag_key_time.Minute), - Update: resource_tags_tag_key_schema.DefaultTimeout(4 * resource_tags_tag_key_time.Minute), - Delete: resource_tags_tag_key_schema.DefaultTimeout(4 * resource_tags_tag_key_time.Minute), - }, - - Schema: 
map[string]*resource_tags_tag_key_schema.Schema{ - "parent": { - Type: resource_tags_tag_key_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Input only. The resource name of the new TagKey's parent. Must be of the form organizations/{org_id}.`, - }, - "short_name": { - Type: resource_tags_tag_key_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_tags_tag_key_validation.StringLenBetween(1, 63), - Description: `Input only. The user friendly name for a TagKey. The short name should be unique for TagKeys within the same tag namespace. - -The short name must be 1-63 characters, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between.`, - }, - "description": { - Type: resource_tags_tag_key_schema.TypeString, - Optional: true, - ValidateFunc: resource_tags_tag_key_validation.StringLenBetween(0, 256), - Description: `User-assigned description of the TagKey. Must not exceed 256 characters.`, - }, - "create_time": { - Type: resource_tags_tag_key_schema.TypeString, - Computed: true, - Description: `Output only. Creation time. - -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "name": { - Type: resource_tags_tag_key_schema.TypeString, - Computed: true, - Description: `The generated numeric id for the TagKey.`, - }, - "namespaced_name": { - Type: resource_tags_tag_key_schema.TypeString, - Computed: true, - Description: `Output only. Namespaced name of the TagKey.`, - }, - "update_time": { - Type: resource_tags_tag_key_schema.TypeString, - Computed: true, - Description: `Output only. Update time. - -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceTagsTagKeyCreate(d *resource_tags_tag_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - parentProp, err := expandTagsTagKeyParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(resource_tags_tag_key_reflect.ValueOf(parentProp)) && (ok || !resource_tags_tag_key_reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - shortNameProp, err := expandTagsTagKeyShortName(d.Get("short_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("short_name"); !isEmptyValue(resource_tags_tag_key_reflect.ValueOf(shortNameProp)) && (ok || !resource_tags_tag_key_reflect.DeepEqual(v, shortNameProp)) { - obj["shortName"] = shortNameProp - } - descriptionProp, err := expandTagsTagKeyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_tags_tag_key_reflect.ValueOf(descriptionProp)) && (ok || !resource_tags_tag_key_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - lockName, err := replaceVars(d, config, "tagKeys/{{parent}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagKeys") - if err != nil { - return err - } - - resource_tags_tag_key_log.Printf("[DEBUG] Creating new TagKey: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_tags_tag_key_schema.TimeoutCreate)) - if 
err != nil { - return resource_tags_tag_key_fmt.Errorf("Error creating TagKey: %s", err) - } - - id, err := replaceVars(d, config, "tagKeys/{{name}}") - if err != nil { - return resource_tags_tag_key_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = tagsOperationWaitTimeWithResponse( - config, res, &opRes, "Creating TagKey", userAgent, - d.Timeout(resource_tags_tag_key_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_tags_tag_key_fmt.Errorf("Error waiting to create TagKey: %s", err) - } - - if err := d.Set("name", flattenTagsTagKeyName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "tagKeys/{{name}}") - if err != nil { - return resource_tags_tag_key_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_tags_tag_key_log.Printf("[DEBUG] Finished creating TagKey %q: %#v", d.Id(), res) - - return resourceTagsTagKeyRead(d, meta) -} - -func resourceTagsTagKeyRead(d *resource_tags_tag_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagKeys/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_tags_tag_key_fmt.Sprintf("TagsTagKey %q", d.Id())) - } - - if err := d.Set("name", flattenTagsTagKeyName(res["name"], d, config)); err != nil { - return resource_tags_tag_key_fmt.Errorf("Error reading TagKey: %s", err) - } - if err := d.Set("parent", flattenTagsTagKeyParent(res["parent"], d, config)); err != nil { - return resource_tags_tag_key_fmt.Errorf("Error reading TagKey: %s", err) - } - if err := 
d.Set("short_name", flattenTagsTagKeyShortName(res["shortName"], d, config)); err != nil { - return resource_tags_tag_key_fmt.Errorf("Error reading TagKey: %s", err) - } - if err := d.Set("namespaced_name", flattenTagsTagKeyNamespacedName(res["namespacedName"], d, config)); err != nil { - return resource_tags_tag_key_fmt.Errorf("Error reading TagKey: %s", err) - } - if err := d.Set("description", flattenTagsTagKeyDescription(res["description"], d, config)); err != nil { - return resource_tags_tag_key_fmt.Errorf("Error reading TagKey: %s", err) - } - if err := d.Set("create_time", flattenTagsTagKeyCreateTime(res["createTime"], d, config)); err != nil { - return resource_tags_tag_key_fmt.Errorf("Error reading TagKey: %s", err) - } - if err := d.Set("update_time", flattenTagsTagKeyUpdateTime(res["updateTime"], d, config)); err != nil { - return resource_tags_tag_key_fmt.Errorf("Error reading TagKey: %s", err) - } - - return nil -} - -func resourceTagsTagKeyUpdate(d *resource_tags_tag_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandTagsTagKeyDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_tags_tag_key_reflect.ValueOf(v)) && (ok || !resource_tags_tag_key_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - lockName, err := replaceVars(d, config, "tagKeys/{{parent}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagKeys/{{name}}") - if err != nil { - return err - } - - resource_tags_tag_key_log.Printf("[DEBUG] Updating TagKey %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - 
updateMask = append(updateMask, "description") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_tags_tag_key_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_tags_tag_key_schema.TimeoutUpdate)) - - if err != nil { - return resource_tags_tag_key_fmt.Errorf("Error updating TagKey %q: %s", d.Id(), err) - } else { - resource_tags_tag_key_log.Printf("[DEBUG] Finished updating TagKey %q: %#v", d.Id(), res) - } - - err = tagsOperationWaitTime( - config, res, "Updating TagKey", userAgent, - d.Timeout(resource_tags_tag_key_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceTagsTagKeyRead(d, meta) -} - -func resourceTagsTagKeyDelete(d *resource_tags_tag_key_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "tagKeys/{{parent}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagKeys/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_tags_tag_key_log.Printf("[DEBUG] Deleting TagKey %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_tags_tag_key_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TagKey") - } - - err = tagsOperationWaitTime( - config, res, "Deleting TagKey", userAgent, - d.Timeout(resource_tags_tag_key_schema.TimeoutDelete)) - - if err != nil { - return err - } - - 
resource_tags_tag_key_log.Printf("[DEBUG] Finished deleting TagKey %q: %#v", d.Id(), res) - return nil -} - -func resourceTagsTagKeyImport(d *resource_tags_tag_key_schema.ResourceData, meta interface{}) ([]*resource_tags_tag_key_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "tagKeys/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "tagKeys/{{name}}") - if err != nil { - return nil, resource_tags_tag_key_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_tags_tag_key_schema.ResourceData{d}, nil -} - -func flattenTagsTagKeyName(v interface{}, d *resource_tags_tag_key_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenTagsTagKeyParent(v interface{}, d *resource_tags_tag_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagKeyShortName(v interface{}, d *resource_tags_tag_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagKeyNamespacedName(v interface{}, d *resource_tags_tag_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagKeyDescription(v interface{}, d *resource_tags_tag_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagKeyCreateTime(v interface{}, d *resource_tags_tag_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagKeyUpdateTime(v interface{}, d *resource_tags_tag_key_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandTagsTagKeyParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTagsTagKeyShortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTagsTagKeyDescription(v 
interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceTagsTagValue() *resource_tags_tag_value_schema.Resource { - return &resource_tags_tag_value_schema.Resource{ - Create: resourceTagsTagValueCreate, - Read: resourceTagsTagValueRead, - Update: resourceTagsTagValueUpdate, - Delete: resourceTagsTagValueDelete, - - Importer: &resource_tags_tag_value_schema.ResourceImporter{ - State: resourceTagsTagValueImport, - }, - - Timeouts: &resource_tags_tag_value_schema.ResourceTimeout{ - Create: resource_tags_tag_value_schema.DefaultTimeout(4 * resource_tags_tag_value_time.Minute), - Update: resource_tags_tag_value_schema.DefaultTimeout(4 * resource_tags_tag_value_time.Minute), - Delete: resource_tags_tag_value_schema.DefaultTimeout(4 * resource_tags_tag_value_time.Minute), - }, - - Schema: map[string]*resource_tags_tag_value_schema.Schema{ - "parent": { - Type: resource_tags_tag_value_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Input only. The resource name of the new TagValue's parent. Must be of the form tagKeys/{tag_key_id}.`, - }, - "short_name": { - Type: resource_tags_tag_value_schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: resource_tags_tag_value_validation.StringLenBetween(1, 63), - Description: `Input only. User-assigned short name for TagValue. The short name should be unique for TagValues within the same parent TagKey. - -The short name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between.`, - }, - "description": { - Type: resource_tags_tag_value_schema.TypeString, - Optional: true, - ValidateFunc: resource_tags_tag_value_validation.StringLenBetween(0, 256), - Description: `User-assigned description of the TagValue. 
Must not exceed 256 characters.`, - }, - "create_time": { - Type: resource_tags_tag_value_schema.TypeString, - Computed: true, - Description: `Output only. Creation time. - -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - "name": { - Type: resource_tags_tag_value_schema.TypeString, - Computed: true, - Description: `The generated numeric id for the TagValue.`, - }, - "namespaced_name": { - Type: resource_tags_tag_value_schema.TypeString, - Computed: true, - Description: `Output only. Namespaced name of the TagValue. Will be in the format {organizationId}/{tag_key_short_name}/{shortName}.`, - }, - "update_time": { - Type: resource_tags_tag_value_schema.TypeString, - Computed: true, - Description: `Output only. Update time. - -A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceTagsTagValueCreate(d *resource_tags_tag_value_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - parentProp, err := expandTagsTagValueParent(d.Get("parent"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("parent"); !isEmptyValue(resource_tags_tag_value_reflect.ValueOf(parentProp)) && (ok || !resource_tags_tag_value_reflect.DeepEqual(v, parentProp)) { - obj["parent"] = parentProp - } - shortNameProp, err := expandTagsTagValueShortName(d.Get("short_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("short_name"); !isEmptyValue(resource_tags_tag_value_reflect.ValueOf(shortNameProp)) && (ok || !resource_tags_tag_value_reflect.DeepEqual(v, shortNameProp)) { - 
obj["shortName"] = shortNameProp - } - descriptionProp, err := expandTagsTagValueDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_tags_tag_value_reflect.ValueOf(descriptionProp)) && (ok || !resource_tags_tag_value_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - lockName, err := replaceVars(d, config, "tagValues/{{parent}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagValues") - if err != nil { - return err - } - - resource_tags_tag_value_log.Printf("[DEBUG] Creating new TagValue: %#v", obj) - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_tags_tag_value_schema.TimeoutCreate)) - if err != nil { - return resource_tags_tag_value_fmt.Errorf("Error creating TagValue: %s", err) - } - - id, err := replaceVars(d, config, "tagValues/{{name}}") - if err != nil { - return resource_tags_tag_value_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = tagsOperationWaitTimeWithResponse( - config, res, &opRes, "Creating TagValue", userAgent, - d.Timeout(resource_tags_tag_value_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_tags_tag_value_fmt.Errorf("Error waiting to create TagValue: %s", err) - } - - if err := d.Set("name", flattenTagsTagValueName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "tagValues/{{name}}") - if err != nil { - return resource_tags_tag_value_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_tags_tag_value_log.Printf("[DEBUG] Finished creating TagValue %q: %#v", d.Id(), res) - - return 
resourceTagsTagValueRead(d, meta) -} - -func resourceTagsTagValueRead(d *resource_tags_tag_value_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagValues/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_tags_tag_value_fmt.Sprintf("TagsTagValue %q", d.Id())) - } - - if err := d.Set("name", flattenTagsTagValueName(res["name"], d, config)); err != nil { - return resource_tags_tag_value_fmt.Errorf("Error reading TagValue: %s", err) - } - if err := d.Set("parent", flattenTagsTagValueParent(res["parent"], d, config)); err != nil { - return resource_tags_tag_value_fmt.Errorf("Error reading TagValue: %s", err) - } - if err := d.Set("short_name", flattenTagsTagValueShortName(res["shortName"], d, config)); err != nil { - return resource_tags_tag_value_fmt.Errorf("Error reading TagValue: %s", err) - } - if err := d.Set("namespaced_name", flattenTagsTagValueNamespacedName(res["namespacedName"], d, config)); err != nil { - return resource_tags_tag_value_fmt.Errorf("Error reading TagValue: %s", err) - } - if err := d.Set("description", flattenTagsTagValueDescription(res["description"], d, config)); err != nil { - return resource_tags_tag_value_fmt.Errorf("Error reading TagValue: %s", err) - } - if err := d.Set("create_time", flattenTagsTagValueCreateTime(res["createTime"], d, config)); err != nil { - return resource_tags_tag_value_fmt.Errorf("Error reading TagValue: %s", err) - } - if err := d.Set("update_time", flattenTagsTagValueUpdateTime(res["updateTime"], d, config)); err != nil { - return resource_tags_tag_value_fmt.Errorf("Error reading 
TagValue: %s", err) - } - - return nil -} - -func resourceTagsTagValueUpdate(d *resource_tags_tag_value_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - obj := make(map[string]interface{}) - descriptionProp, err := expandTagsTagValueDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_tags_tag_value_reflect.ValueOf(v)) && (ok || !resource_tags_tag_value_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - - lockName, err := replaceVars(d, config, "tagValues/{{parent}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagValues/{{name}}") - if err != nil { - return err - } - - resource_tags_tag_value_log.Printf("[DEBUG] Updating TagValue %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_tags_tag_value_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_tags_tag_value_schema.TimeoutUpdate)) - - if err != nil { - return resource_tags_tag_value_fmt.Errorf("Error updating TagValue %q: %s", d.Id(), err) - } else { - resource_tags_tag_value_log.Printf("[DEBUG] Finished updating TagValue %q: %#v", d.Id(), res) - } - - err = tagsOperationWaitTime( - config, res, "Updating TagValue", userAgent, - d.Timeout(resource_tags_tag_value_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return 
resourceTagsTagValueRead(d, meta) -} - -func resourceTagsTagValueDelete(d *resource_tags_tag_value_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - lockName, err := replaceVars(d, config, "tagValues/{{parent}}") - if err != nil { - return err - } - mutexKV.Lock(lockName) - defer mutexKV.Unlock(lockName) - - url, err := replaceVars(d, config, "{{TagsBasePath}}tagValues/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_tags_tag_value_log.Printf("[DEBUG] Deleting TagValue %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_tags_tag_value_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "TagValue") - } - - err = tagsOperationWaitTime( - config, res, "Deleting TagValue", userAgent, - d.Timeout(resource_tags_tag_value_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_tags_tag_value_log.Printf("[DEBUG] Finished deleting TagValue %q: %#v", d.Id(), res) - return nil -} - -func resourceTagsTagValueImport(d *resource_tags_tag_value_schema.ResourceData, meta interface{}) ([]*resource_tags_tag_value_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "tagValues/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "tagValues/{{name}}") - if err != nil { - return nil, resource_tags_tag_value_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_tags_tag_value_schema.ResourceData{d}, nil -} - -func flattenTagsTagValueName(v interface{}, d *resource_tags_tag_value_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } 
- return NameFromSelfLinkStateFunc(v) -} - -func flattenTagsTagValueParent(v interface{}, d *resource_tags_tag_value_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagValueShortName(v interface{}, d *resource_tags_tag_value_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagValueNamespacedName(v interface{}, d *resource_tags_tag_value_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagValueDescription(v interface{}, d *resource_tags_tag_value_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagValueCreateTime(v interface{}, d *resource_tags_tag_value_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTagsTagValueUpdateTime(v interface{}, d *resource_tags_tag_value_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandTagsTagValueParent(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTagsTagValueShortName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTagsTagValueDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func compareTpuNodeSchedulingConfig(k, old, new string, d *resource_tpu_node_schema.ResourceData) bool { - if k == "scheduling_config.0.preemptible" { - return old == "" && new == "false" - } - if k == "scheduling_config.#" { - o, n := d.GetChange("scheduling_config.0.preemptible") - return o.(bool) == n.(bool) - } - return false -} - -func tpuNodeCustomizeDiff(_ resource_tpu_node_context.Context, diff *resource_tpu_node_schema.ResourceDiff, meta interface{}) error { - old, new := diff.GetChange("network") - config := meta.(*Config) - - networkLinkRegex := resource_tpu_node_regexp.MustCompile("projects/(.+)/global/networks/(.+)") - - var pid string - - if 
networkLinkRegex.MatchString(new.(string)) { - parts := networkLinkRegex.FindStringSubmatch(new.(string)) - pid = parts[1] - } - - project, err := config.NewResourceManagerClient(config.userAgent).Projects.Get(pid).Do() - if err != nil { - return resource_tpu_node_fmt.Errorf("Failed to retrieve project, pid: %s, err: %s", pid, err) - } - - if networkLinkRegex.MatchString(old.(string)) { - parts := networkLinkRegex.FindStringSubmatch(old.(string)) - i, err := resource_tpu_node_strconv.ParseInt(parts[1], 10, 64) - if err == nil { - if project.ProjectNumber == i { - if err := diff.SetNew("network", old); err != nil { - return err - } - return nil - } - } - } - return nil -} - -func resourceTPUNode() *resource_tpu_node_schema.Resource { - return &resource_tpu_node_schema.Resource{ - Create: resourceTPUNodeCreate, - Read: resourceTPUNodeRead, - Update: resourceTPUNodeUpdate, - Delete: resourceTPUNodeDelete, - - Importer: &resource_tpu_node_schema.ResourceImporter{ - State: resourceTPUNodeImport, - }, - - Timeouts: &resource_tpu_node_schema.ResourceTimeout{ - Create: resource_tpu_node_schema.DefaultTimeout(15 * resource_tpu_node_time.Minute), - Update: resource_tpu_node_schema.DefaultTimeout(15 * resource_tpu_node_time.Minute), - Delete: resource_tpu_node_schema.DefaultTimeout(15 * resource_tpu_node_time.Minute), - }, - - CustomizeDiff: tpuNodeCustomizeDiff, - - Schema: map[string]*resource_tpu_node_schema.Schema{ - "accelerator_type": { - Type: resource_tpu_node_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The type of hardware accelerators associated with this node.`, - }, - "name": { - Type: resource_tpu_node_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The immutable name of the TPU.`, - }, - "tensorflow_version": { - Type: resource_tpu_node_schema.TypeString, - Required: true, - Description: `The version of Tensorflow running in the Node.`, - }, - "cidr_block": { - Type: resource_tpu_node_schema.TypeString, - 
Computed: true, - Optional: true, - ForceNew: true, - Description: `The CIDR block that the TPU node will use when selecting an IP -address. This CIDR block must be a /29 block; the Compute Engine -networks API forbids a smaller block, and using a larger block would -be wasteful (a node can only consume one IP address). - -Errors will occur if the CIDR block has already been used for a -currently existing TPU node, the CIDR block conflicts with any -subnetworks in the user's provided network, or the provided network -is peered with another network that is using that CIDR block.`, - ConflictsWith: []string{"use_service_networking"}, - }, - "description": { - Type: resource_tpu_node_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The user-supplied description of the TPU. Maximum of 512 characters.`, - }, - "labels": { - Type: resource_tpu_node_schema.TypeMap, - Optional: true, - ForceNew: true, - Description: `Resource labels to represent user provided metadata.`, - Elem: &resource_tpu_node_schema.Schema{Type: resource_tpu_node_schema.TypeString}, - }, - "network": { - Type: resource_tpu_node_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `The name of a network to peer the TPU node to. It must be a -preexisting Compute Engine network inside of the project on which -this API has been activated. 
If none is provided, "default" will be -used.`, - }, - "scheduling_config": { - Type: resource_tpu_node_schema.TypeList, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareTpuNodeSchedulingConfig, - Description: `Sets the scheduling options for this TPU instance.`, - MaxItems: 1, - Elem: &resource_tpu_node_schema.Resource{ - Schema: map[string]*resource_tpu_node_schema.Schema{ - "preemptible": { - Type: resource_tpu_node_schema.TypeBool, - Required: true, - ForceNew: true, - DiffSuppressFunc: compareTpuNodeSchedulingConfig, - Description: `Defines whether the TPU instance is preemptible.`, - }, - }, - }, - }, - "use_service_networking": { - Type: resource_tpu_node_schema.TypeBool, - Optional: true, - ForceNew: true, - Description: `Whether the VPC peering for the node is set up through Service Networking API. -The VPC Peering should be set up before provisioning the node. If this field is set, -cidr_block field should not be specified. If the network that you want to peer the -TPU Node to is a Shared VPC network, the node must be created with this this field enabled.`, - Default: false, - ConflictsWith: []string{"cidr_block"}, - }, - "zone": { - Type: resource_tpu_node_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The GCP location for the TPU. If it is not provided, the provider zone is used.`, - }, - "network_endpoints": { - Type: resource_tpu_node_schema.TypeList, - Computed: true, - Description: `The network endpoints where TPU workers can be accessed and sent work. 
-It is recommended that Tensorflow clients of the node first reach out -to the first (index 0) entry.`, - Elem: &resource_tpu_node_schema.Resource{ - Schema: map[string]*resource_tpu_node_schema.Schema{ - "ip_address": { - Type: resource_tpu_node_schema.TypeString, - Computed: true, - Description: `The IP address of this network endpoint.`, - }, - "port": { - Type: resource_tpu_node_schema.TypeInt, - Computed: true, - Description: `The port of this network endpoint.`, - }, - }, - }, - }, - "service_account": { - Type: resource_tpu_node_schema.TypeString, - Computed: true, - Description: `The service account used to run the tensor flow services within the -node. To share resources, including Google Cloud Storage data, with -the Tensorflow job running in the Node, this account must have -permissions to that data.`, - }, - "project": { - Type: resource_tpu_node_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceTPUNodeCreate(d *resource_tpu_node_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandTPUNodeName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_tpu_node_reflect.ValueOf(nameProp)) && (ok || !resource_tpu_node_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandTPUNodeDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_tpu_node_reflect.ValueOf(descriptionProp)) && (ok || !resource_tpu_node_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - acceleratorTypeProp, err := expandTPUNodeAcceleratorType(d.Get("accelerator_type"), d, config) - if err != nil { - 
return err - } else if v, ok := d.GetOkExists("accelerator_type"); !isEmptyValue(resource_tpu_node_reflect.ValueOf(acceleratorTypeProp)) && (ok || !resource_tpu_node_reflect.DeepEqual(v, acceleratorTypeProp)) { - obj["acceleratorType"] = acceleratorTypeProp - } - tensorflowVersionProp, err := expandTPUNodeTensorflowVersion(d.Get("tensorflow_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tensorflow_version"); !isEmptyValue(resource_tpu_node_reflect.ValueOf(tensorflowVersionProp)) && (ok || !resource_tpu_node_reflect.DeepEqual(v, tensorflowVersionProp)) { - obj["tensorflowVersion"] = tensorflowVersionProp - } - networkProp, err := expandTPUNodeNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_tpu_node_reflect.ValueOf(networkProp)) && (ok || !resource_tpu_node_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - cidrBlockProp, err := expandTPUNodeCidrBlock(d.Get("cidr_block"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("cidr_block"); !isEmptyValue(resource_tpu_node_reflect.ValueOf(cidrBlockProp)) && (ok || !resource_tpu_node_reflect.DeepEqual(v, cidrBlockProp)) { - obj["cidrBlock"] = cidrBlockProp - } - useServiceNetworkingProp, err := expandTPUNodeUseServiceNetworking(d.Get("use_service_networking"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("use_service_networking"); !isEmptyValue(resource_tpu_node_reflect.ValueOf(useServiceNetworkingProp)) && (ok || !resource_tpu_node_reflect.DeepEqual(v, useServiceNetworkingProp)) { - obj["useServiceNetworking"] = useServiceNetworkingProp - } - schedulingConfigProp, err := expandTPUNodeSchedulingConfig(d.Get("scheduling_config"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("scheduling_config"); !isEmptyValue(resource_tpu_node_reflect.ValueOf(schedulingConfigProp)) && (ok || 
!resource_tpu_node_reflect.DeepEqual(v, schedulingConfigProp)) { - obj["schedulingConfig"] = schedulingConfigProp - } - labelsProp, err := expandTPUNodeLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_tpu_node_reflect.ValueOf(labelsProp)) && (ok || !resource_tpu_node_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{TPUBasePath}}projects/{{project}}/locations/{{zone}}/nodes?nodeId={{name}}") - if err != nil { - return err - } - - resource_tpu_node_log.Printf("[DEBUG] Creating new Node: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_tpu_node_fmt.Errorf("Error fetching project for Node: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_tpu_node_schema.TimeoutCreate)) - if err != nil { - return resource_tpu_node_fmt.Errorf("Error creating Node: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/nodes/{{name}}") - if err != nil { - return resource_tpu_node_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = tpuOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Node", userAgent, - d.Timeout(resource_tpu_node_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_tpu_node_fmt.Errorf("Error waiting to create Node: %s", err) - } - - if err := d.Set("name", flattenTPUNodeName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/nodes/{{name}}") - if err != nil { - return resource_tpu_node_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - 
resource_tpu_node_log.Printf("[DEBUG] Finished creating Node %q: %#v", d.Id(), res) - - return resourceTPUNodeRead(d, meta) -} - -func resourceTPUNodeRead(d *resource_tpu_node_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{TPUBasePath}}projects/{{project}}/locations/{{zone}}/nodes/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_tpu_node_fmt.Errorf("Error fetching project for Node: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_tpu_node_fmt.Sprintf("TPUNode %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_tpu_node_fmt.Errorf("Error reading Node: %s", err) - } - - if err := d.Set("name", flattenTPUNodeName(res["name"], d, config)); err != nil { - return resource_tpu_node_fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("description", flattenTPUNodeDescription(res["description"], d, config)); err != nil { - return resource_tpu_node_fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("accelerator_type", flattenTPUNodeAcceleratorType(res["acceleratorType"], d, config)); err != nil { - return resource_tpu_node_fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("tensorflow_version", flattenTPUNodeTensorflowVersion(res["tensorflowVersion"], d, config)); err != nil { - return resource_tpu_node_fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("network", flattenTPUNodeNetwork(res["network"], d, config)); err != nil { - return resource_tpu_node_fmt.Errorf("Error reading Node: %s", err) - } - if 
err := d.Set("cidr_block", flattenTPUNodeCidrBlock(res["cidrBlock"], d, config)); err != nil { - return resource_tpu_node_fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("service_account", flattenTPUNodeServiceAccount(res["serviceAccount"], d, config)); err != nil { - return resource_tpu_node_fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("use_service_networking", flattenTPUNodeUseServiceNetworking(res["useServiceNetworking"], d, config)); err != nil { - return resource_tpu_node_fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("scheduling_config", flattenTPUNodeSchedulingConfig(res["schedulingConfig"], d, config)); err != nil { - return resource_tpu_node_fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("network_endpoints", flattenTPUNodeNetworkEndpoints(res["networkEndpoints"], d, config)); err != nil { - return resource_tpu_node_fmt.Errorf("Error reading Node: %s", err) - } - if err := d.Set("labels", flattenTPUNodeLabels(res["labels"], d, config)); err != nil { - return resource_tpu_node_fmt.Errorf("Error reading Node: %s", err) - } - - return nil -} - -func resourceTPUNodeUpdate(d *resource_tpu_node_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_tpu_node_fmt.Errorf("Error fetching project for Node: %s", err) - } - billingProject = project - - d.Partial(true) - - if d.HasChange("tensorflow_version") { - obj := make(map[string]interface{}) - - tensorflowVersionProp, err := expandTPUNodeTensorflowVersion(d.Get("tensorflow_version"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("tensorflow_version"); !isEmptyValue(resource_tpu_node_reflect.ValueOf(v)) && (ok || !resource_tpu_node_reflect.DeepEqual(v, tensorflowVersionProp)) { - 
obj["tensorflowVersion"] = tensorflowVersionProp - } - - url, err := replaceVars(d, config, "{{TPUBasePath}}projects/{{project}}/locations/{{zone}}/nodes/{{name}}:reimage") - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_tpu_node_schema.TimeoutUpdate)) - if err != nil { - return resource_tpu_node_fmt.Errorf("Error updating Node %q: %s", d.Id(), err) - } else { - resource_tpu_node_log.Printf("[DEBUG] Finished updating Node %q: %#v", d.Id(), res) - } - - err = tpuOperationWaitTime( - config, res, project, "Updating Node", userAgent, - d.Timeout(resource_tpu_node_schema.TimeoutUpdate)) - if err != nil { - return err - } - } - - d.Partial(false) - - return resourceTPUNodeRead(d, meta) -} - -func resourceTPUNodeDelete(d *resource_tpu_node_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_tpu_node_fmt.Errorf("Error fetching project for Node: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{TPUBasePath}}projects/{{project}}/locations/{{zone}}/nodes/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_tpu_node_log.Printf("[DEBUG] Deleting Node %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_tpu_node_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Node") - } - - err = tpuOperationWaitTime( - config, res, project, "Deleting Node", userAgent, - d.Timeout(resource_tpu_node_schema.TimeoutDelete)) - - if err != 
nil { - return err - } - - resource_tpu_node_log.Printf("[DEBUG] Finished deleting Node %q: %#v", d.Id(), res) - return nil -} - -func resourceTPUNodeImport(d *resource_tpu_node_schema.ResourceData, meta interface{}) ([]*resource_tpu_node_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/nodes/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{zone}}/nodes/{{name}}") - if err != nil { - return nil, resource_tpu_node_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - return []*resource_tpu_node_schema.ResourceData{d}, nil -} - -func flattenTPUNodeName(v interface{}, d *resource_tpu_node_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenTPUNodeDescription(v interface{}, d *resource_tpu_node_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeAcceleratorType(v interface{}, d *resource_tpu_node_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeTensorflowVersion(v interface{}, d *resource_tpu_node_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeNetwork(v interface{}, d *resource_tpu_node_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeCidrBlock(v interface{}, d *resource_tpu_node_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeServiceAccount(v interface{}, d *resource_tpu_node_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeUseServiceNetworking(v interface{}, d *resource_tpu_node_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeSchedulingConfig(v interface{}, d 
*resource_tpu_node_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["preemptible"] = - flattenTPUNodeSchedulingConfigPreemptible(original["preemptible"], d, config) - return []interface{}{transformed} -} - -func flattenTPUNodeSchedulingConfigPreemptible(v interface{}, d *resource_tpu_node_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeNetworkEndpoints(v interface{}, d *resource_tpu_node_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - l := v.([]interface{}) - transformed := make([]interface{}, 0, len(l)) - for _, raw := range l { - original := raw.(map[string]interface{}) - if len(original) < 1 { - - continue - } - transformed = append(transformed, map[string]interface{}{ - "ip_address": flattenTPUNodeNetworkEndpointsIpAddress(original["ipAddress"], d, config), - "port": flattenTPUNodeNetworkEndpointsPort(original["port"], d, config), - }) - } - return transformed -} - -func flattenTPUNodeNetworkEndpointsIpAddress(v interface{}, d *resource_tpu_node_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenTPUNodeNetworkEndpointsPort(v interface{}, d *resource_tpu_node_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_tpu_node_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenTPUNodeLabels(v interface{}, d *resource_tpu_node_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandTPUNodeName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTPUNodeDescription(v interface{}, d TerraformResourceData, config 
*Config) (interface{}, error) { - return v, nil -} - -func expandTPUNodeAcceleratorType(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTPUNodeTensorflowVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTPUNodeNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTPUNodeCidrBlock(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTPUNodeUseServiceNetworking(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTPUNodeSchedulingConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedPreemptible, err := expandTPUNodeSchedulingConfigPreemptible(original["preemptible"], d, config) - if err != nil { - return nil, err - } else if val := resource_tpu_node_reflect.ValueOf(transformedPreemptible); val.IsValid() && !isEmptyValue(val) { - transformed["preemptible"] = transformedPreemptible - } - - return transformed, nil -} - -func expandTPUNodeSchedulingConfigPreemptible(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandTPUNodeLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func resourceProjectUsageBucket() *resource_usage_export_bucket_schema.Resource { - return &resource_usage_export_bucket_schema.Resource{ - Create: resourceProjectUsageBucketCreate, 
- Read: resourceProjectUsageBucketRead, - Delete: resourceProjectUsageBucketDelete, - Importer: &resource_usage_export_bucket_schema.ResourceImporter{ - State: resourceProjectUsageBucketImportState, - }, - - Timeouts: &resource_usage_export_bucket_schema.ResourceTimeout{ - Create: resource_usage_export_bucket_schema.DefaultTimeout(4 * resource_usage_export_bucket_time.Minute), - Delete: resource_usage_export_bucket_schema.DefaultTimeout(4 * resource_usage_export_bucket_time.Minute), - }, - - Schema: map[string]*resource_usage_export_bucket_schema.Schema{ - "bucket_name": { - Type: resource_usage_export_bucket_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The bucket to store reports in.`, - }, - "prefix": { - Type: resource_usage_export_bucket_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `A prefix for the reports, for instance, the project name.`, - }, - "project": { - Type: resource_usage_export_bucket_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - Description: `The project to set the export bucket on. 
If it is not provided, the provider project is used.`, - }, - }, - UseJSONNumber: true, - } -} - -func resourceProjectUsageBucketRead(d *resource_usage_export_bucket_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - p, err := config.NewComputeClient(userAgent).Projects.Get(project).Do() - if err != nil { - return handleNotFoundError(err, d, resource_usage_export_bucket_fmt.Sprintf("Project data for project %s", project)) - } - - if p.UsageExportLocation == nil { - resource_usage_export_bucket_log.Printf("[WARN] Removing usage export location resource %s because it's not enabled server-side.", project) - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_usage_export_bucket_fmt.Errorf("Error setting project: %s", err) - } - if err := d.Set("prefix", p.UsageExportLocation.ReportNamePrefix); err != nil { - return resource_usage_export_bucket_fmt.Errorf("Error setting prefix: %s", err) - } - if err := d.Set("bucket_name", p.UsageExportLocation.BucketName); err != nil { - return resource_usage_export_bucket_fmt.Errorf("Error setting bucket_name: %s", err) - } - return nil -} - -func resourceProjectUsageBucketCreate(d *resource_usage_export_bucket_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - op, err := config.NewComputeClient(userAgent).Projects.SetUsageExportBucket(project, &resource_usage_export_bucket_compute.UsageExportLocation{ - ReportNamePrefix: d.Get("prefix").(string), - BucketName: d.Get("bucket_name").(string), - }).Do() - if err != nil { - return err - } - d.SetId(project) - err = 
computeOperationWaitTime(config, op, project, "Setting usage export bucket.", userAgent, d.Timeout(resource_usage_export_bucket_schema.TimeoutCreate)) - if err != nil { - d.SetId("") - return err - } - - if err := d.Set("project", project); err != nil { - return resource_usage_export_bucket_fmt.Errorf("Error setting project: %s", err) - } - - return resourceProjectUsageBucketRead(d, meta) -} - -func resourceProjectUsageBucketDelete(d *resource_usage_export_bucket_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - project, err := getProject(d, config) - if err != nil { - return err - } - - op, err := config.NewComputeClient(userAgent).Projects.SetUsageExportBucket(project, nil).Do() - if err != nil { - return err - } - - err = computeOperationWaitTime(config, op, project, - "Setting usage export bucket to nil, automatically disabling usage export.", userAgent, d.Timeout(resource_usage_export_bucket_schema.TimeoutDelete)) - if err != nil { - return err - } - d.SetId("") - - return nil -} - -func resourceProjectUsageBucketImportState(d *resource_usage_export_bucket_schema.ResourceData, meta interface{}) ([]*resource_usage_export_bucket_schema.ResourceData, error) { - project := d.Id() - if err := d.Set("project", project); err != nil { - return nil, resource_usage_export_bucket_fmt.Errorf("Error setting project: %s", err) - } - return []*resource_usage_export_bucket_schema.ResourceData{d}, nil -} - -func resourceVertexAIDataset() *resource_vertex_ai_dataset_schema.Resource { - return &resource_vertex_ai_dataset_schema.Resource{ - Create: resourceVertexAIDatasetCreate, - Read: resourceVertexAIDatasetRead, - Update: resourceVertexAIDatasetUpdate, - Delete: resourceVertexAIDatasetDelete, - - Timeouts: &resource_vertex_ai_dataset_schema.ResourceTimeout{ - Create: resource_vertex_ai_dataset_schema.DefaultTimeout(6 * 
resource_vertex_ai_dataset_time.Minute), - Update: resource_vertex_ai_dataset_schema.DefaultTimeout(6 * resource_vertex_ai_dataset_time.Minute), - Delete: resource_vertex_ai_dataset_schema.DefaultTimeout(10 * resource_vertex_ai_dataset_time.Minute), - }, - - Schema: map[string]*resource_vertex_ai_dataset_schema.Schema{ - "display_name": { - Type: resource_vertex_ai_dataset_schema.TypeString, - Required: true, - Description: `The user-defined name of the Dataset. The name can be up to 128 characters long and can be consist of any UTF-8 characters.`, - }, - "metadata_schema_uri": { - Type: resource_vertex_ai_dataset_schema.TypeString, - Required: true, - ForceNew: true, - Description: `Points to a YAML file stored on Google Cloud Storage describing additional information about the Dataset. The schema is defined as an OpenAPI 3.0.2 Schema Object. The schema files that can be used here are found in gs://google-cloud-aiplatform/schema/dataset/metadata/.`, - }, - "encryption_spec": { - Type: resource_vertex_ai_dataset_schema.TypeList, - Optional: true, - ForceNew: true, - Description: `Customer-managed encryption key spec for a Dataset. If set, this Dataset and all sub-resources of this Dataset will be secured by this key.`, - MaxItems: 1, - Elem: &resource_vertex_ai_dataset_schema.Resource{ - Schema: map[string]*resource_vertex_ai_dataset_schema.Schema{ - "kms_key_name": { - Type: resource_vertex_ai_dataset_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. -Has the form: projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key. 
The key needs to be in the same region as where the resource is created.`, - }, - }, - }, - }, - "labels": { - Type: resource_vertex_ai_dataset_schema.TypeMap, - Computed: true, - Optional: true, - Description: `A set of key/value label pairs to assign to this Workflow.`, - Elem: &resource_vertex_ai_dataset_schema.Schema{Type: resource_vertex_ai_dataset_schema.TypeString}, - }, - "region": { - Type: resource_vertex_ai_dataset_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `The region of the dataset. eg us-central1`, - }, - "create_time": { - Type: resource_vertex_ai_dataset_schema.TypeString, - Computed: true, - Description: `The timestamp of when the dataset was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "name": { - Type: resource_vertex_ai_dataset_schema.TypeString, - Computed: true, - Description: `The resource name of the Dataset. This value is set by Google.`, - }, - "update_time": { - Type: resource_vertex_ai_dataset_schema.TypeString, - Computed: true, - Description: `The timestamp of when the dataset was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "project": { - Type: resource_vertex_ai_dataset_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceVertexAIDatasetCreate(d *resource_vertex_ai_dataset_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - displayNameProp, err := expandVertexAIDatasetDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(resource_vertex_ai_dataset_reflect.ValueOf(displayNameProp)) && (ok || !resource_vertex_ai_dataset_reflect.DeepEqual(v, 
displayNameProp)) { - obj["displayName"] = displayNameProp - } - labelsProp, err := expandVertexAIDatasetLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_vertex_ai_dataset_reflect.ValueOf(labelsProp)) && (ok || !resource_vertex_ai_dataset_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - encryptionSpecProp, err := expandVertexAIDatasetEncryptionSpec(d.Get("encryption_spec"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("encryption_spec"); !isEmptyValue(resource_vertex_ai_dataset_reflect.ValueOf(encryptionSpecProp)) && (ok || !resource_vertex_ai_dataset_reflect.DeepEqual(v, encryptionSpecProp)) { - obj["encryptionSpec"] = encryptionSpecProp - } - metadataSchemaUriProp, err := expandVertexAIDatasetMetadataSchemaUri(d.Get("metadata_schema_uri"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("metadata_schema_uri"); !isEmptyValue(resource_vertex_ai_dataset_reflect.ValueOf(metadataSchemaUriProp)) && (ok || !resource_vertex_ai_dataset_reflect.DeepEqual(v, metadataSchemaUriProp)) { - obj["metadataSchemaUri"] = metadataSchemaUriProp - } - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/datasets") - if err != nil { - return err - } - - resource_vertex_ai_dataset_log.Printf("[DEBUG] Creating new Dataset: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_vertex_ai_dataset_fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_vertex_ai_dataset_schema.TimeoutCreate)) - if err != nil { - return resource_vertex_ai_dataset_fmt.Errorf("Error creating Dataset: %s", err) - } - 
- id, err := replaceVars(d, config, "{{name}}") - if err != nil { - return resource_vertex_ai_dataset_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = vertexAIOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Dataset", userAgent, - d.Timeout(resource_vertex_ai_dataset_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_vertex_ai_dataset_fmt.Errorf("Error waiting to create Dataset: %s", err) - } - - if err := d.Set("name", flattenVertexAIDatasetName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "{{name}}") - if err != nil { - return resource_vertex_ai_dataset_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_vertex_ai_dataset_log.Printf("[DEBUG] Finished creating Dataset %q: %#v", d.Id(), res) - - return resourceVertexAIDatasetRead(d, meta) -} - -func resourceVertexAIDatasetRead(d *resource_vertex_ai_dataset_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_vertex_ai_dataset_fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_vertex_ai_dataset_fmt.Sprintf("VertexAIDataset %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_vertex_ai_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - - if err := d.Set("name", flattenVertexAIDatasetName(res["name"], d, 
config)); err != nil { - return resource_vertex_ai_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("display_name", flattenVertexAIDatasetDisplayName(res["displayName"], d, config)); err != nil { - return resource_vertex_ai_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("create_time", flattenVertexAIDatasetCreateTime(res["createTime"], d, config)); err != nil { - return resource_vertex_ai_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("update_time", flattenVertexAIDatasetUpdateTime(res["updateTime"], d, config)); err != nil { - return resource_vertex_ai_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("labels", flattenVertexAIDatasetLabels(res["labels"], d, config)); err != nil { - return resource_vertex_ai_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("encryption_spec", flattenVertexAIDatasetEncryptionSpec(res["encryptionSpec"], d, config)); err != nil { - return resource_vertex_ai_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - if err := d.Set("metadata_schema_uri", flattenVertexAIDatasetMetadataSchemaUri(res["metadataSchemaUri"], d, config)); err != nil { - return resource_vertex_ai_dataset_fmt.Errorf("Error reading Dataset: %s", err) - } - - return nil -} - -func resourceVertexAIDatasetUpdate(d *resource_vertex_ai_dataset_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_vertex_ai_dataset_fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - displayNameProp, err := expandVertexAIDatasetDisplayName(d.Get("display_name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("display_name"); 
!isEmptyValue(resource_vertex_ai_dataset_reflect.ValueOf(v)) && (ok || !resource_vertex_ai_dataset_reflect.DeepEqual(v, displayNameProp)) { - obj["displayName"] = displayNameProp - } - labelsProp, err := expandVertexAIDatasetLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_vertex_ai_dataset_reflect.ValueOf(v)) && (ok || !resource_vertex_ai_dataset_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{name}}") - if err != nil { - return err - } - - resource_vertex_ai_dataset_log.Printf("[DEBUG] Updating Dataset %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("display_name") { - updateMask = append(updateMask, "displayName") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_vertex_ai_dataset_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_vertex_ai_dataset_schema.TimeoutUpdate)) - - if err != nil { - return resource_vertex_ai_dataset_fmt.Errorf("Error updating Dataset %q: %s", d.Id(), err) - } else { - resource_vertex_ai_dataset_log.Printf("[DEBUG] Finished updating Dataset %q: %#v", d.Id(), res) - } - - err = vertexAIOperationWaitTime( - config, res, project, "Updating Dataset", userAgent, - d.Timeout(resource_vertex_ai_dataset_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceVertexAIDatasetRead(d, meta) -} - -func resourceVertexAIDatasetDelete(d *resource_vertex_ai_dataset_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - 
return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_vertex_ai_dataset_fmt.Errorf("Error fetching project for Dataset: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{VertexAIBasePath}}{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_vertex_ai_dataset_log.Printf("[DEBUG] Deleting Dataset %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_vertex_ai_dataset_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Dataset") - } - - err = vertexAIOperationWaitTime( - config, res, project, "Deleting Dataset", userAgent, - d.Timeout(resource_vertex_ai_dataset_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_vertex_ai_dataset_log.Printf("[DEBUG] Finished deleting Dataset %q: %#v", d.Id(), res) - return nil -} - -func flattenVertexAIDatasetName(v interface{}, d *resource_vertex_ai_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIDatasetDisplayName(v interface{}, d *resource_vertex_ai_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIDatasetCreateTime(v interface{}, d *resource_vertex_ai_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIDatasetUpdateTime(v interface{}, d *resource_vertex_ai_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIDatasetLabels(v interface{}, d *resource_vertex_ai_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIDatasetEncryptionSpec(v interface{}, d *resource_vertex_ai_dataset_schema.ResourceData, config *Config) interface{} { - if v == nil { - return nil - } - original := 
v.(map[string]interface{}) - if len(original) == 0 { - return nil - } - transformed := make(map[string]interface{}) - transformed["kms_key_name"] = - flattenVertexAIDatasetEncryptionSpecKmsKeyName(original["kmsKeyName"], d, config) - return []interface{}{transformed} -} - -func flattenVertexAIDatasetEncryptionSpecKmsKeyName(v interface{}, d *resource_vertex_ai_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVertexAIDatasetMetadataSchemaUri(v interface{}, d *resource_vertex_ai_dataset_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandVertexAIDatasetDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIDatasetLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandVertexAIDatasetEncryptionSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil, nil - } - raw := l[0] - original := raw.(map[string]interface{}) - transformed := make(map[string]interface{}) - - transformedKmsKeyName, err := expandVertexAIDatasetEncryptionSpecKmsKeyName(original["kms_key_name"], d, config) - if err != nil { - return nil, err - } else if val := resource_vertex_ai_dataset_reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { - transformed["kmsKeyName"] = transformedKmsKeyName - } - - return transformed, nil -} - -func expandVertexAIDatasetEncryptionSpecKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVertexAIDatasetMetadataSchemaUri(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil 
-} - -func resourceVPCAccessConnector() *resource_vpc_access_connector_schema.Resource { - return &resource_vpc_access_connector_schema.Resource{ - Create: resourceVPCAccessConnectorCreate, - Read: resourceVPCAccessConnectorRead, - Delete: resourceVPCAccessConnectorDelete, - - Importer: &resource_vpc_access_connector_schema.ResourceImporter{ - State: resourceVPCAccessConnectorImport, - }, - - Timeouts: &resource_vpc_access_connector_schema.ResourceTimeout{ - Create: resource_vpc_access_connector_schema.DefaultTimeout(6 * resource_vpc_access_connector_time.Minute), - Delete: resource_vpc_access_connector_schema.DefaultTimeout(10 * resource_vpc_access_connector_time.Minute), - }, - - Schema: map[string]*resource_vpc_access_connector_schema.Schema{ - "name": { - Type: resource_vpc_access_connector_schema.TypeString, - Required: true, - ForceNew: true, - Description: `The name of the resource (Max 25 characters).`, - }, - "ip_cidr_range": { - Type: resource_vpc_access_connector_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The range of internal addresses that follows RFC 4632 notation. Example: '10.132.0.0/28'.`, - RequiredWith: []string{"network"}, - }, - "max_throughput": { - Type: resource_vpc_access_connector_schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: resource_vpc_access_connector_validation.IntBetween(200, 1000), - Description: `Maximum throughput of the connector in Mbps, must be greater than 'min_throughput'. Default is 300.`, - Default: 300, - }, - "min_throughput": { - Type: resource_vpc_access_connector_schema.TypeInt, - Optional: true, - ForceNew: true, - ValidateFunc: resource_vpc_access_connector_validation.IntBetween(200, 1000), - Description: `Minimum throughput of the connector in Mbps. Default and min is 200.`, - Default: 200, - }, - "network": { - Type: resource_vpc_access_connector_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Name of the VPC network. 
Required if 'ip_cidr_range' is set.`, - ExactlyOneOf: []string{"network"}, - }, - "region": { - Type: resource_vpc_access_connector_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Region where the VPC Access connector resides. If it is not provided, the provider region is used.`, - }, - "self_link": { - Type: resource_vpc_access_connector_schema.TypeString, - Computed: true, - Description: `The fully qualified name of this VPC connector`, - }, - "state": { - Type: resource_vpc_access_connector_schema.TypeString, - Computed: true, - Description: `State of the VPC access connector.`, - }, - "project": { - Type: resource_vpc_access_connector_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceVPCAccessConnectorCreate(d *resource_vpc_access_connector_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandVPCAccessConnectorName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_vpc_access_connector_reflect.ValueOf(nameProp)) && (ok || !resource_vpc_access_connector_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - networkProp, err := expandVPCAccessConnectorNetwork(d.Get("network"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("network"); !isEmptyValue(resource_vpc_access_connector_reflect.ValueOf(networkProp)) && (ok || !resource_vpc_access_connector_reflect.DeepEqual(v, networkProp)) { - obj["network"] = networkProp - } - ipCidrRangeProp, err := expandVPCAccessConnectorIpCidrRange(d.Get("ip_cidr_range"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("ip_cidr_range"); 
!isEmptyValue(resource_vpc_access_connector_reflect.ValueOf(ipCidrRangeProp)) && (ok || !resource_vpc_access_connector_reflect.DeepEqual(v, ipCidrRangeProp)) { - obj["ipCidrRange"] = ipCidrRangeProp - } - minThroughputProp, err := expandVPCAccessConnectorMinThroughput(d.Get("min_throughput"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("min_throughput"); !isEmptyValue(resource_vpc_access_connector_reflect.ValueOf(minThroughputProp)) && (ok || !resource_vpc_access_connector_reflect.DeepEqual(v, minThroughputProp)) { - obj["minThroughput"] = minThroughputProp - } - maxThroughputProp, err := expandVPCAccessConnectorMaxThroughput(d.Get("max_throughput"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("max_throughput"); !isEmptyValue(resource_vpc_access_connector_reflect.ValueOf(maxThroughputProp)) && (ok || !resource_vpc_access_connector_reflect.DeepEqual(v, maxThroughputProp)) { - obj["maxThroughput"] = maxThroughputProp - } - - obj, err = resourceVPCAccessConnectorEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{VPCAccessBasePath}}projects/{{project}}/locations/{{region}}/connectors?connectorId={{name}}") - if err != nil { - return err - } - - resource_vpc_access_connector_log.Printf("[DEBUG] Creating new Connector: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_vpc_access_connector_fmt.Errorf("Error fetching project for Connector: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_vpc_access_connector_schema.TimeoutCreate)) - if err != nil { - return resource_vpc_access_connector_fmt.Errorf("Error creating Connector: %s", err) - } - - id, err := replaceVars(d, config, 
"projects/{{project}}/locations/{{region}}/connectors/{{name}}") - if err != nil { - return resource_vpc_access_connector_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = vpcAccessOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Connector", userAgent, - d.Timeout(resource_vpc_access_connector_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_vpc_access_connector_fmt.Errorf("Error waiting to create Connector: %s", err) - } - - opRes, err = resourceVPCAccessConnectorDecoder(d, meta, opRes) - if err != nil { - return resource_vpc_access_connector_fmt.Errorf("Error decoding response from operation: %s", err) - } - if opRes == nil { - return resource_vpc_access_connector_fmt.Errorf("Error decoding response from operation, could not find object") - } - - if err := d.Set("name", flattenVPCAccessConnectorName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{region}}/connectors/{{name}}") - if err != nil { - return resource_vpc_access_connector_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_vpc_access_connector_time.Sleep(5 * resource_vpc_access_connector_time.Second) - - resource_vpc_access_connector_log.Printf("[DEBUG] Finished creating Connector %q: %#v", d.Id(), res) - - return resourceVPCAccessConnectorRead(d, meta) -} - -func resourceVPCAccessConnectorRead(d *resource_vpc_access_connector_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{VPCAccessBasePath}}projects/{{project}}/locations/{{region}}/connectors/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_vpc_access_connector_fmt.Errorf("Error 
fetching project for Connector: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_vpc_access_connector_fmt.Sprintf("VPCAccessConnector %q", d.Id())) - } - - res, err = resourceVPCAccessConnectorDecoder(d, meta, res) - if err != nil { - return err - } - - if res == nil { - - resource_vpc_access_connector_log.Printf("[DEBUG] Removing VPCAccessConnector because it no longer exists.") - d.SetId("") - return nil - } - - if err := d.Set("project", project); err != nil { - return resource_vpc_access_connector_fmt.Errorf("Error reading Connector: %s", err) - } - - if err := d.Set("name", flattenVPCAccessConnectorName(res["name"], d, config)); err != nil { - return resource_vpc_access_connector_fmt.Errorf("Error reading Connector: %s", err) - } - if err := d.Set("network", flattenVPCAccessConnectorNetwork(res["network"], d, config)); err != nil { - return resource_vpc_access_connector_fmt.Errorf("Error reading Connector: %s", err) - } - if err := d.Set("ip_cidr_range", flattenVPCAccessConnectorIpCidrRange(res["ipCidrRange"], d, config)); err != nil { - return resource_vpc_access_connector_fmt.Errorf("Error reading Connector: %s", err) - } - if err := d.Set("state", flattenVPCAccessConnectorState(res["state"], d, config)); err != nil { - return resource_vpc_access_connector_fmt.Errorf("Error reading Connector: %s", err) - } - if err := d.Set("min_throughput", flattenVPCAccessConnectorMinThroughput(res["minThroughput"], d, config)); err != nil { - return resource_vpc_access_connector_fmt.Errorf("Error reading Connector: %s", err) - } - if err := d.Set("max_throughput", flattenVPCAccessConnectorMaxThroughput(res["maxThroughput"], d, config)); err != nil { - return resource_vpc_access_connector_fmt.Errorf("Error reading Connector: %s", err) - } - - return nil 
-} - -func resourceVPCAccessConnectorDelete(d *resource_vpc_access_connector_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_vpc_access_connector_fmt.Errorf("Error fetching project for Connector: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{VPCAccessBasePath}}projects/{{project}}/locations/{{region}}/connectors/{{name}}") - if err != nil { - return err - } - - var obj map[string]interface{} - resource_vpc_access_connector_log.Printf("[DEBUG] Deleting Connector %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_vpc_access_connector_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Connector") - } - - err = vpcAccessOperationWaitTime( - config, res, project, "Deleting Connector", userAgent, - d.Timeout(resource_vpc_access_connector_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_vpc_access_connector_log.Printf("[DEBUG] Finished deleting Connector %q: %#v", d.Id(), res) - return nil -} - -func resourceVPCAccessConnectorImport(d *resource_vpc_access_connector_schema.ResourceData, meta interface{}) ([]*resource_vpc_access_connector_schema.ResourceData, error) { - config := meta.(*Config) - if err := parseImportId([]string{ - "projects/(?P[^/]+)/locations/(?P[^/]+)/connectors/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)/(?P[^/]+)", - "(?P[^/]+)", - }, d, config); err != nil { - return nil, err - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/connectors/{{name}}") - if err != nil { - return nil, resource_vpc_access_connector_fmt.Errorf("Error 
constructing id: %s", err) - } - d.SetId(id) - - return []*resource_vpc_access_connector_schema.ResourceData{d}, nil -} - -func flattenVPCAccessConnectorName(v interface{}, d *resource_vpc_access_connector_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenVPCAccessConnectorNetwork(v interface{}, d *resource_vpc_access_connector_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVPCAccessConnectorIpCidrRange(v interface{}, d *resource_vpc_access_connector_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVPCAccessConnectorState(v interface{}, d *resource_vpc_access_connector_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenVPCAccessConnectorMinThroughput(v interface{}, d *resource_vpc_access_connector_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_vpc_access_connector_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func flattenVPCAccessConnectorMaxThroughput(v interface{}, d *resource_vpc_access_connector_schema.ResourceData, config *Config) interface{} { - - if strVal, ok := v.(string); ok { - if intVal, err := resource_vpc_access_connector_strconv.ParseInt(strVal, 10, 64); err == nil { - return intVal - } - } - - if floatVal, ok := v.(float64); ok { - intVal := int(floatVal) - return intVal - } - - return v -} - -func expandVPCAccessConnectorName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVPCAccessConnectorNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVPCAccessConnectorIpCidrRange(v interface{}, d TerraformResourceData, config *Config) (interface{}, 
error) { - return v, nil -} - -func expandVPCAccessConnectorMinThroughput(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandVPCAccessConnectorMaxThroughput(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceVPCAccessConnectorEncoder(d *resource_vpc_access_connector_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - delete(obj, "name") - return obj, nil -} - -func resourceVPCAccessConnectorDecoder(d *resource_vpc_access_connector_schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - - res["name"] = d.Get("name").(string) - return res, nil -} - -func resourceWorkflowsWorkflow() *resource_workflows_workflow_schema.Resource { - return &resource_workflows_workflow_schema.Resource{ - Create: resourceWorkflowsWorkflowCreate, - Read: resourceWorkflowsWorkflowRead, - Update: resourceWorkflowsWorkflowUpdate, - Delete: resourceWorkflowsWorkflowDelete, - - Timeouts: &resource_workflows_workflow_schema.ResourceTimeout{ - Create: resource_workflows_workflow_schema.DefaultTimeout(6 * resource_workflows_workflow_time.Minute), - Update: resource_workflows_workflow_schema.DefaultTimeout(6 * resource_workflows_workflow_time.Minute), - Delete: resource_workflows_workflow_schema.DefaultTimeout(6 * resource_workflows_workflow_time.Minute), - }, - - SchemaVersion: 1, - StateUpgraders: []resource_workflows_workflow_schema.StateUpgrader{ - { - Type: resourceWorkflowsWorkflowResourceV0().CoreConfigSchema().ImpliedType(), - Upgrade: resourceWorkflowsWorkflowUpgradeV0, - Version: 0, - }, - }, - - Schema: map[string]*resource_workflows_workflow_schema.Schema{ - "description": { - Type: resource_workflows_workflow_schema.TypeString, - Computed: true, - Optional: true, - Description: `Description of the workflow provided by the user. 
Must be at most 1000 unicode characters long.`, - }, - "labels": { - Type: resource_workflows_workflow_schema.TypeMap, - Optional: true, - Description: `A set of key/value label pairs to assign to this Workflow.`, - Elem: &resource_workflows_workflow_schema.Schema{Type: resource_workflows_workflow_schema.TypeString}, - }, - "name": { - Type: resource_workflows_workflow_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Name of the Workflow.`, - }, - "region": { - Type: resource_workflows_workflow_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The region of the workflow.`, - }, - "service_account": { - Type: resource_workflows_workflow_schema.TypeString, - Computed: true, - Optional: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Name of the service account associated with the latest workflow version. This service -account represents the identity of the workflow and determines what permissions the workflow has. - -Format: projects/{project}/serviceAccounts/{account}.`, - }, - "source_contents": { - Type: resource_workflows_workflow_schema.TypeString, - Optional: true, - Description: `Workflow code to be executed. The size limit is 32KB.`, - }, - "create_time": { - Type: resource_workflows_workflow_schema.TypeString, - Computed: true, - Description: `The timestamp of when the workflow was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "revision_id": { - Type: resource_workflows_workflow_schema.TypeString, - Computed: true, - Description: `The revision of the workflow. 
A new one is generated if the service account or source contents is changed.`, - }, - "state": { - Type: resource_workflows_workflow_schema.TypeString, - Computed: true, - Description: `State of the workflow deployment.`, - }, - "update_time": { - Type: resource_workflows_workflow_schema.TypeString, - Computed: true, - Description: `The timestamp of when the workflow was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "name_prefix": { - Type: resource_workflows_workflow_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name"}, - }, - "project": { - Type: resource_workflows_workflow_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - UseJSONNumber: true, - } -} - -func resourceWorkflowsWorkflowCreate(d *resource_workflows_workflow_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - obj := make(map[string]interface{}) - nameProp, err := expandWorkflowsWorkflowName(d.Get("name"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("name"); !isEmptyValue(resource_workflows_workflow_reflect.ValueOf(nameProp)) && (ok || !resource_workflows_workflow_reflect.DeepEqual(v, nameProp)) { - obj["name"] = nameProp - } - descriptionProp, err := expandWorkflowsWorkflowDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_workflows_workflow_reflect.ValueOf(descriptionProp)) && (ok || !resource_workflows_workflow_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandWorkflowsWorkflowLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); 
!isEmptyValue(resource_workflows_workflow_reflect.ValueOf(labelsProp)) && (ok || !resource_workflows_workflow_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - serviceAccountProp, err := expandWorkflowsWorkflowServiceAccount(d.Get("service_account"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_account"); !isEmptyValue(resource_workflows_workflow_reflect.ValueOf(serviceAccountProp)) && (ok || !resource_workflows_workflow_reflect.DeepEqual(v, serviceAccountProp)) { - obj["serviceAccount"] = serviceAccountProp - } - sourceContentsProp, err := expandWorkflowsWorkflowSourceContents(d.Get("source_contents"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_contents"); !isEmptyValue(resource_workflows_workflow_reflect.ValueOf(sourceContentsProp)) && (ok || !resource_workflows_workflow_reflect.DeepEqual(v, sourceContentsProp)) { - obj["sourceContents"] = sourceContentsProp - } - - obj, err = resourceWorkflowsWorkflowEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{WorkflowsBasePath}}projects/{{project}}/locations/{{region}}/workflows?workflowId={{name}}") - if err != nil { - return err - } - - resource_workflows_workflow_log.Printf("[DEBUG] Creating new Workflow: %#v", obj) - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_workflows_workflow_fmt.Errorf("Error fetching project for Workflow: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(resource_workflows_workflow_schema.TimeoutCreate)) - if err != nil { - return resource_workflows_workflow_fmt.Errorf("Error creating Workflow: %s", err) - } - - id, err := replaceVars(d, config, "projects/{{project}}/locations/{{region}}/workflows/{{name}}") - if 
err != nil { - return resource_workflows_workflow_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - var opRes map[string]interface{} - err = workflowsOperationWaitTimeWithResponse( - config, res, &opRes, project, "Creating Workflow", userAgent, - d.Timeout(resource_workflows_workflow_schema.TimeoutCreate)) - if err != nil { - - d.SetId("") - return resource_workflows_workflow_fmt.Errorf("Error waiting to create Workflow: %s", err) - } - - if err := d.Set("name", flattenWorkflowsWorkflowName(opRes["name"], d, config)); err != nil { - return err - } - - id, err = replaceVars(d, config, "projects/{{project}}/locations/{{region}}/workflows/{{name}}") - if err != nil { - return resource_workflows_workflow_fmt.Errorf("Error constructing id: %s", err) - } - d.SetId(id) - - resource_workflows_workflow_log.Printf("[DEBUG] Finished creating Workflow %q: %#v", d.Id(), res) - - return resourceWorkflowsWorkflowRead(d, meta) -} - -func resourceWorkflowsWorkflowRead(d *resource_workflows_workflow_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{WorkflowsBasePath}}projects/{{project}}/locations/{{region}}/workflows/{{name}}") - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_workflows_workflow_fmt.Errorf("Error fetching project for Workflow: %s", err) - } - billingProject = project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) - if err != nil { - return handleNotFoundError(err, d, resource_workflows_workflow_fmt.Sprintf("WorkflowsWorkflow %q", d.Id())) - } - - if err := d.Set("project", project); err != nil { - return resource_workflows_workflow_fmt.Errorf("Error reading Workflow: %s", err) - } 
- - if err := d.Set("name", flattenWorkflowsWorkflowName(res["name"], d, config)); err != nil { - return resource_workflows_workflow_fmt.Errorf("Error reading Workflow: %s", err) - } - if err := d.Set("description", flattenWorkflowsWorkflowDescription(res["description"], d, config)); err != nil { - return resource_workflows_workflow_fmt.Errorf("Error reading Workflow: %s", err) - } - if err := d.Set("create_time", flattenWorkflowsWorkflowCreateTime(res["createTime"], d, config)); err != nil { - return resource_workflows_workflow_fmt.Errorf("Error reading Workflow: %s", err) - } - if err := d.Set("update_time", flattenWorkflowsWorkflowUpdateTime(res["updateTime"], d, config)); err != nil { - return resource_workflows_workflow_fmt.Errorf("Error reading Workflow: %s", err) - } - if err := d.Set("state", flattenWorkflowsWorkflowState(res["state"], d, config)); err != nil { - return resource_workflows_workflow_fmt.Errorf("Error reading Workflow: %s", err) - } - if err := d.Set("labels", flattenWorkflowsWorkflowLabels(res["labels"], d, config)); err != nil { - return resource_workflows_workflow_fmt.Errorf("Error reading Workflow: %s", err) - } - if err := d.Set("service_account", flattenWorkflowsWorkflowServiceAccount(res["serviceAccount"], d, config)); err != nil { - return resource_workflows_workflow_fmt.Errorf("Error reading Workflow: %s", err) - } - if err := d.Set("source_contents", flattenWorkflowsWorkflowSourceContents(res["sourceContents"], d, config)); err != nil { - return resource_workflows_workflow_fmt.Errorf("Error reading Workflow: %s", err) - } - if err := d.Set("revision_id", flattenWorkflowsWorkflowRevisionId(res["revisionId"], d, config)); err != nil { - return resource_workflows_workflow_fmt.Errorf("Error reading Workflow: %s", err) - } - - return nil -} - -func resourceWorkflowsWorkflowUpdate(d *resource_workflows_workflow_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, 
config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_workflows_workflow_fmt.Errorf("Error fetching project for Workflow: %s", err) - } - billingProject = project - - obj := make(map[string]interface{}) - descriptionProp, err := expandWorkflowsWorkflowDescription(d.Get("description"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("description"); !isEmptyValue(resource_workflows_workflow_reflect.ValueOf(v)) && (ok || !resource_workflows_workflow_reflect.DeepEqual(v, descriptionProp)) { - obj["description"] = descriptionProp - } - labelsProp, err := expandWorkflowsWorkflowLabels(d.Get("labels"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(resource_workflows_workflow_reflect.ValueOf(v)) && (ok || !resource_workflows_workflow_reflect.DeepEqual(v, labelsProp)) { - obj["labels"] = labelsProp - } - serviceAccountProp, err := expandWorkflowsWorkflowServiceAccount(d.Get("service_account"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("service_account"); !isEmptyValue(resource_workflows_workflow_reflect.ValueOf(v)) && (ok || !resource_workflows_workflow_reflect.DeepEqual(v, serviceAccountProp)) { - obj["serviceAccount"] = serviceAccountProp - } - sourceContentsProp, err := expandWorkflowsWorkflowSourceContents(d.Get("source_contents"), d, config) - if err != nil { - return err - } else if v, ok := d.GetOkExists("source_contents"); !isEmptyValue(resource_workflows_workflow_reflect.ValueOf(v)) && (ok || !resource_workflows_workflow_reflect.DeepEqual(v, sourceContentsProp)) { - obj["sourceContents"] = sourceContentsProp - } - - obj, err = resourceWorkflowsWorkflowEncoder(d, meta, obj) - if err != nil { - return err - } - - url, err := replaceVars(d, config, "{{WorkflowsBasePath}}projects/{{project}}/locations/{{region}}/workflows/{{name}}") - if err != nil { 
- return err - } - - resource_workflows_workflow_log.Printf("[DEBUG] Updating Workflow %q: %#v", d.Id(), obj) - updateMask := []string{} - - if d.HasChange("description") { - updateMask = append(updateMask, "description") - } - - if d.HasChange("labels") { - updateMask = append(updateMask, "labels") - } - - if d.HasChange("service_account") { - updateMask = append(updateMask, "serviceAccount") - } - - if d.HasChange("source_contents") { - updateMask = append(updateMask, "sourceContents") - } - - url, err = addQueryParams(url, map[string]string{"updateMask": resource_workflows_workflow_strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(resource_workflows_workflow_schema.TimeoutUpdate)) - - if err != nil { - return resource_workflows_workflow_fmt.Errorf("Error updating Workflow %q: %s", d.Id(), err) - } else { - resource_workflows_workflow_log.Printf("[DEBUG] Finished updating Workflow %q: %#v", d.Id(), res) - } - - err = workflowsOperationWaitTime( - config, res, project, "Updating Workflow", userAgent, - d.Timeout(resource_workflows_workflow_schema.TimeoutUpdate)) - - if err != nil { - return err - } - - return resourceWorkflowsWorkflowRead(d, meta) -} - -func resourceWorkflowsWorkflowDelete(d *resource_workflows_workflow_schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := "" - - project, err := getProject(d, config) - if err != nil { - return resource_workflows_workflow_fmt.Errorf("Error fetching project for Workflow: %s", err) - } - billingProject = project - - url, err := replaceVars(d, config, "{{WorkflowsBasePath}}projects/{{project}}/locations/{{region}}/workflows/{{name}}") - if err != nil { - return err - } - - 
var obj map[string]interface{} - resource_workflows_workflow_log.Printf("[DEBUG] Deleting Workflow %q", d.Id()) - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(resource_workflows_workflow_schema.TimeoutDelete)) - if err != nil { - return handleNotFoundError(err, d, "Workflow") - } - - err = workflowsOperationWaitTime( - config, res, project, "Deleting Workflow", userAgent, - d.Timeout(resource_workflows_workflow_schema.TimeoutDelete)) - - if err != nil { - return err - } - - resource_workflows_workflow_log.Printf("[DEBUG] Finished deleting Workflow %q: %#v", d.Id(), res) - return nil -} - -func flattenWorkflowsWorkflowName(v interface{}, d *resource_workflows_workflow_schema.ResourceData, config *Config) interface{} { - if v == nil { - return v - } - return NameFromSelfLinkStateFunc(v) -} - -func flattenWorkflowsWorkflowDescription(v interface{}, d *resource_workflows_workflow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenWorkflowsWorkflowCreateTime(v interface{}, d *resource_workflows_workflow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenWorkflowsWorkflowUpdateTime(v interface{}, d *resource_workflows_workflow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenWorkflowsWorkflowState(v interface{}, d *resource_workflows_workflow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenWorkflowsWorkflowLabels(v interface{}, d *resource_workflows_workflow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenWorkflowsWorkflowServiceAccount(v interface{}, d *resource_workflows_workflow_schema.ResourceData, config *Config) interface{} { - return v -} - -func flattenWorkflowsWorkflowSourceContents(v interface{}, d *resource_workflows_workflow_schema.ResourceData, config *Config) 
interface{} { - return v -} - -func flattenWorkflowsWorkflowRevisionId(v interface{}, d *resource_workflows_workflow_schema.ResourceData, config *Config) interface{} { - return v -} - -func expandWorkflowsWorkflowName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandWorkflowsWorkflowDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandWorkflowsWorkflowLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { - if v == nil { - return map[string]string{}, nil - } - m := make(map[string]string) - for k, val := range v.(map[string]interface{}) { - m[k] = val.(string) - } - return m, nil -} - -func expandWorkflowsWorkflowServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func expandWorkflowsWorkflowSourceContents(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { - return v, nil -} - -func resourceWorkflowsWorkflowEncoder(d *resource_workflows_workflow_schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - var ResName string - if v, ok := d.GetOk("name"); ok { - ResName = v.(string) - } else if v, ok := d.GetOk("name_prefix"); ok { - ResName = resource_workflows_workflow_resource.PrefixedUniqueId(v.(string)) - } else { - ResName = resource_workflows_workflow_resource.UniqueId() - } - - if err := d.Set("name", ResName); err != nil { - return nil, resource_workflows_workflow_fmt.Errorf("Error setting name: %s", err) - } - - return obj, nil -} - -func resourceWorkflowsWorkflowResourceV0() *resource_workflows_workflow_schema.Resource { - return &resource_workflows_workflow_schema.Resource{ - Schema: map[string]*resource_workflows_workflow_schema.Schema{ - "description": { - Type: resource_workflows_workflow_schema.TypeString, - Computed: true, - Optional: true, - 
Description: `Description of the workflow provided by the user. Must be at most 1000 unicode characters long.`, - }, - "labels": { - Type: resource_workflows_workflow_schema.TypeMap, - Optional: true, - Description: `A set of key/value label pairs to assign to this Workflow.`, - Elem: &resource_workflows_workflow_schema.Schema{Type: resource_workflows_workflow_schema.TypeString}, - }, - "name": { - Type: resource_workflows_workflow_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - Description: `Name of the Workflow.`, - }, - "region": { - Type: resource_workflows_workflow_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `The region of the workflow.`, - }, - "service_account": { - Type: resource_workflows_workflow_schema.TypeString, - Computed: true, - Optional: true, - ForceNew: true, - DiffSuppressFunc: compareSelfLinkOrResourceName, - Description: `Name of the service account associated with the latest workflow version. This service -account represents the identity of the workflow and determines what permissions the workflow has. - -Format: projects/{project}/serviceAccounts/{account}.`, - }, - "source_contents": { - Type: resource_workflows_workflow_schema.TypeString, - Optional: true, - ForceNew: true, - Description: `Workflow code to be executed. The size limit is 32KB.`, - }, - "create_time": { - Type: resource_workflows_workflow_schema.TypeString, - Computed: true, - Description: `The timestamp of when the workflow was created in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "revision_id": { - Type: resource_workflows_workflow_schema.TypeString, - Computed: true, - Description: `The revision of the workflow. 
A new one is generated if the service account or source contents is changed.`, - }, - "state": { - Type: resource_workflows_workflow_schema.TypeString, - Computed: true, - Description: `State of the workflow deployment.`, - }, - "update_time": { - Type: resource_workflows_workflow_schema.TypeString, - Computed: true, - Description: `The timestamp of when the workflow was last updated in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits.`, - }, - "name_prefix": { - Type: resource_workflows_workflow_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ConflictsWith: []string{"name"}, - }, - "project": { - Type: resource_workflows_workflow_schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - }, - } -} - -func resourceWorkflowsWorkflowUpgradeV0(_ resource_workflows_workflow_context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { - resource_workflows_workflow_log.Printf("[DEBUG] Attributes before migration: %#v", rawState) - - rawState["name"] = GetResourceNameFromSelfLink(rawState["name"].(string)) - - resource_workflows_workflow_log.Printf("[DEBUG] Attributes after migration: %#v", rawState) - return rawState, nil -} - -const defaultRetryTransportTimeoutSec = 90 - -func NewTransportWithDefaultRetries(t retry_transport_http.RoundTripper) *retryTransport { - return &retryTransport{ - retryPredicates: defaultErrorRetryPredicates, - internal: t, - } -} - -func ClientWithAdditionalRetries(baseClient *retry_transport_http.Client, predicates ...RetryErrorPredicateFunc) *retry_transport_http.Client { - copied := *baseClient - baseRetryTransport := NewTransportWithDefaultRetries(baseClient.Transport) - copied.Transport = baseRetryTransport.WithAddedPredicates(predicates...) 
- return &copied -} - -func (t *retryTransport) WithAddedPredicates(predicates ...RetryErrorPredicateFunc) *retryTransport { - copyT := *t - copyT.retryPredicates = append(t.retryPredicates, predicates...) - return ©T -} - -type retryTransport struct { - retryPredicates []RetryErrorPredicateFunc - internal retry_transport_http.RoundTripper -} - -func (t *retryTransport) RoundTrip(req *retry_transport_http.Request) (resp *retry_transport_http.Response, respErr error) { - - ctx := req.Context() - var ccancel retry_transport_context.CancelFunc - if _, ok := ctx.Deadline(); !ok { - ctx, ccancel = retry_transport_context.WithTimeout(ctx, defaultRetryTransportTimeoutSec*retry_transport_time.Second) - defer func() { - if ctx.Err() == nil { - - ccancel() - } - }() - } - - attempts := 0 - backoff := retry_transport_time.Millisecond * 500 - nextBackoff := retry_transport_time.Millisecond * 500 - - if _, err := retry_transport_httputil.DumpRequestOut(req, true); err != nil { - retry_transport_log.Printf("[WARN] Retry Transport: Consuming original request body failed: %v", err) - } - - retry_transport_log.Printf("[DEBUG] Retry Transport: starting RoundTrip retry loop") -Retry: - for { - - newRequest, copyErr := copyHttpRequest(req) - if copyErr != nil { - retry_transport_log.Printf("[WARN] Retry Transport: Unable to copy request body: %v.", copyErr) - retry_transport_log.Printf("[WARN] Retry Transport: Running request as non-retryable") - resp, respErr = t.internal.RoundTrip(req) - break Retry - } - - retry_transport_log.Printf("[DEBUG] Retry Transport: request attempt %d", attempts) - - resp, respErr = t.internal.RoundTrip(newRequest) - attempts++ - - retryErr := t.checkForRetryableError(resp, respErr) - if retryErr == nil { - retry_transport_log.Printf("[DEBUG] Retry Transport: Stopping retries, last request was successful") - break Retry - } - if !retryErr.Retryable { - retry_transport_log.Printf("[DEBUG] Retry Transport: Stopping retries, last request failed with 
non-retryable error: %s", retryErr.Err) - break Retry - } - - retry_transport_log.Printf("[DEBUG] Retry Transport: Waiting %s before trying request again", backoff) - select { - case <-ctx.Done(): - retry_transport_log.Printf("[DEBUG] Retry Transport: Stopping retries, context done: %v", ctx.Err()) - break Retry - case <-retry_transport_time.After(backoff): - retry_transport_log.Printf("[DEBUG] Retry Transport: Finished waiting %s before next retry", backoff) - - lastBackoff := backoff - backoff = backoff + nextBackoff - nextBackoff = lastBackoff - continue - } - } - retry_transport_log.Printf("[DEBUG] Retry Transport: Returning after %d attempts", attempts) - return resp, respErr -} - -func copyHttpRequest(req *retry_transport_http.Request) (*retry_transport_http.Request, error) { - newRequest := *req - if req.Body == nil || req.Body == retry_transport_http.NoBody { - return &newRequest, nil - } - - if req.GetBody == nil { - return nil, retry_transport_errors.New("request.GetBody is not defined for non-empty Body") - } - - bd, err := req.GetBody() - if err != nil { - return nil, err - } - - newRequest.Body = bd - return &newRequest, nil -} - -func (t *retryTransport) checkForRetryableError(resp *retry_transport_http.Response, respErr error) *retry_transport_resource.RetryError { - var errToCheck error - - if respErr != nil { - errToCheck = respErr - } else { - respToCheck := *resp - - if resp.Body != nil && resp.Body != retry_transport_http.NoBody { - - dumpBytes, err := retry_transport_httputil.DumpResponse(resp, true) - if err != nil { - return retry_transport_resource.NonRetryableError(retry_transport_fmt.Errorf("unable to check response for error: %v", err)) - } - respToCheck.Body = retry_transport_ioutil.NopCloser(retry_transport_bytes.NewReader(dumpBytes)) - } - errToCheck = retry_transport_googleapi.CheckResponse(&respToCheck) - } - - if errToCheck == nil { - return nil - } - if isRetryableError(errToCheck, t.retryPredicates...) 
{ - return retry_transport_resource.RetryableError(errToCheck) - } - return retry_transport_resource.NonRetryableError(errToCheck) -} - -func retry(retryFunc func() error) error { - return retryTime(retryFunc, 1) -} - -func retryTime(retryFunc func() error, minutes int) error { - return retryTimeDuration(retryFunc, retry_utils_time.Duration(minutes)*retry_utils_time.Minute) -} - -func retryTimeDuration(retryFunc func() error, duration retry_utils_time.Duration, errorRetryPredicates ...RetryErrorPredicateFunc) error { - return retry_utils_resource.Retry(duration, func() *retry_utils_resource.RetryError { - err := retryFunc() - if err == nil { - return nil - } - if isRetryableError(err, errorRetryPredicates...) { - return retry_utils_resource.RetryableError(err) - } - return retry_utils_resource.NonRetryableError(err) - }) -} - -func isRetryableError(topErr error, customPredicates ...RetryErrorPredicateFunc) bool { - if topErr == nil { - return false - } - - retryPredicates := append( - - defaultErrorRetryPredicates, - customPredicates...) - - isRetryable := false - retry_utils_errwrap.Walk(topErr, func(werr error) { - for _, pred := range retryPredicates { - if predRetry, predReason := pred(werr); predRetry { - retry_utils_log.Printf("[DEBUG] Dismissed an error as retryable. 
%s - %s", predReason, werr) - isRetryable = true - return - } - } - }) - return isRetryable -} - -func compareResourceNames(_, old, new string, _ *self_link_helpers_schema.ResourceData) bool { - return GetResourceNameFromSelfLink(old) == GetResourceNameFromSelfLink(new) -} - -func compareSelfLinkRelativePaths(_, old, new string, _ *self_link_helpers_schema.ResourceData) bool { - oldStripped, err := getRelativePath(old) - if err != nil { - return false - } - - newStripped, err := getRelativePath(new) - if err != nil { - return false - } - - if oldStripped == newStripped { - return true - } - - return false -} - -func compareSelfLinkOrResourceName(_, old, new string, _ *self_link_helpers_schema.ResourceData) bool { - newParts := self_link_helpers_strings.Split(new, "/") - - if len(newParts) == 1 { - - if GetResourceNameFromSelfLink(old) == newParts[0] { - return true - } - } - - return compareSelfLinkRelativePaths("", old, new, nil) -} - -func selfLinkRelativePathHash(selfLink interface{}) int { - path, _ := getRelativePath(selfLink.(string)) - return hashcode(path) -} - -func getRelativePath(selfLink string) (string, error) { - stringParts := self_link_helpers_strings.SplitAfterN(selfLink, "projects/", 2) - if len(stringParts) != 2 { - return "", self_link_helpers_fmt.Errorf("String was not a self link: %s", selfLink) - } - - return "projects/" + stringParts[1], nil -} - -func selfLinkNameHash(selfLink interface{}) int { - name := GetResourceNameFromSelfLink(selfLink.(string)) - return hashcode(name) -} - -func ConvertSelfLinkToV1(link string) string { - reg := self_link_helpers_regexp.MustCompile("/compute/[a-zA-Z0-9]*/projects/") - return reg.ReplaceAllString(link, "/compute/v1/projects/") -} - -func GetResourceNameFromSelfLink(link string) string { - parts := self_link_helpers_strings.Split(link, "/") - return parts[len(parts)-1] -} - -func NameFromSelfLinkStateFunc(v interface{}) string { - return GetResourceNameFromSelfLink(v.(string)) -} - -func 
StoreResourceName(resourceLink interface{}) string { - return GetResourceNameFromSelfLink(resourceLink.(string)) -} - -type LocationType int - -const ( - Zonal LocationType = iota - Regional - Global -) - -func GetZonalResourcePropertiesFromSelfLinkOrSchema(d *self_link_helpers_schema.ResourceData, config *Config) (string, string, string, error) { - return getResourcePropertiesFromSelfLinkOrSchema(d, config, Zonal) -} - -func GetRegionalResourcePropertiesFromSelfLinkOrSchema(d *self_link_helpers_schema.ResourceData, config *Config) (string, string, string, error) { - return getResourcePropertiesFromSelfLinkOrSchema(d, config, Regional) -} - -func getResourcePropertiesFromSelfLinkOrSchema(d *self_link_helpers_schema.ResourceData, config *Config, locationType LocationType) (string, string, string, error) { - if selfLink, ok := d.GetOk("self_link"); ok { - return GetLocationalResourcePropertiesFromSelfLinkString(selfLink.(string)) - } else { - project, err := getProject(d, config) - if err != nil { - return "", "", "", err - } - - location := "" - if locationType == Regional { - location, err = getRegion(d, config) - if err != nil { - return "", "", "", err - } - } else if locationType == Zonal { - location, err = getZone(d, config) - if err != nil { - return "", "", "", err - } - } - - n, ok := d.GetOk("name") - name := n.(string) - if !ok { - return "", "", "", self_link_helpers_errors.New("must provide either `self_link` or `name`") - } - return project, location, name, nil - } -} - -func GetLocationalResourcePropertiesFromSelfLinkString(selfLink string) (string, string, string, error) { - parsed, err := self_link_helpers_url.Parse(selfLink) - if err != nil { - return "", "", "", err - } - - s := self_link_helpers_strings.Split(parsed.Path, "/") - - if len(s) < 9 { - return "", "", "", self_link_helpers_fmt.Errorf("value %s was not a self link", selfLink) - } - - return s[4], s[6], s[8], nil -} - -func GetRegionFromRegionSelfLink(selfLink string) string { - re := 
self_link_helpers_regexp.MustCompile("/compute/[a-zA-Z0-9]*/projects/[a-zA-Z0-9-]*/regions/([a-zA-Z0-9-]*)") - switch { - case re.MatchString(selfLink): - if res := re.FindStringSubmatch(selfLink); len(res) == 2 && res[1] != "" { - return res[1] - } - } - return selfLink -} - -func GetRegionFromRegionalSelfLink(selfLink string) string { - re := self_link_helpers_regexp.MustCompile("projects/[a-zA-Z0-9-]*/(?:locations|regions)/([a-zA-Z0-9-]*)") - switch { - case re.MatchString(selfLink): - if res := re.FindStringSubmatch(selfLink); len(res) == 2 && res[1] != "" { - return res[1] - } - } - return selfLink -} - -type ServiceAccountKeyWaiter struct { - Service *service_account_waiter_iam.ProjectsServiceAccountsKeysService - PublicKeyType string - KeyName string -} - -func (w *ServiceAccountKeyWaiter) RefreshFunc() service_account_waiter_resource.StateRefreshFunc { - return func() (interface{}, string, error) { - var err error - var sak *service_account_waiter_iam.ServiceAccountKey - sak, err = w.Service.Get(w.KeyName).PublicKeyType(w.PublicKeyType).Do() - - if err != nil { - if err.(*service_account_waiter_googleapi.Error).Code == 404 { - return nil, "PENDING", nil - } else { - return nil, "", err - } - } else { - return sak, "DONE", nil - } - } -} - -func serviceAccountKeyWaitTime(client *service_account_waiter_iam.ProjectsServiceAccountsKeysService, keyName, publicKeyType, activity string, timeout service_account_waiter_time.Duration) error { - w := &ServiceAccountKeyWaiter{ - Service: client, - PublicKeyType: publicKeyType, - KeyName: keyName, - } - - c := &service_account_waiter_resource.StateChangeConf{ - Pending: []string{"PENDING"}, - Target: []string{"DONE"}, - Refresh: w.RefreshFunc(), - Timeout: timeout, - MinTimeout: 2 * service_account_waiter_time.Second, - } - _, err := c.WaitForState() - if err != nil { - return service_account_waiter_fmt.Errorf("Error waiting for %s: %s", activity, err) - } - - return nil -} - -type ServiceNetworkingOperationWaiter 
struct { - Service *service_networking_operation_servicenetworking.APIService - Project string - UserProjectOverride bool - CommonOperationWaiter -} - -func (w *ServiceNetworkingOperationWaiter) QueryOp() (interface{}, error) { - opGetCall := w.Service.Operations.Get(w.Op.Name) - if w.UserProjectOverride { - opGetCall.Header().Add("X-Goog-User-Project", w.Project) - } - return opGetCall.Do() -} - -func serviceNetworkingOperationWaitTime(config *Config, op *service_networking_operation_servicenetworking.Operation, activity, userAgent, project string, timeout service_networking_operation_time.Duration) error { - w := &ServiceNetworkingOperationWaiter{ - Service: config.NewServiceNetworkingClient(userAgent), - Project: project, - UserProjectOverride: config.UserProjectOverride, - } - - if err := w.SetOp(op); err != nil { - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -func canonicalizeServiceScope(scope string) string { - - scopeMap := map[string]string{ - "bigquery": "https://www.googleapis.com/auth/bigquery", - "cloud-platform": "https://www.googleapis.com/auth/cloud-platform", - "cloud-source-repos": "https://www.googleapis.com/auth/source.full_control", - "cloud-source-repos-ro": "https://www.googleapis.com/auth/source.read_only", - "compute-ro": "https://www.googleapis.com/auth/compute.readonly", - "compute-rw": "https://www.googleapis.com/auth/compute", - "datastore": "https://www.googleapis.com/auth/datastore", - "logging-write": "https://www.googleapis.com/auth/logging.write", - "monitoring": "https://www.googleapis.com/auth/monitoring", - "monitoring-read": "https://www.googleapis.com/auth/monitoring.read", - "monitoring-write": "https://www.googleapis.com/auth/monitoring.write", - "pubsub": "https://www.googleapis.com/auth/pubsub", - "service-control": "https://www.googleapis.com/auth/servicecontrol", - "service-management": "https://www.googleapis.com/auth/service.management.readonly", - "sql": 
"https://www.googleapis.com/auth/sqlservice", - "sql-admin": "https://www.googleapis.com/auth/sqlservice.admin", - "storage-full": "https://www.googleapis.com/auth/devstorage.full_control", - "storage-ro": "https://www.googleapis.com/auth/devstorage.read_only", - "storage-rw": "https://www.googleapis.com/auth/devstorage.read_write", - "taskqueue": "https://www.googleapis.com/auth/taskqueue", - "trace": "https://www.googleapis.com/auth/trace.append", - "useraccounts-ro": "https://www.googleapis.com/auth/cloud.useraccounts.readonly", - "useraccounts-rw": "https://www.googleapis.com/auth/cloud.useraccounts", - "userinfo-email": "https://www.googleapis.com/auth/userinfo.email", - } - - if matchedURL, ok := scopeMap[scope]; ok { - return matchedURL - } - - return scope -} - -func canonicalizeServiceScopes(scopes []string) []string { - cs := make([]string, len(scopes)) - for i, scope := range scopes { - cs[i] = canonicalizeServiceScope(scope) - } - return cs -} - -func stringScopeHashcode(v interface{}) int { - v = canonicalizeServiceScope(v.(string)) - return service_scope_schema.HashString(v) -} - -type ServiceUsageOperationWaiter struct { - Config *Config - UserAgent string - Project string - retryCount int - CommonOperationWaiter -} - -func (w *ServiceUsageOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, service_usage_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := service_usage_operation_fmt.Sprintf("https://serviceusage.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func (w *ServiceUsageOperationWaiter) IsRetryable(err error) bool { - - maxRetries := 3 - if gerr, ok := err.(*service_usage_operation_googleapi.Error); ok && gerr.Code == 403 { - if w.retryCount < maxRetries && service_usage_operation_strings.Contains(gerr.Body, "has not been used in project") { - w.retryCount += 1 - 
service_usage_operation_log.Printf("[DEBUG] retrying on 403 %v more times", w.retryCount-maxRetries-1) - return true - } - } - return false -} - -func createServiceUsageWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*ServiceUsageOperationWaiter, error) { - w := &ServiceUsageOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func serviceUsageOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout service_usage_operation_time.Duration) error { - w, err := createServiceUsageWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return service_usage_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func serviceUsageOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout service_usage_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createServiceUsageWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -type ServiceManagementOperationWaiter struct { - Service *serviceman_operation_servicemanagement.APIService - CommonOperationWaiter -} - -func (w *ServiceManagementOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, serviceman_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - return w.Service.Operations.Get(w.Op.Name).Do() -} - -func serviceManagementOperationWaitTime(config *Config, op *serviceman_operation_servicemanagement.Operation, activity, userAgent string, timeout 
serviceman_operation_time.Duration) (serviceman_operation_googleapi.RawMessage, error) { - w := &ServiceManagementOperationWaiter{ - Service: config.NewServiceManClient(userAgent), - } - - if err := w.SetOp(op); err != nil { - return nil, err - } - - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return nil, err - } - return w.Op.Response, nil -} - -const ( - batchKeyTmplServiceUsageEnableServices = "project/%s/services:batchEnable" - batchKeyTmplServiceUsageListServices = "project/%s/services" -) - -func BatchRequestEnableService(service string, project string, d *serviceusage_batching_schema.ResourceData, config *Config) error { - - if altName, ok := renamedServicesByOldAndNewServiceNames[service]; ok { - return tryEnableRenamedService(service, altName, project, d, config) - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - req := &BatchRequest{ - ResourceName: project, - Body: []string{service}, - CombineF: combineServiceUsageServicesBatches, - SendF: sendBatchFuncEnableServices(config, userAgent, billingProject, d.Timeout(serviceusage_batching_schema.TimeoutCreate)), - DebugId: serviceusage_batching_fmt.Sprintf("Enable Project Service %q for project %q", service, project), - } - - _, err = config.requestBatcherServiceUsage.SendRequestWithTimeout( - serviceusage_batching_fmt.Sprintf(batchKeyTmplServiceUsageEnableServices, project), - req, - d.Timeout(serviceusage_batching_schema.TimeoutCreate)) - return err -} - -func tryEnableRenamedService(service, altName string, project string, d *serviceusage_batching_schema.ResourceData, config *Config) error { - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return err - } - - serviceusage_batching_log.Printf("[DEBUG] found renamed service %s (with alternate name %s)", service, 
altName) - - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - serviceusage_batching_log.Printf("[DEBUG] attempting enabling service with user-specified name %s", service) - err = enableServiceUsageProjectServices([]string{service}, project, billingProject, userAgent, config, 1*serviceusage_batching_time.Minute) - if err != nil { - serviceusage_batching_log.Printf("[DEBUG] saw error %s. attempting alternate name %v", err, altName) - err2 := enableServiceUsageProjectServices([]string{altName}, project, billingProject, userAgent, config, 1*serviceusage_batching_time.Minute) - if err2 != nil { - return serviceusage_batching_fmt.Errorf("Saw 2 subsequent errors attempting to enable a renamed service: %s / %s", err, err2) - } - } - return nil -} - -func BatchRequestReadServices(project string, d *serviceusage_batching_schema.ResourceData, config *Config) (interface{}, error) { - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - billingProject := project - - if bp, err := getBillingProject(d, config); err == nil { - billingProject = bp - } - - req := &BatchRequest{ - ResourceName: project, - Body: nil, - - CombineF: func(body interface{}, toAdd interface{}) (interface{}, error) { return nil, nil }, - SendF: sendListServices(config, billingProject, userAgent, d.Timeout(serviceusage_batching_schema.TimeoutRead)), - DebugId: serviceusage_batching_fmt.Sprintf("List Project Services %s", project), - } - - return config.requestBatcherServiceUsage.SendRequestWithTimeout( - serviceusage_batching_fmt.Sprintf(batchKeyTmplServiceUsageListServices, project), - req, - d.Timeout(serviceusage_batching_schema.TimeoutRead)) -} - -func combineServiceUsageServicesBatches(srvsRaw interface{}, toAddRaw interface{}) (interface{}, error) { - srvs, ok := srvsRaw.([]string) - if !ok { - return nil, serviceusage_batching_fmt.Errorf("Expected batch body type to be []string, 
got %v. This is a provider error.", srvsRaw) - } - toAdd, ok := toAddRaw.([]string) - if !ok { - return nil, serviceusage_batching_fmt.Errorf("Expected new request body type to be []string, got %v. This is a provider error.", toAdd) - } - - return append(srvs, toAdd...), nil -} - -func sendBatchFuncEnableServices(config *Config, userAgent, billingProject string, timeout serviceusage_batching_time.Duration) BatcherSendFunc { - return func(project string, toEnableRaw interface{}) (interface{}, error) { - toEnable, ok := toEnableRaw.([]string) - if !ok { - return nil, serviceusage_batching_fmt.Errorf("Expected batch body type to be []string, got %v. This is a provider error.", toEnableRaw) - } - return nil, enableServiceUsageProjectServices(toEnable, project, billingProject, userAgent, config, timeout) - } -} - -func sendListServices(config *Config, billingProject, userAgent string, timeout serviceusage_batching_time.Duration) BatcherSendFunc { - return func(project string, _ interface{}) (interface{}, error) { - return listCurrentlyEnabledServices(project, billingProject, userAgent, config, timeout) - } -} - -func serviceUsageOperationWait(config *Config, op *serviceusage_operation_serviceusage.Operation, project, activity, userAgent string, timeout serviceusage_operation_time.Duration) error { - - b, err := op.MarshalJSON() - if err != nil { - return err - } - var m map[string]interface{} - if err := serviceusage_operation_json.Unmarshal(b, &m); err != nil { - return err - } - return serviceUsageOperationWaitTime(config, m, project, activity, userAgent, timeout) -} - -func handleServiceUsageRetryableError(err error) error { - if err == nil { - return nil - } - if gerr, ok := err.(*serviceusage_operation_googleapi.Error); ok { - if (gerr.Code == 400 || gerr.Code == 412) && gerr.Message == "Precondition check failed." 
{ - return &serviceusage_operation_googleapi.Error{ - Code: 503, - Message: "api returned \"precondition failed\" while enabling service", - } - } - } - return err -} - -func expandSourceRepoRepositoryPubsubConfigsTopic(v interface{}, d TerraformResourceData, config *Config) (string, error) { - - ok, err := source_repo_utils_regexp.MatchString(PubsubTopicRegex, v.(string)) - if err != nil { - return "", err - } - - if ok { - return v.(string), nil - } - - project, err := getProject(d, config) - if err != nil { - return "", err - } - - return getComputedTopicName(project, v.(string)), err -} - -type SpannerOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *SpannerOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, spanner_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := spanner_operation_fmt.Sprintf("https://spanner.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createSpannerWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*SpannerOperationWaiter, error) { - w := &SpannerOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func spannerOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout spanner_operation_time.Duration) error { - w, err := createSpannerWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return spanner_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func spannerOperationWaitTime(config *Config, op 
map[string]interface{}, project, activity, userAgent string, timeout spanner_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createSpannerWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -func transformSQLDatabaseReadError(err error) error { - if gErr, ok := sql_utils_errwrap.GetType(err, &sql_utils_googleapi.Error{}).(*sql_utils_googleapi.Error); ok { - if gErr.Code == 400 && sql_utils_strings.Contains(gErr.Message, "Invalid request since instance is not running") { - - gErr.Code = 404 - } - - sql_utils_log.Printf("[DEBUG] Transformed SQLDatabase error") - return gErr - } - - return err -} - -type SqlAdminOperationWaiter struct { - Service *sqladmin_operation_sqladminsqladmin.Service - Op *sqladmin_operation_sqladminsqladmin.Operation - Project string -} - -func (w *SqlAdminOperationWaiter) State() string { - if w == nil { - return "Operation Waiter is nil!" - } - - if w.Op == nil { - return "Operation is nil!" - } - - return w.Op.Status -} - -func (w *SqlAdminOperationWaiter) Error() error { - if w != nil && w.Op != nil && w.Op.Error != nil { - return SqlAdminOperationError(*w.Op.Error) - } - return nil -} - -func (w *SqlAdminOperationWaiter) IsRetryable(error) bool { - return false -} - -func (w *SqlAdminOperationWaiter) SetOp(op interface{}) error { - if op == nil { - - sqladmin_operation_log.Printf("[DEBUG] attempted to set nil op") - } - - sqlOp, ok := op.(*sqladmin_operation_sqladminsqladmin.Operation) - w.Op = sqlOp - if !ok { - return sqladmin_operation_fmt.Errorf("Unable to set operation. 
Bad type!") - } - - return nil -} - -func (w *SqlAdminOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, sqladmin_operation_fmt.Errorf("Cannot query operation, waiter is unset or nil.") - } - - if w.Op == nil { - return nil, sqladmin_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - if w.Service == nil { - return nil, sqladmin_operation_fmt.Errorf("Cannot query operation, service is nil.") - } - - var op interface{} - var err error - err = retryTimeDuration( - func() error { - op, err = w.Service.Operations.Get(w.Project, w.Op.Name).Do() - return err - }, - - DefaultRequestTimeout, - ) - - return op, err -} - -func (w *SqlAdminOperationWaiter) OpName() string { - if w == nil { - return "" - } - - if w.Op == nil { - return "" - } - - return w.Op.Name -} - -func (w *SqlAdminOperationWaiter) PendingStates() []string { - return []string{"PENDING", "RUNNING"} -} - -func (w *SqlAdminOperationWaiter) TargetStates() []string { - return []string{"DONE"} -} - -func sqlAdminOperationWaitTime(config *Config, res interface{}, project, activity, userAgent string, timeout sqladmin_operation_time.Duration) error { - op := &sqladmin_operation_sqladminsqladmin.Operation{} - err := Convert(res, op) - if err != nil { - return err - } - - w := &SqlAdminOperationWaiter{ - Service: config.NewSqlAdminClient(userAgent), - Op: op, - Project: project, - } - if err := w.SetOp(op); err != nil { - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -type SqlAdminOperationError sqladmin_operation_sqladminsqladmin.OperationErrors - -func (e SqlAdminOperationError) Error() string { - var buf sqladmin_operation_bytes.Buffer - - for _, err := range e.Errors { - buf.WriteString(err.Message + "\n") - } - - return buf.String() -} - -type StateType int - -const ( - UndefinedState StateType = iota - - RestingState - - ErrorState - - ReadyState -) - -type RestingStates map[string]StateType - -func 
resourceComputePerInstanceConfigPollRead(d *stateful_mig_polling_schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/listPerInstanceConfigs") - if err != nil { - return nil, err - } - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - res, err := sendRequest(config, "POST", project, url, userAgent, nil) - if err != nil { - return res, err - } - res, err = flattenNestedComputePerInstanceConfig(d, meta, res) - if err != nil { - return nil, err - } - - return res, nil - } -} - -func resourceComputeRegionPerInstanceConfigPollRead(d *stateful_mig_polling_schema.ResourceData, meta interface{}) PollReadFunc { - return func() (map[string]interface{}, error) { - config := meta.(*Config) - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return nil, err - } - - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/listPerInstanceConfigs") - if err != nil { - return nil, err - } - - project, err := getProject(d, config) - if err != nil { - return nil, err - } - res, err := sendRequest(config, "POST", project, url, userAgent, nil) - if err != nil { - return res, err - } - res, err = flattenNestedComputeRegionPerInstanceConfig(d, meta, res) - if err != nil { - return nil, err - } - - return res, nil - } -} - -func findInstanceName(d *stateful_mig_polling_schema.ResourceData, config *Config) (string, error) { - url, err := replaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/listManagedInstances") - if err != nil 
{ - return "", err - } - - userAgent, err := generateUserAgentString(d, config.userAgent) - if err != nil { - return "", err - } - - project, err := getProject(d, config) - if err != nil { - return "", err - } - instanceNameToFind := stateful_mig_polling_fmt.Sprintf("/%s", d.Get("name").(string)) - - token := "" - for paginate := true; paginate; { - urlWithToken := "" - if token != "" { - urlWithToken = stateful_mig_polling_fmt.Sprintf("%s?maxResults=1&pageToken=%s", url, token) - } else { - urlWithToken = stateful_mig_polling_fmt.Sprintf("%s?maxResults=1", url) - } - res, err := sendRequest(config, "POST", project, urlWithToken, userAgent, nil) - if err != nil { - return "", err - } - - managedInstances, ok := res["managedInstances"] - if !ok { - return "", stateful_mig_polling_fmt.Errorf("Failed to parse response for listManagedInstances for %s", d.Id()) - } - - managedInstancesArr := managedInstances.([]interface{}) - for _, managedInstanceRaw := range managedInstancesArr { - instance := managedInstanceRaw.(map[string]interface{}) - name, ok := instance["instance"] - if !ok { - return "", stateful_mig_polling_fmt.Errorf("Failed to read instance name for managed instance: %#v", instance) - } - if stateful_mig_polling_strings.HasSuffix(name.(string), instanceNameToFind) { - return name.(string), nil - } - } - - tokenRaw, paginate := res["nextPageToken"] - if paginate { - token = tokenRaw.(string) - } - } - - return "", stateful_mig_polling_fmt.Errorf("Failed to find managed instance with name: %s", instanceNameToFind) -} - -func PollCheckInstanceConfigDeleted(resp map[string]interface{}, respErr error) PollResult { - if respErr != nil { - return ErrorPollResult(respErr) - } - - if resp == nil { - - return SuccessPollResult() - } - - status := resp["status"].(string) - if status == "DELETING" { - return PendingStatusPollResult("Still deleting") - } - return ErrorPollResult(stateful_mig_polling_fmt.Errorf("Expected PerInstanceConfig to be deleting but status is: 
%s", status)) -} - -type TagsOperationWaiter struct { - Config *Config - UserAgent string - CommonOperationWaiter -} - -func (w *TagsOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, tags_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := tags_operation_fmt.Sprintf("https://cloudresourcemanager.googleapis.com/v3/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", "", url, w.UserAgent, nil) -} - -func createTagsWaiter(config *Config, op map[string]interface{}, activity, userAgent string) (*TagsOperationWaiter, error) { - w := &TagsOperationWaiter{ - Config: config, - UserAgent: userAgent, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func tagsOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, activity, userAgent string, timeout tags_operation_time.Duration) error { - w, err := createTagsWaiter(config, op, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return tags_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func tagsOperationWaitTime(config *Config, op map[string]interface{}, activity, userAgent string, timeout tags_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createTagsWaiter(config, op, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -type ResourceDataMock struct { - FieldsInSchema map[string]interface{} - FieldsWithHasChange []string - id string -} - -func (d *ResourceDataMock) HasChange(key string) bool { - exists := false - for _, val := range d.FieldsWithHasChange { - if key == val { - exists = true - } - } - - return exists -} - -func (d *ResourceDataMock) Get(key 
string) interface{} { - v, _ := d.GetOk(key) - return v -} - -func (d *ResourceDataMock) GetOk(key string) (interface{}, bool) { - v, ok := d.GetOkExists(key) - if ok && !isEmptyValue(test_utils_reflect.ValueOf(v)) { - return v, true - } else { - return v, false - } -} - -func (d *ResourceDataMock) GetOkExists(key string) (interface{}, bool) { - for k, v := range d.FieldsInSchema { - if key == k { - return v, true - } - } - - return nil, false -} - -func (d *ResourceDataMock) Set(key string, value interface{}) error { - d.FieldsInSchema[key] = value - return nil -} - -func (d *ResourceDataMock) SetId(v string) { - d.id = v -} - -func (d *ResourceDataMock) Id() string { - return d.id -} - -func (d *ResourceDataMock) GetProviderMeta(dst interface{}) error { - return nil -} - -func (d *ResourceDataMock) Timeout(key string) test_utils_time.Duration { - return test_utils_time.Duration(1) -} - -type ResourceDiffMock struct { - Before map[string]interface{} - After map[string]interface{} - Cleared map[string]interface{} - IsForceNew bool -} - -func (d *ResourceDiffMock) GetChange(key string) (interface{}, interface{}) { - return d.Before[key], d.After[key] -} - -func (d *ResourceDiffMock) HasChange(key string) bool { - old, new := d.GetChange(key) - return old != new -} - -func (d *ResourceDiffMock) Get(key string) interface{} { - return d.After[key] -} - -func (d *ResourceDiffMock) GetOk(key string) (interface{}, bool) { - v, ok := d.After[key] - return v, ok -} - -func (d *ResourceDiffMock) Clear(key string) error { - if d.Cleared == nil { - d.Cleared = map[string]interface{}{} - } - d.Cleared[key] = true - return nil -} - -func (d *ResourceDiffMock) ForceNew(key string) error { - d.IsForceNew = true - return nil -} - -func checkDataSourceStateMatchesResourceState(dataSourceName, resourceName string) func(*test_utils_terraform.State) error { - return checkDataSourceStateMatchesResourceStateWithIgnores(dataSourceName, resourceName, map[string]struct{}{}) -} - -func 
checkDataSourceStateMatchesResourceStateWithIgnores(dataSourceName, resourceName string, ignoreFields map[string]struct{}) func(*test_utils_terraform.State) error { - return func(s *test_utils_terraform.State) error { - ds, ok := s.RootModule().Resources[dataSourceName] - if !ok { - return test_utils_fmt.Errorf("can't find %s in state", dataSourceName) - } - - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return test_utils_fmt.Errorf("can't find %s in state", resourceName) - } - - dsAttr := ds.Primary.Attributes - rsAttr := rs.Primary.Attributes - - errMsg := "" - - for k := range rsAttr { - if _, ok := ignoreFields[k]; ok { - continue - } - if k == "%" { - continue - } - if dsAttr[k] != rsAttr[k] { - - if k[len(k)-1:] == "#" && (dsAttr[k] == "" || dsAttr[k] == "0") && (rsAttr[k] == "" || rsAttr[k] == "0") { - continue - } - errMsg += test_utils_fmt.Sprintf("%s is %s; want %s\n", k, dsAttr[k], rsAttr[k]) - } - } - - if errMsg != "" { - return test_utils_errors.New(errMsg) - } - - return nil - } -} - -func oldValue(old, new interface{}) interface{} { - return old -} - -func handleNotFoundDCLError(err error, d *tpgtools_utils_schema.ResourceData, resourceName string) error { - if tpgtools_utils_dcldcl.IsNotFound(err) { - tpgtools_utils_log.Printf("[WARN] Removing %s because it's gone", resourceName) - - d.SetId("") - return nil - } - - return tpgtools_utils_errwrap.Wrapf( - tpgtools_utils_fmt.Sprintf("Error when reading or editing %s: {{err}}", resourceName), err) -} - -type TPUOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *TPUOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, tpu_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := tpu_operation_fmt.Sprintf("https://tpu.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func 
createTPUWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*TPUOperationWaiter, error) { - w := &TPUOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func tpuOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout tpu_operation_time.Duration) error { - w, err := createTPUWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return tpu_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func tpuOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout tpu_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createTPUWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -var DefaultRequestTimeout = 5 * transport_time.Minute - -func isEmptyValue(v transport_reflect.Value) bool { - if !v.IsValid() { - return true - } - - switch v.Kind() { - case transport_reflect.Array, transport_reflect.Map, transport_reflect.Slice, transport_reflect.String: - return v.Len() == 0 - case transport_reflect.Bool: - return !v.Bool() - case transport_reflect.Int, transport_reflect.Int8, transport_reflect.Int16, transport_reflect.Int32, transport_reflect.Int64: - return v.Int() == 0 - case transport_reflect.Uint, transport_reflect.Uint8, transport_reflect.Uint16, transport_reflect.Uint32, transport_reflect.Uint64, transport_reflect.Uintptr: - return v.Uint() == 0 - case transport_reflect.Float32, transport_reflect.Float64: - return v.Float() == 0 - case 
transport_reflect.Interface, transport_reflect.Ptr: - return v.IsNil() - } - return false -} - -func sendRequest(config *Config, method, project, rawurl, userAgent string, body map[string]interface{}, errorRetryPredicates ...RetryErrorPredicateFunc) (map[string]interface{}, error) { - return sendRequestWithTimeout(config, method, project, rawurl, userAgent, body, DefaultRequestTimeout, errorRetryPredicates...) -} - -func sendRequestWithTimeout(config *Config, method, project, rawurl, userAgent string, body map[string]interface{}, timeout transport_time.Duration, errorRetryPredicates ...RetryErrorPredicateFunc) (map[string]interface{}, error) { - reqHeaders := make(transport_http.Header) - reqHeaders.Set("User-Agent", userAgent) - reqHeaders.Set("Content-Type", "application/json") - - if config.UserProjectOverride && project != "" { - - reqHeaders.Set("X-Goog-User-Project", project) - } - - if timeout == 0 { - timeout = transport_time.Duration(1) * transport_time.Hour - } - - var res *transport_http.Response - err := retryTimeDuration( - func() error { - var buf transport_bytes.Buffer - if body != nil { - err := transport_json.NewEncoder(&buf).Encode(body) - if err != nil { - return err - } - } - - u, err := addQueryParams(rawurl, map[string]string{"alt": "json"}) - if err != nil { - return err - } - req, err := transport_http.NewRequest(method, u, &buf) - if err != nil { - return err - } - - req.Header = reqHeaders - res, err = config.client.Do(req) - if err != nil { - return err - } - - if err := transport_googleapi.CheckResponse(res); err != nil { - transport_googleapi.CloseBody(res) - return err - } - - return nil - }, - timeout, - errorRetryPredicates..., - ) - if err != nil { - return nil, err - } - - if res == nil { - return nil, transport_fmt.Errorf("Unable to parse server response. 
This is most likely a terraform problem, please file a bug at https://github.com/hashicorp/terraform-provider-google/issues.") - } - - defer transport_googleapi.CloseBody(res) - - if res.StatusCode == 204 { - return nil, nil - } - result := make(map[string]interface{}) - if err := transport_json.NewDecoder(res.Body).Decode(&result); err != nil { - return nil, err - } - - return result, nil -} - -func addQueryParams(rawurl string, params map[string]string) (string, error) { - u, err := transport_url.Parse(rawurl) - if err != nil { - return "", err - } - q := u.Query() - for k, v := range params { - q.Set(k, v) - } - u.RawQuery = q.Encode() - return u.String(), nil -} - -func replaceVars(d TerraformResourceData, config *Config, linkTmpl string) (string, error) { - return replaceVarsRecursive(d, config, linkTmpl, false, 0) -} - -func replaceVarsForId(d TerraformResourceData, config *Config, linkTmpl string) (string, error) { - return replaceVarsRecursive(d, config, linkTmpl, true, 0) -} - -func replaceVarsRecursive(d TerraformResourceData, config *Config, linkTmpl string, shorten bool, depth int) (string, error) { - if depth > 10 { - return "", transport_errors.New("Recursive substitution detcted") - } - - re := transport_regexp.MustCompile("{{([%[:word:]]+)}}") - f, err := buildReplacementFunc(re, d, config, linkTmpl, shorten) - if err != nil { - return "", err - } - final := re.ReplaceAllStringFunc(linkTmpl, f) - - if re.Match([]byte(final)) { - return replaceVarsRecursive(d, config, final, shorten, depth+1) - } - - return final, nil -} - -func buildReplacementFunc(re *transport_regexp.Regexp, d TerraformResourceData, config *Config, linkTmpl string, shorten bool) (func(string) string, error) { - var project, projectID, region, zone string - var err error - - if transport_strings.Contains(linkTmpl, "{{project}}") { - project, err = getProject(d, config) - if err != nil { - return nil, err - } - } - - if transport_strings.Contains(linkTmpl, 
"{{project_id_or_project}}") { - v, ok := d.GetOkExists("project_id") - if ok { - projectID, _ = v.(string) - } - if projectID == "" { - project, err = getProject(d, config) - } - if err != nil { - return nil, err - } - } - - if transport_strings.Contains(linkTmpl, "{{region}}") { - region, err = getRegion(d, config) - if err != nil { - return nil, err - } - } - - if transport_strings.Contains(linkTmpl, "{{zone}}") { - zone, err = getZone(d, config) - if err != nil { - return nil, err - } - } - - f := func(s string) string { - - m := re.FindStringSubmatch(s)[1] - if m == "project" { - return project - } - if m == "project_id_or_project" { - if projectID != "" { - return projectID - } - return project - } - if m == "region" { - return region - } - if m == "zone" { - return zone - } - if string(m[0]) == "%" { - v, ok := d.GetOkExists(m[1:]) - if ok { - return transport_url.PathEscape(transport_fmt.Sprintf("%v", v)) - } - } else { - v, ok := d.GetOkExists(m) - if ok { - if shorten { - return GetResourceNameFromSelfLink(transport_fmt.Sprintf("%v", v)) - } else { - return transport_fmt.Sprintf("%v", v) - } - } - } - - if config != nil { - - if f := transport_reflect.Indirect(transport_reflect.ValueOf(config)).FieldByName(m); f.IsValid() { - return f.String() - } - } - return "" - } - - return f, nil -} - -type TerraformResourceDataChange interface { - GetChange(string) (interface{}, interface{}) -} - -type TerraformResourceData interface { - HasChange(string) bool - GetOkExists(string) (interface{}, bool) - GetOk(string) (interface{}, bool) - Get(string) interface{} - Set(string, interface{}) error - SetId(string) - Id() string - GetProviderMeta(interface{}) error - Timeout(key string) utils_time.Duration -} - -type TerraformResourceDiff interface { - HasChange(string) bool - GetChange(string) (interface{}, interface{}) - Get(string) interface{} - GetOk(string) (interface{}, bool) - Clear(string) error - ForceNew(string) error -} - -func getRegionFromZone(zone string) 
string { - if zone != "" && len(zone) > 2 { - region := zone[:len(zone)-2] - return region - } - return "" -} - -func getRegion(d TerraformResourceData, config *Config) (string, error) { - return getRegionFromSchema("region", "zone", d, config) -} - -func getProject(d TerraformResourceData, config *Config) (string, error) { - return getProjectFromSchema("project", d, config) -} - -func getBillingProject(d TerraformResourceData, config *Config) (string, error) { - return getBillingProjectFromSchema("billing_project", d, config) -} - -func getProjectFromDiff(d *utils_schema.ResourceDiff, config *Config) (string, error) { - res, ok := d.GetOk("project") - if ok { - return res.(string), nil - } - if config.Project != "" { - return config.Project, nil - } - return "", utils_fmt.Errorf("%s: required field is not set", "project") -} - -func getRouterLockName(region string, router string) string { - return utils_fmt.Sprintf("router/%s/%s", region, router) -} - -func handleNotFoundError(err error, d *utils_schema.ResourceData, resource string) error { - if isGoogleApiErrorWithCode(err, 404) { - utils_log.Printf("[WARN] Removing %s because it's gone", resource) - - d.SetId("") - - return nil - } - - return utils_errwrap.Wrapf( - utils_fmt.Sprintf("Error when reading or editing %s: {{err}}", resource), err) -} - -func isGoogleApiErrorWithCode(err error, errCode int) bool { - gerr, ok := utils_errwrap.GetType(err, &utils_googleapi.Error{}).(*utils_googleapi.Error) - return ok && gerr != nil && gerr.Code == errCode -} - -func isApiNotEnabledError(err error) bool { - gerr, ok := utils_errwrap.GetType(err, &utils_googleapi.Error{}).(*utils_googleapi.Error) - if !ok { - return false - } - if gerr == nil { - return false - } - if gerr.Code != 403 { - return false - } - for _, e := range gerr.Errors { - if e.Reason == "accessNotConfigured" { - return true - } - } - return false -} - -func isFailedPreconditionError(err error) bool { - gerr, ok := utils_errwrap.GetType(err, 
&utils_googleapi.Error{}).(*utils_googleapi.Error) - if !ok { - return false - } - if gerr == nil { - return false - } - if gerr.Code != 400 { - return false - } - for _, e := range gerr.Errors { - if e.Reason == "failedPrecondition" { - return true - } - } - return false -} - -func isConflictError(err error) bool { - if e, ok := err.(*utils_googleapi.Error); ok && (e.Code == 409 || e.Code == 412) { - return true - } else if !ok && utils_errwrap.ContainsType(err, &utils_googleapi.Error{}) { - e := utils_errwrap.GetType(err, &utils_googleapi.Error{}).(*utils_googleapi.Error) - if e.Code == 409 || e.Code == 412 { - return true - } - } - return false -} - -func expandLabels(d TerraformResourceData) map[string]string { - return expandStringMap(d, "labels") -} - -func expandEnvironmentVariables(d *utils_schema.ResourceData) map[string]string { - return expandStringMap(d, "environment_variables") -} - -func expandBuildEnvironmentVariables(d *utils_schema.ResourceData) map[string]string { - return expandStringMap(d, "build_environment_variables") -} - -func expandStringMap(d TerraformResourceData, key string) map[string]string { - v, ok := d.GetOk(key) - - if !ok { - return map[string]string{} - } - - return convertStringMap(v.(map[string]interface{})) -} - -func convertStringMap(v map[string]interface{}) map[string]string { - m := make(map[string]string) - for k, val := range v { - m[k] = val.(string) - } - return m -} - -func convertStringArr(ifaceArr []interface{}) []string { - return convertAndMapStringArr(ifaceArr, func(s string) string { return s }) -} - -func convertAndMapStringArr(ifaceArr []interface{}, f func(string) string) []string { - var arr []string - for _, v := range ifaceArr { - if v == nil { - continue - } - arr = append(arr, f(v.(string))) - } - return arr -} - -func mapStringArr(original []string, f func(string) string) []string { - var arr []string - for _, v := range original { - arr = append(arr, f(v)) - } - return arr -} - -func 
convertStringArrToInterface(strs []string) []interface{} { - arr := make([]interface{}, len(strs)) - for i, str := range strs { - arr[i] = str - } - return arr -} - -func convertStringSet(set *utils_schema.Set) []string { - s := make([]string, 0, set.Len()) - for _, v := range set.List() { - s = append(s, v.(string)) - } - utils_sort.Strings(s) - - return s -} - -func golangSetFromStringSlice(strings []string) map[string]struct{} { - set := map[string]struct{}{} - for _, v := range strings { - set[v] = struct{}{} - } - - return set -} - -func stringSliceFromGolangSet(sset map[string]struct{}) []string { - ls := make([]string, 0, len(sset)) - for s := range sset { - ls = append(ls, s) - } - utils_sort.Strings(ls) - - return ls -} - -func reverseStringMap(m map[string]string) map[string]string { - o := map[string]string{} - for k, v := range m { - o[v] = k - } - return o -} - -func mergeStringMaps(a, b map[string]string) map[string]string { - merged := make(map[string]string) - - for k, v := range a { - merged[k] = v - } - - for k, v := range b { - merged[k] = v - } - - return merged -} - -func mergeSchemas(a, b map[string]*utils_schema.Schema) map[string]*utils_schema.Schema { - merged := make(map[string]*utils_schema.Schema) - - for k, v := range a { - merged[k] = v - } - - for k, v := range b { - merged[k] = v - } - - return merged -} - -func mergeResourceMaps(ms ...map[string]*utils_schema.Resource) (map[string]*utils_schema.Resource, error) { - merged := make(map[string]*utils_schema.Resource) - duplicates := []string{} - - for _, m := range ms { - for k, v := range m { - if _, ok := merged[k]; ok { - duplicates = append(duplicates, k) - } - - merged[k] = v - } - } - - var err error - if len(duplicates) > 0 { - err = utils_fmt.Errorf("saw duplicates in mergeResourceMaps: %v", duplicates) - } - - return merged, err -} - -func extractFirstMapConfig(m []interface{}) map[string]interface{} { - if len(m) == 0 { - return map[string]interface{}{} - } - - return 
m[0].(map[string]interface{}) -} - -func lockedCall(lockKey string, f func() error) error { - mutexKV.Lock(lockKey) - defer mutexKV.Unlock(lockKey) - - return f() -} - -func Nprintf(format string, params map[string]interface{}) string { - for key, val := range params { - format = utils_strings.Replace(format, "%{"+key+"}", utils_fmt.Sprintf("%v", val), -1) - } - return format -} - -func serviceAccountFQN(serviceAccount string, d TerraformResourceData, config *Config) (string, error) { - - if utils_strings.HasPrefix(serviceAccount, "projects/") { - return serviceAccount, nil - } - - if utils_strings.Contains(serviceAccount, "@") { - return "projects/-/serviceAccounts/" + serviceAccount, nil - } - - project, err := getProject(d, config) - if err != nil { - return "", err - } - - return utils_fmt.Sprintf("projects/-/serviceAccounts/%s@%s.iam.gserviceaccount.com", serviceAccount, project), nil -} - -func paginatedListRequest(project, baseUrl, userAgent string, config *Config, flattener func(map[string]interface{}) []interface{}) ([]interface{}, error) { - res, err := sendRequest(config, "GET", project, baseUrl, userAgent, nil) - if err != nil { - return nil, err - } - - ls := flattener(res) - pageToken, ok := res["pageToken"] - for ok { - if pageToken.(string) == "" { - break - } - url := utils_fmt.Sprintf("%s?pageToken=%s", baseUrl, pageToken.(string)) - res, err = sendRequest(config, "GET", project, url, userAgent, nil) - if err != nil { - return nil, err - } - ls = append(ls, flattener(res)) - pageToken, ok = res["pageToken"] - } - - return ls, nil -} - -func getInterconnectAttachmentLink(config *Config, project, region, ic, userAgent string) (string, error) { - if !utils_strings.Contains(ic, "/") { - icData, err := config.NewComputeClient(userAgent).InterconnectAttachments.Get( - project, region, ic).Do() - if err != nil { - return "", utils_fmt.Errorf("Error reading interconnect attachment: %s", err) - } - ic = icData.SelfLink - } - - return ic, nil -} - -func 
calcAddRemove(from []string, to []string) (add, remove []string) { - add = make([]string, 0) - remove = make([]string, 0) - for _, u := range to { - found := false - for _, v := range from { - if compareSelfLinkOrResourceName("", v, u, nil) { - found = true - break - } - } - if !found { - add = append(add, u) - } - } - for _, u := range from { - found := false - for _, v := range to { - if compareSelfLinkOrResourceName("", u, v, nil) { - found = true - break - } - } - if !found { - remove = append(remove, u) - } - } - return add, remove -} - -func stringInSlice(arr []string, str string) bool { - for _, i := range arr { - if i == str { - return true - } - } - - return false -} - -func migrateStateNoop(v int, is *utils_terraform.InstanceState, meta interface{}) (*utils_terraform.InstanceState, error) { - return is, nil -} - -func expandString(v interface{}, d TerraformResourceData, config *Config) (string, error) { - return v.(string), nil -} - -func changeFieldSchemaToForceNew(sch *utils_schema.Schema) { - sch.ForceNew = true - switch sch.Type { - case utils_schema.TypeList: - case utils_schema.TypeSet: - if nestedR, ok := sch.Elem.(*utils_schema.Resource); ok { - for _, nestedSch := range nestedR.Schema { - changeFieldSchemaToForceNew(nestedSch) - } - } - } -} - -func generateUserAgentString(d TerraformResourceData, currentUserAgent string) (string, error) { - var m providerMeta - - err := d.GetProviderMeta(&m) - if err != nil { - return currentUserAgent, err - } - - if m.ModuleName != "" { - return utils_strings.Join([]string{currentUserAgent, m.ModuleName}, " "), nil - } - - return currentUserAgent, nil -} - -func SnakeToPascalCase(s string) string { - split := utils_strings.Split(s, "_") - for i := range split { - split[i] = utils_strings.Title(split[i]) - } - return utils_strings.Join(split, "") -} - -func multiEnvSearch(ks []string) string { - for _, k := range ks { - if v := utils_os.Getenv(k); v != "" { - return v - } - } - return "" -} - -func 
GetCurrentUserEmail(config *Config, userAgent string) (string, error) { - - res, err := sendRequest(config, "GET", "", "https://openidconnect.googleapis.com/v1/userinfo", userAgent, nil) - if err != nil { - return "", utils_fmt.Errorf("error retrieving userinfo for your provider credentials. have you enabled the 'https://www.googleapis.com/auth/userinfo.email' scope? error: %s", err) - } - return res["email"].(string), nil -} - -func checkStringMap(v interface{}) map[string]string { - m, ok := v.(map[string]string) - if ok { - return m - } - return convertStringMap(v.(map[string]interface{})) -} - -const ( - ProjectRegex = "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))" - ProjectRegexWildCard = "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?)|-)" - RegionRegex = "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - SubnetworkRegex = "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" 
- - SubnetworkLinkRegex = "projects/(" + ProjectRegex + ")/regions/(" + RegionRegex + ")/subnetworks/(" + SubnetworkRegex + ")$" - - RFC1035NameTemplate = "[a-z](?:[-a-z0-9]{%d,%d}[a-z0-9])" - CloudIoTIdRegex = "^[a-zA-Z][-a-zA-Z0-9._+~%]{2,254}$" - - ComputeServiceAccountNameRegex = "[0-9]{1,20}-compute@developer.gserviceaccount.com" - - IAMCustomRoleIDRegex = "^[a-zA-Z0-9_\\.]{3,64}$" - - ADDomainNameRegex = "^[a-z][a-z0-9-]{0,14}\\.[a-z0-9-\\.]*[a-z]+[a-z0-9]*$" -) - -var ( - ServiceAccountNameRegex = validation_fmt.Sprintf(RFC1035NameTemplate, 4, 28) - - ServiceAccountLinkRegexPrefix = "projects/" + ProjectRegexWildCard + "/serviceAccounts/" - PossibleServiceAccountNames = []string{ - ServiceDefaultAccountNameRegex, - ComputeServiceAccountNameRegex, - CreatedServiceAccountNameRegex, - } - ServiceAccountLinkRegex = ServiceAccountLinkRegexPrefix + "(" + validation_strings.Join(PossibleServiceAccountNames, "|") + ")" - - ServiceAccountKeyNameRegex = ServiceAccountLinkRegexPrefix + "(.+)/keys/(.+)" - - CreatedServiceAccountNameRegex = validation_fmt.Sprintf(RFC1035NameTemplate, 4, 28) + "@" + ProjectNameInDNSFormRegex + "\\.iam\\.gserviceaccount\\.com$" - - ServiceDefaultAccountNameRegex = ProjectRegex + "@[a-z]+.gserviceaccount.com$" - - ProjectNameInDNSFormRegex = "[-a-z0-9\\.]{1,63}" - ProjectNameRegex = "^[A-Za-z0-9-'\"\\s!]{4,30}$" - - Rfc6996Asn16BitMin = int64(64512) - Rfc6996Asn16BitMax = int64(65534) - Rfc6996Asn32BitMin = int64(4200000000) - Rfc6996Asn32BitMax = int64(4294967294) - GcpRouterPartnerAsn = int64(16550) -) - -var rfc1918Networks = []string{ - "10.0.0.0/8", - "172.16.0.0/12", - "192.168.0.0/16", -} - -func validateGCPName(v interface{}, k string) (ws []string, errors []error) { - re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$` - return validateRegexp(re)(v, k) -} - -func validateRFC6996Asn(v interface{}, k string) (ws []string, errors []error) { - value := int64(v.(int)) - if !(value >= Rfc6996Asn16BitMin && value <= Rfc6996Asn16BitMax) && - 
!(value >= Rfc6996Asn32BitMin && value <= Rfc6996Asn32BitMax) && - value != GcpRouterPartnerAsn { - errors = append(errors, validation_fmt.Errorf(`expected %q to be a RFC6996-compliant Local ASN: -must be either in the private ASN ranges: [64512..65534], [4200000000..4294967294]; -or be the value of [%d], got %d`, k, GcpRouterPartnerAsn, value)) - } - return -} - -func validateRegexp(re string) validation_schema.SchemaValidateFunc { - return func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !validation_regexp.MustCompile(re).MatchString(value) { - errors = append(errors, validation_fmt.Errorf( - "%q (%q) doesn't match regexp %q", k, value, re)) - } - - return - } -} - -func validateRFC1918Network(min, max int) validation_schema.SchemaValidateFunc { - return func(i interface{}, k string) (s []string, es []error) { - - s, es = validation_validation.IsCIDRNetwork(min, max)(i, k) - if len(es) > 0 { - return - } - - v, _ := i.(string) - ip, _, _ := validation_net.ParseCIDR(v) - for _, c := range rfc1918Networks { - if _, ipnet, _ := validation_net.ParseCIDR(c); ipnet.Contains(ip) { - return - } - } - - es = append(es, validation_fmt.Errorf("expected %q to be an RFC1918-compliant CIDR, got: %s", k, v)) - - return - } -} - -func validateRFC3339Time(v interface{}, k string) (warnings []string, errors []error) { - validation_time := v.(string) - if len(validation_time) != 5 || validation_time[2] != ':' { - errors = append(errors, validation_fmt.Errorf("%q (%q) must be in the format HH:mm (RFC3399)", k, validation_time)) - return - } - if hour, err := validation_strconv.ParseUint(validation_time[:2], 10, 0); err != nil || hour > 23 { - errors = append(errors, validation_fmt.Errorf("%q (%q) does not contain a valid hour (00-23)", k, validation_time)) - return - } - if min, err := validation_strconv.ParseUint(validation_time[3:], 10, 0); err != nil || min > 59 { - errors = append(errors, validation_fmt.Errorf("%q (%q) does not contain a 
valid minute (00-59)", k, validation_time)) - return - } - return -} - -func validateRFC1035Name(min, max int) validation_schema.SchemaValidateFunc { - if min < 2 || max < min { - return func(i interface{}, k string) (s []string, errors []error) { - if min < 2 { - errors = append(errors, validation_fmt.Errorf("min must be at least 2. Got: %d", min)) - } - if max < min { - errors = append(errors, validation_fmt.Errorf("max must greater than min. Got [%d, %d]", min, max)) - } - return - } - } - - return validateRegexp(validation_fmt.Sprintf("^"+RFC1035NameTemplate+"$", min-2, max-2)) -} - -func validateIpCidrRange(v interface{}, k string) (warnings []string, errors []error) { - _, _, err := validation_net.ParseCIDR(v.(string)) - if err != nil { - errors = append(errors, validation_fmt.Errorf("%q is not a valid IP CIDR range: %s", k, err)) - } - return -} - -func validateIAMCustomRoleID(v interface{}, k string) (warnings []string, errors []error) { - value := v.(string) - if !validation_regexp.MustCompile(IAMCustomRoleIDRegex).MatchString(value) { - errors = append(errors, validation_fmt.Errorf( - "%q (%q) doesn't match regexp %q", k, value, IAMCustomRoleIDRegex)) - } - return -} - -func orEmpty(f validation_schema.SchemaValidateFunc) validation_schema.SchemaValidateFunc { - return func(i interface{}, k string) ([]string, []error) { - v, ok := i.(string) - if ok && v == "" { - return nil, nil - } - return f(i, k) - } -} - -func validateProjectID() validation_schema.SchemaValidateFunc { - return func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if !validation_regexp.MustCompile("^" + ProjectRegex + "$").MatchString(value) { - errors = append(errors, validation_fmt.Errorf( - "%q project_id must be 6 to 30 with lowercase letters, digits, hyphens and start with a letter. 
Trailing hyphens are prohibited.", value)) - } - return - } -} - -func validateProjectName() validation_schema.SchemaValidateFunc { - return func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if !validation_regexp.MustCompile(ProjectNameRegex).MatchString(value) { - errors = append(errors, validation_fmt.Errorf( - "%q name must be 4 to 30 characters with lowercase and uppercase letters, numbers, hyphen, single-quote, double-quote, space, and exclamation point.", value)) - } - return - } -} - -func validateDuration() validation_schema.SchemaValidateFunc { - return func(i interface{}, k string) (s []string, es []error) { - v, ok := i.(string) - if !ok { - es = append(es, validation_fmt.Errorf("expected type of %s to be string", k)) - return - } - - if _, err := validation_time.ParseDuration(v); err != nil { - es = append(es, validation_fmt.Errorf("expected %s to be a duration, but parsing gave an error: %s", k, err.Error())) - return - } - - return - } -} - -func validateNonNegativeDuration() validation_schema.SchemaValidateFunc { - return func(i interface{}, k string) (s []string, es []error) { - v, ok := i.(string) - if !ok { - es = append(es, validation_fmt.Errorf("expected type of %s to be string", k)) - return - } - - dur, err := validation_time.ParseDuration(v) - if err != nil { - es = append(es, validation_fmt.Errorf("expected %s to be a duration, but parsing gave an error: %s", k, err.Error())) - return - } - - if dur < 0 { - es = append(es, validation_fmt.Errorf("duration %v must be a non-negative duration", dur)) - return - } - - return - } -} - -func validateIpAddress(i interface{}, val string) ([]string, []error) { - ip := validation_net.ParseIP(i.(string)) - if ip == nil { - return nil, []error{validation_fmt.Errorf("could not parse %q to IP address", val)} - } - return nil, nil -} - -func validateBase64String(i interface{}, val string) ([]string, []error) { - _, err := 
validation_base64.StdEncoding.DecodeString(i.(string)) - if err != nil { - return nil, []error{validation_fmt.Errorf("could not decode %q as a valid base64 value. Please use the terraform base64 functions such as base64encode() or filebase64() to supply a valid base64 string", val)} - } - return nil, nil -} - -func StringNotInSlice(invalid []string, ignoreCase bool) validation_schema.SchemaValidateFunc { - return func(i interface{}, k string) (s []string, es []error) { - v, ok := i.(string) - if !ok { - es = append(es, validation_fmt.Errorf("expected type of %s to be string", k)) - return - } - - for _, str := range invalid { - if v == str || (ignoreCase && validation_strings.ToLower(v) == validation_strings.ToLower(str)) { - es = append(es, validation_fmt.Errorf("expected %s to not match any of %v, got %s", k, invalid, v)) - return - } - } - - return - } -} - -func validateHourlyOnly(val interface{}, key string) (warns []string, errs []error) { - v := val.(string) - parts := validation_strings.Split(v, ":") - if len(parts) != 2 { - errs = append(errs, validation_fmt.Errorf("%q must be in the format HH:00, got: %s", key, v)) - return - } - if parts[1] != "00" { - errs = append(errs, validation_fmt.Errorf("%q does not allow minutes, it must be in the format HH:00, got: %s", key, v)) - } - i, err := validation_strconv.Atoi(parts[0]) - if err != nil { - errs = append(errs, validation_fmt.Errorf("%q cannot be parsed, it must be in the format HH:00, got: %s", key, v)) - } else if i < 0 || i > 23 { - errs = append(errs, validation_fmt.Errorf("%q does not specify a valid hour, it must be in the format HH:00 where HH : [00-23], got: %s", key, v)) - } - return -} - -func validateRFC3339Date(v interface{}, k string) (warnings []string, errors []error) { - _, err := validation_time.Parse(validation_time.RFC3339, v.(string)) - if err != nil { - errors = append(errors, err) - } - return -} - -func validateADDomainName() validation_schema.SchemaValidateFunc { - return func(v 
interface{}, k string) (ws []string, errors []error) { - value := v.(string) - - if len(value) > 64 || !validation_regexp.MustCompile(ADDomainNameRegex).MatchString(value) { - errors = append(errors, validation_fmt.Errorf( - "%q (%q) doesn't match regexp %q, domain_name must be 2 to 64 with lowercase letters, digits, hyphens, dots and start with a letter", k, value, ADDomainNameRegex)) - } - return - } -} - -type VertexAIOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *VertexAIOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, vertex_ai_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - region := GetRegionFromRegionalSelfLink(w.CommonOperationWaiter.Op.Name) - - url := vertex_ai_operation_fmt.Sprintf("https://%s-aiplatform.googleapis.com/v1/%s", region, w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createVertexAIWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*VertexAIOperationWaiter, error) { - w := &VertexAIOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func vertexAIOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout vertex_ai_operation_time.Duration) error { - w, err := createVertexAIWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return vertex_ai_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func vertexAIOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout 
vertex_ai_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createVertexAIWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -type VPCAccessOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *VPCAccessOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, vpc_access_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := vpc_access_operation_fmt.Sprintf("https://vpcaccess.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createVPCAccessWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*VPCAccessOperationWaiter, error) { - w := &VPCAccessOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func vpcAccessOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout vpc_access_operation_time.Duration) error { - w, err := createVPCAccessWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return vpc_access_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func vpcAccessOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout vpc_access_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createVPCAccessWaiter(config, op, project, activity, userAgent) - if err != nil { - - 
return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} - -type WorkflowsOperationWaiter struct { - Config *Config - UserAgent string - Project string - CommonOperationWaiter -} - -func (w *WorkflowsOperationWaiter) QueryOp() (interface{}, error) { - if w == nil { - return nil, workflows_operation_fmt.Errorf("Cannot query operation, it's unset or nil.") - } - - url := workflows_operation_fmt.Sprintf("https://workflows.googleapis.com/v1/%s", w.CommonOperationWaiter.Op.Name) - - return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) -} - -func createWorkflowsWaiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*WorkflowsOperationWaiter, error) { - w := &WorkflowsOperationWaiter{ - Config: config, - UserAgent: userAgent, - Project: project, - } - if err := w.CommonOperationWaiter.SetOp(op); err != nil { - return nil, err - } - return w, nil -} - -func workflowsOperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout workflows_operation_time.Duration) error { - w, err := createWorkflowsWaiter(config, op, project, activity, userAgent) - if err != nil { - return err - } - if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { - return err - } - return workflows_operation_json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) -} - -func workflowsOperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout workflows_operation_time.Duration) error { - if val, ok := op["name"]; !ok || val == "" { - - return nil - } - w, err := createWorkflowsWaiter(config, op, project, activity, userAgent) - if err != nil { - - return err - } - return OperationWait(w, activity, timeout, config.PollInterval) -} diff --git a/contrib/tfcore/generated/tunnel/doc.go b/contrib/tfcore/generated/tunnel/doc.go deleted file mode 100644 index 
75dc22ccaf..0000000000 --- a/contrib/tfcore/generated/tunnel/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package tunnel contains an iap tunnel proxy implementation that exports certain methods used for the provider. -package tunnel diff --git a/contrib/tfcore/generated/tunnel/generate.go b/contrib/tfcore/generated/tunnel/generate.go deleted file mode 100644 index ccdd2ed563..0000000000 --- a/contrib/tfcore/generated/tunnel/generate.go +++ /dev/null @@ -1,7 +0,0 @@ -package tunnel - -// here we copy the iap module from github.com/gartnera/gcloud/compute/iap - -import _ "github.com/gartnera/gcloud/compute/iap" - -//go:generate go run github.com/synapsecns/sanguine/tools/modulecopier --module-path github.com/gartnera/gcloud/compute/iap --package-name tunnel diff --git a/contrib/tfcore/generated/tunnel/tokensource.go b/contrib/tfcore/generated/tunnel/tokensource.go deleted file mode 100644 index 67abf98740..0000000000 --- a/contrib/tfcore/generated/tunnel/tokensource.go +++ /dev/null @@ -1,8 +0,0 @@ -package tunnel - -import "golang.org/x/oauth2" - -// SetTokenSource sets the token source for the tunnel -func (m *TunnelManager) SetTokenSource(source oauth2.TokenSource) { - m.ts = source -} diff --git a/contrib/tfcore/generated/tunnel/tunnel_gen.go b/contrib/tfcore/generated/tunnel/tunnel_gen.go deleted file mode 100644 index 3934a25912..0000000000 --- a/contrib/tfcore/generated/tunnel/tunnel_gen.go +++ /dev/null @@ -1,238 +0,0 @@ -// Code copied from github.com/gartnera/gcloud/compute/iap:/tunnel.go for testing by synapse modulecopier DO NOT EDIT." 
- -package tunnel - -import ( - "bytes" - "context" - "encoding/binary" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/url" - "sync" - - "github.com/gartnera/gcloud/auth" - "github.com/gorilla/websocket" - "golang.org/x/oauth2" - "golang.org/x/sync/errgroup" -) - -const URL_SCHEME = "wss" -const URL_HOST = "tunnel.cloudproxy.app" -const MTLS_URL_HOST = "mtls.tunnel.cloudproxy.app" -const URL_PATH_ROOT = "/v4" -const CONNECT_ENDPOINT = "connect" -const RECONNECT_ENDPOINT = "reconnect" -const SEC_PROTOCOL_SUFFIX = "bearer.relay.tunnel.cloudproxy.app" -const TUNNEL_CLOUDPROXY_ORIGIN = "bot:iap-tunneler" - -const SUBPROTOCOL_NAME = "relay.tunnel.cloudproxy.app" -const SUBPROTOCOL_TAG_LEN = 2 -const SUBPROTOCOL_HEADER_LEN = SUBPROTOCOL_TAG_LEN + 4 -const SUBPROTOCOL_MAX_DATA_FRAME_SIZE = 16384 -const SUBPROTOCOL_TAG_CONNECT_SUCCESS_SID uint16 = 0x0001 -const SUBPROTOCOL_TAG_RECONNECT_SUCCESS_ACK uint16 = 0x0002 -const SUBPROTOCOL_TAG_DATA uint16 = 0x0004 -const SUBPROTOCOL_TAG_ACK uint16 = 0x0007 - -// tunnelAdapter abstracts the iap websocket tunnel to an io.ReadWriteCloser -type tunnelAdapter struct { - conn *websocket.Conn - inbound chan []byte - acks chan uint64 - - outboundLock sync.Mutex - - totalInboundLen uint64 -} - -func newTunnelAdapter(conn *websocket.Conn) *tunnelAdapter { - a := &tunnelAdapter{ - inbound: make(chan []byte), - acks: make(chan uint64), - conn: conn, - } - return a -} - -func (a *tunnelAdapter) inboundAck(len uint64) error { - buf := new(bytes.Buffer) - _ = binary.Write(buf, binary.BigEndian, SUBPROTOCOL_TAG_ACK) - _ = binary.Write(buf, binary.BigEndian, len) - - a.outboundLock.Lock() - defer a.outboundLock.Unlock() - err := a.conn.WriteMessage(websocket.BinaryMessage, buf.Bytes()) - if err != nil { - return fmt.Errorf("unable to write inbound ack msg: %w", err) - } - return nil -} - -func (a *tunnelAdapter) inboundHandler(ctx context.Context) error { - for { - _, msg, err := a.conn.ReadMessage() - if err != nil { - 
return fmt.Errorf("error while reading message: %w", err) - } - subprotocolTag := binary.BigEndian.Uint16(msg[:SUBPROTOCOL_TAG_LEN]) - msg = msg[SUBPROTOCOL_TAG_LEN:] - if subprotocolTag == SUBPROTOCOL_TAG_CONNECT_SUCCESS_SID { - continue - } else if subprotocolTag == SUBPROTOCOL_TAG_ACK { - // ack := binary.BigEndian.Uint64(msg) - continue - } else if subprotocolTag == SUBPROTOCOL_TAG_DATA { - dataLen := binary.BigEndian.Uint32(msg[:4]) - msg = msg[4 : dataLen+4] - a.inbound <- msg - a.totalInboundLen += uint64(len(msg)) - err = a.inboundAck(a.totalInboundLen) - if err != nil { - fmt.Println("inbound ack err: %w", err) - } - } else { - return errors.New("unknown tag") - } - } -} - -func (a *tunnelAdapter) Read(p []byte) (int, error) { - msg := <-a.inbound - len := copy(p, msg) - return len, nil -} - -func (a *tunnelAdapter) Write(p []byte) (int, error) { - for i := 0; i < len(p); i += SUBPROTOCOL_MAX_DATA_FRAME_SIZE { - maxOrEnd := len(p) - if maxOrEnd-i > SUBPROTOCOL_MAX_DATA_FRAME_SIZE { - maxOrEnd = i + SUBPROTOCOL_MAX_DATA_FRAME_SIZE - } - currentLen := maxOrEnd - i - buf := new(bytes.Buffer) - _ = binary.Write(buf, binary.BigEndian, SUBPROTOCOL_TAG_DATA) - _ = binary.Write(buf, binary.BigEndian, uint32(currentLen)) - _, _ = buf.Write(p[i:maxOrEnd]) - - a.outboundLock.Lock() - err := a.conn.WriteMessage(websocket.BinaryMessage, buf.Bytes()) - a.outboundLock.Unlock() - if err != nil { - return 0, fmt.Errorf("unable to write to websocket: %w", err) - } - } - return len(p), nil -} - -func (a *tunnelAdapter) Close() error { - return nil -} - -func (a *tunnelAdapter) Start(ctx context.Context) { - eg, ctx := errgroup.WithContext(ctx) - eg.Go(func() error { - err := a.inboundHandler(ctx) - if err != nil { - fmt.Println("error in inboundHandler: %w", err) - } - return err - }) - err := eg.Wait() - if err != nil { - fmt.Println("error in tunnel adapter: %w", err) - } -} - -type TunnelManager struct { - Project string - RemotePort int - LocalPort int - - Zone string - 
Instance string - Interface string - - ts oauth2.TokenSource -} - -func (m *TunnelManager) getHeaders() (http.Header, error) { - tok, err := m.ts.Token() - if err != nil { - return nil, fmt.Errorf("unable to get token: %w", err) - } - return http.Header{ - "Origin": []string{TUNNEL_CLOUDPROXY_ORIGIN}, - "Authorization": []string{fmt.Sprintf("Bearer %s", tok.AccessToken)}, - }, nil -} - -func (m *TunnelManager) StartTunnel(ctx context.Context) (io.ReadWriteCloser, error) { - var err error - if m.ts == nil { - m.ts, err = auth.TokenSource() - if err != nil { - return nil, fmt.Errorf("unable to get tokensource: %w", err) - } - } - - tunnelUrl := &url.URL{ - Scheme: URL_SCHEME, - Host: URL_HOST, - Path: fmt.Sprintf("%s/%s", URL_PATH_ROOT, CONNECT_ENDPOINT), - } - query := tunnelUrl.Query() - query.Add("project", m.Project) - query.Add("zone", m.Zone) - query.Add("instance", m.Instance) - query.Add("interface", "nic0") - query.Add("port", fmt.Sprint(m.RemotePort)) - tunnelUrl.RawQuery = query.Encode() - - headers, err := m.getHeaders() - if err != nil { - return nil, fmt.Errorf("unable to get connect headers: %w", err) - } - - urlStr := tunnelUrl.String() - - conn, _, err := websocket.DefaultDialer.Dial(urlStr, headers) - if err != nil { - return nil, fmt.Errorf("unable to connect: %w", err) - } - adapter := newTunnelAdapter(conn) - go adapter.Start(ctx) - return adapter, nil -} - -func (m *TunnelManager) StartProxy(ctx context.Context) error { - lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", m.LocalPort)) - if err != nil { - return fmt.Errorf("unable to listen: %w", err) - } - for { - conn, err := lis.Accept() - if err != nil { - return fmt.Errorf("unable to accept: %w", err) - } - tunnel, err := m.StartTunnel(ctx) - if err != nil { - return fmt.Errorf("unable to start tunnel: %w", err) - } - go func() { - _, err := io.Copy(conn, tunnel) - if err != nil { - fmt.Println("unable to copy from tunnel to conn", err) - } - }() - go func() { - _, err := 
io.Copy(tunnel, conn) - if err != nil { - fmt.Println("unable to copy from conn to tunnel", err) - } - }() - } -} diff --git a/contrib/tfcore/generated/tunnel/tunnel_gen_test.go b/contrib/tfcore/generated/tunnel/tunnel_gen_test.go deleted file mode 100644 index 0e0f4e3358..0000000000 --- a/contrib/tfcore/generated/tunnel/tunnel_gen_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code copied from github.com/gartnera/gcloud/compute/iap:/tunnel_test.go for testing by synapse modulecopier DO NOT EDIT." - -package tunnel - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - "github.com/augustoroman/hexdump" - "github.com/stretchr/testify/require" -) - -func TestStartTunnel(t *testing.T) { - if _, ok := os.LookupEnv("MANUAL_TEST"); !ok { - t.Skip() - } - - ctx := context.Background() - - m := TunnelManager{ - Project: os.Getenv("GOOGLE_PROJECT_ID"), - Zone: "us-west1-c", - Instance: os.Getenv("TEST_INSTANCE_NAME"), - Interface: "nic0", - RemotePort: 22, - } - tunnel, err := m.StartTunnel(ctx) - require.NoError(t, err) - buf := make([]byte, SUBPROTOCOL_MAX_DATA_FRAME_SIZE) - i, err := tunnel.Read(buf) - require.NoError(t, err) - fmt.Println(i) - fmt.Println(hexdump.Dump(buf[:i])) - _, err = tunnel.Write([]byte("SSH-2.0-jsssh.0.1\n")) - require.NoError(t, err) - i, err = tunnel.Read(buf) - require.NoError(t, err) - fmt.Println(i) - fmt.Println(hexdump.Dump(buf[:i])) - time.Sleep(time.Second * 1) -} - -func TestStartProxy(t *testing.T) { - if _, ok := os.LookupEnv("MANUAL_TEST"); !ok { - t.Skip() - } - - ctx := context.Background() - - m := TunnelManager{ - Project: os.Getenv("GOOGLE_PROJECT_ID"), - Zone: "us-west1-c", - Instance: os.Getenv("TEST_INSTANCE_NAME"), - Interface: "nic0", - RemotePort: 22, - LocalPort: 2020, - } - err := m.StartProxy(ctx) - require.NoError(t, err) -} diff --git a/contrib/tfcore/go.mod b/contrib/tfcore/go.mod deleted file mode 100644 index 7d35b7f0f8..0000000000 --- a/contrib/tfcore/go.mod +++ /dev/null @@ -1,120 +0,0 
@@ -module github.com/synapsecns/sanguine/contrib/tfcore - -go 1.21 - -require ( - cloud.google.com/go/bigtable v1.10.1 - github.com/GoogleCloudPlatform/declarative-resource-client-library v0.0.0-20211027225138-ef28ca390518 - github.com/apparentlymart/go-cidr v1.1.0 - github.com/augustoroman/hexdump v0.0.0-20190827031536-6506f4163e93 - github.com/davecgh/go-spew v1.1.1 - github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 - github.com/gartnera/gcloud v0.0.15 - github.com/gorilla/websocket v1.5.1 - github.com/hashicorp/errwrap v1.1.0 - github.com/hashicorp/go-cleanhttp v0.5.2 - github.com/hashicorp/go-version v1.6.0 - github.com/hashicorp/terraform-plugin-go v0.14.2 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 - github.com/hashicorp/terraform-provider-google/v4 v4.2.0 - github.com/mitchellh/go-homedir v1.1.0 - github.com/mitchellh/hashstructure v1.1.0 - github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 - github.com/stretchr/testify v1.8.4 - golang.org/x/mod v0.15.0 - golang.org/x/net v0.21.0 - golang.org/x/oauth2 v0.16.0 - golang.org/x/sync v0.6.0 - golang.org/x/tools v0.18.0 - google.golang.org/api v0.149.0 - google.golang.org/grpc v1.60.1 -) - -require ( - bitbucket.org/creachadair/stringset v0.0.8 // indirect - cloud.google.com/go v0.111.0 // indirect - cloud.google.com/go/compute v1.23.3 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.5 // indirect - cloud.google.com/go/longrunning v0.5.4 // indirect - github.com/agext/levenshtein v1.2.3 // indirect - github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect - github.com/cenkalti/backoff v2.2.1+incompatible // indirect - github.com/dnaeon/go-vcr v1.2.0 // indirect - github.com/docker/cli v20.10.17+incompatible // indirect - 
github.com/docker/docker-credential-helpers v0.7.0 // indirect - github.com/fatih/color v1.13.0 // indirect - github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 // indirect - github.com/go-git/go-git/v5 v5.11.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/glog v1.1.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/btree v1.1.2 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/uuid v1.5.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect - github.com/hashicorp/go-checkpoint v0.5.0 // indirect - github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect - github.com/hashicorp/go-hclog v1.4.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.4.8 // indirect - github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/hashicorp/hc-install v0.4.0 // indirect - github.com/hashicorp/hcl/v2 v2.15.0 // indirect - github.com/hashicorp/logutils v1.0.0 // indirect - github.com/hashicorp/terraform-exec v0.17.3 // indirect - github.com/hashicorp/terraform-json v0.14.0 // indirect - github.com/hashicorp/terraform-plugin-log v0.7.0 // indirect - github.com/hashicorp/terraform-registry-address v0.1.0 // indirect - github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jhump/protoreflect v1.14.1 // indirect - 
github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f // indirect - github.com/kr/pretty v0.3.1 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/go-testing-interface v1.14.1 // indirect - github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect - github.com/oklog/run v1.1.0 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/rogpeppe/go-internal v1.11.0 // indirect - github.com/sergi/go-diff v1.3.1 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect - github.com/spf13/cobra v1.6.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect - github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect - github.com/vmihailenco/tagparser v0.1.2 // indirect - github.com/zclconf/go-cty v1.12.1 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.23.1 // indirect - go.opentelemetry.io/otel/metric v1.23.1 // indirect - go.opentelemetry.io/otel/sdk v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.23.1 // indirect - golang.org/x/crypto v0.19.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 // indirect - 
google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 // indirect - google.golang.org/protobuf v1.32.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - gotest.tools/v3 v3.4.0 // indirect -) - -replace ( - golang.org/x/oauth2 => golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 - google.golang.org/api => google.golang.org/api v0.86.0 -) diff --git a/contrib/tfcore/go.sum b/contrib/tfcore/go.sum deleted file mode 100644 index 692c945e58..0000000000 --- a/contrib/tfcore/go.sum +++ /dev/null @@ -1,1376 +0,0 @@ -4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= -bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= -bitbucket.org/creachadair/stringset v0.0.8 h1:gQqe4vs8XWgMyijfyKE6K8o4TcyGGrRXe0JvHgx5H+M= -bitbucket.org/creachadair/stringset v0.0.8/go.mod h1:AgthVMyMxC/6FK1KBJ2ALdqkZObGN8hOetgpwXyMn34= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.60.0/go.mod 
h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= -cloud.google.com/go v0.61.0/go.mod h1:XukKJg4Y7QsUu0Hxg3qQKUWR4VuWivmyMK2+rUyxAqw= -cloud.google.com/go v0.82.0/go.mod h1:vlKccHJGuFBFufnAnuB08dfEH9Y3H7dzDzRECFdC2TA= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM= -cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigtable v1.10.1 h1:QKcRHeAsraxIlrdCZ3LLobXKBvITqcOEnSbHG2rzL9g= -cloud.google.com/go/bigtable v1.10.1/go.mod h1:cyHeKlx6dcZCO0oSQucYdauseD8kIENGuDOJPKMCVg8= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod 
h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/longrunning v0.5.4 h1:w8xEcbZodnA2BbW6sVirkkoC+1gP8wS57EUUgGS0GVg= -cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= -cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= -dario.cat/mergo v1.0.0 
h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GoogleCloudPlatform/declarative-resource-client-library v0.0.0-20211027225138-ef28ca390518 h1:tFdFasG+VDpnn+BfVbZrfGcoH6pw6s7ODYlZlhTO3UM= -github.com/GoogleCloudPlatform/declarative-resource-client-library v0.0.0-20211027225138-ef28ca390518/go.mod h1:oEeBHikdF/NrnUy0ornVaY1OT+jGvTqm+LQS0+ZDKzU= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard 
v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= -github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= -github.com/andybalholm/brotli v1.0.0/go.mod 
h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/andybalholm/crlf v0.0.0-20171020200849-670099aa064f/go.mod h1:k8feO4+kXDxro6ErPXBRTJ/ro2mf0SsFG8s7doP9kJE= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= -github.com/apparentlymart/go-cidr v1.0.1/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= -github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= -github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= -github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= -github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= -github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= -github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= -github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= -github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= -github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod 
h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/ashanbrown/forbidigo v1.1.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= -github.com/ashanbrown/makezero v0.0.0-20210308000810-4155955488a0/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= -github.com/augustoroman/hexdump v0.0.0-20190827031536-6506f4163e93 h1:z6k1vb5L2wqLK4SIk3fpUiXnhNWSZ6Oyy8AaLqr0B+A= -github.com/augustoroman/hexdump v0.0.0-20190827031536-6506f4163e93/go.mod h1:ps2Vk8wMZarkeIPtUqW/FUvwVVdeRDbewMYz+EmuEgk= -github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= -github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt 
v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= -github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charithe/durationcheck v0.0.6/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= -github.com/chavacava/garif v0.0.0-20210405163807-87a70f3d418b/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= -github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod 
h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod 
h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creachadair/staticfile v0.1.2/go.mod h1:a3qySzCIXEprDGxk6tSxSI+dBBdLzqeBOMhZ+o2d3pM= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= -github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= 
-github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M= -github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= -github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= -github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod 
h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/esimonov/ifshort v1.0.2/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= -github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= -github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= -github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 h1:R+19WKQClnfMXS60cP5BmMe1wjZ4u0evY2p2Ar0ZTXo= -github.com/gammazero/deque 
v0.0.0-20180920172122-f6adf94963e4/go.mod h1:GeIq9qoE43YdGnDXURnmKTnGg15pQz4mYkXSTChbneI= -github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 h1:EipXK6U05IQ2wtuFRn4k3h0+2lXypzItoXGVyf4r9Io= -github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92/go.mod h1:w9RqFVO2BM3xwWEcAB8Fwp0OviTBBEiRmSBDfbXnd3w= -github.com/gartnera/gcloud v0.0.15 h1:/PkEnxPczVRS78MkMDz6wfdRR8YDDjzr0VF6ri6cGVs= -github.com/gartnera/gcloud v0.0.15/go.mod h1:i9wWa1ndPbE8AhduqRMX9nAv9X9HqN9xgqydfEdFLGo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-critic/go-critic v0.5.6/go.mod h1:cVjj0DfqewQVIlIAGexPCaGaZDAqGE29PYDDADIVNEo= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= -github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= -github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= -github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= -github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= -github.com/go-git/go-git/v5 v5.1.0/go.mod h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM= 
-github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= -github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= -github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= -github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= -github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= -github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= 
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.40.1/go.mod h1:OyFTr1muxaWeGTcHQcL3B7C4rETnDphTKYenZDgH2/g= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/certificate-transparency-go v1.1.1/go.mod 
h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= 
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= -github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid 
v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/gookit/color v1.3.8/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= -github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= -github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= 
-github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= -github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= -github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= -github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= -github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= -github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= 
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= -github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= -github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= -github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= -github.com/hashicorp/go-getter v1.5.0/go.mod 
h1:a7z7NPPfNQpJWcn4rSWFtdrSldqLdLPEF3d8nFMsSLM= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= -github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= -github.com/hashicorp/go-plugin v1.4.0/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= -github.com/hashicorp/go-plugin v1.4.8 h1:CHGwpxYDOttQOY7HOWgETU9dyVjOXzniXDqJcYJE1zM= -github.com/hashicorp/go-plugin v1.4.8/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= -github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hc-install v0.4.0 h1:cZkRFr1WVa0Ty6x5fTvL1TuO1flul231rWkGH92oYYk= -github.com/hashicorp/hc-install v0.4.0/go.mod h1:5d155H8EC5ewegao9A4PUTMNPZaq+TbOzkJJZ4vrXeI= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl/v2 v2.3.0/go.mod h1:d+FwDBbOLvpAM3Z6J7gPj/VoAGkNe/gm352ZhjJ/Zv8= -github.com/hashicorp/hcl/v2 v2.6.0/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= -github.com/hashicorp/hcl/v2 
v2.15.0 h1:CPDXO6+uORPjKflkWCCwoWc9uRp+zSIPcCQ+BrxV7m8= -github.com/hashicorp/hcl/v2 v2.15.0/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng= -github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/terraform-exec v0.12.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= -github.com/hashicorp/terraform-exec v0.13.0/go.mod h1:SGhto91bVRlgXQWcJ5znSz+29UZIa8kpBbkGwQ+g9E8= -github.com/hashicorp/terraform-exec v0.17.3 h1:MX14Kvnka/oWGmIkyuyvL6POx25ZmKrjlaclkx3eErU= -github.com/hashicorp/terraform-exec v0.17.3/go.mod h1:+NELG0EqQekJzhvikkeQsOAZpsw0cv/03rbeQJqscAI= -github.com/hashicorp/terraform-json v0.8.0/go.mod h1:3defM4kkMfttwiE7VakJDwCd4R+umhSQnvJwORXbprE= -github.com/hashicorp/terraform-json v0.12.0/go.mod h1:pmbq9o4EuL43db5+0ogX10Yofv1nozM+wskr/bGFJpI= -github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= -github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= -github.com/hashicorp/terraform-plugin-go v0.1.0/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= -github.com/hashicorp/terraform-plugin-go v0.2.1/go.mod h1:10V6F3taeDWVAoLlkmArKttR3IULlRWFAGtQIQTIDr4= -github.com/hashicorp/terraform-plugin-go v0.14.2 h1:rhsVEOGCnY04msNymSvbUsXfRLKh9znXZmHlf5e8mhE= -github.com/hashicorp/terraform-plugin-go v0.14.2/go.mod h1:Q12UjumPNGiFsZffxOsA40Tlz1WVXt2Evh865Zj0+UA= -github.com/hashicorp/terraform-plugin-log v0.7.0 
h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs= -github.com/hashicorp/terraform-plugin-log v0.7.0/go.mod h1:p4R1jWBXRTvL4odmEkFfDdhUjHf9zcs/BCoNHAc7IK4= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.4.0/go.mod h1:JBItawj+j8Ssla5Ib6BC/W9VQkOucBfnX7VRtyx1vw8= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.5.0/go.mod h1:z+cMZ0iswzZOahBJ3XmNWgWkVnAd2bl8g+FhyyuPDH4= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 h1:zHcMbxY0+rFO9gY99elV/XC/UnQVg7FhRCbj1i5b7vM= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1/go.mod h1:+tNlb0wkfdsDJ7JEiERLz4HzM19HyiuIoGzTsM7rPpw= -github.com/hashicorp/terraform-provider-google/v4 v4.2.0 h1:w0r/YEy7ZM5mTMAarRUpS7eyYrXTN5mazwHtLnEGAk8= -github.com/hashicorp/terraform-provider-google/v4 v4.2.0/go.mod h1:eUbSXbhfBMNiOuofFo688iPhk42O782vze8drAN2sPA= -github.com/hashicorp/terraform-registry-address v0.1.0 h1:W6JkV9wbum+m516rCl5/NjKxCyTVaaUBbzYcMzBDO3U= -github.com/hashicorp/terraform-registry-address v0.1.0/go.mod h1:EnyO2jYO6j29DTHbJcm00E5nQTFeTtyZH3H5ycydQ5A= -github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= -github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= 
-github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jgautheron/goconst v1.4.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= -github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= -github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= -github.com/jhump/goprotoc v0.5.0/go.mod 
h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ= -github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= -github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= -github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E= -github.com/jhump/protoreflect v1.14.1 h1:N88q7JkxTHWFEqReuTsYH1dPIwXxA0ITNQp7avLY10s= -github.com/jhump/protoreflect v1.14.1/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= -github.com/jingyugao/rowserrcheck v0.0.0-20210315055705-d907ca737bb1/go.mod h1:TOQpc2SLx6huPfoFGK3UOnEG+u02D3C1GeosjupAKCA= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod 
h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= -github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= -github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f h1:dKccXx7xA56UNqOcFIbuqFjAWPVtP688j5QMgmo6OHU= -github.com/kirsle/configdir v0.0.0-20170128060238-e45d2f54772f/go.mod h1:4rEELDSfUAlBSyUjPG0JnaNGjf13JySHFeRdD/3dLP0= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress 
v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= -github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= 
-github.com/ldez/gomoddirectives v0.2.1/go.mod h1:sGicqkRgBOg//JfpXwkB9Hj0X5RyJ7mlACM5B9f6Me4= -github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= -github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= -github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= -github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.3/go.mod 
h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.0.6/go.mod h1:Lj5gIVxjBlH8REa3icEOkdfchwYc291nShzZ4QYWyMo= 
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.1/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.0.4/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= 
-github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= -github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= -github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= -github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= -github.com/mozilla/tls-observatory v0.0.0-20210209181001-cf43108d6880/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= -github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= -github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= -github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= -github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= -github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce/go.mod h1:uFMI8w+ref4v2r9jz+c9i1IfIttS/OkmLfrk1jne5hs= 
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= 
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= -github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= -github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= -github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v0.0.0-20210418123303-74da32850375/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= 
-github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.4/go.mod h1:57FZgMnoo6jqxkYKmVj5Fc8vOt0rVzoE/UNAmFFIPqA= -github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.2/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20210203162857-b223e0831f88/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= -github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/russross/blackfriday v1.5.2/go.mod 
h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.2.0/go.mod h1:rNqbC4TOIdUDcVMSIpNNAzTbzXAZa6W5lnUepvuMMgQ= -github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sanposhiho/wastedassign v1.0.0/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= -github.com/securego/gosec/v2 v2.7.0/go.mod h1:xNbGArrGUspJLuz3LS5XCY1EBW/0vABAl/LWfSklmiM= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= -github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.21.4/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= 
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= -github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= -github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.3/go.mod 
h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= -github.com/tetafro/godot v1.4.6/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= -github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek= -github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= 
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.1.0/go.mod h1:crK5eI4RGSUrb9duDTQ5GqcukbKZvi85vX6nbhsBAeI= -github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/tommy-muehle/go-mnd/v2 v2.3.2/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= -github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= 
-github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= -github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= -github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= -github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= -github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= -github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= -github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= -github.com/yuin/goldmark v1.1.25/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= -github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.5.1/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ= -github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= -github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= -go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.23.1 h1:Za4UzOqJYS+MUczKI320AtqZHZb7EqxO00jAHE0jmQY= -go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA= -go.opentelemetry.io/otel/metric v1.23.1 h1:PQJmqJ9u2QaJLBOELl1cxIdPcpbwzbkjfEyelTl2rlo= -go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/trace v1.23.1 h1:4LrmmEd8AU2rFvU1zegmvqW7+kWarxtNOPyeL6HmYY8= -go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -golang.org/x/crypto 
v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto 
v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= 
-golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191009170851-d66e71096ffb/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net 
v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210601080250-7ecdf8ef093b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod 
h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2-0.20210512205948-8287d5da45e4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.86.0 h1:ZAnyOHQFIuWso1BodVfSaRyffD74T9ERGFa3k1fNk/U= -google.golang.org/api v0.86.0/go.mod 
h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto 
v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210601144548-a796c710e9b6/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= 
-google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg= -google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917/go.mod h1:pZqR+glSb11aJ+JQcczCvgf47+duRuzNSKqE8YAQnV0= -google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 h1:OPXtXn7fNMaXwO3JvOmF1QyTc00jsSFFz1vXXBOdCDo= -google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:B5xPO//w8qmBDjGReYLpR6UJPnkldGkCSMoH/2vxJeg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 h1:gphdwh0npgs8elJ4T6J+DQJHPVF7RsuJHCfwztUb4J4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod 
h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 
v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= -gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.4/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= -rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml 
v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/contrib/tfcore/utils/combine_schemas.go b/contrib/tfcore/utils/combine_schemas.go deleted file mode 100644 index f70cdfdb4f..0000000000 --- a/contrib/tfcore/utils/combine_schemas.go +++ /dev/null @@ -1,107 +0,0 @@ -package utils - -import ( - "context" - "fmt" - "github.com/hashicorp/terraform-plugin-go/tfprotov5" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "strings" -) - -// CombinedSchema returns the combined schema of only -// schema, metaSchema, resourceMaps and dataSourceMaps. -type CombinedSchema struct { - Schema map[string]*schema.Schema - MetaSchema map[string]*schema.Schema - - ResourceMap, DataSourceMap map[string]*schema.Resource -} - -// CombineSchemas provides 2 helm schemas -// toReplace and replaceWith are used to specify to the provider to replace -// note: while this does not enforce which provider is used, it will fail if one provider is not a google-like provider (in terms of the fields it has) -func CombineSchemas(googleProvider, underlyingProvider *schema.Provider, toReplace, replaceWith string) (co CombinedSchema) { - // schema - co.Schema = MustCombineMaps(googleProvider.Schema, underlyingProvider.Schema) - co.MetaSchema = MustCombineMaps(googleProvider.ProviderMetaSchema, underlyingProvider.ProviderMetaSchema) - co.ResourceMap = make(map[string]*schema.Resource) - co.DataSourceMap = make(map[string]*schema.Resource) - - for key, val := range underlyingProvider.ResourcesMap { - co.ResourceMap[strings.Replace(key, toReplace, replaceWith, 1)] = WrapSchemaResource(val) - } - - for key, val := range underlyingProvider.DataSourcesMap { - co.DataSourceMap[strings.Replace(key, toReplace, replaceWith, 1)] = WrapSchemaResource(val) - } - - co.Schema = UpdateSchemaWithDefaults(co.Schema) - - return co -} - -// UpdateSchemaWithDefaults adds extra fields to the schema needed for the google tunnel. 
-func UpdateSchemaWithDefaults(smap map[string]*schema.Schema) map[string]*schema.Schema { - // project is required to start the proxy - smap["project"].Required = true - smap["project"].Optional = false - // zone is required to start the proxy - smap["zone"].Required = true - smap["zone"].Optional = false - - smap["instance"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - Description: "The name of the instance to start the proxy on", - } - smap["interface"] = &schema.Schema{ - Type: schema.TypeString, - Description: "The name of the interface to start the proxy on", - Default: "nic0", - // defaults to default - Optional: true, - } - - smap["remote_port"] = &schema.Schema{ - Type: schema.TypeInt, - Description: "the port to proxy to", - // defaults to default - Optional: true, - // default tinyproxy port - Default: "8888", - } - return smap -} - -// CombineProtoSchemas combines google schemas and tfproto schemas into a single schema -// this differs from CombineSchemas in that it supports tfproto schemas. 
-func CombineProtoSchemas(ctx context.Context, googleSchema *schema.Provider, protoSchema *tfprotov5.GetProviderSchemaResponse, toReplace, replaceWith string) (co *tfprotov5.Schema, err error) { - // add defaults to the terraform schema - googleSchema.Schema = UpdateSchemaWithDefaults(googleSchema.Schema) - providerSchema := schema.NewGRPCProviderServer(googleSchema) - tfProviderSchema, err := providerSchema.GetProviderSchema(ctx, &tfprotov5.GetProviderSchemaRequest{}) - - if err != nil { - return nil, fmt.Errorf("could not get provider schema: %w", err) - } - - for _, attribute := range tfProviderSchema.Provider.Block.Attributes { - if hasAttribute(protoSchema, attribute) { - return nil, fmt.Errorf("cannot override attribute %s", attribute.Name) - } - protoSchema.Provider.Block.Attributes = append(protoSchema.Provider.Block.Attributes, attribute) - } - - protoSchema.Provider.Block.BlockTypes = append(protoSchema.Provider.Block.BlockTypes, tfProviderSchema.Provider.Block.BlockTypes...) - - return protoSchema.Provider, nil -} - -func hasAttribute(schema *tfprotov5.GetProviderSchemaResponse, attribute *tfprotov5.SchemaAttribute) bool { - for _, ogAttribute := range schema.Provider.Block.Attributes { - if ogAttribute.Name == attribute.Name { - return true - } - } - return false -} diff --git a/contrib/tfcore/utils/combined_schemas_test.go b/contrib/tfcore/utils/combined_schemas_test.go deleted file mode 100644 index fb838069e2..0000000000 --- a/contrib/tfcore/utils/combined_schemas_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package utils_test - -import ( - "context" - "github.com/hashicorp/terraform-plugin-go/tfprotov5" - "github.com/hashicorp/terraform-plugin-go/tftypes" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/synapsecns/sanguine/contrib/tfcore/utils" - "testing" -) - -func TestCombineSchemas(t *testing.T) { - googleProvider := &schema.Provider{ - Schema: map[string]*schema.Schema{ - "project": { - 
Type: schema.TypeString, - Required: true, - }, - "zone": { - Type: schema.TypeString, - Required: true, - }, - }, - ResourcesMap: map[string]*schema.Resource{ - "google_compute_instance": {}, - }, - DataSourcesMap: map[string]*schema.Resource{ - "google_compute_instance": {}, - }, - } - underlyingProvider := &schema.Provider{ - Schema: map[string]*schema.Schema{ - "region": { - Type: schema.TypeString, - Required: true, - }, - "vpc": { - Type: schema.TypeString, - Required: true, - }, - }, - ResourcesMap: map[string]*schema.Resource{ - "aws_instance": {}, - }, - DataSourcesMap: map[string]*schema.Resource{ - "aws_instance": {}, - }, - } - toReplace := "aws" - replaceWith := "google" - combinedSchema := utils.CombineSchemas(googleProvider, underlyingProvider, toReplace, replaceWith) - if combinedSchema.Schema["project"].Required != true { - t.Errorf("Expected project to be required but got %v", combinedSchema.Schema["project"].Required) - } - if combinedSchema.Schema["zone"].Required != true { - t.Errorf("Expected zone to be required but got %v", combinedSchema.Schema["zone"].Required) - } - if combinedSchema.ResourceMap["google_instance"] == nil { - t.Errorf("Expected resource map to have key google_instance but got %v", combinedSchema.ResourceMap) - } - if combinedSchema.DataSourceMap["google_instance"] == nil { - t.Errorf("Expected data source map to have key google_instance but got %v", combinedSchema.DataSourceMap) - } -} - -func TestCombineProtoSchemas(t *testing.T) { - ctx := context.Background() - googleSchema := &schema.Provider{ - Schema: map[string]*schema.Schema{ - "project": { - Type: schema.TypeString, - Required: false, - Optional: true, - }, - "zone": { - Type: schema.TypeString, - Required: false, - Optional: true, - }, - }, - } - protoSchema := &tfprotov5.GetProviderSchemaResponse{ - Provider: &tfprotov5.Schema{ - Block: &tfprotov5.SchemaBlock{ - Attributes: []*tfprotov5.SchemaAttribute{ - { - Name: "test_attribute", - Type: tftypes.String, - }, - 
}, - BlockTypes: []*tfprotov5.SchemaNestedBlock{ - { - TypeName: "test_block", - Block: &tfprotov5.SchemaBlock{ - Attributes: []*tfprotov5.SchemaAttribute{ - { - Name: "test_block_attribute", - Type: tftypes.String, - }, - }, - }, - }, - }, - }, - }, - } - co, err := utils.CombineProtoSchemas(ctx, googleSchema, protoSchema, "", "") - if err != nil { - t.Fatalf("CombineProtoSchemas returned error: %v", err) - } - if co == nil { - t.Fatalf("CombineProtoSchemas returned nil co") - } - if len(co.Block.Attributes) != 6 { - t.Fatalf("CombineProtoSchemas did not add all google attributes, expected 6 got %d", len(co.Block.Attributes)) - } - if len(co.Block.BlockTypes) != 1 { - t.Fatalf("CombineProtoSchemas did not add all proto block types, expected 1 got %d", len(co.Block.BlockTypes)) - } -} diff --git a/contrib/tfcore/utils/doc.go b/contrib/tfcore/utils/doc.go deleted file mode 100644 index a1358ea773..0000000000 --- a/contrib/tfcore/utils/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package utils contains utility functions for the tfcore package. 
-package utils diff --git a/contrib/tfcore/utils/tunnel.go b/contrib/tfcore/utils/tunnel.go deleted file mode 100644 index 7cd457f381..0000000000 --- a/contrib/tfcore/utils/tunnel.go +++ /dev/null @@ -1,104 +0,0 @@ -package utils - -import ( - "context" - "fmt" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/phayes/freeport" - "github.com/synapsecns/sanguine/contrib/tfcore/generated/google" - "github.com/synapsecns/sanguine/contrib/tfcore/generated/tunnel" - "log" - "net/http" - "net/url" - "time" -) - -// StartTunnel and returns the proxy url -// nolint: cyclop -func StartTunnel(ctx context.Context, d *schema.ResourceData, config *google.Config) (proxyURL string, err error) { - project, ok := d.Get("project").(string) - if !ok { - return proxyURL, fmt.Errorf("could not cast project of type %T to %T for project", d.Get("project"), project) - } - zone, ok := d.Get("zone").(string) - if !ok { - return proxyURL, fmt.Errorf("could not cast zone of type %T to %T for zone", d.Get("zone"), zone) - } - instance, ok := d.Get("instance").(string) - if !ok { - return proxyURL, fmt.Errorf("could not cast instance of type %T to %T for instance", d.Get("instance"), instance) - } - iface, ok := d.Get("interface").(string) - if !ok { - return proxyURL, fmt.Errorf("could not cast interface of type %T to %T for interface", d.Get("interface"), iface) - } - remotePort, ok := d.Get("remote_port").(int) - if !ok { - return proxyURL, fmt.Errorf("could not cast remote_port of type %T to %T for remote port", d.Get("remote_port"), remotePort) - } - - localPort, err := freeport.GetFreePort() - if err != nil { - return proxyURL, fmt.Errorf("could not get a free port: %w", err) - } - - tm := tunnel.TunnelManager{ - Project: project, - RemotePort: remotePort, - LocalPort: localPort, - Zone: zone, - Instance: instance, - Interface: iface, - } - - tm.SetTokenSource(config.GetTokenSource()) - - errChan := make(chan error) - - 
log.Printf("[INFO] creating tunnel") - go func() { - startTime := time.Now() - err := tm.StartProxy(ctx) - if err != nil { - fmt.Println(err) - log.Printf("[DEBUG] Proxy Error %v", err) - errChan <- err - } - - log.Printf("[DEBUG] Proxy closed after %s", time.Since(startTime)) - }() - - select { - // wait 5 seconds for an error, otherwise just log since this will run in the background for the course of the apply - case <-time.NewTimer(time.Second * 1).C: - break - case err := <-errChan: - log.Printf("[ERROR] Received error while booting provider: %v", err) - return proxyURL, fmt.Errorf("could not boot provider: %w", err) - } - - log.Printf("[DEBUG] Finished creating proxy on port %d", localPort) - - // test the tunnel - log.Printf("testing the tunnel") - - proxyURL = fmt.Sprintf("http://localhost:%d", localPort) - log.Printf("[DEBUG] setting proxy url to %s", proxyURL) - - parsedURL, err := url.Parse(proxyURL) - if err != nil { - log.Printf("[ERROR] could not parse proxy url %s: %v", proxyURL, err) - return proxyURL, fmt.Errorf("could not parse url: %w", err) - } - testClient := &http.Client{Transport: &http.Transport{Proxy: http.ProxyURL(parsedURL)}} - //nolint:noctx - resp, err := testClient.Get("https://www.google.com/") - if err != nil { - log.Printf("[ERROR] could not connect through proxy %s: %v", proxyURL, err) - } else { - log.Printf("[INFO] proxy tunnel connected %s: %v", proxyURL, err) - _ = resp.Body.Close() - } - - return proxyURL, nil -} diff --git a/contrib/tfcore/utils/utils.go b/contrib/tfcore/utils/utils.go deleted file mode 100644 index cb2891d78e..0000000000 --- a/contrib/tfcore/utils/utils.go +++ /dev/null @@ -1,13 +0,0 @@ -package utils - -// MustCombineMaps attempts to combine two maps. Panics if maps can not be combined. 
-func MustCombineMaps[T interface{}](m1, m2 map[string]T) map[string]T { - for key, value := range m2 { - _, exists := m1[key] - if exists { - panic("Key overlap found when combining maps") - } - m1[key] = value - } - return m1 -} diff --git a/contrib/tfcore/utils/utils_test.go b/contrib/tfcore/utils/utils_test.go deleted file mode 100644 index 6f24d2fa37..0000000000 --- a/contrib/tfcore/utils/utils_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package utils_test - -import ( - "github.com/synapsecns/sanguine/contrib/tfcore/utils" - "testing" -) - -func TestCombineMaps(t *testing.T) { - m1 := map[string]int{"a": 1, "b": 2} - m2 := map[string]int{"c": 3, "d": 4} - expected := map[string]int{"a": 1, "b": 2, "c": 3, "d": 4} - - combinedMap := utils.MustCombineMaps(m1, m2) - - for key, value := range expected { - if combinedMap[key] != value { - t.Errorf("Expected value %d for key %s, but got %d", value, key, combinedMap[key]) - } - } -} - -func TestCombineMapsPanic(t *testing.T) { - m1 := map[string]int{"a": 1, "b": 2} - m2 := map[string]int{"a": 3, "d": 4} - - defer func() { - if r := recover(); r == nil { - t.Errorf("The code did not panic") - } - }() - - _ = utils.MustCombineMaps(m1, m2) -} diff --git a/contrib/tfcore/utils/wrapper.go b/contrib/tfcore/utils/wrapper.go deleted file mode 100644 index 44820faf0c..0000000000 --- a/contrib/tfcore/utils/wrapper.go +++ /dev/null @@ -1,215 +0,0 @@ -package utils - -import ( - "context" - "errors" - provider_diag "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// WrappedProvider is a provider that wraps another provider. This is used to wrap the underlying provider with the google provider. 
-type WrappedProvider interface { - // UnderlyingProvider gets the underlying provider - UnderlyingProvider() interface{} - GoogleProvider() interface{} -} - -// WrapSchemaResource wraps a schema.resource to extract the underlying provider interface. This way, we can configure context on both -// the underlying provider interfaces without modifying the underlying provider. This allows are only modification to the provider -// itself to be the addition of the proxy_url field and the proxy starter. -// nolint: staticcheck, wrapcheck, gocognit, cyclop -func WrapSchemaResource(resource *schema.Resource) *schema.Resource { - resResource := &schema.Resource{ - Schema: resource.Schema, - SchemaVersion: resource.SchemaVersion, - MigrateState: resource.MigrateState, - StateUpgraders: resource.StateUpgraders, - Importer: resource.Importer, - Description: resource.Description, - UseJSONNumber: resource.UseJSONNumber, - } - - if resource.Create != nil { - resResource.Create = func(data *schema.ResourceData, meta interface{}) error { - underlyingProvider, ok := meta.(WrappedProvider) - if !ok { - return errors.New("failed to cast meta interface") - } - return resource.Create(data, underlyingProvider.UnderlyingProvider()) - } - } - - if resource.CreateContext != nil { - resResource.CreateContext = func(ctx context.Context, data *schema.ResourceData, meta interface{}) (_ provider_diag.Diagnostics) { - underlyingProvider, ok := meta.(WrappedProvider) - if !ok { - return provider_diag.Diagnostics{ - { - Severity: provider_diag.Error, - Summary: "failed to cast meta interface", - }, - } - } - return resource.CreateContext(ctx, data, underlyingProvider.UnderlyingProvider()) - } - } - - if resource.Read != nil { - resResource.Read = func(data *schema.ResourceData, meta interface{}) error { - underlyingProvider, ok := meta.(WrappedProvider) - if !ok { - return errors.New("failed to cast meta interface") - } - return resource.Read(data, underlyingProvider.UnderlyingProvider()) - } - } - 
- if resource.ReadContext != nil { - resResource.ReadContext = func(ctx context.Context, data *schema.ResourceData, meta interface{}) (_ provider_diag.Diagnostics) { - underlyingProvider, ok := meta.(WrappedProvider) - if !ok { - return provider_diag.Diagnostics{ - { - Severity: provider_diag.Error, - Summary: "failed to cast meta interface", - }, - } - } - return resource.ReadContext(ctx, data, underlyingProvider.UnderlyingProvider()) - } - } - - if resource.UpdateContext != nil { - resResource.UpdateContext = func(ctx context.Context, data *schema.ResourceData, meta interface{}) (_ provider_diag.Diagnostics) { - underlyingProvider, ok := meta.(WrappedProvider) - if !ok { - return provider_diag.Diagnostics{ - { - Severity: provider_diag.Error, - Summary: "failed to cast meta interface", - }, - } - } - return resource.UpdateContext(ctx, data, underlyingProvider.UnderlyingProvider()) - } - } - - if resource.Update != nil { - resResource.Update = func(data *schema.ResourceData, meta interface{}) error { - underlyingProvider, ok := meta.(WrappedProvider) - if !ok { - return errors.New("failed to cast meta interface") - } - return resource.Update(data, underlyingProvider.UnderlyingProvider()) - } - } - - if resource.Delete != nil { - resResource.Delete = func(data *schema.ResourceData, meta interface{}) error { - underlyingProvider, ok := meta.(WrappedProvider) - if !ok { - return errors.New("failed to cast meta interface") - } - return resource.Delete(data, underlyingProvider.UnderlyingProvider()) - } - } - - if resource.Exists != nil { - resResource.Exists = func(data *schema.ResourceData, meta interface{}) (bool, error) { - underlyingProvider, ok := meta.(WrappedProvider) - if !ok { - return false, errors.New("failed to cast meta interface") - } - return resource.Exists(data, underlyingProvider.UnderlyingProvider()) - } - } - - if resource.CreateWithoutTimeout != nil { - resResource.CreateWithoutTimeout = func(ctx context.Context, data *schema.ResourceData, meta 
interface{}) provider_diag.Diagnostics { - underlyingProvider, ok := meta.(WrappedProvider) - if !ok { - return provider_diag.Diagnostics{ - { - Severity: provider_diag.Error, - Summary: "failed to cast meta interface", - }, - } - } - return resource.CreateWithoutTimeout(ctx, data, underlyingProvider.UnderlyingProvider()) - } - } - - if resource.ReadWithoutTimeout != nil { - resResource.ReadWithoutTimeout = func(ctx context.Context, data *schema.ResourceData, meta interface{}) provider_diag.Diagnostics { - underlyingProvider, ok := meta.(WrappedProvider) - if !ok { - return provider_diag.Diagnostics{ - { - Severity: provider_diag.Error, - Summary: "failed to cast meta interface", - }, - } - } - return resource.ReadWithoutTimeout(ctx, data, underlyingProvider.UnderlyingProvider()) - } - } - - if resource.UpdateWithoutTimeout != nil { - resResource.UpdateWithoutTimeout = func(ctx context.Context, data *schema.ResourceData, meta interface{}) provider_diag.Diagnostics { - underlyingProvider, ok := meta.(WrappedProvider) - if !ok { - return provider_diag.Diagnostics{ - { - Severity: provider_diag.Error, - Summary: "failed to cast meta interface", - }, - } - } - return resource.UpdateWithoutTimeout(ctx, data, underlyingProvider.UnderlyingProvider()) - } - } - - if resource.DeleteWithoutTimeout != nil { - resResource.DeleteWithoutTimeout = func(ctx context.Context, data *schema.ResourceData, meta interface{}) provider_diag.Diagnostics { - underlyingProvider, ok := meta.(WrappedProvider) - if !ok { - return provider_diag.Diagnostics{ - { - Severity: provider_diag.Error, - Summary: "failed to cast meta interface", - }, - } - } - - return resource.DeleteWithoutTimeout(ctx, data, underlyingProvider.UnderlyingProvider()) - } - } - - if resource.CustomizeDiff != nil { - resResource.CustomizeDiff = func(ctx context.Context, diff *schema.ResourceDiff, meta interface{}) error { - underlyingProvider, ok := meta.(WrappedProvider) - if !ok { - return errors.New("failed to cast meta 
interface") - } - - return resource.CustomizeDiff(ctx, diff, underlyingProvider.UnderlyingProvider()) - } - } - - if resource.DeleteContext != nil { - resResource.DeleteContext = func(ctx context.Context, data *schema.ResourceData, meta interface{}) (_ provider_diag.Diagnostics) { - underlyingProvider, ok := meta.(WrappedProvider) - if !ok { - return provider_diag.Diagnostics{ - { - Severity: provider_diag.Error, - Summary: "failed to cast meta interface", - }, - } - } - return resource.DeleteContext(ctx, data, underlyingProvider.UnderlyingProvider()) - } - } - - return resResource -} diff --git a/contrib/tfcore/utils/wrapper_test.go b/contrib/tfcore/utils/wrapper_test.go deleted file mode 100644 index 2c7db295c2..0000000000 --- a/contrib/tfcore/utils/wrapper_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package utils_test - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/synapsecns/sanguine/contrib/tfcore/utils" - "testing" -) - -// nolint:staticcheck -func TestWrapSchemaResource(t *testing.T) { - resource := &schema.Resource{ - Create: func(data *schema.ResourceData, meta interface{}) error { - return nil - }, - Read: func(data *schema.ResourceData, meta interface{}) error { - return nil - }, - Update: func(data *schema.ResourceData, meta interface{}) error { - return nil - }, - } - - // TODO: look into testing all fields with reflection - wrappedResource := utils.WrapSchemaResource(resource) - underlyingProvider := &MockWrappedProvider{} - - // Test Create hook - err := wrappedResource.Create(nil, underlyingProvider) - if err != nil { - t.Fatalf("Expected Create to succeed, got %s", err) - } - - // Test Read hook - err = wrappedResource.Read(nil, underlyingProvider) - if err != nil { - t.Fatalf("Expected Read to succeed, got %s", err) - } - - // Test Update hook - err = wrappedResource.Update(nil, underlyingProvider) - if err != nil { - t.Fatalf("Expected Update to succeed, got %s", err) - } -} - -type 
MockWrappedProvider struct{} - -func (m *MockWrappedProvider) UnderlyingProvider() interface{} { - return nil -} - -func (m *MockWrappedProvider) GoogleProvider() interface{} { - return nil -} diff --git a/tools/modulecopier/README.md b/tools/modulecopier/README.md deleted file mode 100644 index 82fd0524f6..0000000000 --- a/tools/modulecopier/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Module Copier - - diff --git a/tools/modulecopier/cmd/doc.go b/tools/modulecopier/cmd/doc.go deleted file mode 100644 index 902726a40c..0000000000 --- a/tools/modulecopier/cmd/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package cmd contains a generator for copying files exported files from geth -// in order to use private fields. The resulting files should not be modified directly -// but if there are new methods you need exported, generators, etc that can be done in other files -// that will now have access to the private fields. These generated files should only be used for testing -// -// TODO: look into implementing a tag for tests in order to make sure nothing in testutils/ is used in a production build -// we haven't done this yet because of the poor ux in an ide as far as having to add a `-tag`. -package cmd diff --git a/tools/modulecopier/cmd/flags.go b/tools/modulecopier/cmd/flags.go deleted file mode 100644 index 5cc6737b49..0000000000 --- a/tools/modulecopier/cmd/flags.go +++ /dev/null @@ -1,21 +0,0 @@ -package cmd - -import "github.com/urfave/cli/v2" - -var modulePathFlag = &cli.StringFlag{ - Name: "module-path", - Usage: "module path you'd like to copy. For example github.com/ethereum/go-ethereum/console for https://github.com/ethereum/go-ethereum/tree/master/console", - Required: false, -} - -var filePathFlag = &cli.StringFlag{ - Name: "file-path", - Usage: "file path you'd like to copy. 
For example github.com/ethereum/go-ethereum/console/console.go for https://github.com/ethereum/go-ethereum/tree/master/console.go", - Required: false, -} - -var packageFlag = &cli.StringFlag{ - Name: "package-name", - Usage: "package name of the new package", - Required: true, -} diff --git a/tools/modulecopier/cmd/main.go b/tools/modulecopier/cmd/main.go deleted file mode 100644 index ae1d6b4332..0000000000 --- a/tools/modulecopier/cmd/main.go +++ /dev/null @@ -1,64 +0,0 @@ -package cmd - -import ( - "fmt" - "github.com/gen2brain/beeep" - "github.com/synapsecns/sanguine/core" - "github.com/synapsecns/sanguine/core/config" - "github.com/synapsecns/sanguine/tools/modulecopier/internal" - "github.com/urfave/cli/v2" - "os" -) - -// Run runs the module copier. -func Run(args []string, buildInfo config.BuildInfo) { - app := cli.NewApp() - app.Name = buildInfo.Name() - app.Version = buildInfo.Version() - app.Description = buildInfo.VersionString() + "This is used for copying files out of modules in order to export unused fields. 
This should only be used for unit testing" - app.Usage = fmt.Sprintf("%s --help", buildInfo.Name()) - app.Flags = []cli.Flag{ - modulePathFlag, - filePathFlag, - packageFlag, - } - app.Action = func(c *cli.Context) error { - wd, err := os.Getwd() - if err != nil { - return fmt.Errorf("could not determine working directory: %w", err) - } - - modulePath := c.String(modulePathFlag.Name) - filePath := core.ExpandOrReturnPath(c.String(filePathFlag.Name)) - packageName := c.String(packageFlag.Name) - - // return an error if neither is specified or both are specified - if (modulePath == "" && filePath == "") || (modulePath != "" && filePath != "") { - return fmt.Errorf("exactly one of %s OR %s must be specified", modulePathFlag.Name, filePathFlag.Name) - } - - // handle module path copy - if modulePath != "" { - err = internal.CopyModule(modulePath, wd, packageName) - if err != nil { - return fmt.Errorf("could not copy files for %s to %s", c.String("module-path"), wd) - } - } else { - // handle go file copy - err = internal.CopyFile(filePath, wd, packageName) - if err != nil { - return fmt.Errorf("could not copy files for %s to %s", c.String("module-path"), wd) - } - } - - return nil - } - err := app.Run(args) - if err != nil { - // we send an additional alert through beep because go:generate *will* silently fail if ran as - // go:generate ./... 
- logoPath, _ := config.GetLogoPath() - _ = beeep.Notify("GethExport Failed", "", logoPath) - panic(err) - } -} diff --git a/tools/modulecopier/internal/copy.go b/tools/modulecopier/internal/copy.go deleted file mode 100644 index 85236527dc..0000000000 --- a/tools/modulecopier/internal/copy.go +++ /dev/null @@ -1,183 +0,0 @@ -package internal - -import ( - "bytes" - "fmt" - "github.com/markbates/pkger" - "github.com/thoas/go-funk" - "go/ast" - "go/format" - "go/parser" - "go/printer" - "go/token" - "golang.org/x/tools/go/ast/astutil" - "io" - "io/fs" - "os" - "path" - "path/filepath" - "strings" -) - -// CopyModule copies a module path to a destination. -func CopyModule(toCopy, dest, packageName string) error { - // walk through the dir, see: https://github.com/markbates/pkger/blob/09e9684b656b/examples/app/main.go#L29 - info, err := pkger.Info(toCopy) - if err != nil { - return fmt.Errorf("could not resolve %s", toCopy) - } - - // get the go files to copy - goFiles := append(info.GoFiles, info.TestGoFiles...) - - err = pkger.Walk(toCopy, func(filePath string, info fs.FileInfo, err error) error { - if err != nil { - return fmt.Errorf("error while walking: %w", err) - } - // if it's not a go file, skip it - if !funk.ContainsString(goFiles, info.Name()) { - return nil - } - - return copyGoFile(filePath, packageName, dest, info) - }) - - if err != nil { - return fmt.Errorf("error while copying: %w", err) - } - return nil -} - -// copyGoFile copies a go file using the package info. 
-func copyGoFile(filePath, packageName, dest string, info fs.FileInfo) error { - fileContents, err := getUpdatedFileContents(filePath, packageName) - if err != nil { - return fmt.Errorf("could not get updated file contents: %w", err) - } - - newFile := fmt.Sprintf("%s/%s", dest, getFileName(info.Name())) - //nolint: gosec - f, err := os.Create(newFile) - if err != nil { - return fmt.Errorf("could not open file") - } - - // write the contents to the file - _, err = f.Write(fileContents) - if err != nil { - return fmt.Errorf("could not write to file: %w", err) - } - - err = f.Close() - if err != nil { - return fmt.Errorf("could not close file: %w", err) - } - - return nil -} - -// CopyFile copies a single go file. This will not bring dependencies. -func CopyFile(fileToCopy, dest, packageName string) error { - // first things first, pkger operates on go modules, so we need to trim - modulePath := path.Dir(fileToCopy) - fileName := path.Base(fileToCopy) - - // make sure the last element is a file - if filepath.Ext(fileName) != ".go" { - return fmt.Errorf("must specify a .go file after module, got %s", filepath.Ext(fileName)) - } - - err := pkger.Walk(modulePath, func(filePath string, info fs.FileInfo, err error) error { - if err != nil { - return fmt.Errorf("error while walking: %w", err) - } - - // only copy the target file - if info.Name() != fileName { - return nil - } - - return copyGoFile(filePath, packageName, dest, info) - }) - - if err != nil { - return fmt.Errorf("error while copying: %w", err) - } - - return nil -} - -// getFileName gets the new file name. Gen is added here before the .go in the case of non tests -// and before _test.go in the case of tests. 
-func getFileName(originalName string) string { - suffix := filepath.Ext(originalName) - noExtensionName := strings.TrimSuffix(originalName, suffix) - - const testSuffix = "_test" - - // if it's a test strip it from the original name and add it to the suffix - testIndex := strings.LastIndex(noExtensionName, testSuffix) - if testIndex != -1 { - noExtensionName = noExtensionName[:testIndex] + strings.Replace(noExtensionName[testIndex:], testSuffix, "", 1) - suffix = testSuffix + suffix - } - - return noExtensionName + "_gen" + suffix -} - -// getUpdatedFileContents rewrites adds the generation header and rewrites the package name. -func getUpdatedFileContents(path, newPackageName string) (fileContents []byte, err error) { - file, err := pkger.Open(path) - if err != nil { - return fileContents, fmt.Errorf("could not open file at %s: %w", path, err) - } - - fileContents, err = io.ReadAll(file) - if err != nil { - return fileContents, fmt.Errorf("could not read file %s: %w", fileContents, err) - } - - // prepend the header to the file - fileContents = append([]byte(makeGeneratedHeader(path)+"\n\n"), fileContents...) - - // rename the package by modifying the ast - fset := token.NewFileSet() - - fileAst, err := parser.ParseFile(fset, filepath.Base(path), fileContents, parser.ParseComments) - if err != nil { - return nil, fmt.Errorf("could not parse ast. 
This could indicate an invalid source file: %w", err) - } - - newAst := astutil.Apply(fileAst, nil, func(cursor *astutil.Cursor) bool { - if ident, ok := cursor.Node().(*ast.Ident); ok { - cursor.Replace(&ast.Ident{ - NamePos: ident.NamePos, - Name: newPackageName, - Obj: ident.Obj, - }) - return false - } - return true - }) - - fileBuffer := bytes.NewBuffer([]byte{}) - err = printer.Fprint(fileBuffer, fset, newAst) - if err != nil { - return nil, fmt.Errorf("could not write resulting ast: %w", err) - } - - // TODO: use golangci-lint - formatted, err := format.Source(fileBuffer.Bytes()) - if err != nil { - return nil, fmt.Errorf("could not format: %w", err) - } - - return formatted, nil -} - -// makeGenerated header makes the code generation header -// note: this must conform to https://github.com/golangci/golangci-lint/blob/1fb67fe448da8a3fb525ecef28decceb23b42d7a/pkg/result/processors/autogenerated_exclude.go#L76 -// to bypass linters. -func makeGeneratedHeader(origin string) string { - return fmt.Sprintf("// Code copied from %s for testing by synapse modulecopier DO NOT EDIT.\"", origin) -} diff --git a/tools/modulecopier/internal/copy_test.go b/tools/modulecopier/internal/copy_test.go deleted file mode 100644 index 89cd9c2c92..0000000000 --- a/tools/modulecopier/internal/copy_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package internal_test - -import ( - "bytes" - "github.com/Flaque/filet" - "github.com/brianvoe/gofakeit/v6" - . "github.com/stretchr/testify/assert" - "github.com/synapsecns/sanguine/tools/modulecopier/internal" - "go/parser" - "go/token" - "io/fs" - "os" - "path/filepath" -) - -// TestCopyModule runs some sanity checks on the copy process. 
-func (s GeneratorSuite) TestCopyModule() { - newPackageName := gofakeit.Word() - - destDir := filet.TmpDir(s.T(), "") - err := internal.CopyModule("github.com/ethereum/go-ethereum/accounts/abi/bind/backends", destDir, newPackageName) - Nil(s.T(), err) - - // run some sanity checks on the resulting dir. This is by no means complete, but this is a testutil - err = filepath.WalkDir(destDir, func(path string, d fs.DirEntry, err error) error { - Nil(s.T(), err) - // skip the tld - if d.IsDir() { - return nil - } - - // make sure file is not empty - //nolint: staticcheck - fileInfo, err := d.Info() - Nil(s.T(), err) - - NotZero(s.T(), fileInfo.Size()) - - s.validateGoFile(path, newPackageName) - - return nil - }) - Nil(s.T(), err) -} - -func (s GeneratorSuite) TestCopyFile() { - newPackageName := gofakeit.Word() - destDir := filet.TmpDir(s.T(), "") - err := internal.CopyFile("github.com/ethereum/go-ethereum/ethclient/signer.go", destDir, newPackageName) - Nil(s.T(), err) - - path := filepath.Join(destDir, "signer_gen.go") - - s.validateGoFile(path, newPackageName) -} - -// validateGoFile validates that the file was correctly copied with the correct prefix. 
-func (s GeneratorSuite) validateGoFile(path, packageName string) { - //nolint: gosec - src, err := os.ReadFile(path) - Nil(s.T(), err) - - True(s.T(), bytes.Contains(src, []byte("DO NOT EDIT"))) - - fset := token.NewFileSet() - - // verify package name was correctly changed - ast, err := parser.ParseFile(fset, filepath.Base(path), src, parser.PackageClauseOnly) - Nil(s.T(), err) - - realPackageName := ast.Name.Name - Equal(s.T(), realPackageName, packageName) -} diff --git a/tools/modulecopier/internal/doc.go b/tools/modulecopier/internal/doc.go deleted file mode 100644 index d959a7567f..0000000000 --- a/tools/modulecopier/internal/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package internal contains the internal implementation of our module copier -package internal diff --git a/tools/modulecopier/internal/module.go b/tools/modulecopier/internal/module.go deleted file mode 100644 index 46a477007c..0000000000 --- a/tools/modulecopier/internal/module.go +++ /dev/null @@ -1,143 +0,0 @@ -package internal - -import ( - "fmt" - "github.com/markbates/pkger" - "github.com/pkg/errors" - "golang.org/x/mod/modfile" - "os" - "path" - "path/filepath" -) - -const modFileName = "go.mod" - -// GetModulePath gets the module path for a dependency -// for example, for ethereum, dependencyName would be github.com/ethereum/go-ethereum -// note: we keep this in place w/ packager so tests can determine valid resolution -// pkger is safe to use directly with the tests in place. 
-func GetModulePath(dependencyName string) (modPath string, err error) { - modFile, err := getModfile() - if err != nil { - return "", err - } - - // make sure the module is not a replace which we don't have functionality for yet - if _, err := hasUnsupportedDirective(modFile, dependencyName); err != nil { - return "", fmt.Errorf("module has unupoorted directive: %w", err) - } - - var resolvedModule *modfile.Require - for _, mod := range modFile.Require { - // this is our module - if mod.Mod.Path == dependencyName { - resolvedModule = mod - } - } - - if resolvedModule == nil { - return "", fmt.Errorf("could not find module at %s in go.mod", dependencyName) - } - - // now we use pkger to resolve the module name. If we could've done this the whole time, why didn't we? - // a) we need the module included in the go.mod so we don't have to run go mod tidy after generation. - // pkger is go module aware, but it's user friendliness comes at a cost. It'll try to import - // things that aren't in the modules file - // b) pkger will not handle replaces: see the above check - depModFile, err := pkger.Open(fmt.Sprintf("%s/:go.mod", dependencyName)) - if err != nil { - return "", fmt.Errorf("pkger could not resolve go.mod file: %w", err) - } - resolvedModFile := path.Join(depModFile.Info().Dir, modFileName) - - //nolint: gosec - depModFileContents, err := os.ReadFile(resolvedModFile) - if err != nil { - return "", fmt.Errorf("could not read resolved module file at %s: %w", depModFile.Path().String(), err) - } - - // parse the resolved module file - parsedFile, err := modfile.Parse(depModFile.Path().String(), depModFileContents, nil) - if err != nil { - return "", fmt.Errorf("could not read mod file: %w", err) - } - - if parsedFile.Module.Mod.Path != depModFile.Info().Module.Path { - return "", fmt.Errorf("incorrect module resolved at path %s, expected: %s got %s", depModFile.Path().String(), - parsedFile.Module.Mod.String(), - resolvedModule.Mod.String()) - } - - return 
depModFile.Info().Dir, nil -} - -// hasUnsupportedDirective checks if the module is either a replace or exclude which are not currently supported -// note: there's no reason they can't be. We just don't use them at all yet. -func hasUnsupportedDirective(modFile *modfile.File, dependencyName string) (ok bool, err error) { - for _, mod := range modFile.Replace { - if mod.Old.Path == dependencyName { - return true, errors.New("replaced modules are not currently supported") - } - } - - for _, mod := range modFile.Exclude { - if mod.Mod.Path == dependencyName { - return true, errors.New("excluded modules are not currently supported") - } - } - return false, nil -} - -// findModPath recursively searches parent directories for the module path. -// Throws an error if it hits a breakpoint (either due to permissions or getting to repo root). -func findModPath() (string, error) { - currentPath, err := os.Getwd() - if err != nil { - return "", fmt.Errorf("could not get current path: %w", err) - } - - for { - exists := true - - prospectiveFile := filepath.Join(currentPath, modFileName) - - if _, err := os.Stat(prospectiveFile); os.IsNotExist(err) { - exists = false - } - - if !exists { - lastPath := currentPath - currentPath = filepath.Dir(currentPath) - - if lastPath == currentPath { - return "", errors.New("could not find go.mod file") - } - - continue - } - - return prospectiveFile, nil - } -} - -// getModFile gets the module file from the root of the repo. It returns an error if the module cannot be found. 
-func getModfile() (*modfile.File, error) { - modFile, err := findModPath() - if err != nil { - return nil, fmt.Errorf("could not get modfile: %w", err) - } - - // read the file - //nolint: gosec - modContents, err := os.ReadFile(modFile) - if err != nil { - return nil, fmt.Errorf("could not read modfile: %w", err) - } - - parsedFile, err := modfile.Parse(modFile, modContents, nil) - if err != nil { - return nil, fmt.Errorf("could not parse mod file") - } - - return parsedFile, nil -} diff --git a/tools/modulecopier/internal/module_test.go b/tools/modulecopier/internal/module_test.go deleted file mode 100644 index 8c430f73fd..0000000000 --- a/tools/modulecopier/internal/module_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package internal_test - -import ( - . "github.com/stretchr/testify/assert" - "github.com/synapsecns/sanguine/tools/modulecopier/internal" - "os" - "path" - // required for copy test. - _ "github.com/ethereum/go-ethereum/common" -) - -// fileCheck is the file to check for. We check for .mailmap in ethereum because -// 1) we don't use it -// 2) it's relatively uncommon depiste being a git feature (https://git-scm.com/docs/git-check-mailmap) -// 3) it hasn't changed in 4 years. -// if you're seeing this test break, make sure this file wasn't deleted from ethereum. -const fileCheck = ".mailmap" -const ethModule = "github.com/ethereum/go-ethereum" - -// TestGetEthModulePath tests a fetch of the ethereum module path. -func (s GeneratorSuite) TestGetEthModulePath() { - ethModulePath, err := internal.GetModulePath(ethModule) - Nil(s.T(), err) - - if _, err := os.Stat(path.Join(ethModulePath, fileCheck)); os.IsNotExist(err) { - s.T().Errorf("expected to find module path for %s%s, did not find any. 
Used eth module path %s", ethModule, fileCheck, ethModulePath) - } -} diff --git a/tools/modulecopier/internal/suite_test.go b/tools/modulecopier/internal/suite_test.go deleted file mode 100644 index 7acf0b4d2d..0000000000 --- a/tools/modulecopier/internal/suite_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package internal_test - -import ( - "github.com/stretchr/testify/suite" - "github.com/synapsecns/sanguine/core/testsuite" - "testing" -) - -type GeneratorSuite struct { - *testsuite.TestSuite -} - -// NewGeneratorSuite creates a end-to-end test suite. -func NewGeneratorSuite(tb testing.TB) *GeneratorSuite { - tb.Helper() - return &GeneratorSuite{ - TestSuite: testsuite.NewTestSuite(tb), - } -} - -func TestGeneratorSuite(t *testing.T) { - suite.Run(t, NewGeneratorSuite(t)) -} diff --git a/tools/modulecopier/main.go b/tools/modulecopier/main.go deleted file mode 100644 index 3052fc4567..0000000000 --- a/tools/modulecopier/main.go +++ /dev/null @@ -1,20 +0,0 @@ -// Package main provides a command line tool for copying modules. 
-package main - -import ( - "github.com/synapsecns/sanguine/core/config" - "github.com/synapsecns/sanguine/tools/modulecopier/cmd" - "os" -) - -var ( - version = config.DefaultVersion - commit = config.DefaultCommit - date = config.DefaultDate -) - -func main() { - buildInfo := config.NewBuildInfo(version, commit, "modulecopier", date) - - cmd.Run(os.Args, buildInfo) -} From 8cb75eda46b35ac336ee917009a7098d3561d53e Mon Sep 17 00:00:00 2001 From: Trajan0x Date: Sat, 8 Jun 2024 14:45:36 -0400 Subject: [PATCH 2/7] cleanup --- .codecov.yml | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/.codecov.yml b/.codecov.yml index 4ba9064546..244d47b884 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -30,26 +30,6 @@ flags: paths: - contrib/git-changes-action/ carryforward: true - release-copier-action: - paths: - - contrib/release-copier-action/ - carryforward: true - terraform-provider-helmproxy: - paths: - - contrib/terraform-provider-helmproxy/ - carryforward: true - terraform-provider-iap: - paths: - - contrib/terraform-provider-iap/ - carryforward: true - terraform-provider-kubeproxy: - paths: - - contrib/terraform-provider-kubeproxy/ - carryforward: true - tfcore: - paths: - - contrib/tfcore/ - carryforward: true core: paths: - core/ From 46078addab4cbde1882b924c62d0aee457332de8 Mon Sep 17 00:00:00 2001 From: Trajan0x Date: Sat, 8 Jun 2024 14:49:13 -0400 Subject: [PATCH 3/7] remove bundle module --- tools/bundle/.gitignore | 1 - tools/bundle/README.md | 3 - tools/bundle/include/doc.go | 5 - tools/bundle/main.go | 645 --------------------- tools/bundle/main_test.go | 9 - tools/modulecopier/README.md | 3 + tools/modulecopier/cmd/doc.go | 8 + tools/modulecopier/cmd/flags.go | 21 + tools/modulecopier/cmd/main.go | 64 ++ tools/modulecopier/internal/copy.go | 183 ++++++ tools/modulecopier/internal/copy_test.go | 73 +++ tools/modulecopier/internal/doc.go | 2 + tools/modulecopier/internal/module.go | 143 +++++ 
tools/modulecopier/internal/module_test.go | 28 + tools/modulecopier/internal/suite_test.go | 23 + tools/modulecopier/main.go | 20 + 16 files changed, 568 insertions(+), 663 deletions(-) delete mode 100644 tools/bundle/.gitignore delete mode 100644 tools/bundle/README.md delete mode 100644 tools/bundle/include/doc.go delete mode 100644 tools/bundle/main.go delete mode 100644 tools/bundle/main_test.go create mode 100644 tools/modulecopier/README.md create mode 100644 tools/modulecopier/cmd/doc.go create mode 100644 tools/modulecopier/cmd/flags.go create mode 100644 tools/modulecopier/cmd/main.go create mode 100644 tools/modulecopier/internal/copy.go create mode 100644 tools/modulecopier/internal/copy_test.go create mode 100644 tools/modulecopier/internal/doc.go create mode 100644 tools/modulecopier/internal/module.go create mode 100644 tools/modulecopier/internal/module_test.go create mode 100644 tools/modulecopier/internal/suite_test.go create mode 100644 tools/modulecopier/main.go diff --git a/tools/bundle/.gitignore b/tools/bundle/.gitignore deleted file mode 100644 index caaeb09d8c..0000000000 --- a/tools/bundle/.gitignore +++ /dev/null @@ -1 +0,0 @@ -testdata/out.got diff --git a/tools/bundle/README.md b/tools/bundle/README.md deleted file mode 100644 index 2f3f5a33bc..0000000000 --- a/tools/bundle/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Bundle - -This is a tool for bundling a package into a single source file. Most of the source code is copied from there, but this attempts to fix the bugs around shadowing in a rather hacky way (by aliasing all imports). 
See [here](https://pkg.go.dev/golang.org/x/tools/cmd/bundle) for the original code diff --git a/tools/bundle/include/doc.go b/tools/bundle/include/doc.go deleted file mode 100644 index 257baeddf8..0000000000 --- a/tools/bundle/include/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package include is used to import dependencies -package include - -// IncludeMe is used to include this module as a dependency -var IncludeMe = 0 diff --git a/tools/bundle/main.go b/tools/bundle/main.go deleted file mode 100644 index ab9cf9f5ca..0000000000 --- a/tools/bundle/main.go +++ /dev/null @@ -1,645 +0,0 @@ -// we skip linting this file because it is largely copied from the standard library and mostly a patch until the bug is fixed in bundle -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Bundle creates a single-source-file version of a source package -// suitable for inclusion in a particular target package. -// -// Usage: -// -// bundle [-o file] [-dst path] [-pkg name] [-prefix p] [-import old=new] [-tags build_constraints] -// -// The src argument specifies the import path of the package to bundle. -// The bundling of a directory of source files into a single source file -// necessarily imposes a number of constraints. -// The package being bundled must not use cgo; must not use conditional -// file compilation, whether with build tags or system-specific file names -// like code_amd64.go; must not depend on any special comments, which -// may not be preserved; must not use any assembly sources; -// must not use renaming imports; and must not use reflection-based APIs -// that depend on the specific names of types or struct fields. -// -// By default, bundle writes the bundled code to standard output. 
-// If the -o argument is given, bundle writes to the named file -// and also includes a “//go:generate” comment giving the exact -// command line used, for regenerating the file with “go generate.” -// -// Bundle customizes its output for inclusion in a particular package, the destination package. -// By default bundle assumes the destination is the package in the current directory, -// but the destination package can be specified explicitly using the -dst option, -// which takes an import path as its argument. -// If the source package imports the destination package, bundle will remove -// those imports and rewrite any references to use direct references to the -// corresponding symbols. -// Bundle also must write a package declaration in the output and must -// choose a name to use in that declaration. -// If the -pkg option is given, bundle uses that name. -// Otherwise, the name of the destination package is used. -// Build constraints for the generated file can be specified using the -tags option. -// -// To avoid collisions, bundle inserts a prefix at the beginning of -// every package-level const, func, type, and var identifier in src's code, -// updating references accordingly. The default prefix is the package name -// of the source package followed by an underscore. The -prefix option -// specifies an alternate prefix. -// -// Occasionally it is necessary to rewrite imports during the bundling -// process. The -import option, which may be repeated, specifies that -// an import of "old" should be rewritten to import "new" instead. 
-// -// # Example -// -// Bundle archive/zip for inclusion in cmd/dist: -// -// cd $GOROOT/src/cmd/dist -// bundle -o zip.go archive/zip -// -// Bundle golang.org/x/net/http2 for inclusion in net/http, -// prefixing all identifiers by "http2" instead of "http2_", and -// including a "!nethttpomithttp2" build constraint: -// -// cd $GOROOT/src/net/http -// bundle -o h2_bundle.go -prefix http2 -tags '!nethttpomithttp2' golang.org/x/net/http2 -// -// Update the http2 bundle in net/http: -// -// go generate net/http -// -// Update all bundles in the standard library: -// -// go generate -run bundle std -package main - -import ( - "bytes" - "flag" - "fmt" - "go/ast" - "go/format" - "go/printer" - "go/token" - "go/types" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" - "golang.org/x/tools/imports" - "io/ioutil" - "log" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "unicode" - - "golang.org/x/tools/go/packages" -) - -var ( - outputFile = flag.String("o", "", "write output to `file` (default standard output)") - dstPath = flag.String("dst", ".", "set destination import `path`") - pkgName = flag.String("pkg", "", "set destination package `name`") - prefix = flag.String("prefix", "&_", "set bundled identifier prefix to `p` (default is \"&_\", where & stands for the original name)") - buildTags = flag.String("tags", "", "the build constraints to be inserted into the generated file") - - importMap = map[string]string{} -) - -func init() { - flag.Var(flagFunc(addImportMap), "import", "rewrite import using `map`, of form old=new (can be repeated)") -} - -func addImportMap(s string) { - if strings.Count(s, "=") != 1 { - log.Fatal("-import argument must be of the form old=new") - } - i := strings.Index(s, "=") - old, new := s[:i], s[i+1:] - if old == "" || new == "" { - log.Fatal("-import argument must be of the form old=new; old and new must be non-empty") - } - importMap[old] = new -} - -func usage() { - fmt.Fprintf(os.Stderr, "Usage: bundle [options] \n") - 
flag.PrintDefaults() -} - -func main() { - log.SetPrefix("bundle: ") - log.SetFlags(0) - - flag.Usage = usage - flag.Parse() - args := flag.Args() - if len(args) != 1 { - usage() - os.Exit(2) - } - - cfg := &packages.Config{Mode: packages.NeedName} - pkgs, err := packages.Load(cfg, *dstPath) - if err != nil { - log.Fatalf("cannot load destination package: %v", err) - } - if packages.PrintErrors(pkgs) > 0 || len(pkgs) != 1 { - log.Fatalf("failed to load destination package") - } - if *pkgName == "" { - *pkgName = pkgs[0].Name - } - - code, err := bundle(args[0], pkgs[0].PkgPath, *pkgName, *prefix, *buildTags) - if err != nil { - log.Fatal(err) - } - if *outputFile != "" { - err := ioutil.WriteFile(*outputFile, code, 0666) - if err != nil { - log.Fatal(err) - } - } else { - _, err := os.Stdout.Write(code) - if err != nil { - log.Fatal(err) - } - } -} - -// isStandardImportPath is copied from cmd/go in the standard library. -func isStandardImportPath(path string) bool { - i := strings.Index(path, "/") - if i < 0 { - i = len(path) - } - elem := path[:i] - return !strings.Contains(elem, ".") -} - -var testingOnlyPackagesConfig *packages.Config - -func bundle(src, dst, dstpkg, prefix, buildTags string) ([]byte, error) { - // Load the initial package. - cfg := &packages.Config{} - if testingOnlyPackagesConfig != nil { - *cfg = *testingOnlyPackagesConfig - } else { - // Bypass default vendor mode, as we need a package not available in the - // std module vendor folder. 
- var environVars []string - // list of go keys to copy - validGoKeys := []string{"GOPATH", "GOROOT", "GO111MODULE", "GOPRIVATE"} - for _, val := range os.Environ() { - key := strings.Split(val, "=")[0] - - if slices.Contains(validGoKeys, key) || !strings.HasPrefix(val, "GO") { - environVars = append(environVars, val) - } - } - cfg.Env = append(environVars) - } - cfg.Mode = packages.NeedTypes | packages.NeedSyntax | packages.NeedTypesInfo | packages.NeedImports | packages.NeedDeps - pkgs, err := packages.Load(cfg, src) - if err != nil { - return nil, err - } - if packages.PrintErrors(pkgs) > 0 || len(pkgs) != 1 { - return nil, fmt.Errorf("failed to load source package") - } - pkg := pkgs[0] - - if strings.Contains(prefix, "&") { - prefix = strings.Replace(prefix, "&", pkg.Syntax[0].Name.Name, -1) - } - - objsToUpdate := make(map[types.Object]bool) - var rename func(from types.Object) - rename = func(from types.Object) { - if !objsToUpdate[from] { - objsToUpdate[from] = true - - // Renaming a type that is used as an embedded field - // requires renaming the field too. e.g. - // type T int // if we rename this to U.. - // var s struct {T} - // print(s.T) // ...this must change too - if _, ok := from.(*types.TypeName); ok { - for id, obj := range pkg.TypesInfo.Uses { - if obj == from { - if field := pkg.TypesInfo.Defs[id]; field != nil { - rename(field) - } - } - } - } - } - } - - // Rename each package-level object. - scope := pkg.Types.Scope() - for _, name := range scope.Names() { - rename(scope.Lookup(name)) - } - - var out bytes.Buffer - - // Concatenate package comments from all files... - for _, f := range pkg.Syntax { - if doc := f.Doc.Text(); strings.TrimSpace(doc) != "" { - for _, line := range strings.Split(doc, "\n") { - fmt.Fprintf(&out, "// %s\n", line) - } - } - } - // ...but don't let them become the actual package comment. 
- fmt.Fprintln(&out) - - fmt.Fprintf(&out, "package %s\n\n", dstpkg) - - // BUG(adonovan,shurcooL): bundle may generate incorrect code - // due to shadowing between identifiers and imported package names. - // - // The generated code will either fail to compile or - // (unlikely) compile successfully but have different behavior - // than the original package. The risk of this happening is higher - // when the original package has renamed imports (they're typically - // renamed in order to resolve a shadow inside that particular .go file). - - // TODO(adonovan,shurcooL): - // - detect shadowing issues, and either return error or resolve them - // - preserve comments from the original import declarations. - - // pkgStd and pkgExt are sets of printed import specs. This is done - // to deduplicate instances of the same import name and path. - var pkgStd = make(map[string]bool) - var pkgExt = make(map[string]bool) - // renamedImportsFile keeps track of all new aliases - // alias->ogimport->newimport - var renamedImportsAlias = make(map[string]map[string]string) - for _, f := range pkg.Syntax { - // create a standard alias for every import in the file - aliasPrefix := getAliasPrefix(pkg, f) - renamedImportsAlias[aliasPrefix] = make(map[string]string) - - for _, imp := range f.Imports { - path, err := strconv.Unquote(imp.Path.Value) - if err != nil { - log.Fatalf("invalid import path string: %v", err) // Shouldn't happen here since packages.Load succeeded. 
- } - if path == dst { - continue - } - - ogPath := path - - if newPath, ok := importMap[path]; ok { - path = newPath - } - - var name string - if imp.Name != nil { - name = imp.Name.Name - } - - importName := pkg.Imports[ogPath].Types.Name() - - refName := name - if name == "" { - refName = importName - } - - name = aliasPrefix + importName + name - spec := fmt.Sprintf("%s %q", name, path) - - renamedImportsAlias[aliasPrefix][refName] = name - - if isStandardImportPath(path) { - pkgStd[spec] = true - } else { - pkgExt[spec] = true - } - } - } - - // Print a single declaration that imports all necessary packages. - fmt.Fprintln(&out, "import (") - for p := range pkgStd { - fmt.Fprintf(&out, "\t%s\n", p) - } - if len(pkgExt) > 0 { - fmt.Fprintln(&out) - } - for p := range pkgExt { - fmt.Fprintf(&out, "\t%s\n", p) - } - fmt.Fprint(&out, ")\n\n") - - // Modify and print each file. - for _, f := range pkg.Syntax { - // Update renamed identifiers. - for id, obj := range pkg.TypesInfo.Defs { - if objsToUpdate[obj] { - id.Name = prefix + obj.Name() - } - } - for id, obj := range pkg.TypesInfo.Uses { - if objsToUpdate[obj] { - id.Name = prefix + obj.Name() - } - } - - // For each qualified identifier that refers to the - // destination package, remove the qualifier. - // The "@@@." strings are removed in postprocessing. 
- ast.Inspect(f, func(n ast.Node) bool { - if sel, ok := n.(*ast.SelectorExpr); ok { - if id, ok := sel.X.(*ast.Ident); ok { - if obj, ok := pkg.TypesInfo.Uses[id].(*types.PkgName); ok { - if obj.Imported().Path() == dst { - id.Name = "@@@" - } - } - } - } - return true - }) - - // For each reference to an import, replace it with the new alias - aliasedImports := renamedImportsAlias[getAliasPrefix(pkg, f)] - // convert to a slice for quick lookup - importKeys := maps.Keys(aliasedImports) - - // manage a stack to do ancestor checks, see: https://stackoverflow.com/a/66810485 - var stack []ast.Node - ast.Inspect(f, func(n ast.Node) bool { - OUTER: - switch x := n.(type) { - case *ast.Ident: - if slices.Contains(importKeys, x.Name) { - // don't rename struct vars - for _, item := range stack { - // check if any ancestor is a struct - if _, ok := item.(*ast.StructType); ok { - if x.Obj != nil { - // if our current object is a field don't rename it - _, isField := x.Obj.Decl.(*ast.Field) - if isField { - break OUTER - } - } - } - } - - // check the parent node to make sure it's not a struct before replacing - parent := stack[len(stack)-1] - switch px := parent.(type) { - case *ast.SelectorExpr: - if sel, ok := px.X.(*ast.Ident); ok { - if sel.Obj != nil && sel.Obj.Decl != nil { - // check if its a field, if it is we break to the outer - // since these dont' need to be rewritten - _, isField := sel.Obj.Decl.(*ast.Field) - if isField { - break OUTER - } - - assign, isAssign := sel.Obj.Decl.(*ast.AssignStmt) - if isAssign { - // check the right side of the assignment to see if its a field - for _, item := range assign.Rhs { - // TODO: there's probably more edge cases to handle around this but none that come up in google - tae, isTypeAssert := item.(*ast.TypeAssertExpr) - if isTypeAssert { - // check the ident used in the type assertion - taIdent, isTaIdent := tae.X.(*ast.Ident) - if isTaIdent { - // check the object for a field assertion - if taIdent.Obj != nil && 
taIdent.Obj.Decl != nil { - _, isField := taIdent.Obj.Decl.(*ast.Field) - if isField { - break OUTER - } - } - } - } - } - } - } - } - } - - // don't rename fields - if x.Obj != nil && x.Obj.Decl != nil { - _, isField := x.Obj.Decl.(*ast.Field) - if isField { - break OUTER - } - } - - // skip vars - x.Name = aliasedImports[x.Name] - } - case *ast.FuncType: - if x.Results != nil { - for _, res := range x.Results.List { - if len(res.Names) == 0 { - if ident, ok := res.Type.(*ast.Ident); ok && slices.Contains(importKeys, ident.Name) { - ident.Name = aliasedImports[ident.Name] - } - } - } - } - } - - // Manage the stack. Inspect calls a function like this: - // f(node) - // for each child { - // f(child) // and recursively for child's children - // } - // f(nil) - if n == nil { - // Done with node's children. Pop. - stack = stack[:len(stack)-1] - } else { - // Push the current node for children. - stack = append(stack, n) - } - - return true - }) - - last := f.Package - if len(f.Imports) > 0 { - imp := f.Imports[len(f.Imports)-1] - last = imp.End() - if imp.Comment != nil { - if e := imp.Comment.End(); e > last { - last = e - } - } - } - - // Pretty-print package-level declarations. - // but no package or import declarations. - var buf bytes.Buffer - for _, decl := range f.Decls { - if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT { - continue - } - - beg, end := sourceRange(decl) - - printComments(&out, f.Comments, last, beg) - - buf.Reset() - format.Node(&buf, pkg.Fset, &printer.CommentedNode{Node: decl, Comments: f.Comments}) - // Remove each "@@@." in the output. - // TODO(adonovan): not hygienic. 
- out.Write(bytes.Replace(buf.Bytes(), []byte("@@@."), nil, -1)) - - last = printSameLineComment(&out, f.Comments, pkg.Fset, end) - - out.WriteString("\n\n") - } - - printLastComments(&out, f.Comments, last) - } - - result, err := imports.Process("", out.Bytes(), &imports.Options{}) - if err != nil { - return nil, fmt.Errorf("error processing imports: %v", err) - } - - if buildTags != "" { - result = append([]byte(fmt.Sprintf("//go:build %s\n", buildTags)), result...) - result = append([]byte(fmt.Sprintf("// +build %s\n\n", buildTags)), result...) - } - - result = append([]byte("//nolint\n"), result...) - result = append([]byte("// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.\n"), result...) - if *outputFile != "" && buildTags == "" { - // skip this for now - //fmt.Fprintf(&out, "//go:generate bundle %s\n", strings.Join(quoteArgs(os.Args[1:]), " ")) - } else { - result = append([]byte(fmt.Sprintf("// $ bundle %s\n", strings.Join(os.Args[1:], " "))), result...) - } - result = append([]byte("\n"), result...) - - // Now format the entire thing. - result, err = format.Source(result) - if err != nil { - log.Fatalf("formatting failed: %v", err) - } - - return result, nil -} - -// sourceRange returns the [beg, end) interval of source code -// belonging to decl (incl. associated comments). 
-func sourceRange(decl ast.Decl) (beg, end token.Pos) { - beg = decl.Pos() - end = decl.End() - - var doc, com *ast.CommentGroup - - switch d := decl.(type) { - case *ast.GenDecl: - doc = d.Doc - if len(d.Specs) > 0 { - switch spec := d.Specs[len(d.Specs)-1].(type) { - case *ast.ValueSpec: - com = spec.Comment - case *ast.TypeSpec: - com = spec.Comment - } - } - case *ast.FuncDecl: - doc = d.Doc - } - - if doc != nil { - beg = doc.Pos() - } - if com != nil && com.End() > end { - end = com.End() - } - - return beg, end -} - -func printComments(out *bytes.Buffer, comments []*ast.CommentGroup, pos, end token.Pos) { - for _, cg := range comments { - if pos <= cg.Pos() && cg.Pos() < end { - for _, c := range cg.List { - fmt.Fprintln(out, c.Text) - } - fmt.Fprintln(out) - } - } -} - -const infinity = 1 << 30 - -func printLastComments(out *bytes.Buffer, comments []*ast.CommentGroup, pos token.Pos) { - printComments(out, comments, pos, infinity) -} - -func printSameLineComment(out *bytes.Buffer, comments []*ast.CommentGroup, fset *token.FileSet, pos token.Pos) token.Pos { - tf := fset.File(pos) - for _, cg := range comments { - if pos <= cg.Pos() && tf.Line(cg.Pos()) == tf.Line(pos) { - for _, c := range cg.List { - fmt.Fprintln(out, c.Text) - } - return cg.End() - } - } - return pos -} - -func quoteArgs(ss []string) []string { - // From go help generate: - // - // > The arguments to the directive are space-separated tokens or - // > double-quoted strings passed to the generator as individual - // > arguments when it is run. - // - // > Quoted strings use Go syntax and are evaluated before execution; a - // > quoted string appears as a single argument to the generator. 
- // - var qs []string - for _, s := range ss { - if s == "" || containsSpace(s) { - s = strconv.Quote(s) - } - qs = append(qs, s) - } - return qs -} - -func containsSpace(s string) bool { - for _, r := range s { - if unicode.IsSpace(r) { - return true - } - } - return false -} - -type flagFunc func(string) - -func (f flagFunc) Set(s string) error { - f(s) - return nil -} - -func (f flagFunc) String() string { return "" } - -// getAliasPrefix gets a unique import alias name for a package. -func getAliasPrefix(packages *packages.Package, file *ast.File) string { - filename := path.Base(packages.Fset.Position(file.Package).Filename) - extension := filepath.Ext(filename) - - return filename[0:len(filename)-len(extension)] + "_" -} diff --git a/tools/bundle/main_test.go b/tools/bundle/main_test.go deleted file mode 100644 index 79a1a51748..0000000000 --- a/tools/bundle/main_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package main - -import ( - "testing" -) - -func TestBundle(t *testing.T) { - t.Skip("TODO: add some tests here. As the readme states, this tool is used only for copying the google module. Any failures there should be covered by the go:generate tests, but we can implement here at a later date") -} diff --git a/tools/modulecopier/README.md b/tools/modulecopier/README.md new file mode 100644 index 0000000000..82fd0524f6 --- /dev/null +++ b/tools/modulecopier/README.md @@ -0,0 +1,3 @@ +# Module Copier + + diff --git a/tools/modulecopier/cmd/doc.go b/tools/modulecopier/cmd/doc.go new file mode 100644 index 0000000000..902726a40c --- /dev/null +++ b/tools/modulecopier/cmd/doc.go @@ -0,0 +1,8 @@ +// Package cmd contains a generator for copying files exported files from geth +// in order to use private fields. The resulting files should not be modified directly +// but if there are new methods you need exported, generators, etc that can be done in other files +// that will now have access to the private fields. 
These generated files should only be used for testing +// +// TODO: look into implementing a tag for tests in order to make sure nothing in testutils/ is used in a production build +// we haven't done this yet because of the poor ux in an ide as far as having to add a `-tag`. +package cmd diff --git a/tools/modulecopier/cmd/flags.go b/tools/modulecopier/cmd/flags.go new file mode 100644 index 0000000000..5cc6737b49 --- /dev/null +++ b/tools/modulecopier/cmd/flags.go @@ -0,0 +1,21 @@ +package cmd + +import "github.com/urfave/cli/v2" + +var modulePathFlag = &cli.StringFlag{ + Name: "module-path", + Usage: "module path you'd like to copy. For example github.com/ethereum/go-ethereum/console for https://github.com/ethereum/go-ethereum/tree/master/console", + Required: false, +} + +var filePathFlag = &cli.StringFlag{ + Name: "file-path", + Usage: "file path you'd like to copy. For example github.com/ethereum/go-ethereum/console/console.go for https://github.com/ethereum/go-ethereum/tree/master/console.go", + Required: false, +} + +var packageFlag = &cli.StringFlag{ + Name: "package-name", + Usage: "package name of the new package", + Required: true, +} diff --git a/tools/modulecopier/cmd/main.go b/tools/modulecopier/cmd/main.go new file mode 100644 index 0000000000..ae1d6b4332 --- /dev/null +++ b/tools/modulecopier/cmd/main.go @@ -0,0 +1,64 @@ +package cmd + +import ( + "fmt" + "github.com/gen2brain/beeep" + "github.com/synapsecns/sanguine/core" + "github.com/synapsecns/sanguine/core/config" + "github.com/synapsecns/sanguine/tools/modulecopier/internal" + "github.com/urfave/cli/v2" + "os" +) + +// Run runs the module copier. +func Run(args []string, buildInfo config.BuildInfo) { + app := cli.NewApp() + app.Name = buildInfo.Name() + app.Version = buildInfo.Version() + app.Description = buildInfo.VersionString() + "This is used for copying files out of modules in order to export unused fields. 
This should only be used for unit testing" + app.Usage = fmt.Sprintf("%s --help", buildInfo.Name()) + app.Flags = []cli.Flag{ + modulePathFlag, + filePathFlag, + packageFlag, + } + app.Action = func(c *cli.Context) error { + wd, err := os.Getwd() + if err != nil { + return fmt.Errorf("could not determine working directory: %w", err) + } + + modulePath := c.String(modulePathFlag.Name) + filePath := core.ExpandOrReturnPath(c.String(filePathFlag.Name)) + packageName := c.String(packageFlag.Name) + + // return an error if neither is specified or both are specified + if (modulePath == "" && filePath == "") || (modulePath != "" && filePath != "") { + return fmt.Errorf("exactly one of %s OR %s must be specified", modulePathFlag.Name, filePathFlag.Name) + } + + // handle module path copy + if modulePath != "" { + err = internal.CopyModule(modulePath, wd, packageName) + if err != nil { + return fmt.Errorf("could not copy files for %s to %s", c.String("module-path"), wd) + } + } else { + // handle go file copy + err = internal.CopyFile(filePath, wd, packageName) + if err != nil { + return fmt.Errorf("could not copy files for %s to %s", c.String("module-path"), wd) + } + } + + return nil + } + err := app.Run(args) + if err != nil { + // we send an additional alert through beep because go:generate *will* silently fail if ran as + // go:generate ./... 
+ logoPath, _ := config.GetLogoPath() + _ = beeep.Notify("GethExport Failed", "", logoPath) + panic(err) + } +} diff --git a/tools/modulecopier/internal/copy.go b/tools/modulecopier/internal/copy.go new file mode 100644 index 0000000000..85236527dc --- /dev/null +++ b/tools/modulecopier/internal/copy.go @@ -0,0 +1,183 @@ +package internal + +import ( + "bytes" + "fmt" + "github.com/markbates/pkger" + "github.com/thoas/go-funk" + "go/ast" + "go/format" + "go/parser" + "go/printer" + "go/token" + "golang.org/x/tools/go/ast/astutil" + "io" + "io/fs" + "os" + "path" + "path/filepath" + "strings" +) + +// CopyModule copies a module path to a destination. +func CopyModule(toCopy, dest, packageName string) error { + // walk through the dir, see: https://github.com/markbates/pkger/blob/09e9684b656b/examples/app/main.go#L29 + info, err := pkger.Info(toCopy) + if err != nil { + return fmt.Errorf("could not resolve %s", toCopy) + } + + // get the go files to copy + goFiles := append(info.GoFiles, info.TestGoFiles...) + + err = pkger.Walk(toCopy, func(filePath string, info fs.FileInfo, err error) error { + if err != nil { + return fmt.Errorf("error while walking: %w", err) + } + // if it's not a go file, skip it + if !funk.ContainsString(goFiles, info.Name()) { + return nil + } + + return copyGoFile(filePath, packageName, dest, info) + }) + + if err != nil { + return fmt.Errorf("error while copying: %w", err) + } + return nil +} + +// copyGoFile copies a go file using the package info. 
+func copyGoFile(filePath, packageName, dest string, info fs.FileInfo) error { + fileContents, err := getUpdatedFileContents(filePath, packageName) + if err != nil { + return fmt.Errorf("could not get updated file contents: %w", err) + } + + newFile := fmt.Sprintf("%s/%s", dest, getFileName(info.Name())) + //nolint: gosec + f, err := os.Create(newFile) + if err != nil { + return fmt.Errorf("could not open file") + } + + // write the contents to the file + _, err = f.Write(fileContents) + if err != nil { + return fmt.Errorf("could not write to file: %w", err) + } + + err = f.Close() + if err != nil { + return fmt.Errorf("could not close file: %w", err) + } + + return nil +} + +// CopyFile copies a single go file. This will not bring dependencies. +func CopyFile(fileToCopy, dest, packageName string) error { + // first things first, pkger operates on go modules, so we need to trim + modulePath := path.Dir(fileToCopy) + fileName := path.Base(fileToCopy) + + // make sure the last element is a file + if filepath.Ext(fileName) != ".go" { + return fmt.Errorf("must specify a .go file after module, got %s", filepath.Ext(fileName)) + } + + err := pkger.Walk(modulePath, func(filePath string, info fs.FileInfo, err error) error { + if err != nil { + return fmt.Errorf("error while walking: %w", err) + } + + // only copy the target file + if info.Name() != fileName { + return nil + } + + return copyGoFile(filePath, packageName, dest, info) + }) + + if err != nil { + return fmt.Errorf("error while copying: %w", err) + } + + return nil +} + +// getFileName gets the new file name. Gen is added here before the .go in the case of non tests +// and before _test.go in the case of tests. 
// getFileName gets the new file name: "_gen" is inserted before ".go" for
// regular files and before "_test.go" for tests, e.g. "copy.go" ->
// "copy_gen.go" and "copy_test.go" -> "copy_gen_test.go".
func getFileName(originalName string) string {
	suffix := filepath.Ext(originalName)
	noExtensionName := strings.TrimSuffix(originalName, suffix)

	const testSuffix = "_test"

	// only a *trailing* _test marks a test file; using LastIndex here would
	// corrupt names like "my_tester.go" by stripping the embedded "_test"
	if strings.HasSuffix(noExtensionName, testSuffix) {
		noExtensionName = strings.TrimSuffix(noExtensionName, testSuffix)
		suffix = testSuffix + suffix
	}

	return noExtensionName + "_gen" + suffix
}
This could indicate an invalid source file: %w", err) + } + + newAst := astutil.Apply(fileAst, nil, func(cursor *astutil.Cursor) bool { + if ident, ok := cursor.Node().(*ast.Ident); ok { + cursor.Replace(&ast.Ident{ + NamePos: ident.NamePos, + Name: newPackageName, + Obj: ident.Obj, + }) + return false + } + return true + }) + + fileBuffer := bytes.NewBuffer([]byte{}) + err = printer.Fprint(fileBuffer, fset, newAst) + if err != nil { + return nil, fmt.Errorf("could not write resulting ast: %w", err) + } + + // TODO: use golangci-lint + formatted, err := format.Source(fileBuffer.Bytes()) + if err != nil { + return nil, fmt.Errorf("could not format: %w", err) + } + + return formatted, nil +} + +// makeGenerated header makes the code generation header +// note: this must conform to https://github.com/golangci/golangci-lint/blob/1fb67fe448da8a3fb525ecef28decceb23b42d7a/pkg/result/processors/autogenerated_exclude.go#L76 +// to bypass linters. +func makeGeneratedHeader(origin string) string { + return fmt.Sprintf("// Code copied from %s for testing by synapse modulecopier DO NOT EDIT.\"", origin) +} diff --git a/tools/modulecopier/internal/copy_test.go b/tools/modulecopier/internal/copy_test.go new file mode 100644 index 0000000000..89cd9c2c92 --- /dev/null +++ b/tools/modulecopier/internal/copy_test.go @@ -0,0 +1,73 @@ +package internal_test + +import ( + "bytes" + "github.com/Flaque/filet" + "github.com/brianvoe/gofakeit/v6" + . "github.com/stretchr/testify/assert" + "github.com/synapsecns/sanguine/tools/modulecopier/internal" + "go/parser" + "go/token" + "io/fs" + "os" + "path/filepath" +) + +// TestCopyModule runs some sanity checks on the copy process. 
// TestCopyModule runs some sanity checks on the copy process.
func (s GeneratorSuite) TestCopyModule() {
	newPackageName := gofakeit.Word()

	destDir := filet.TmpDir(s.T(), "")
	err := internal.CopyModule("github.com/ethereum/go-ethereum/accounts/abi/bind/backends", destDir, newPackageName)
	Nil(s.T(), err)

	// run some sanity checks on the resulting dir. This is by no means complete, but this is a testutil
	err = filepath.WalkDir(destDir, func(path string, d fs.DirEntry, err error) error {
		Nil(s.T(), err)
		// skip the tld
		if d.IsDir() {
			return nil
		}

		// make sure file is not empty
		//nolint: staticcheck
		fileInfo, err := d.Info()
		Nil(s.T(), err)

		NotZero(s.T(), fileInfo.Size())

		s.validateGoFile(path, newPackageName)

		return nil
	})
	Nil(s.T(), err)
}

// TestCopyFile checks that copying a single file produces a renamed _gen file
// carrying the requested package name.
func (s GeneratorSuite) TestCopyFile() {
	newPackageName := gofakeit.Word()
	destDir := filet.TmpDir(s.T(), "")
	err := internal.CopyFile("github.com/ethereum/go-ethereum/ethclient/signer.go", destDir, newPackageName)
	Nil(s.T(), err)

	path := filepath.Join(destDir, "signer_gen.go")

	s.validateGoFile(path, newPackageName)
}
+func (s GeneratorSuite) validateGoFile(path, packageName string) { + //nolint: gosec + src, err := os.ReadFile(path) + Nil(s.T(), err) + + True(s.T(), bytes.Contains(src, []byte("DO NOT EDIT"))) + + fset := token.NewFileSet() + + // verify package name was correctly changed + ast, err := parser.ParseFile(fset, filepath.Base(path), src, parser.PackageClauseOnly) + Nil(s.T(), err) + + realPackageName := ast.Name.Name + Equal(s.T(), realPackageName, packageName) +} diff --git a/tools/modulecopier/internal/doc.go b/tools/modulecopier/internal/doc.go new file mode 100644 index 0000000000..d959a7567f --- /dev/null +++ b/tools/modulecopier/internal/doc.go @@ -0,0 +1,2 @@ +// Package internal contains the internal implementation of our module copier +package internal diff --git a/tools/modulecopier/internal/module.go b/tools/modulecopier/internal/module.go new file mode 100644 index 0000000000..46a477007c --- /dev/null +++ b/tools/modulecopier/internal/module.go @@ -0,0 +1,143 @@ +package internal + +import ( + "fmt" + "github.com/markbates/pkger" + "github.com/pkg/errors" + "golang.org/x/mod/modfile" + "os" + "path" + "path/filepath" +) + +const modFileName = "go.mod" + +// GetModulePath gets the module path for a dependency +// for example, for ethereum, dependencyName would be github.com/ethereum/go-ethereum +// note: we keep this in place w/ packager so tests can determine valid resolution +// pkger is safe to use directly with the tests in place. 
+func GetModulePath(dependencyName string) (modPath string, err error) { + modFile, err := getModfile() + if err != nil { + return "", err + } + + // make sure the module is not a replace which we don't have functionality for yet + if _, err := hasUnsupportedDirective(modFile, dependencyName); err != nil { + return "", fmt.Errorf("module has unupoorted directive: %w", err) + } + + var resolvedModule *modfile.Require + for _, mod := range modFile.Require { + // this is our module + if mod.Mod.Path == dependencyName { + resolvedModule = mod + } + } + + if resolvedModule == nil { + return "", fmt.Errorf("could not find module at %s in go.mod", dependencyName) + } + + // now we use pkger to resolve the module name. If we could've done this the whole time, why didn't we? + // a) we need the module included in the go.mod so we don't have to run go mod tidy after generation. + // pkger is go module aware, but it's user friendliness comes at a cost. It'll try to import + // things that aren't in the modules file + // b) pkger will not handle replaces: see the above check + depModFile, err := pkger.Open(fmt.Sprintf("%s/:go.mod", dependencyName)) + if err != nil { + return "", fmt.Errorf("pkger could not resolve go.mod file: %w", err) + } + resolvedModFile := path.Join(depModFile.Info().Dir, modFileName) + + //nolint: gosec + depModFileContents, err := os.ReadFile(resolvedModFile) + if err != nil { + return "", fmt.Errorf("could not read resolved module file at %s: %w", depModFile.Path().String(), err) + } + + // parse the resolved module file + parsedFile, err := modfile.Parse(depModFile.Path().String(), depModFileContents, nil) + if err != nil { + return "", fmt.Errorf("could not read mod file: %w", err) + } + + if parsedFile.Module.Mod.Path != depModFile.Info().Module.Path { + return "", fmt.Errorf("incorrect module resolved at path %s, expected: %s got %s", depModFile.Path().String(), + parsedFile.Module.Mod.String(), + resolvedModule.Mod.String()) + } + + return 
depModFile.Info().Dir, nil +} + +// hasUnsupportedDirective checks if the module is either a replace or exclude which are not currently supported +// note: there's no reason they can't be. We just don't use them at all yet. +func hasUnsupportedDirective(modFile *modfile.File, dependencyName string) (ok bool, err error) { + for _, mod := range modFile.Replace { + if mod.Old.Path == dependencyName { + return true, errors.New("replaced modules are not currently supported") + } + } + + for _, mod := range modFile.Exclude { + if mod.Mod.Path == dependencyName { + return true, errors.New("excluded modules are not currently supported") + } + } + return false, nil +} + +// findModPath recursively searches parent directories for the module path. +// Throws an error if it hits a breakpoint (either due to permissions or getting to repo root). +func findModPath() (string, error) { + currentPath, err := os.Getwd() + if err != nil { + return "", fmt.Errorf("could not get current path: %w", err) + } + + for { + exists := true + + prospectiveFile := filepath.Join(currentPath, modFileName) + + if _, err := os.Stat(prospectiveFile); os.IsNotExist(err) { + exists = false + } + + if !exists { + lastPath := currentPath + currentPath = filepath.Dir(currentPath) + + if lastPath == currentPath { + return "", errors.New("could not find go.mod file") + } + + continue + } + + return prospectiveFile, nil + } +} + +// getModFile gets the module file from the root of the repo. It returns an error if the module cannot be found. 
+func getModfile() (*modfile.File, error) { + modFile, err := findModPath() + if err != nil { + return nil, fmt.Errorf("could not get modfile: %w", err) + } + + // read the file + //nolint: gosec + modContents, err := os.ReadFile(modFile) + if err != nil { + return nil, fmt.Errorf("could not read modfile: %w", err) + } + + parsedFile, err := modfile.Parse(modFile, modContents, nil) + if err != nil { + return nil, fmt.Errorf("could not parse mod file") + } + + return parsedFile, nil +} diff --git a/tools/modulecopier/internal/module_test.go b/tools/modulecopier/internal/module_test.go new file mode 100644 index 0000000000..8c430f73fd --- /dev/null +++ b/tools/modulecopier/internal/module_test.go @@ -0,0 +1,28 @@ +package internal_test + +import ( + . "github.com/stretchr/testify/assert" + "github.com/synapsecns/sanguine/tools/modulecopier/internal" + "os" + "path" + // required for copy test. + _ "github.com/ethereum/go-ethereum/common" +) + +// fileCheck is the file to check for. We check for .mailmap in ethereum because +// 1) we don't use it +// 2) it's relatively uncommon depiste being a git feature (https://git-scm.com/docs/git-check-mailmap) +// 3) it hasn't changed in 4 years. +// if you're seeing this test break, make sure this file wasn't deleted from ethereum. +const fileCheck = ".mailmap" +const ethModule = "github.com/ethereum/go-ethereum" + +// TestGetEthModulePath tests a fetch of the ethereum module path. +func (s GeneratorSuite) TestGetEthModulePath() { + ethModulePath, err := internal.GetModulePath(ethModule) + Nil(s.T(), err) + + if _, err := os.Stat(path.Join(ethModulePath, fileCheck)); os.IsNotExist(err) { + s.T().Errorf("expected to find module path for %s%s, did not find any. 
// GeneratorSuite is the test suite for the module copier internals.
type GeneratorSuite struct {
	*testsuite.TestSuite
}

// NewGeneratorSuite creates an end-to-end test suite.
func NewGeneratorSuite(tb testing.TB) *GeneratorSuite {
	tb.Helper()
	return &GeneratorSuite{
		TestSuite: testsuite.NewTestSuite(tb),
	}
}

// TestGeneratorSuite runs the suite through testify's runner.
func TestGeneratorSuite(t *testing.T) {
	suite.Run(t, NewGeneratorSuite(t))
}
// Package main provides a command line tool for copying modules.
package main

import (
	"github.com/synapsecns/sanguine/core/config"
	"github.com/synapsecns/sanguine/tools/modulecopier/cmd"
	"os"
)

// Build-time metadata; defaults are used for a plain `go build`/`go run`.
// NOTE(review): presumably overridden via -ldflags at release time — confirm
// against the repo's goreleaser configuration.
var (
	version = config.DefaultVersion
	commit  = config.DefaultCommit
	date    = config.DefaultDate
)

// main wires the build metadata into the cli entrypoint.
func main() {
	buildInfo := config.NewBuildInfo(version, commit, "modulecopier", date)

	cmd.Run(os.Args, buildInfo)
}
secrets.PUBLISH_TOKEN }} - # TODO: will change with new org - destination_repo: 'trajan0x/terraform-provider-iap' - tag_name: ${{ steps.tag_version.outputs.new_tag }} - strip_prefix: 'contrib/terraform-provider-iap/' - - - name: Copy Releases - if: ${{ steps.branch-name.outputs.is_default == 'true' && contains( steps.tag_version.outputs.new_tag, 'terraform-provider-helmproxy') }} - uses: docker://ghcr.io/synapsecns/sanguine/release-copier-action:latest - with: - github_token: ${{ secrets.PUBLISH_TOKEN }} - # TODO: will change with new org - destination_repo: 'trajan0x/terraform-provider-helmproxy' - tag_name: ${{ steps.tag_version.outputs.new_tag }} - strip_prefix: 'contrib/terraform-provider-helmproxy/' - - - name: Copy Releases - if: ${{ steps.branch-name.outputs.is_default == 'true' && contains( steps.tag_version.outputs.new_tag, 'terraform-provider-kubeproxy') }} - uses: docker://ghcr.io/synapsecns/sanguine/release-copier-action:latest - with: - github_token: ${{ secrets.PUBLISH_TOKEN }} - # TODO: will change with new org - destination_repo: 'trajan0x/terraform-provider-kubeproxy' - tag_name: ${{ steps.tag_version.outputs.new_tag }} - strip_prefix: 'contrib/terraform-provider-kubeproxy/' - name: Refresh Report Card if: steps.branch-name.outputs.is_default == 'true' diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index cb0237887e..314ccdaafc 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -21,7 +21,7 @@ jobs: - name: Run ShellCheck uses: ludeeus/action-shellcheck@master with: - ignore_paths: ./contrib/terraform-provider-iap/scripts/add-tfmac.sh ./contrib/terraform-provider-helmproxy/scripts/add-tfmac.sh ./contrib/terraform-provider-kubeproxy/scripts/add-tfmac.sh ./contrib/scripts/txdecoder.sh + ignore_paths: ./contrib/scripts/txdecoder.sh - name: Validate renovate uses: rinchsan/renovate-config-validator@v0.0.12 diff --git a/README.md b/README.md index 07a53ceff2..7056e194d4 100644 --- a/README.md +++ b/README.md 
@@ -54,11 +54,6 @@ root │ ├── git-changes-action: Github action for identifying changes in dependent modules in a go workspace │ ├── promexporter: Multi-service prometheus exporter │ ├── screener-api: Optional address screening api -│ ├── release-copier-action: Github action for copying releases from one repo to another -│ ├── terraform-provider-iap: Terraform provider used for bastion proxy tunneling -│ ├── terraform-provider-helmproxy: Terraform provider that allows helm to be proxied through an iap bastion proxy -│ ├── terraform-provider-kubeproxy: Terraform provider that allows kube to be proxied through an iap bastion proxy -│ ├── tfcore: Terraform core utilities + iap utilities ├── core: The Go core library with common utilities for use across the monorepo ├── ethergo: Go-based ethereum testing + common library ├── packages diff --git a/docker/release-copier-action.Dockerfile b/docker/release-copier-action.Dockerfile deleted file mode 100644 index 3bc951ee08..0000000000 --- a/docker/release-copier-action.Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -FROM alpine:latest as builder - -RUN apk add --no-cache ca-certificates -RUN update-ca-certificates - -# add a user here because addgroup and adduser are not available in scratch -RUN addgroup -S releasecopier \ - && adduser -S -u 10000 -g releasecopier releasecopier - - -FROM scratch - -LABEL org.label-schema.description="Release Copier Action Docker Image" -LABEL org.label-schema.name="ghcr.io/synapsecns/sanguine/contrib/release-copier-action" -LABEL org.label-schema.schema-version="1.0.0" -LABEL org.label-schema.vcs-url="https://github.com/synapsecns/sanguine" -LABEL org.opencontainers.image.source="https://github.com/synapsecns/sanguine" -LABEL org.opencontainers.image.description="Release Copier Docker image" - -COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ - -# copy users from builder -COPY --from=builder /etc/passwd /etc/passwd - -WORKDIR /release-copier-action -COPY 
release-copier-action /app/release-copier-action - -ENTRYPOINT ["/app/release-copier-action"] diff --git a/go.work b/go.work index 6ddc11062d..c851f0ee8c 100644 --- a/go.work +++ b/go.work @@ -5,12 +5,7 @@ use ( ./agents ./contrib/git-changes-action ./contrib/promexporter - ./contrib/release-copier-action ./contrib/screener-api - ./contrib/terraform-provider-helmproxy - ./contrib/terraform-provider-iap - ./contrib/terraform-provider-kubeproxy - ./contrib/tfcore ./core ./ethergo ./services/cctp-relayer diff --git a/go.work.sum b/go.work.sum index fc80eb6c99..365540f81c 100644 --- a/go.work.sum +++ b/go.work.sum @@ -180,6 +180,7 @@ cloud.google.com/go/bigquery v1.53.0 h1:K3wLbjbnSlxhuG5q4pntHv5AEbQM1QqHKGYgwFIq cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= cloud.google.com/go/bigquery v1.57.1 h1:FiULdbbzUxWD0Y4ZGPSVCDLvqRSyCIO6zKV7E2nf5uA= cloud.google.com/go/bigquery v1.57.1/go.mod h1:iYzC0tGVWt1jqSzBHqCr3lrRn0u13E8e+AqowBsDgug= +cloud.google.com/go/bigtable v1.2.0 h1:F4cCmA4nuV84V5zYQ3MKY+M1Cw1avHDuf3S/LcZPA9c= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= @@ -603,6 +604,8 @@ cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFV cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc= cloud.google.com/go/longrunning v0.5.2/go.mod h1:nqo6DQbNV2pXhGDbDMoN2bWz68MjZUzqv2YttZiveCs= +cloud.google.com/go/longrunning v0.5.4 h1:w8xEcbZodnA2BbW6sVirkkoC+1gP8wS57EUUgGS0GVg= +cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= cloud.google.com/go/managedidentities v1.3.0/go.mod 
h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/managedidentities v1.5.0 h1:ZRQ4k21/jAhrHBVKl/AY7SjgzeJwG1iZa+mJ82P+VNg= @@ -954,6 +957,7 @@ cloud.google.com/go/speech v1.19.0 h1:MCagaq8ObV2tr1kZJcJYgXYbIn8Ai5rp42tyGYw9rl cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= cloud.google.com/go/speech v1.21.0 h1:qkxNao58oF8ghAHE1Eghen7XepawYEN5zuZXYWaUTA4= cloud.google.com/go/speech v1.21.0/go.mod h1:wwolycgONvfz2EDU8rKuHRW3+wc9ILPsAWoikBEWavY= +cloud.google.com/go/storage v1.14.0 h1:6RRlFMv1omScs6iq2hfE3IvgE+l6RfJPampq8UZc5TU= cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI= @@ -1146,6 +1150,13 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4= github.com/Azure/azure-storage-blob-go v0.7.0 h1:MuueVOYkufCxJw5YZzF842DY2MBsp+hLuh2apKY0mck= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= +github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/logger v0.2.1 
h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= @@ -1158,6 +1169,7 @@ github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mo github.com/CloudyKit/jet/v3 v3.0.0 h1:1PwO5w5VCtlUUl+KTOBsTGZlhjWkcybsGaAau52tOy8= github.com/CloudyKit/jet/v6 v6.1.0 h1:hvO96X345XagdH1fAoBjpBYG4a1ghhL/QzalkduPuXk= github.com/CloudyKit/jet/v6 v6.1.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP/Lzo7Ro4= +github.com/DATA-DOG/go-sqlmock v1.3.3 h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190129172621-c8b1d7a94ddf h1:8F6fjL5iQP6sArGtPuXh0l6hggdcIpAm4ChjVJE4oTs= github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190129172621-c8b1d7a94ddf/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo= @@ -1170,6 +1182,7 @@ github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/vcs v1.13.3 h1:IIA2aBdXvfbIM+yl/eTnL4hb1XwdpvuQLglAix1gweE= github.com/Masterminds/vcs v1.13.3/go.mod h1:TiE7xuEjl1N4j016moRd6vezp6e6Lz23gypeXfzXeW8= +github.com/Microsoft/hcsshim v0.9.4 h1:mnUj0ivWy6UzbB1uLFqKR6F+ZyiDc7j4iGgHTpO+5+I= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3 
h1:4FA+QBaydEHlwxg0lMN3rhwoDaQy6LKhVWR4qvq4BuA= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= @@ -1178,6 +1191,7 @@ github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tN github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06 h1:KkH3I3sJuOLP3TjA/dfr4NAY8bghDwnXiU7cTKxQqo0= github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06/go.mod h1:7erjKLwalezA0k99cWs5L11HWOAPNjdUZ6RxH1BXbbM= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= github.com/Shopify/sarama v1.23.1 h1:XxJBCZEoWJtoWjf/xRbmGUpAmTZGnuuF0ON0EvxxBrs= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= @@ -1229,8 +1243,10 @@ github.com/aristanetworks/splunk-hec-go v0.3.3 h1:O7zlcm4ve7JvqTyEK3vSBh1LngLezr github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator 
v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/ashanbrown/forbidigo v1.1.0 h1:SJOPJyqsrVL3CvR0veFZFmIM0fXS/Kvyikqvfphd0Z4= github.com/ashanbrown/makezero v0.0.0-20210308000810-4155955488a0 h1:27owMIbvO33XL56BKWPy+SCU69I9wPwPXuMf5mAbVGU= github.com/awalterschulze/gographviz v2.0.3+incompatible h1:9sVEXJBJLwGX7EQVhLm2elIKCm7P2YHFC8v6096G09E= @@ -1250,6 +1266,7 @@ github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQ github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c h1:+0HFd5KSZ/mm3JmhmrDukiId5iR6w4+BdFtfSy4yWIc= github.com/bketelsen/crypt v0.0.4 h1:w/jqZtC9YD4DS/Vp9GhWfWcCpuAL58oTnLoI8vE9YHU= github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= @@ -1262,12 +1279,16 @@ github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl github.com/boombuler/barcode v1.0.1 h1:NDBbPmhS+EqABEs5Kg3n/5ZNjy73Pz7SIV+KCeqyXcs= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625 h1:ckJgFhFWywOx+YLEMIJsTb+NV6NexWICk5+AMSuz3ss= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1 h1:pgAtgj+A31JBVtEHu2uHuEx0n+2ukqUJnS2vVe5pQNA= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= 
github.com/btcsuite/goleveldb v1.0.0 h1:Tvd0BfvqX9o823q1j2UZ/epQo09eJh6dTcRp79ilIN4= github.com/btcsuite/snappy-go v1.0.0 h1:ZxaA6lo2EpxGddsA8JwWOcxlzRybb444sgmeJQMJGQE= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72 h1:fUmDBbSvv1uOzo/t8WaxZMVb7BxJ8JECo5lGoR9c5bA= github.com/bwesterb/go-ristretto v1.2.3 h1:1w53tCkGhCQ5djbat3+MH0BAQ5Kfgbt56UZQ/JMzngw= github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= @@ -1291,10 +1312,12 @@ github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmt github.com/chromedp/sysutil v1.0.0 h1:+ZxhTpfpZlmchB58ih/LBHX52ky7w2VhQVKQMucy3Ic= github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= 
+github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k= github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4= github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= @@ -1305,10 +1328,7 @@ github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4= -github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM= -github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoGMWEhDvS3zToKcDpRsLuRolQJBVGdozk= -github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM= github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 h1:IKgmqgMQlVJIZj19CdocBeSfSaiCbEBZGKODaixqtHM= github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= @@ -1320,7 +1340,10 @@ github.com/consensys/gnark-crypto v0.9.1-0.20230105202408-1a7a29904a7c h1:llSLg4 github.com/consensys/gnark-crypto v0.9.1-0.20230105202408-1a7a29904a7c/go.mod h1:CkbdF9hbRidRJYMRzmfX8TMOr95I2pYXRHF18MzRrvA= github.com/containerd/aufs v1.0.0 h1:2oeJiwX5HstO7shSrPZjrohJZLzK36wvpdmzDRkL/LY= github.com/containerd/btrfs v1.0.0 
h1:osn1exbzdub9L5SouXO5swW4ea/xVdJZ3wokxN5GrnA= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= +github.com/containerd/containerd v1.6.8 h1:h4dOFDwzHmqFEP754PgfgTeVXFnLiRc6kiqC7tplDJs= github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU= github.com/containerd/go-cni v1.1.6 h1:el5WPymG5nRRLQF1EfB97FWob4Tdc8INg8RZMaXWZlo= github.com/containerd/go-runc v1.0.0 h1:oU+lLv1ULm5taqgV/CJivypVODI4SUz1znWjv3nNYS0= @@ -1339,6 +1362,7 @@ github.com/coreos/go-etcd v2.0.0+incompatible h1:bXhRBIXoTm9BYHS3gE0TtQuyNZyeEMu github.com/coreos/go-iptables v0.6.0 h1:is9qnZMPYjLd8LYqmm/qlE+wwEgJIkTYdhV3rfZo4jk= github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a h1:W8b4lQ4tFF21aspRGoBuCNV6V2fFJBF+pm1J6OY8Lys= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -1380,7 +1404,13 @@ github.com/djherbis/atime v1.1.0 h1:rgwVbP/5by8BvvjBNrbh64Qz33idKT3pSnMSJsxhi0g= github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+XJPdBE= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/dmarkham/enumer v1.5.7 h1:xYJA/lGoniiuhZLASBUbpPjScUslfyDHUAMczeflCeg= 
+github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ= github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= @@ -1408,6 +1438,8 @@ github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQ github.com/esimonov/ifshort v1.0.2 h1:K5s1W2fGfkoWXsFlxBNqT6J0ZCncPaKrGM5qe0bni68= github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM= github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072 h1:DddqAaWDpywytcG8w/qoQ5sAN8X12d3Z3koB0C3Rxsc= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/ferranbt/fastssz v0.1.2 h1:Dky6dXlngF6Qjc+EfDipAkE83N5I5DE68bY6O0VLNPk= @@ 
-1430,6 +1462,7 @@ github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzP github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU= github.com/fullstorydev/grpcurl v1.6.0 h1:p8BB6VZF8O7w6MxGr3KJ9E6EVKaswCevSALK6FBtMzA= github.com/fvbommel/sortorder v1.0.1 h1:dSnXLt4mJYH25uDDGa3biZNQsozaUWDSWeKJ0qqFfzE= @@ -1442,9 +1475,11 @@ github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc github.com/gavv/httpexpect v2.0.0+incompatible h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8= github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732 h1:AB7YjNrzlVHsYz06zCULVV2zYCEft82P86dSmtwxKL0= github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732/go.mod h1:o/XfIXWi4/GqbQirfRm5uTbXMG5NpqxkxblnbZ+QM9I= +github.com/getkin/kin-openapi v0.61.0 h1:6awGqF5nG5zkVpMsAih1QH4VgzS8phTxECUWIFo7zko= github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9 h1:r5GgOLGbza2wVHRzK7aAj6lWZjfbAwiu/RDCVOKjRyM= github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd h1:r04MMPyLHj/QwZuMJ5+7tJcBr1AQjpiAK/rZWRrQT7o= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 
h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8= github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= @@ -1485,6 +1520,8 @@ github.com/go-redis/redis v6.15.8+incompatible h1:BKZuG6mCnRj5AOaWJXoCgf6rqTYnYJ github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g= github.com/go-toolsmith/astcopy v1.0.0 h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8= github.com/go-toolsmith/astequal v1.0.0 h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ= @@ -1496,10 +1533,13 @@ github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUD github.com/go-toolsmith/typep v1.0.2 h1:8xdsa1+FSIH/RhEkgnD1j2CJOy5mNllW1Q9tRiYwvlk= github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo= github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= +github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= github.com/gobwas/pool v0.2.1 
h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk= github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= github.com/goccmack/gocc v0.0.0-20230228185258-2292f9e40198 h1:FSii2UQeSLngl3jFoR4tUKZLprO7qUlh/TKKticc0BM= @@ -1520,12 +1560,14 @@ github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9 github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZBf8NjltjWihK2QfBBBZuv91cMFfDHw= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= github.com/golangci/golangci-lint v1.40.1 h1:pBrCqt9BgI9LfGCTKRTSe1DfMjR6BkOPERPaXJYXA6Q= +github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219 h1:utua3L2IbQJmauC5IXdEA547bcoU5dozgQAfc8Onsg4= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= github.com/golangci/misspell v0.3.5 h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo= github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5 h1:c9Mqqrm/Clj5biNaG7rABrmwUq88nHh0uABo2b/WYmc= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e h1:KhcknUwkWHKZPbFy2P7jH5LKJ3La+0ZeknkkmrSgqb0= +github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38 h1:y0Wmhvml7cGnzPa9nocn/fMraMH/lMDdeG+rkx4VgYY= github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac 
h1:Q0Jsdxl5jbxouNs1TQYt0gxesYMU4VXRbsTlgDloZ50= github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82 h1:EvokxLQsaaQjcWVWSV38221VAK7qc2zhaO17bKys/18= @@ -1536,22 +1578,29 @@ github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9 h1:7qnwS9+oeSiOIsiUMa github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9 h1:V2IgdyerlBa/MxaEFRbV5juy/C3MGdj4ePi+g6ePIp4= github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/cel-go v0.12.5 h1:DmzaiSgoaqGCjtpPQWl26/gND+yRpim56H1jCVev6d8= github.com/google/cel-go v0.12.5/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= github.com/google/certificate-transparency-go v1.1.1 h1:6JHXZhXEvilMcTjR4MGZn5KV0IRkcFl4CJx5iHVhjFE= github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A= github.com/google/flatbuffers v2.0.8+incompatible h1:ivUb1cGomAB101ZM1T0nOiWz9pSrTMoa9+EiY7igmkM= github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-containerregistry v0.5.1 h1:/+mFTs4AlwsJ/mJe8NDtKb7BxLtbZFpcn8vDsneEkwQ= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9 h1:OF1IPgv+F4NmqmJ98KTjdN97Vs1JxDPB3vbmYzV2dpk= 
github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= +github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= +github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= github.com/google/safehtml v0.0.2 h1:ZOt2VXg4x24bW0m2jtzAOkhoXV0iM8vNKc0paByCZqM= github.com/google/safehtml v0.0.2/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU= @@ -1570,12 +1619,14 @@ github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254 h1:Nb2aRlC github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY= github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= +github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg= github.com/gorilla/securecookie v1.1.1 
h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gostaticanalysis/analysisutil v0.4.1 h1:/7clKqrVfiVwiBQLM0Uke4KvXnO6JcCTS7HwF2D6wG8= github.com/gostaticanalysis/comment v1.4.1 h1:xHopR5L2lRz6OsjH4R2HG5wRhW9ySl3FsHIvi5pcXwc= github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5 h1:rx8127mFPqXXsfPSo8BwnIU97MKFZc89WHAHt8PwDVY= github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= @@ -1584,8 +1635,10 @@ github.com/guptarohit/asciigraph v0.5.5/go.mod h1:dYl5wwK4gNsnFf9Zp+l06rFiDZ5YtX github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= github.com/hashicorp/consul/api v1.1.0 h1:BNQPM9ytxj6jbjjdRPioQ94T6YXriSopn0i8COv6SRA= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-getter v1.5.0 h1:ciWJaeZWSMbc5OiLMpKp40MKFPqO44i0h3uyfXPBkkk= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM= github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-msgpack v0.5.3 
h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= @@ -1594,11 +1647,13 @@ github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82k github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go.net v0.0.1 h1:sNCoNyDEvN1xa+X0baata4RdcpKwcMS6DH+xwfqPgjw= github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/mdns v1.0.0 h1:WhIgCr5a7AaVH6jPUwjtRuuE7/RDufnUvzIr48smyxs= github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M= github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= @@ -1612,9 +1667,11 @@ github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:q github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91 h1:KyZDvZ/GGn+r+Y3DKZ7UOQ/TP4xV6HNkrwiVMB1GnNY= github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle 
v0.0.0-20200824232613-28f6c0f3b639 h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI= github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab h1:BA4a7pe6ZTd9F8kXETBoijjFJ/ntaa//1wiH9BZu4zU= github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/influxdata/flux v0.65.1 h1:77BcVUCzvN5HMm8+j9PRBQ4iZcu98Dl4Y9rf+J5vhnc= github.com/influxdata/influxdb v1.8.3 h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vAiBg8= github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385 h1:ED4e5Cc3z5vSN2Tz2GkOHN7vs4Sxe2yds6CXvDnvZFE= @@ -1682,12 +1739,14 @@ github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+U github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1 h1:ujPKutqRlJtcfWk6toYVYagwra7HQHbXOaS171b4Tg8= +github.com/jessevdk/go-flags v1.4.1-0.20181029123624-5de817a9aa20 h1:dAOsPLhnBzIyxu0VvmnKjlNcIlgMK+erD6VRHDtweMI= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jgautheron/goconst v1.4.0 h1:hp9XKUpe/MPyDamUbfsrGpe+3dnY2whNK4EtB86dvLM= github.com/jhump/gopoet v0.1.0 h1:gYjOPnzHd2nzB37xYQZxj4EIQNpBrBskRqQQ3q4ZgSg= github.com/jhump/goprotoc v0.5.0 h1:Y1UgUX+txUznfqcGdDef8ZOVlyQvnV0pKWZH08RmZuo= github.com/jingyugao/rowserrcheck v0.0.0-20210315055705-d907ca737bb1 h1:4Rlb26NqzNtbDH69CRpr0vZooj3jAlXTycWCX3xRYAY= 
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= +github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA= github.com/joefitzgerald/rainbow-reporter v0.1.0 h1:AuMG652zjdzI0YCCnXAqATtRBpGXMcAnrajcaTrSeuo= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI= @@ -1738,6 +1797,7 @@ github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba h1:NARVGAAgEXvoM github.com/kilic/bls12-381 v0.1.0 h1:encrdjqKMEvabVQ7qYOKu1OvhqpK4s47wDYtNiPtlp4= github.com/kilic/bls12-381 v0.1.0/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig= github.com/kisielk/errcheck v1.6.0 h1:YTDO4pNy7AUN/021p+JGHycQyYNIyMoenM1YDVK6RlY= +github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE= github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= @@ -1929,6 +1989,7 @@ github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk= +github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y= github.com/mitchellh/cli v1.1.4 h1:qj8czE26AU4PbiaPXK5uVmMSM+V5BYsFBiM9HhGRLUA= github.com/mitchellh/cli v1.1.4/go.mod 
h1:vTLESy5mRhKOs9KDp0/RATawxP1UqBmdrpVRMnpcvKQ= github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= @@ -1941,12 +2002,16 @@ github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqky github.com/mmcloughlin/avo v0.5.0 h1:nAco9/aI9Lg2kiuROBY6BhCI/z0t5jEvJfjWbL8qXLU= github.com/mmcloughlin/avo v0.5.0/go.mod h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM= github.com/mmcloughlin/profile v0.1.1 h1:jhDmAqPyebOsVDOCICJoINoLb/AnLBaUw58nFzxWS2w= +github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/signal v0.6.0 h1:aDpY94H8VlhTGa9sNYUFCFsMZIUh5wm0B6XkIoJj/iY= github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZc= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5 h1:8Q0qkMVC/MmWkpIdlvZgcv2o2jrlF6zqVOh7W5YHdMA= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0= github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs= github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1 h1:29NKShH4TWd3lxCDUhS4Xe16EWMA753dtIxYtwddklU= github.com/mozilla/tls-observatory v0.0.0-20210209181001-cf43108d6880 h1:DXaIt8v4XXkFoVZXkG/PjLS5Rz3I2yoflOQrnuGgJeA= @@ -1960,6 +2025,7 @@ github.com/multiformats/go-multiaddr-net v0.2.0 h1:MSXRGN0mFymt6B1yo/6BPnIRpLPEn github.com/multiformats/go-multiaddr-net v0.2.0/go.mod 
h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-proto-validators v0.2.0 h1:F6LFfmgVnfULfaRsQWBbe7F7ocuHCr9+7m+GAeDzNbQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= @@ -1994,6 +2060,8 @@ github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= +github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/openconfig/gnmi v0.0.0-20190823184014-89b2bf29312c h1:a380JP+B7xlMbEQOlha1buKhzBPXFqgFXplyWCEIGEY= github.com/openconfig/reference v0.0.0-20190727015836-8dfd928c9696 h1:yHCGAHg2zMaW8olLrqEt3SAHGcEx2aJPEQWMRCyravY= @@ -2020,6 +2088,7 @@ github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaF github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= github.com/petar/GoLLRB 
v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/phpdave11/gofpdf v1.4.2 h1:KPKiIbfwbvC/wOncwhrpRdXVj2CZTCFlw4wnoyjtHfQ= @@ -2102,10 +2171,12 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgc github.com/pkg/profile v1.2.1 h1:F++O52m40owAmADcojzM+9gyjmMOY/T4oYJkgFDH8RE= github.com/pkg/sftp v1.13.1 h1:I2qBYMChEhIjOgazfJmV3/mZM256btk6wkCDRmW7JYs= github.com/polyfloyd/go-errorlint v0.0.0-20210418123303-74da32850375 h1:uuOfAQo7em74dKh41UzjlQ6dXmE9wYxjvUcfg2EHTDw= +github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w= github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021 h1:0XM1XL/OFFJjXsYXlG30spTkV/E9+gmd5GD1w2HE8xM= github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= @@ -2159,6 +2230,7 @@ github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4 h1:BN/Nyn2nWMo 
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 h1:3hxavr+IHMsQBrYUPQM5v0CgENFktkkbg1sfpgM3h20= github.com/rs/xid v1.3.0 h1:6NjYksEUlhurdVehpc7S7dk6DAmcKv8V9gG0FsVN2U4= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245 h1:K1Xf3bKttbF+koVGaX5xngRIZ5bVjbmPnaxE/dR08uY= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= @@ -2220,7 +2292,13 @@ github.com/sourcegraph/go-diff v0.6.1 h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0H github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e h1:qpG93cPwA5f7s/ZPBJnGOYQNK/vKsaDaseuKT5Asee8= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/viper v1.12.0 
h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= github.com/ssgreg/nlreturn/v2 v2.1.0 h1:6/s4Rc49L6Uo6RLjhWZGBpWWjfzk2yrf1nIW8m4wgVA= @@ -2326,23 +2404,33 @@ github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCO github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M= github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI= github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo= +go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/client/pkg/v3 v3.5.0 h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU= go.etcd.io/etcd/client/pkg/v3 v3.5.4 h1:lrneYvz923dvC14R54XcA7FXoZ3mlGZAgmwhfm7HqOg= go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod 
h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0 h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs= go.etcd.io/etcd/client/v2 v2.305.4 h1:Dcx3/MYyfKcPNLpR4VVQUP5KgYrBeJtktBwEKkw08Ao= go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU= +go.etcd.io/etcd/client/v3 v3.5.0 h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek= go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= +go.etcd.io/etcd/pkg/v3 v3.5.0 h1:ntrg6vvKRW26JRmHTE0iNlDgYK6JX3hg/4cD62X0ixk= go.etcd.io/etcd/pkg/v3 v3.5.4 h1:V5Dvl7S39ZDwjkKqJG2BfXgxZ3QREqqKifWQgIw5IM0= go.etcd.io/etcd/pkg/v3 v3.5.4/go.mod h1:OI+TtO+Aa3nhQSppMbwE4ld3uF1/fqqwbpfndbbrEe0= +go.etcd.io/etcd/raft/v3 v3.5.0 h1:kw2TmO3yFTgE+F0mdKkG7xMxkit2duBDa2Hu6D/HMlw= go.etcd.io/etcd/raft/v3 v3.5.4 h1:YGrnAgRfgXloBNuqa+oBI/aRZMcK/1GS6trJePJ/Gqc= go.etcd.io/etcd/raft/v3 v3.5.4/go.mod h1:SCuunjYvZFC0fBX0vxMSPjuZmpcSk+XaAcMrD6Do03w= +go.etcd.io/etcd/server/v3 v3.5.0 h1:jk8D/lwGEDlQU9kZXUFMSANkE22Sg5+mW27ip8xcF9E= go.etcd.io/etcd/server/v3 v3.5.4 h1:CMAZd0g8Bn5NRhynW6pKhc4FRg41/0QYy3d7aNm9874= go.etcd.io/etcd/server/v3 v3.5.4/go.mod h1:S5/YTU15KxymM5l3T6b09sNOHPXqGYIZStpuuGbb65c= go.mongodb.org/mongo-driver v1.11.1 h1:QP0znIRTuL0jf1oBQoAoM0C6ZJfBK4kx0Uumtv1A7w8= @@ -2459,6 +2547,8 @@ golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.6.0 
h1:DJy6UzXbahnGUf1ujUNkh/NEtK14qMo2nvlBPs4U5yw= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= @@ -2478,6 +2568,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go. google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0/go.mod h1:CAny0tYF+0/9rmDB9fahA9YLzX3+AEVl1qXbv5hhj6c= google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405 h1:o4S3HvTUEXgRsNSUQsALDVog0O9F/U1JJlHmmUN8Uas= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405/go.mod h1:GRUCuLdzVqZte8+Dl/D4N25yLzcGqqWaYkeVOwulFqw= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= @@ -2494,6 +2586,7 @@ gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNat gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a h1:stTHdEoWg1pQ8riaP5ROrjS6zy6wewH/Q2iwnLCQUXY= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25 h1:Ev7yu1/f6+d+b3pi5vPdRPc6nNtP1umSfcWiEfRqv6I= gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= gopkg.in/errgo.v2 v2.1.0 
h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= @@ -2501,6 +2594,7 @@ gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0= gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= @@ -2518,14 +2612,23 @@ gotest.tools/gotestsum v1.8.2 h1:szU3TaSz8wMx/uG+w/A2+4JUPwH903YYaMI9yOOYAyI= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919 h1:tmXTu+dfa+d9Evp8NpJdgOy6+rt8/x4yG7qPBrtNfLY= honnef.co/go/tools v0.2.2 h1:MNh1AVMyVX23VUHE2O27jm6lNj3vjO5DexS4A1xvnzk= honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +k8s.io/api v0.22.5 h1:xk7C+rMjF/EGELiD560jdmwzrB788mfcHiNbMQLIVI8= +k8s.io/apiserver v0.22.5 h1:71krQxCUz218ecb+nPhfDsNB6QgP1/4EMvi1a2uYBlg= +k8s.io/client-go v0.22.5 h1:I8Zn/UqIdi2r02aZmhaJ1hqMxcpfJ3t5VqvHtctHYFo= +k8s.io/code-generator v0.19.7 h1:kM/68Y26Z/u//TFc1ggVVcg62te8A2yQh57jBfD0FWQ= k8s.io/code-generator v0.25.5 h1:K3MSqc27VT6fGJtVlE037N2dGmtqyhZi3S+1GkrKH+c= k8s.io/code-generator v0.25.5/go.mod h1:aDxzxJynLKQkaa117y0FFcgZ5jG8+GobxZ2JUntmvKk= +k8s.io/component-base v0.22.5 h1:U0eHqZm7mAFE42hFwYhY6ze/MmVaW00JpMrzVsQmzYE= k8s.io/component-helpers v0.24.2 h1:gtXmI/TjVINtkAdZn7m5p8+Vd0Mk4d1q8kwJMMLBdwY= +k8s.io/cri-api v0.23.1 h1:0DHL/hpTf4Fp+QkUXFefWcp1fhjXr9OlNdY9X99c+O8= k8s.io/cri-api v0.25.0 h1:INwdXsCDSA/0hGNdPxdE2dQD6ft/5K1EaKXZixvSQxg= k8s.io/cri-api v0.25.0/go.mod h1:J1rAyQkSJ2Q6I+aBMOVgg2/cbbebso6FNa0UagiR0kc= 
+k8s.io/gengo v0.0.0-20201113003025-83324d819ded h1:JApXBKYyB7l9xx+DK7/+mFjC7A9Bt5A93FPvFD0HIFE= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 h1:TT1WdmqqXareKxZ/oNXEUSwKlLiHzPMyB0t8BaFeBYI= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= +k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= k8s.io/kubernetes v1.13.0 h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8= k8s.io/metrics v0.24.2 h1:3lgEq973VGPWAEaT9VI/p0XmI0R5kJgb/r9Ufr5fz8k= moul.io/http2curl v1.0.0 h1:6XwpyZOYsgZJrU8exnG87ncVkU1FVCcTRpwzOkTDUi8= @@ -2537,14 +2640,22 @@ mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7 h1:HT3e4Krq+IE44tiN36RvVEb6t nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= nullprogram.com/x/optparse v1.0.0 h1:xGFgVi5ZaWOnYdac2foDT3vg0ZZC9ErXFV57mr4OHrI= +rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 h1:fmRfl9WJ4ApJn7LxNuED4m0t18qivVQOxP6aAYG9J6c= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 h1:LYqFq+6Cj2D0gFfrJvL7iElD4ET6ir3VDdhDdTK7rgc= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33/go.mod h1:soWkSNf2tZC7aMibXEqVhCd73GOY5fJikn8qbdzemB0= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= 
+sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/cmd/config v0.10.6 h1:Qjs7z/Q1NrVmW86tavmhM7wZtgWJ7aitLMARlUKrj98= sigs.k8s.io/kustomize/kustomize/v4 v4.5.4 h1:rzGrL+DA4k8bT6SMz7/U+2z3iiZf1t2RaYJWx8OeTmE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sourcegraph.com/sourcegraph/go-diff v0.5.0 h1:eTiIR0CoWjGzJcnQ3OkhIl/b9GJovq4lSAVRt0ZFEG8= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4 h1:JPJh2pk3+X4lXAkZIk2RuE/7/FoK9maXw+TNPJhVS/c= diff --git a/tools/go.mod b/tools/go.mod index f07034b722..310d2f57d4 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -16,7 +16,6 @@ require ( github.com/synapsecns/sanguine/core v0.0.0-00010101000000-000000000000 github.com/thoas/go-funk v0.9.0 github.com/urfave/cli/v2 v2.27.1 - golang.org/x/exp v0.0.0-20240213143201-ec583247a57a golang.org/x/mod v0.15.0 golang.org/x/tools v0.18.0 ) @@ -84,6 +83,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.19.0 // indirect + golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect golang.org/x/net v0.21.0 // indirect golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect From 756326716c0a738a516be2b2aabd1ffea44c6471 Mon Sep 17 00:00:00 2001 From: Trajan0x Date: Sat, 8 Jun 2024 14:56:07 -0400 Subject: [PATCH 5/7] cleanup --- .github/workflows/go.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 8cc340c9af..e0b67c2c7f 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -523,18 +523,18 @@ jobs: 
# Foundry is required for flattening - name: Install Foundry uses: foundry-rs/foundry-toolchain@v1 - if: ${{ contains(matrix.package, 'agents') || contains(matrix.package, 'services/rfq') + if: ${{ contains(matrix.package, 'agents') || contains(matrix.package, 'services/rfq') }} with: version: nightly - name: Install Node Dependencies run: yarn install --immutable - if: ${{ contains(matrix.package, 'agents') || contains(matrix.package, 'services/rfq') + if: ${{ contains(matrix.package, 'agents') || contains(matrix.package, 'services/rfq') }} # Generate flattened files - name: Run flattener run: npx lerna exec npm run build:go - if: ${{ contains(matrix.package, 'agents') || contains(matrix.package, 'services/rfq') + if: ${{ contains(matrix.package, 'agents') || contains(matrix.package, 'services/rfq') }} # Setup Go - uses: actions/setup-go@v4 From ab88486509da62ecc743182b6160620fda3d74e5 Mon Sep 17 00:00:00 2001 From: Trajan0x Date: Sat, 8 Jun 2024 14:57:21 -0400 Subject: [PATCH 6/7] stop building bundle [goreleaser] --- tools/.goreleaser.yml | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/tools/.goreleaser.yml b/tools/.goreleaser.yml index 56b547a7fc..bec1e4a56d 100644 --- a/tools/.goreleaser.yml +++ b/tools/.goreleaser.yml @@ -40,24 +40,6 @@ builds: goarch: - amd64 - - id: bundle-amd64 - binary: bundle-amd64 - env: - # probably not neccesary for this module - - CGO_CFLAGS=-I/usr/local/include - - CGO_ENABLED=1 - - CGO_LDFLAGS=-L/usr/local/lib -lrocksdb -lstdc++ -lm -lz -lsnappy -lzstd -lbz2 - # see: https://github.com/cosmos/iavl/pull/465/files - ldflags: - - -s -w - main: bundle/main.go - flags: - - -tags=builtin_static,rocksdb,static # probably not neccesary for this module - goos: - - linux - goarch: - - amd64 - - id: resolver-amd64 binary: resolver-amd64 env: From c8ec58b8a637f9bcd9c48d67a6c15c17ff229c9f Mon Sep 17 00:00:00 2001 From: Trajan0x Date: Sat, 8 Jun 2024 14:58:56 -0400 Subject: [PATCH 7/7] add move notice [goreleaser] 
--- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 7056e194d4..d1ec28cea0 100644 --- a/README.md +++ b/README.md @@ -77,11 +77,12 @@ root │ ├── omnirpc: Latency aware RPC Client used across multiple-chains at once ├── tools │ ├── abigen: Used to generate abigen bindings for go -│ ├── bundle: Modified version of go bundler with improved shadowing support │ ├── module copier: Used to copy internal modules and export methods for testing │ ├── revertresolver: Converts hexified solidity errors into their underlying errors. +Note: Terraform related modules have been moved to [this repository](https://github.com/synapsecns/tf-providers) + ## Setup Clone the repository, open it, and install nodejs packages with `yarn`: